/* $OpenBSD: if_ixl.c,v 1.102 2024/10/30 18:02:45 jan Exp $ */

/*
 * Copyright (c) 2013-2015, Intel Corporation
 * All rights reserved.

 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  3. Neither the name of the Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2016,2017 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bpfilter.h"
#include "kstat.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/timeout.h>
#include <sys/task.h>
#include <sys/syslog.h>
#include <sys/intrmap.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/route.h>
#include <net/toeplitz.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#if NKSTAT > 0
#include <sys/kstat.h>
#endif

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/tcp.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#ifdef __sparc64__
#include <dev/ofw/openfirm.h>
#endif

#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE 64
#endif

#define IXL_MAX_VECTORS 8 /* XXX this is pretty arbitrary */

#define I40E_MASK(mask, shift) ((mask) << (shift))
#define I40E_PF_RESET_WAIT_COUNT 200
#define I40E_AQ_LARGE_BUF 512

/* bitfields for Tx queue mapping in QTX_CTL */
#define I40E_QTX_CTL_VF_QUEUE 0x0
#define I40E_QTX_CTL_VM_QUEUE 0x1
#define I40E_QTX_CTL_PF_QUEUE 0x2

#define I40E_QUEUE_TYPE_EOL 0x7ff
#define I40E_INTR_NOTX_QUEUE 0

#define I40E_QUEUE_TYPE_RX 0x0
#define I40E_QUEUE_TYPE_TX 0x1
#define I40E_QUEUE_TYPE_PE_CEQ 0x2
#define I40E_QUEUE_TYPE_UNKNOWN 0x3

#define I40E_ITR_INDEX_RX 0x0
#define I40E_ITR_INDEX_TX 0x1
#define I40E_ITR_INDEX_OTHER 0x2
#define I40E_ITR_INDEX_NONE 0x3

#include <dev/pci/if_ixlreg.h>

#define I40E_INTR_NOTX_QUEUE 0
#define I40E_INTR_NOTX_INTR 0
#define I40E_INTR_NOTX_RX_QUEUE 0
#define I40E_INTR_NOTX_TX_QUEUE 1
#define I40E_INTR_NOTX_RX_MASK I40E_PFINT_ICR0_QUEUE_0_MASK
#define I40E_INTR_NOTX_TX_MASK I40E_PFINT_ICR0_QUEUE_1_MASK

struct ixl_aq_desc {
	uint16_t iaq_flags;
#define IXL_AQ_DD (1U << 0)
#define IXL_AQ_CMP (1U << 1)
#define IXL_AQ_ERR (1U << 2)
#define IXL_AQ_VFE (1U << 3)
#define IXL_AQ_LB (1U << 9)
#define IXL_AQ_RD (1U << 10)
#define IXL_AQ_VFC (1U << 11)
#define IXL_AQ_BUF (1U << 12)
#define IXL_AQ_SI (1U << 13)
#define IXL_AQ_EI (1U << 14)
#define IXL_AQ_FE (1U << 15)
#define IXL_AQ_FLAGS_FMT "\020" "\020FE" "\017EI" "\016SI" "\015BUF" \
			    "\014VFC" "\013RD" "\012LB" "\004VFE" \
			    "\003ERR" "\002CMP" "\001DD"

	uint16_t iaq_opcode;

	uint16_t iaq_datalen;
	uint16_t iaq_retval;

	uint64_t iaq_cookie;

	uint32_t iaq_param[4];
/*	iaq_data_hi iaq_param[2] */
/*	iaq_data_lo iaq_param[3] */
} __packed __aligned(8);

/* aq commands */
#define IXL_AQ_OP_GET_VERSION 0x0001
#define IXL_AQ_OP_DRIVER_VERSION 0x0002
#define IXL_AQ_OP_QUEUE_SHUTDOWN 0x0003
#define IXL_AQ_OP_SET_PF_CONTEXT 0x0004
#define IXL_AQ_OP_GET_AQ_ERR_REASON 0x0005
#define IXL_AQ_OP_REQUEST_RESOURCE 0x0008
#define IXL_AQ_OP_RELEASE_RESOURCE 0x0009
#define IXL_AQ_OP_LIST_FUNC_CAP 0x000a
#define IXL_AQ_OP_LIST_DEV_CAP 0x000b
#define IXL_AQ_OP_MAC_ADDRESS_READ 0x0107
#define IXL_AQ_OP_CLEAR_PXE_MODE 0x0110
#define IXL_AQ_OP_SWITCH_GET_CONFIG 0x0200
#define IXL_AQ_OP_RX_CTL_READ 0x0206
#define IXL_AQ_OP_RX_CTL_WRITE 0x0207
#define IXL_AQ_OP_ADD_VSI 0x0210
#define IXL_AQ_OP_UPD_VSI_PARAMS 0x0211
#define IXL_AQ_OP_GET_VSI_PARAMS 0x0212
#define IXL_AQ_OP_ADD_VEB 0x0230
#define IXL_AQ_OP_UPD_VEB_PARAMS 0x0231
#define IXL_AQ_OP_GET_VEB_PARAMS 0x0232
#define IXL_AQ_OP_ADD_MACVLAN 0x0250
#define IXL_AQ_OP_REMOVE_MACVLAN 0x0251
#define IXL_AQ_OP_SET_VSI_PROMISC 0x0254
#define IXL_AQ_OP_PHY_GET_ABILITIES 0x0600
#define IXL_AQ_OP_PHY_SET_CONFIG 0x0601
#define IXL_AQ_OP_PHY_SET_MAC_CONFIG 0x0603
#define IXL_AQ_OP_PHY_RESTART_AN 0x0605
#define IXL_AQ_OP_PHY_LINK_STATUS 0x0607
#define IXL_AQ_OP_PHY_SET_EVENT_MASK 0x0613
#define IXL_AQ_OP_PHY_SET_REGISTER 0x0628
#define IXL_AQ_OP_PHY_GET_REGISTER 0x0629
#define IXL_AQ_OP_LLDP_GET_MIB 0x0a00
#define IXL_AQ_OP_LLDP_MIB_CHG_EV 0x0a01
#define IXL_AQ_OP_LLDP_ADD_TLV 0x0a02
#define IXL_AQ_OP_LLDP_UPD_TLV 0x0a03
#define IXL_AQ_OP_LLDP_DEL_TLV 0x0a04
#define IXL_AQ_OP_LLDP_STOP_AGENT 0x0a05
#define IXL_AQ_OP_LLDP_START_AGENT 0x0a06
#define IXL_AQ_OP_LLDP_GET_CEE_DCBX 0x0a07
#define IXL_AQ_OP_LLDP_SPECIFIC_AGENT 0x0a09
#define IXL_AQ_OP_SET_RSS_KEY 0x0b02 /* 722 only */
#define IXL_AQ_OP_SET_RSS_LUT 0x0b03 /* 722 only */
#define IXL_AQ_OP_GET_RSS_KEY 0x0b04 /* 722 only */
#define IXL_AQ_OP_GET_RSS_LUT 0x0b05 /* 722 only */

struct ixl_aq_mac_addresses {
	uint8_t pf_lan[ETHER_ADDR_LEN];
	uint8_t pf_san[ETHER_ADDR_LEN];
	uint8_t port[ETHER_ADDR_LEN];
	uint8_t pf_wol[ETHER_ADDR_LEN];
} __packed;

#define IXL_AQ_MAC_PF_LAN_VALID (1U << 4)
#define IXL_AQ_MAC_PF_SAN_VALID (1U << 5)
#define IXL_AQ_MAC_PORT_VALID (1U << 6)
#define IXL_AQ_MAC_PF_WOL_VALID (1U << 7)

struct ixl_aq_capability {
	uint16_t cap_id;
#define IXL_AQ_CAP_SWITCH_MODE 0x0001
#define IXL_AQ_CAP_MNG_MODE 0x0002
#define IXL_AQ_CAP_NPAR_ACTIVE 0x0003
#define IXL_AQ_CAP_OS2BMC_CAP 0x0004
#define IXL_AQ_CAP_FUNCTIONS_VALID 0x0005
#define IXL_AQ_CAP_ALTERNATE_RAM 0x0006
#define IXL_AQ_CAP_WOL_AND_PROXY 0x0008
#define IXL_AQ_CAP_SRIOV 0x0012
#define IXL_AQ_CAP_VF 0x0013
#define IXL_AQ_CAP_VMDQ 0x0014
#define IXL_AQ_CAP_8021QBG 0x0015
#define IXL_AQ_CAP_8021QBR 0x0016
#define IXL_AQ_CAP_VSI 0x0017
#define IXL_AQ_CAP_DCB 0x0018
#define IXL_AQ_CAP_FCOE 0x0021
#define IXL_AQ_CAP_ISCSI 0x0022
#define IXL_AQ_CAP_RSS 0x0040
#define IXL_AQ_CAP_RXQ 0x0041
#define IXL_AQ_CAP_TXQ 0x0042
#define IXL_AQ_CAP_MSIX 0x0043
#define IXL_AQ_CAP_VF_MSIX 0x0044
#define IXL_AQ_CAP_FLOW_DIRECTOR 0x0045
#define IXL_AQ_CAP_1588 0x0046
#define IXL_AQ_CAP_IWARP 0x0051
#define IXL_AQ_CAP_LED 0x0061
#define IXL_AQ_CAP_SDP 0x0062
#define IXL_AQ_CAP_MDIO 0x0063
#define IXL_AQ_CAP_WSR_PROT 0x0064
#define IXL_AQ_CAP_NVM_MGMT 0x0080
#define IXL_AQ_CAP_FLEX10 0x00F1
#define IXL_AQ_CAP_CEM 0x00F2
	uint8_t major_rev;
	uint8_t minor_rev;
	uint32_t number;
	uint32_t logical_id;
	uint32_t phys_id;
	uint8_t _reserved[16];
} __packed __aligned(4);

#define IXL_LLDP_SHUTDOWN 0x1

struct ixl_aq_switch_config {
	uint16_t num_reported;
	uint16_t num_total;
	uint8_t _reserved[12];
} __packed __aligned(4);

struct ixl_aq_switch_config_element {
	uint8_t type;
#define IXL_AQ_SW_ELEM_TYPE_MAC 1
#define IXL_AQ_SW_ELEM_TYPE_PF 2
#define IXL_AQ_SW_ELEM_TYPE_VF 3
#define IXL_AQ_SW_ELEM_TYPE_EMP 4
#define IXL_AQ_SW_ELEM_TYPE_BMC 5
#define IXL_AQ_SW_ELEM_TYPE_PV 16
#define IXL_AQ_SW_ELEM_TYPE_VEB 17
#define IXL_AQ_SW_ELEM_TYPE_PA 18
#define IXL_AQ_SW_ELEM_TYPE_VSI 19
	uint8_t revision;
#define IXL_AQ_SW_ELEM_REV_1 1
	uint16_t seid;

	uint16_t uplink_seid;
	uint16_t downlink_seid;

	uint8_t _reserved[3];
	uint8_t connection_type;
#define IXL_AQ_CONN_TYPE_REGULAR 0x1
#define IXL_AQ_CONN_TYPE_DEFAULT 0x2
#define IXL_AQ_CONN_TYPE_CASCADED 0x3

	uint16_t scheduler_id;
	uint16_t element_info;
} __packed __aligned(4);

#define IXL_PHY_TYPE_SGMII 0x00
#define IXL_PHY_TYPE_1000BASE_KX 0x01
#define IXL_PHY_TYPE_10GBASE_KX4 0x02
#define IXL_PHY_TYPE_10GBASE_KR 0x03
#define IXL_PHY_TYPE_40GBASE_KR4 0x04
#define IXL_PHY_TYPE_XAUI 0x05
#define IXL_PHY_TYPE_XFI 0x06
#define IXL_PHY_TYPE_SFI 0x07
#define IXL_PHY_TYPE_XLAUI 0x08
#define IXL_PHY_TYPE_XLPPI 0x09
#define IXL_PHY_TYPE_40GBASE_CR4_CU 0x0a
#define IXL_PHY_TYPE_10GBASE_CR1_CU 0x0b
#define IXL_PHY_TYPE_10GBASE_AOC 0x0c
#define IXL_PHY_TYPE_40GBASE_AOC 0x0d
#define IXL_PHY_TYPE_100BASE_TX 0x11
#define IXL_PHY_TYPE_1000BASE_T 0x12
#define IXL_PHY_TYPE_10GBASE_T 0x13
#define IXL_PHY_TYPE_10GBASE_SR 0x14
#define IXL_PHY_TYPE_10GBASE_LR 0x15
#define IXL_PHY_TYPE_10GBASE_SFPP_CU 0x16
#define IXL_PHY_TYPE_10GBASE_CR1 0x17
#define IXL_PHY_TYPE_40GBASE_CR4 0x18
#define IXL_PHY_TYPE_40GBASE_SR4 0x19
#define IXL_PHY_TYPE_40GBASE_LR4 0x1a
#define IXL_PHY_TYPE_1000BASE_SX 0x1b
#define IXL_PHY_TYPE_1000BASE_LX 0x1c
#define IXL_PHY_TYPE_1000BASE_T_OPTICAL 0x1d
#define IXL_PHY_TYPE_20GBASE_KR2 0x1e

#define IXL_PHY_TYPE_25GBASE_KR 0x1f
#define IXL_PHY_TYPE_25GBASE_CR 0x20
#define IXL_PHY_TYPE_25GBASE_SR 0x21
#define IXL_PHY_TYPE_25GBASE_LR 0x22
#define IXL_PHY_TYPE_25GBASE_AOC 0x23
#define IXL_PHY_TYPE_25GBASE_ACC 0x24

struct ixl_aq_module_desc {
	uint8_t oui[3];
	uint8_t _reserved1;
	uint8_t part_number[16];
	uint8_t revision[4];
	uint8_t _reserved2[8];
} __packed __aligned(4);

struct ixl_aq_phy_abilities {
	uint32_t phy_type;

	uint8_t link_speed;
#define IXL_AQ_PHY_LINK_SPEED_100MB (1 << 1)
#define IXL_AQ_PHY_LINK_SPEED_1000MB (1 << 2)
#define IXL_AQ_PHY_LINK_SPEED_10GB (1 << 3)
#define IXL_AQ_PHY_LINK_SPEED_40GB (1 << 4)
#define IXL_AQ_PHY_LINK_SPEED_20GB (1 << 5)
#define IXL_AQ_PHY_LINK_SPEED_25GB (1 << 6)
	uint8_t abilities;
	uint16_t eee_capability;

	uint32_t eeer_val;

	uint8_t d3_lpan;
	uint8_t phy_type_ext;
#define IXL_AQ_PHY_TYPE_EXT_25G_KR 0x01
#define IXL_AQ_PHY_TYPE_EXT_25G_CR 0x02
#define IXL_AQ_PHY_TYPE_EXT_25G_SR 0x04
#define IXL_AQ_PHY_TYPE_EXT_25G_LR 0x08
	uint8_t fec_cfg_curr_mod_ext_info;
#define IXL_AQ_ENABLE_FEC_KR 0x01
#define IXL_AQ_ENABLE_FEC_RS 0x02
#define IXL_AQ_REQUEST_FEC_KR 0x04
#define IXL_AQ_REQUEST_FEC_RS 0x08
#define IXL_AQ_ENABLE_FEC_AUTO 0x10
#define IXL_AQ_MODULE_TYPE_EXT_MASK 0xe0
#define IXL_AQ_MODULE_TYPE_EXT_SHIFT 5
	uint8_t ext_comp_code;

	uint8_t phy_id[4];

	uint8_t module_type[3];
#define IXL_SFF8024_ID_SFP 0x03
#define IXL_SFF8024_ID_QSFP 0x0c
#define IXL_SFF8024_ID_QSFP_PLUS 0x0d
#define IXL_SFF8024_ID_QSFP28 0x11
	uint8_t qualified_module_count;
#define IXL_AQ_PHY_MAX_QMS 16
	struct ixl_aq_module_desc
			qualified_module[IXL_AQ_PHY_MAX_QMS];
} __packed __aligned(4);

struct ixl_aq_link_param {
	uint8_t notify;
#define IXL_AQ_LINK_NOTIFY 0x03
	uint8_t _reserved1;
	uint8_t phy;
	uint8_t speed;
	uint8_t status;
	uint8_t _reserved2[11];
} __packed __aligned(4);

struct ixl_aq_vsi_param {
	uint16_t uplink_seid;
	uint8_t connect_type;
#define IXL_AQ_VSI_CONN_TYPE_NORMAL (0x1)
#define IXL_AQ_VSI_CONN_TYPE_DEFAULT (0x2)
#define IXL_AQ_VSI_CONN_TYPE_CASCADED (0x3)
	uint8_t _reserved1;

	uint8_t vf_id;
	uint8_t _reserved2;
	uint16_t vsi_flags;
#define IXL_AQ_VSI_TYPE_SHIFT 0x0
#define IXL_AQ_VSI_TYPE_MASK (0x3 << IXL_AQ_VSI_TYPE_SHIFT)
#define IXL_AQ_VSI_TYPE_VF 0x0
#define IXL_AQ_VSI_TYPE_VMDQ2 0x1
#define IXL_AQ_VSI_TYPE_PF 0x2
#define IXL_AQ_VSI_TYPE_EMP_MNG 0x3
#define IXL_AQ_VSI_FLAG_CASCADED_PV 0x4

	uint32_t addr_hi;
	uint32_t addr_lo;
} __packed __aligned(16);

struct ixl_aq_add_macvlan {
	uint16_t num_addrs;
	uint16_t seid0;
	uint16_t seid1;
	uint16_t seid2;
	uint32_t addr_hi;
	uint32_t addr_lo;
} __packed __aligned(16);

struct ixl_aq_add_macvlan_elem {
	uint8_t macaddr[6];
	uint16_t vlan;
	uint16_t flags;
#define IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH 0x0001
#define IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN 0x0004
	uint16_t queue;
	uint32_t _reserved;
} __packed __aligned(16);

struct ixl_aq_remove_macvlan {
	uint16_t num_addrs;
	uint16_t seid0;
	uint16_t seid1;
	uint16_t seid2;
	uint32_t addr_hi;
	uint32_t addr_lo;
} __packed __aligned(16);

struct ixl_aq_remove_macvlan_elem {
	uint8_t macaddr[6];
	uint16_t vlan;
	uint8_t flags;
#define IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH 0x0001
#define IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN 0x0008
	uint8_t _reserved[7];
} __packed __aligned(16);

struct ixl_aq_vsi_reply {
	uint16_t seid;
	uint16_t vsi_number;

	uint16_t vsis_used;
	uint16_t vsis_free;

	uint32_t addr_hi;
	uint32_t addr_lo;
} __packed __aligned(16);

struct ixl_aq_vsi_data {
	/* first 96 bytes are written by SW */
	uint16_t valid_sections;
#define IXL_AQ_VSI_VALID_SWITCH (1 << 0)
#define IXL_AQ_VSI_VALID_SECURITY (1 << 1)
#define IXL_AQ_VSI_VALID_VLAN (1 << 2)
#define IXL_AQ_VSI_VALID_CAS_PV (1 << 3)
#define IXL_AQ_VSI_VALID_INGRESS_UP (1 << 4)
#define IXL_AQ_VSI_VALID_EGRESS_UP (1 << 5)
#define IXL_AQ_VSI_VALID_QUEUE_MAP (1 << 6)
#define IXL_AQ_VSI_VALID_QUEUE_OPT (1 << 7)
#define IXL_AQ_VSI_VALID_OUTER_UP (1 << 8)
#define IXL_AQ_VSI_VALID_SCHED (1 << 9)
	/* switch section */
	uint16_t switch_id;
#define IXL_AQ_VSI_SWITCH_ID_SHIFT 0
#define IXL_AQ_VSI_SWITCH_ID_MASK (0xfff << IXL_AQ_VSI_SWITCH_ID_SHIFT)
#define IXL_AQ_VSI_SWITCH_NOT_STAG (1 << 12)
#define IXL_AQ_VSI_SWITCH_LOCAL_LB (1 << 14)

	uint8_t _reserved1[2];
	/* security section */
	uint8_t sec_flags;
#define IXL_AQ_VSI_SEC_ALLOW_DEST_OVRD (1 << 0)
#define IXL_AQ_VSI_SEC_ENABLE_VLAN_CHK (1 << 1)
#define IXL_AQ_VSI_SEC_ENABLE_MAC_CHK (1 << 2)
	uint8_t _reserved2;

	/* vlan section */
	uint16_t pvid;
	uint16_t fcoe_pvid;

	uint8_t port_vlan_flags;
#define IXL_AQ_VSI_PVLAN_MODE_SHIFT 0
#define IXL_AQ_VSI_PVLAN_MODE_MASK (0x3 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
#define IXL_AQ_VSI_PVLAN_MODE_TAGGED (0x1 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
#define IXL_AQ_VSI_PVLAN_MODE_UNTAGGED (0x2 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
#define IXL_AQ_VSI_PVLAN_MODE_ALL (0x3 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
#define IXL_AQ_VSI_PVLAN_INSERT_PVID (0x4 << IXL_AQ_VSI_PVLAN_MODE_SHIFT)
#define IXL_AQ_VSI_PVLAN_EMOD_SHIFT 0x3
#define IXL_AQ_VSI_PVLAN_EMOD_MASK (0x3 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
#define IXL_AQ_VSI_PVLAN_EMOD_STR_BOTH (0x0 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
#define IXL_AQ_VSI_PVLAN_EMOD_STR_UP (0x1 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
#define IXL_AQ_VSI_PVLAN_EMOD_STR (0x2 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
#define IXL_AQ_VSI_PVLAN_EMOD_NOTHING (0x3 << IXL_AQ_VSI_PVLAN_EMOD_SHIFT)
	uint8_t _reserved3[3];

	/* ingress egress up section */
	uint32_t ingress_table;
#define IXL_AQ_VSI_UP_SHIFT(_up) ((_up) * 3)
#define IXL_AQ_VSI_UP_MASK(_up) (0x7 << (IXL_AQ_VSI_UP_SHIFT(_up)))
	uint32_t egress_table;

	/* cascaded pv section */
	uint16_t cas_pv_tag;
	uint8_t cas_pv_flags;
#define IXL_AQ_VSI_CAS_PV_TAGX_SHIFT 0
#define IXL_AQ_VSI_CAS_PV_TAGX_MASK (0x3 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
#define IXL_AQ_VSI_CAS_PV_TAGX_LEAVE (0x0 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
#define IXL_AQ_VSI_CAS_PV_TAGX_REMOVE (0x1 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
#define IXL_AQ_VSI_CAS_PV_TAGX_COPY (0x2 << IXL_AQ_VSI_CAS_PV_TAGX_SHIFT)
#define IXL_AQ_VSI_CAS_PV_INSERT_TAG (1 << 4)
#define IXL_AQ_VSI_CAS_PV_ETAG_PRUNE (1 << 5)
#define IXL_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG \
				(1 << 6)
	uint8_t _reserved4;

	/* queue mapping section */
	uint16_t mapping_flags;
#define IXL_AQ_VSI_QUE_MAP_MASK 0x1
#define IXL_AQ_VSI_QUE_MAP_CONTIG 0x0
#define IXL_AQ_VSI_QUE_MAP_NONCONTIG 0x1
	uint16_t queue_mapping[16];
#define IXL_AQ_VSI_QUEUE_SHIFT 0x0
#define IXL_AQ_VSI_QUEUE_MASK (0x7ff << IXL_AQ_VSI_QUEUE_SHIFT)
	uint16_t tc_mapping[8];
#define IXL_AQ_VSI_TC_Q_OFFSET_SHIFT 0
#define IXL_AQ_VSI_TC_Q_OFFSET_MASK (0x1ff << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT)
#define IXL_AQ_VSI_TC_Q_NUMBER_SHIFT 9
#define IXL_AQ_VSI_TC_Q_NUMBER_MASK (0x7 << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT)

	/* queueing option section */
	uint8_t queueing_opt_flags;
#define IXL_AQ_VSI_QUE_OPT_MCAST_UDP_EN (1 << 2)
#define IXL_AQ_VSI_QUE_OPT_UCAST_UDP_EN (1 << 3)
#define IXL_AQ_VSI_QUE_OPT_TCP_EN (1 << 4)
#define IXL_AQ_VSI_QUE_OPT_FCOE_EN (1 << 5)
#define IXL_AQ_VSI_QUE_OPT_RSS_LUT_PF 0
#define IXL_AQ_VSI_QUE_OPT_RSS_LUT_VSI (1 << 6)
	uint8_t _reserved5[3];

	/* scheduler section */
	uint8_t up_enable_bits;
	uint8_t _reserved6;

	/* outer up section */
	uint32_t outer_up_table; /* same as ingress/egress tables */
	uint8_t _reserved7[8];

	/* last 32 bytes are written by FW */
	uint16_t qs_handle[8];
#define IXL_AQ_VSI_QS_HANDLE_INVALID 0xffff
	uint16_t stat_counter_idx;
	uint16_t sched_id;

	uint8_t _reserved8[12];
} __packed __aligned(8);

CTASSERT(sizeof(struct ixl_aq_vsi_data) == 128);

struct ixl_aq_vsi_promisc_param {
	uint16_t flags;
	uint16_t valid_flags;
#define IXL_AQ_VSI_PROMISC_FLAG_UCAST (1 << 0)
#define IXL_AQ_VSI_PROMISC_FLAG_MCAST (1 << 1)
#define IXL_AQ_VSI_PROMISC_FLAG_BCAST (1 << 2)
#define IXL_AQ_VSI_PROMISC_FLAG_DFLT (1 << 3)
#define IXL_AQ_VSI_PROMISC_FLAG_VLAN (1 << 4)
#define IXL_AQ_VSI_PROMISC_FLAG_RXONLY (1 << 15)

	uint16_t seid;
#define IXL_AQ_VSI_PROMISC_SEID_VALID (1 << 15)
	uint16_t vlan;
#define IXL_AQ_VSI_PROMISC_VLAN_VALID (1 << 15)
	uint32_t reserved[2];
} __packed __aligned(8);

struct ixl_aq_veb_param {
	uint16_t uplink_seid;
	uint16_t downlink_seid;
	uint16_t veb_flags;
#define IXL_AQ_ADD_VEB_FLOATING (1 << 0)
#define IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT 1
#define IXL_AQ_ADD_VEB_PORT_TYPE_MASK (0x3 << IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT)
#define IXL_AQ_ADD_VEB_PORT_TYPE_DEFAULT \
				(0x2 << IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT)
#define IXL_AQ_ADD_VEB_PORT_TYPE_DATA (0x4 << IXL_AQ_ADD_VEB_PORT_TYPE_SHIFT)
#define IXL_AQ_ADD_VEB_ENABLE_L2_FILTER (1 << 3) /* deprecated */
#define IXL_AQ_ADD_VEB_DISABLE_STATS (1 << 4)
	uint8_t enable_tcs;
	uint8_t _reserved[9];
} __packed __aligned(16);

struct ixl_aq_veb_reply {
	uint16_t _reserved1;
	uint16_t _reserved2;
	uint16_t _reserved3;
	uint16_t switch_seid;
	uint16_t veb_seid;
#define IXL_AQ_VEB_ERR_FLAG_NO_VEB (1 << 0)
#define IXL_AQ_VEB_ERR_FLAG_NO_SCHED (1 << 1)
#define IXL_AQ_VEB_ERR_FLAG_NO_COUNTER (1 << 2)
#define IXL_AQ_VEB_ERR_FLAG_NO_ENTRY (1 << 3)
	uint16_t statistic_index;
	uint16_t vebs_used;
	uint16_t vebs_free;
} __packed __aligned(16);

/* GET PHY ABILITIES param[0] */
#define IXL_AQ_PHY_REPORT_QUAL (1 << 0)
#define IXL_AQ_PHY_REPORT_INIT (1 << 1)

struct ixl_aq_phy_reg_access {
	uint8_t phy_iface;
#define IXL_AQ_PHY_IF_INTERNAL 0
#define IXL_AQ_PHY_IF_EXTERNAL 1
#define IXL_AQ_PHY_IF_MODULE 2
	uint8_t dev_addr;
	uint16_t recall;
#define IXL_AQ_PHY_QSFP_DEV_ADDR 0
#define IXL_AQ_PHY_QSFP_LAST 1
	uint32_t reg;
	uint32_t val;
	uint32_t _reserved2;
} __packed __aligned(16);

/* RESTART_AN param[0] */
#define IXL_AQ_PHY_RESTART_AN (1 << 1)
#define IXL_AQ_PHY_LINK_ENABLE (1 << 2)

struct ixl_aq_link_status { /* this occupies the iaq_param space */
	uint16_t command_flags; /* only field set on command */
#define IXL_AQ_LSE_MASK 0x3
#define IXL_AQ_LSE_NOP 0x0
#define IXL_AQ_LSE_DISABLE 0x2
#define IXL_AQ_LSE_ENABLE 0x3
#define IXL_AQ_LSE_IS_ENABLED 0x1 /* only set in response */
	uint8_t phy_type;
	uint8_t link_speed;
#define IXL_AQ_LINK_SPEED_1GB (1 << 2)
#define IXL_AQ_LINK_SPEED_10GB (1 << 3)
#define IXL_AQ_LINK_SPEED_40GB (1 << 4)
#define IXL_AQ_LINK_SPEED_25GB (1 << 6)
	uint8_t link_info;
#define IXL_AQ_LINK_UP_FUNCTION 0x01
#define IXL_AQ_LINK_FAULT 0x02
#define IXL_AQ_LINK_FAULT_TX 0x04
#define IXL_AQ_LINK_FAULT_RX 0x08
#define IXL_AQ_LINK_FAULT_REMOTE 0x10
#define IXL_AQ_LINK_UP_PORT 0x20
#define IXL_AQ_MEDIA_AVAILABLE 0x40
#define IXL_AQ_SIGNAL_DETECT 0x80
	uint8_t an_info;
#define IXL_AQ_AN_COMPLETED 0x01
#define IXL_AQ_LP_AN_ABILITY 0x02
#define IXL_AQ_PD_FAULT 0x04
#define IXL_AQ_FEC_EN 0x08
#define IXL_AQ_PHY_LOW_POWER 0x10
#define IXL_AQ_LINK_PAUSE_TX 0x20
#define IXL_AQ_LINK_PAUSE_RX 0x40
#define IXL_AQ_QUALIFIED_MODULE 0x80

	uint8_t ext_info;
#define IXL_AQ_LINK_PHY_TEMP_ALARM 0x01
#define IXL_AQ_LINK_XCESSIVE_ERRORS 0x02
#define IXL_AQ_LINK_TX_SHIFT 0x02
#define IXL_AQ_LINK_TX_MASK (0x03 << IXL_AQ_LINK_TX_SHIFT)
#define IXL_AQ_LINK_TX_ACTIVE 0x00
#define IXL_AQ_LINK_TX_DRAINED 0x01
#define IXL_AQ_LINK_TX_FLUSHED 0x03
#define IXL_AQ_LINK_FORCED_40G 0x10
/* 25G Error Codes */
#define IXL_AQ_25G_NO_ERR 0x00
#define IXL_AQ_25G_NOT_PRESENT 0x01
#define IXL_AQ_25G_NVM_CRC_ERR 0x02
#define IXL_AQ_25G_SBUS_UCODE_ERR 0x03
#define IXL_AQ_25G_SERDES_UCODE_ERR 0x04
#define IXL_AQ_25G_NIMB_UCODE_ERR 0x05
	uint8_t loopback;
	uint16_t max_frame_size;

	uint8_t config;
#define IXL_AQ_CONFIG_FEC_KR_ENA 0x01
#define IXL_AQ_CONFIG_FEC_RS_ENA 0x02
#define IXL_AQ_CONFIG_CRC_ENA 0x04
#define IXL_AQ_CONFIG_PACING_MASK 0x78
	uint8_t power_desc;
#define IXL_AQ_LINK_POWER_CLASS_1 0x00
#define IXL_AQ_LINK_POWER_CLASS_2 0x01
#define IXL_AQ_LINK_POWER_CLASS_3 0x02
#define IXL_AQ_LINK_POWER_CLASS_4 0x03
#define IXL_AQ_PWR_CLASS_MASK 0x03

	uint8_t reserved[4];
} __packed __aligned(4);
/* event mask command flags for param[2] */
#define IXL_AQ_PHY_EV_MASK 0x3ff
#define IXL_AQ_PHY_EV_LINK_UPDOWN (1 << 1)
#define IXL_AQ_PHY_EV_MEDIA_NA (1 << 2)
#define IXL_AQ_PHY_EV_LINK_FAULT (1 << 3)
#define IXL_AQ_PHY_EV_PHY_TEMP_ALARM (1 << 4)
#define IXL_AQ_PHY_EV_EXCESS_ERRORS (1 << 5)
#define IXL_AQ_PHY_EV_SIGNAL_DETECT (1 << 6)
#define IXL_AQ_PHY_EV_AN_COMPLETED (1 << 7)
#define IXL_AQ_PHY_EV_MODULE_QUAL_FAIL (1 << 8)
#define IXL_AQ_PHY_EV_PORT_TX_SUSPENDED (1 << 9)

struct ixl_aq_rss_lut { /* 722 */
#define IXL_AQ_SET_RSS_LUT_VSI_VALID (1 << 15)
#define IXL_AQ_SET_RSS_LUT_VSI_ID_SHIFT 0
#define IXL_AQ_SET_RSS_LUT_VSI_ID_MASK \
				(0x3FF << IXL_AQ_SET_RSS_LUT_VSI_ID_SHIFT)

	uint16_t vsi_number;
#define IXL_AQ_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
#define IXL_AQ_SET_RSS_LUT_TABLE_TYPE_MASK \
				(0x1 << IXL_AQ_SET_RSS_LUT_TABLE_TYPE_SHIFT)
#define IXL_AQ_SET_RSS_LUT_TABLE_TYPE_VSI 0
#define IXL_AQ_SET_RSS_LUT_TABLE_TYPE_PF 1
	uint16_t flags;
	uint8_t _reserved[4];
	uint32_t addr_hi;
	uint32_t addr_lo;
} __packed __aligned(16);

struct ixl_aq_get_set_rss_key { /* 722 */
#define IXL_AQ_SET_RSS_KEY_VSI_VALID (1 << 15)
#define IXL_AQ_SET_RSS_KEY_VSI_ID_SHIFT 0
#define IXL_AQ_SET_RSS_KEY_VSI_ID_MASK \
				(0x3FF << IXL_AQ_SET_RSS_KEY_VSI_ID_SHIFT)
	uint16_t vsi_number;
	uint8_t _reserved[6];
	uint32_t addr_hi;
	uint32_t addr_lo;
} __packed __aligned(16);

/* aq response codes */
#define IXL_AQ_RC_OK 0 /* success */
#define IXL_AQ_RC_EPERM 1 /* Operation not permitted */
#define IXL_AQ_RC_ENOENT 2 /* No such element */
#define IXL_AQ_RC_ESRCH 3 /* Bad opcode */
#define IXL_AQ_RC_EINTR 4 /* operation interrupted */
#define IXL_AQ_RC_EIO 5 /* I/O error */
#define IXL_AQ_RC_ENXIO 6 /* No such resource */
#define IXL_AQ_RC_E2BIG 7 /* Arg too long */
#define IXL_AQ_RC_EAGAIN 8 /* Try again */
#define IXL_AQ_RC_ENOMEM 9 /* Out of memory */
#define IXL_AQ_RC_EACCES 10 /* Permission denied */
#define IXL_AQ_RC_EFAULT 11 /* Bad address */
#define IXL_AQ_RC_EBUSY 12 /* Device or resource busy */
#define IXL_AQ_RC_EEXIST 13 /* object already exists */
#define IXL_AQ_RC_EINVAL 14 /* invalid argument */
#define IXL_AQ_RC_ENOTTY 15 /* not a typewriter */
#define IXL_AQ_RC_ENOSPC 16 /* No space or alloc failure */
#define IXL_AQ_RC_ENOSYS 17 /* function not implemented */
#define IXL_AQ_RC_ERANGE 18 /* parameter out of range */
#define IXL_AQ_RC_EFLUSHED 19 /* cmd flushed due to prev error */
#define IXL_AQ_RC_BAD_ADDR 20 /* contains a bad pointer */
#define IXL_AQ_RC_EMODE 21 /* not allowed in current mode */
#define IXL_AQ_RC_EFBIG 22 /* file too large */

struct ixl_tx_desc {
	uint64_t addr;
	uint64_t cmd;
#define IXL_TX_DESC_DTYPE_SHIFT 0
#define IXL_TX_DESC_DTYPE_MASK (0xfULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_DATA (0x0ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_NOP (0x1ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_CONTEXT (0x1ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FCOE_CTX (0x2ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FD (0x8ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_DDP_CTX (0x9ULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FLEX_DATA (0xbULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FLEX_CTX_1 (0xcULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_FLEX_CTX_2 (0xdULL << IXL_TX_DESC_DTYPE_SHIFT)
#define IXL_TX_DESC_DTYPE_DONE (0xfULL << IXL_TX_DESC_DTYPE_SHIFT)

#define IXL_TX_DESC_CMD_SHIFT 4
#define IXL_TX_DESC_CMD_MASK (0x3ffULL << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_EOP (0x001 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_RS (0x002 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_ICRC (0x004 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IL2TAG1 (0x008 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_DUMMY (0x010 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_MASK (0x060 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_NONIP (0x000 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_IPV6 (0x020 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_IPV4 (0x040 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_IIPT_IPV4_CSUM (0x060 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_FCOET (0x080 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_MASK (0x300 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_UNK (0x000 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_TCP (0x100 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_SCTP (0x200 << IXL_TX_DESC_CMD_SHIFT)
#define IXL_TX_DESC_CMD_L4T_EOFT_UDP (0x300 << IXL_TX_DESC_CMD_SHIFT)

#define IXL_TX_DESC_MACLEN_SHIFT 16
#define IXL_TX_DESC_MACLEN_MASK (0x7fULL << IXL_TX_DESC_MACLEN_SHIFT)
#define IXL_TX_DESC_IPLEN_SHIFT 23
#define IXL_TX_DESC_IPLEN_MASK (0x7fULL << IXL_TX_DESC_IPLEN_SHIFT)
#define IXL_TX_DESC_L4LEN_SHIFT 30
#define IXL_TX_DESC_L4LEN_MASK (0xfULL << IXL_TX_DESC_L4LEN_SHIFT)
#define IXL_TX_DESC_FCLEN_SHIFT 30
#define IXL_TX_DESC_FCLEN_MASK (0xfULL << IXL_TX_DESC_FCLEN_SHIFT)

#define IXL_TX_DESC_BSIZE_SHIFT 34
#define IXL_TX_DESC_BSIZE_MAX 0x3fffULL
#define IXL_TX_DESC_BSIZE_MASK \
				(IXL_TX_DESC_BSIZE_MAX << IXL_TX_DESC_BSIZE_SHIFT)

#define IXL_TX_CTX_DESC_CMD_TSO 0x10
#define IXL_TX_CTX_DESC_TLEN_SHIFT 30
#define IXL_TX_CTX_DESC_MSS_SHIFT 50

#define IXL_TX_DESC_L2TAG1_SHIFT 48
} __packed __aligned(16);

struct ixl_rx_rd_desc_16 {
	uint64_t paddr; /* packet addr */
	uint64_t haddr; /* header addr */
} __packed __aligned(16);

struct ixl_rx_rd_desc_32 {
	uint64_t paddr; /* packet addr */
	uint64_t haddr; /* header addr */
	uint64_t _reserved1;
	uint64_t _reserved2;
} __packed __aligned(16);

struct ixl_rx_wb_desc_16 {
	uint16_t _reserved1;
	uint16_t l2tag1;
	uint32_t filter_status;
	uint64_t qword1;
#define IXL_RX_DESC_DD (1 << 0)
#define IXL_RX_DESC_EOP (1 << 1)
#define IXL_RX_DESC_L2TAG1P (1 << 2)
#define IXL_RX_DESC_L3L4P (1 << 3)
#define IXL_RX_DESC_CRCP (1 << 4)
#define IXL_RX_DESC_TSYNINDX_SHIFT 5 /* TSYNINDX */
#define IXL_RX_DESC_TSYNINDX_MASK (7 << IXL_RX_DESC_TSYNINDX_SHIFT)
#define IXL_RX_DESC_UMB_SHIFT 9
#define IXL_RX_DESC_UMB_MASK (0x3 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_UMB_UCAST (0x0 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_UMB_MCAST (0x1 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_UMB_BCAST (0x2 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_UMB_MIRROR (0x3 << IXL_RX_DESC_UMB_SHIFT)
#define IXL_RX_DESC_FLM (1 << 11)
#define IXL_RX_DESC_FLTSTAT_SHIFT 12
#define IXL_RX_DESC_FLTSTAT_MASK (0x3 << IXL_RX_DESC_FLTSTAT_SHIFT)
#define IXL_RX_DESC_FLTSTAT_NODATA (0x0 << IXL_RX_DESC_FLTSTAT_SHIFT)
#define IXL_RX_DESC_FLTSTAT_FDFILTID (0x1 << IXL_RX_DESC_FLTSTAT_SHIFT)
#define IXL_RX_DESC_FLTSTAT_RSS (0x3 << IXL_RX_DESC_FLTSTAT_SHIFT)
#define IXL_RX_DESC_LPBK (1 << 14)
#define IXL_RX_DESC_IPV6EXTADD (1 << 15)
#define IXL_RX_DESC_INT_UDP_0 (1 << 18)

#define IXL_RX_DESC_RXE (1 << 19)
#define IXL_RX_DESC_HBO (1 << 21)
#define IXL_RX_DESC_IPE (1 << 22)
#define IXL_RX_DESC_L4E (1 << 23)
#define IXL_RX_DESC_EIPE (1 << 24)
#define IXL_RX_DESC_OVERSIZE (1 << 25)

#define IXL_RX_DESC_PTYPE_SHIFT 30
#define IXL_RX_DESC_PTYPE_MASK (0xffULL << IXL_RX_DESC_PTYPE_SHIFT)

#define IXL_RX_DESC_PLEN_SHIFT 38
#define IXL_RX_DESC_PLEN_MASK (0x3fffULL << IXL_RX_DESC_PLEN_SHIFT)
#define IXL_RX_DESC_HLEN_SHIFT 42
#define IXL_RX_DESC_HLEN_MASK (0x7ffULL << IXL_RX_DESC_HLEN_SHIFT)
} __packed __aligned(16);

struct ixl_rx_wb_desc_32 {
	uint64_t qword0;
	uint64_t qword1;
	uint64_t qword2;
	uint64_t qword3;
} __packed __aligned(16);

#define IXL_TX_PKT_DESCS 8
#define IXL_TX_QUEUE_ALIGN 128
#define IXL_RX_QUEUE_ALIGN 128

#define IXL_HARDMTU 9712 /* 9726 - ETHER_HDR_LEN */
#define IXL_TSO_SIZE ((255 * 1024) - 1)
#define IXL_MAX_DMA_SEG_SIZE ((16 * 1024) - 1)

/*
 * Our TCP/IP stack is unable to handle packets greater than MAXMCLBYTES.
 * This interface is unable to handle packets greater than IXL_TSO_SIZE.
 */
CTASSERT(MAXMCLBYTES < IXL_TSO_SIZE);

#define IXL_PCIREG PCI_MAPREG_START

#define IXL_ITR0 0x0
#define IXL_ITR1 0x1
#define IXL_ITR2 0x2
#define IXL_NOITR 0x2

#define IXL_AQ_NUM 256
#define IXL_AQ_MASK (IXL_AQ_NUM - 1)
#define IXL_AQ_ALIGN 64 /* lol */
#define IXL_AQ_BUFLEN 4096
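
/*
 * IXL_AQ_NUM is a power of two, so admin queue producer/consumer
 * indexes can wrap with a cheap "index & IXL_AQ_MASK" instead of a
 * modulo by the ring size.
 */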

/* Packet Classifier Types for filters */
/* bits 0-28 are reserved for future use */
#define IXL_PCT_NONF_IPV4_UDP_UCAST (1ULL << 29) /* 722 */
#define IXL_PCT_NONF_IPV4_UDP_MCAST (1ULL << 30) /* 722 */
#define IXL_PCT_NONF_IPV4_UDP (1ULL << 31)
#define IXL_PCT_NONF_IPV4_TCP_SYN_NOACK (1ULL << 32) /* 722 */
#define IXL_PCT_NONF_IPV4_TCP (1ULL << 33)
#define IXL_PCT_NONF_IPV4_SCTP (1ULL << 34)
#define IXL_PCT_NONF_IPV4_OTHER (1ULL << 35)
#define IXL_PCT_FRAG_IPV4 (1ULL << 36)
/* bits 37-38 are reserved for future use */
#define IXL_PCT_NONF_IPV6_UDP_UCAST (1ULL << 39) /* 722 */
#define IXL_PCT_NONF_IPV6_UDP_MCAST (1ULL << 40) /* 722 */
#define IXL_PCT_NONF_IPV6_UDP (1ULL << 41)
#define IXL_PCT_NONF_IPV6_TCP_SYN_NOACK (1ULL << 42) /* 722 */
#define IXL_PCT_NONF_IPV6_TCP (1ULL << 43)
#define IXL_PCT_NONF_IPV6_SCTP (1ULL << 44)
#define IXL_PCT_NONF_IPV6_OTHER (1ULL << 45)
#define IXL_PCT_FRAG_IPV6 (1ULL << 46)
/* bit 47 is reserved for future use */
#define IXL_PCT_FCOE_OX (1ULL << 48)
#define IXL_PCT_FCOE_RX (1ULL << 49)
#define IXL_PCT_FCOE_OTHER (1ULL << 50)
/* bits 51-62 are reserved for future use */
#define IXL_PCT_L2_PAYLOAD (1ULL << 63)

#define IXL_RSS_HENA_BASE_DEFAULT \
	(IXL_PCT_NONF_IPV4_UDP | \
	IXL_PCT_NONF_IPV4_TCP | \
	IXL_PCT_NONF_IPV4_SCTP | \
	IXL_PCT_NONF_IPV4_OTHER | \
	IXL_PCT_FRAG_IPV4 | \
	IXL_PCT_NONF_IPV6_UDP | \
	IXL_PCT_NONF_IPV6_TCP | \
	IXL_PCT_NONF_IPV6_SCTP | \
	IXL_PCT_NONF_IPV6_OTHER | \
	IXL_PCT_FRAG_IPV6 | \
	IXL_PCT_L2_PAYLOAD)

#define IXL_RSS_HENA_BASE_710 IXL_RSS_HENA_BASE_DEFAULT
#define IXL_RSS_HENA_BASE_722 (IXL_RSS_HENA_BASE_DEFAULT | \
	IXL_PCT_NONF_IPV4_UDP_UCAST | \
	IXL_PCT_NONF_IPV4_UDP_MCAST | \
	IXL_PCT_NONF_IPV6_UDP_UCAST | \
	IXL_PCT_NONF_IPV6_UDP_MCAST | \
	IXL_PCT_NONF_IPV4_TCP_SYN_NOACK | \
	IXL_PCT_NONF_IPV6_TCP_SYN_NOACK)

#define IXL_HMC_ROUNDUP 512
#define IXL_HMC_PGSIZE 4096
#define IXL_HMC_DVASZ sizeof(uint64_t)
#define IXL_HMC_PGS (IXL_HMC_PGSIZE / IXL_HMC_DVASZ)
#define IXL_HMC_L2SZ (IXL_HMC_PGSIZE * IXL_HMC_PGS)
#define IXL_HMC_PDVALID 1ULL

struct ixl_aq_regs {
	bus_size_t atq_tail;
	bus_size_t atq_head;
	bus_size_t atq_len;
	bus_size_t atq_bal;
	bus_size_t atq_bah;

	bus_size_t arq_tail;
	bus_size_t arq_head;
	bus_size_t arq_len;
	bus_size_t arq_bal;
	bus_size_t arq_bah;

	uint32_t atq_len_enable;
	uint32_t atq_tail_mask;
	uint32_t atq_head_mask;

	uint32_t arq_len_enable;
	uint32_t arq_tail_mask;
	uint32_t arq_head_mask;
};

struct ixl_phy_type {
	uint64_t phy_type;
	uint64_t ifm_type;
};

struct ixl_speed_type {
	uint8_t dev_speed;
	uint64_t net_speed;
};

struct ixl_aq_buf {
	SIMPLEQ_ENTRY(ixl_aq_buf)
			aqb_entry;
	void *aqb_data;
	bus_dmamap_t aqb_map;
};
SIMPLEQ_HEAD(ixl_aq_bufs, ixl_aq_buf);

struct ixl_dmamem {
	bus_dmamap_t ixm_map;
	bus_dma_segment_t ixm_seg;
	int ixm_nsegs;
	size_t ixm_size;
	caddr_t ixm_kva;
};
#define IXL_DMA_MAP(_ixm) ((_ixm)->ixm_map)
#define IXL_DMA_DVA(_ixm) ((_ixm)->ixm_map->dm_segs[0].ds_addr)
#define IXL_DMA_KVA(_ixm) ((void *)(_ixm)->ixm_kva)
#define IXL_DMA_LEN(_ixm) ((_ixm)->ixm_size)
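
/*
 * note: an ixl_dmamem is backed by a single contiguous segment (see
 * ixm_seg), so IXL_DMA_DVA() can take the device address of the whole
 * mapping from dm_segs[0].
 */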

struct ixl_hmc_entry {
	uint64_t hmc_base;
	uint32_t hmc_count;
	uint32_t hmc_size;
};

#define IXL_HMC_LAN_TX 0
#define IXL_HMC_LAN_RX 1
#define IXL_HMC_FCOE_CTX 2
#define IXL_HMC_FCOE_FILTER 3
#define IXL_HMC_COUNT 4

struct ixl_hmc_pack {
	uint16_t offset;
	uint16_t width;
	uint16_t lsb;
};

/*
 * these hmc objects have weird sizes and alignments, so these are abstract
 * representations of them that are nice for c to populate.
 *
 * the packing code relies on little-endian values being stored in the fields,
 * no high bits in the fields being set, and the fields must be packed in the
 * same order as they are in the ctx structure.
 */
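
/*
 * e.g. the rxq "qlen" entry below is { offsetof(..., qlen), 13, 89 }:
 * the packing code copies the low 13 bits of ixl_hmc_rxq.qlen into the
 * context image starting at bit 89.
 */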

struct ixl_hmc_rxq {
	uint16_t head;
	uint8_t cpuid;
	uint64_t base;
#define IXL_HMC_RXQ_BASE_UNIT 128
	uint16_t qlen;
	uint16_t dbuff;
#define IXL_HMC_RXQ_DBUFF_UNIT 128
	uint8_t hbuff;
#define IXL_HMC_RXQ_HBUFF_UNIT 64
	uint8_t dtype;
#define IXL_HMC_RXQ_DTYPE_NOSPLIT 0x0
#define IXL_HMC_RXQ_DTYPE_HSPLIT 0x1
#define IXL_HMC_RXQ_DTYPE_SPLIT_ALWAYS 0x2
	uint8_t dsize;
#define IXL_HMC_RXQ_DSIZE_16 0
#define IXL_HMC_RXQ_DSIZE_32 1
	uint8_t crcstrip;
	uint8_t fc_ena;
	uint8_t l2tsel;
#define IXL_HMC_RXQ_L2TSEL_2ND_TAG_TO_L2TAG1 \
				0
#define IXL_HMC_RXQ_L2TSEL_1ST_TAG_TO_L2TAG1 \
				1
	uint8_t hsplit_0;
	uint8_t hsplit_1;
	uint8_t showiv;
	uint16_t rxmax;
	uint8_t tphrdesc_ena;
	uint8_t tphwdesc_ena;
	uint8_t tphdata_ena;
	uint8_t tphhead_ena;
	uint8_t lrxqthresh;
	uint8_t prefena;
};

static const struct ixl_hmc_pack ixl_hmc_pack_rxq[] = {
	{ offsetof(struct ixl_hmc_rxq, head), 13, 0 },
	{ offsetof(struct ixl_hmc_rxq, cpuid), 8, 13 },
	{ offsetof(struct ixl_hmc_rxq, base), 57, 32 },
	{ offsetof(struct ixl_hmc_rxq, qlen), 13, 89 },
	{ offsetof(struct ixl_hmc_rxq, dbuff), 7, 102 },
	{ offsetof(struct ixl_hmc_rxq, hbuff), 5, 109 },
	{ offsetof(struct ixl_hmc_rxq, dtype), 2, 114 },
	{ offsetof(struct ixl_hmc_rxq, dsize), 1, 116 },
	{ offsetof(struct ixl_hmc_rxq, crcstrip), 1, 117 },
	{ offsetof(struct ixl_hmc_rxq, fc_ena), 1, 118 },
	{ offsetof(struct ixl_hmc_rxq, l2tsel), 1, 119 },
	{ offsetof(struct ixl_hmc_rxq, hsplit_0), 4, 120 },
	{ offsetof(struct ixl_hmc_rxq, hsplit_1), 2, 124 },
	{ offsetof(struct ixl_hmc_rxq, showiv), 1, 127 },
	{ offsetof(struct ixl_hmc_rxq, rxmax), 14, 174 },
	{ offsetof(struct ixl_hmc_rxq, tphrdesc_ena), 1, 193 },
	{ offsetof(struct ixl_hmc_rxq, tphwdesc_ena), 1, 194 },
	{ offsetof(struct ixl_hmc_rxq, tphdata_ena), 1, 195 },
	{ offsetof(struct ixl_hmc_rxq, tphhead_ena), 1, 196 },
	{ offsetof(struct ixl_hmc_rxq, lrxqthresh), 3, 198 },
	{ offsetof(struct ixl_hmc_rxq, prefena), 1, 201 },
};

#define IXL_HMC_RXQ_MINSIZE (201 + 1)
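/* prefena is the last packed rxq field: lsb 201 + width 1 = 202 bits */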

struct ixl_hmc_txq {
	uint16_t head;
	uint8_t new_context;
	uint64_t base;
#define IXL_HMC_TXQ_BASE_UNIT 128
	uint8_t fc_ena;
	uint8_t timesync_ena;
	uint8_t fd_ena;
	uint8_t alt_vlan_ena;
	uint16_t thead_wb;
	uint8_t cpuid;
	uint8_t head_wb_ena;
#define IXL_HMC_TXQ_DESC_WB 0
#define IXL_HMC_TXQ_HEAD_WB 1
	uint16_t qlen;
	uint8_t tphrdesc_ena;
	uint8_t tphrpacket_ena;
	uint8_t tphwdesc_ena;
	uint64_t head_wb_addr;
	uint32_t crc;
	uint16_t rdylist;
	uint8_t rdylist_act;
};

static const struct ixl_hmc_pack ixl_hmc_pack_txq[] = {
	{ offsetof(struct ixl_hmc_txq, head), 13, 0 },
	{ offsetof(struct ixl_hmc_txq, new_context), 1, 30 },
	{ offsetof(struct ixl_hmc_txq, base), 57, 32 },
	{ offsetof(struct ixl_hmc_txq, fc_ena), 1, 89 },
	{ offsetof(struct ixl_hmc_txq, timesync_ena), 1, 90 },
	{ offsetof(struct ixl_hmc_txq, fd_ena), 1, 91 },
	{ offsetof(struct ixl_hmc_txq, alt_vlan_ena), 1, 92 },
	{ offsetof(struct ixl_hmc_txq, cpuid), 8, 96 },
	/* line 1 */
	{ offsetof(struct ixl_hmc_txq, thead_wb), 13, 0 + 128 },
	{ offsetof(struct ixl_hmc_txq, head_wb_ena), 1, 32 + 128 },
	{ offsetof(struct ixl_hmc_txq, qlen), 13, 33 + 128 },
	{ offsetof(struct ixl_hmc_txq, tphrdesc_ena), 1, 46 + 128 },
	{ offsetof(struct ixl_hmc_txq, tphrpacket_ena), 1, 47 + 128 },
	{ offsetof(struct ixl_hmc_txq, tphwdesc_ena), 1, 48 + 128 },
	{ offsetof(struct ixl_hmc_txq, head_wb_addr), 64, 64 + 128 },
	/* line 7 */
	{ offsetof(struct ixl_hmc_txq, crc), 32, 0 + (7*128) },
	{ offsetof(struct ixl_hmc_txq, rdylist), 10, 84 + (7*128) },
	{ offsetof(struct ixl_hmc_txq, rdylist_act), 1, 94 + (7*128) },
};

#define IXL_HMC_TXQ_MINSIZE (94 + (7*128) + 1)

struct ixl_rss_key {
	uint32_t key[13];
};

struct ixl_rss_lut_128 {
	uint32_t entries[128 / sizeof(uint32_t)];
};

struct ixl_rss_lut_512 {
	uint32_t entries[512 / sizeof(uint32_t)];
};

/* driver structures */

struct ixl_vector;
struct ixl_chip;

struct ixl_tx_map {
	struct mbuf *txm_m;
	bus_dmamap_t txm_map;
	unsigned int txm_eop;
};

struct ixl_tx_ring {
	struct ixl_softc *txr_sc;
	struct ixl_vector *txr_vector;
	struct ifqueue *txr_ifq;

	unsigned int txr_prod;
	unsigned int txr_cons;

	struct ixl_tx_map *txr_maps;
	struct ixl_dmamem txr_mem;

	bus_size_t txr_tail;
	unsigned int txr_qid;
} __aligned(CACHE_LINE_SIZE);

struct ixl_rx_map {
	struct mbuf *rxm_m;
	bus_dmamap_t rxm_map;
};

struct ixl_rx_ring {
	struct ixl_softc *rxr_sc;
	struct ixl_vector *rxr_vector;
	struct ifiqueue *rxr_ifiq;

	struct if_rxring rxr_acct;
	struct timeout rxr_refill;

	unsigned int rxr_prod;
	unsigned int rxr_cons;

	struct ixl_rx_map *rxr_maps;
	struct ixl_dmamem rxr_mem;

	struct mbuf *rxr_m_head;
	struct mbuf **rxr_m_tail;

	bus_size_t rxr_tail;
	unsigned int rxr_qid;
} __aligned(CACHE_LINE_SIZE);

struct ixl_atq {
	struct ixl_aq_desc iatq_desc;
	void *iatq_arg;
	void (*iatq_fn)(struct ixl_softc *, void *);
};
SIMPLEQ_HEAD(ixl_atq_list, ixl_atq);

struct ixl_vector {
	struct ixl_softc *iv_sc;
	struct ixl_rx_ring *iv_rxr;
	struct ixl_tx_ring *iv_txr;
	int iv_qid;
	void *iv_ihc;
	char iv_name[16];
} __aligned(CACHE_LINE_SIZE);

struct ixl_softc {
	struct device sc_dev;
	const struct ixl_chip *sc_chip;
	struct arpcom sc_ac;
	struct ifmedia sc_media;
	uint64_t sc_media_status;
	uint64_t sc_media_active;

	pci_chipset_tag_t sc_pc;
	pci_intr_handle_t sc_ih;
	void *sc_ihc;
	pcitag_t sc_tag;

	bus_dma_tag_t sc_dmat;
	bus_space_tag_t sc_memt;
	bus_space_handle_t sc_memh;
	bus_size_t sc_mems;

	uint16_t sc_api_major;
	uint16_t sc_api_minor;
	uint8_t sc_pf_id;
	uint16_t sc_uplink_seid; /* le */
	uint16_t sc_downlink_seid; /* le */
	uint16_t sc_veb_seid; /* le */
	uint16_t sc_vsi_number; /* le */
	uint16_t sc_seid;
	unsigned int sc_base_queue;
	unsigned int sc_port;

	struct ixl_dmamem sc_scratch;

	const struct ixl_aq_regs *
			sc_aq_regs;

	struct ixl_dmamem sc_atq;
	unsigned int sc_atq_prod;
	unsigned int sc_atq_cons;

	struct mutex sc_atq_mtx;
	struct ixl_dmamem sc_arq;
	struct task sc_arq_task;
	struct ixl_aq_bufs sc_arq_idle;
	struct ixl_aq_bufs sc_arq_live;
	struct if_rxring sc_arq_ring;
	unsigned int sc_arq_prod;
	unsigned int sc_arq_cons;

	struct mutex sc_link_state_mtx;
	struct task sc_link_state_task;
	struct ixl_atq sc_link_state_atq;

	struct ixl_dmamem sc_hmc_sd;
	struct ixl_dmamem sc_hmc_pd;
	struct ixl_hmc_entry sc_hmc_entries[IXL_HMC_COUNT];

	unsigned int sc_tx_ring_ndescs;
	unsigned int sc_rx_ring_ndescs;
	unsigned int sc_nqueues; /* 1 << sc_nqueues */

	struct intrmap *sc_intrmap;
	struct ixl_vector *sc_vectors;

	struct rwlock sc_cfg_lock;
	unsigned int sc_dead;

	uint8_t sc_enaddr[ETHER_ADDR_LEN];

#if NKSTAT > 0
	struct mutex sc_kstat_mtx;
	struct timeout sc_kstat_tmo;
	struct kstat *sc_port_kstat;
	struct kstat *sc_vsi_kstat;
#endif
};
#define DEVNAME(_sc) ((_sc)->sc_dev.dv_xname)

#define delaymsec(_ms) delay(1000 * (_ms))

static void ixl_clear_hw(struct ixl_softc *);
static int ixl_pf_reset(struct ixl_softc *);

static int ixl_dmamem_alloc(struct ixl_softc *, struct ixl_dmamem *,
		    bus_size_t, u_int);
static void ixl_dmamem_free(struct ixl_softc *, struct ixl_dmamem *);

static int ixl_arq_fill(struct ixl_softc *);
static void ixl_arq_unfill(struct ixl_softc *);

static int ixl_atq_poll(struct ixl_softc *, struct ixl_aq_desc *,
		    unsigned int);
static void ixl_atq_set(struct ixl_atq *,
		    void (*)(struct ixl_softc *, void *), void *);
static void ixl_atq_post(struct ixl_softc *, struct ixl_atq *);
static void ixl_atq_done(struct ixl_softc *);
static void ixl_atq_exec(struct ixl_softc *, struct ixl_atq *,
		    const char *);
static int ixl_get_version(struct ixl_softc *);
static int ixl_pxe_clear(struct ixl_softc *);
static int ixl_lldp_shut(struct ixl_softc *);
static int ixl_get_mac(struct ixl_softc *);
static int ixl_get_switch_config(struct ixl_softc *);
static int ixl_phy_mask_ints(struct ixl_softc *);
static int ixl_get_phy_types(struct ixl_softc *, uint64_t *);
static int ixl_restart_an(struct ixl_softc *);
static int ixl_hmc(struct ixl_softc *);
static void ixl_hmc_free(struct ixl_softc *);
static int ixl_get_vsi(struct ixl_softc *);
static int ixl_set_vsi(struct ixl_softc *);
static int ixl_get_link_status(struct ixl_softc *);
static int ixl_set_link_status(struct ixl_softc *,
		    const struct ixl_aq_desc *);
static int ixl_add_macvlan(struct ixl_softc *, uint8_t *, uint16_t,
		    uint16_t);
static int ixl_remove_macvlan(struct ixl_softc *, uint8_t *, uint16_t,
		    uint16_t);
static void ixl_link_state_update(void *);
static void ixl_arq(void *);
static void ixl_hmc_pack(void *, const void *,
		    const struct ixl_hmc_pack *, unsigned int);

static int ixl_get_sffpage(struct ixl_softc *, struct if_sffpage *);
static int ixl_sff_get_byte(struct ixl_softc *, uint8_t, uint32_t,
		    uint8_t *);
static int ixl_sff_set_byte(struct ixl_softc *, uint8_t, uint32_t,
		    uint8_t);

static int ixl_match(struct device *, void *, void *);
static void ixl_attach(struct device *, struct device *, void *);

static void ixl_media_add(struct ixl_softc *, uint64_t);
static int ixl_media_change(struct ifnet *);
static void ixl_media_status(struct ifnet *, struct ifmediareq *);
static void ixl_watchdog(struct ifnet *);
static int ixl_ioctl(struct ifnet *, u_long, caddr_t);
static void ixl_start(struct ifqueue *);
static int ixl_intr0(void *);
static int ixl_intr_vector(void *);
static int ixl_up(struct ixl_softc *);
static int ixl_down(struct ixl_softc *);
static int ixl_iff(struct ixl_softc *);

static struct ixl_tx_ring *
		ixl_txr_alloc(struct ixl_softc *, unsigned int);
static void ixl_txr_qdis(struct ixl_softc *, struct ixl_tx_ring *, int);
static void ixl_txr_config(struct ixl_softc *, struct ixl_tx_ring *);
static int ixl_txr_enabled(struct ixl_softc *, struct ixl_tx_ring *);
static int ixl_txr_disabled(struct ixl_softc *, struct ixl_tx_ring *);
static void ixl_txr_unconfig(struct ixl_softc *, struct ixl_tx_ring *);
static void ixl_txr_clean(struct ixl_softc *, struct ixl_tx_ring *);
static void ixl_txr_free(struct ixl_softc *, struct ixl_tx_ring *);
static int ixl_txeof(struct ixl_softc *, struct ixl_tx_ring *);

static struct ixl_rx_ring *
		ixl_rxr_alloc(struct ixl_softc *, unsigned int);
static void ixl_rxr_config(struct ixl_softc *, struct ixl_rx_ring *);
static int ixl_rxr_enabled(struct ixl_softc *, struct ixl_rx_ring *);
static int ixl_rxr_disabled(struct ixl_softc *, struct ixl_rx_ring *);
static void ixl_rxr_unconfig(struct ixl_softc *, struct ixl_rx_ring *);
static void ixl_rxr_clean(struct ixl_softc *, struct ixl_rx_ring *);
static void ixl_rxr_free(struct ixl_softc *, struct ixl_rx_ring *);
static int ixl_rxeof(struct ixl_softc *, struct ixl_rx_ring *);
static void ixl_rxfill(struct ixl_softc *, struct ixl_rx_ring *);
static void ixl_rxrefill(void *);
static int ixl_rxrinfo(struct ixl_softc *, struct if_rxrinfo *);
static void ixl_rx_checksum(struct mbuf *, uint64_t);

#if NKSTAT > 0
static void ixl_kstat_attach(struct ixl_softc *);
#endif

struct cfdriver ixl_cd = {
	NULL,
	"ixl",
	DV_IFNET,
};

const struct cfattach ixl_ca = {
	sizeof(struct ixl_softc),
	ixl_match,
	ixl_attach,
};

static const struct ixl_phy_type ixl_phy_type_map[] = {
	{ 1ULL << IXL_PHY_TYPE_SGMII, IFM_1000_SGMII },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_KX, IFM_1000_KX },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_KX4, IFM_10G_KX4 },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_KR, IFM_10G_KR },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_KR4, IFM_40G_KR4 },
	{ 1ULL << IXL_PHY_TYPE_XAUI |
	  1ULL << IXL_PHY_TYPE_XFI, IFM_10G_CX4 },
	{ 1ULL << IXL_PHY_TYPE_SFI, IFM_10G_SFI },
	{ 1ULL << IXL_PHY_TYPE_XLAUI |
	  1ULL << IXL_PHY_TYPE_XLPPI, IFM_40G_XLPPI },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_CR4_CU |
	  1ULL << IXL_PHY_TYPE_40GBASE_CR4, IFM_40G_CR4 },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_CR1_CU |
	  1ULL << IXL_PHY_TYPE_10GBASE_CR1, IFM_10G_CR1 },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_AOC, IFM_10G_AOC },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_AOC, IFM_40G_AOC },
	{ 1ULL << IXL_PHY_TYPE_100BASE_TX, IFM_100_TX },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_T_OPTICAL |
	  1ULL << IXL_PHY_TYPE_1000BASE_T, IFM_1000_T },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_T, IFM_10G_T },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_SR, IFM_10G_SR },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_LR, IFM_10G_LR },
	{ 1ULL << IXL_PHY_TYPE_10GBASE_SFPP_CU, IFM_10G_SFP_CU },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_SR4, IFM_40G_SR4 },
	{ 1ULL << IXL_PHY_TYPE_40GBASE_LR4, IFM_40G_LR4 },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_SX, IFM_1000_SX },
	{ 1ULL << IXL_PHY_TYPE_1000BASE_LX, IFM_1000_LX },
	{ 1ULL << IXL_PHY_TYPE_20GBASE_KR2, IFM_20G_KR2 },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_KR, IFM_25G_KR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_CR, IFM_25G_CR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_SR, IFM_25G_SR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_LR, IFM_25G_LR },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_AOC, IFM_25G_AOC },
	{ 1ULL << IXL_PHY_TYPE_25GBASE_ACC, IFM_25G_CR },
};

static const struct ixl_speed_type ixl_speed_type_map[] = {
	{ IXL_AQ_LINK_SPEED_40GB, IF_Gbps(40) },
	{ IXL_AQ_LINK_SPEED_25GB, IF_Gbps(25) },
	{ IXL_AQ_LINK_SPEED_10GB, IF_Gbps(10) },
	{ IXL_AQ_LINK_SPEED_1GB, IF_Gbps(1) },
};

static const struct ixl_aq_regs ixl_pf_aq_regs = {
	.atq_tail = I40E_PF_ATQT,
	.atq_tail_mask = I40E_PF_ATQT_ATQT_MASK,
	.atq_head = I40E_PF_ATQH,
	.atq_head_mask = I40E_PF_ATQH_ATQH_MASK,
	.atq_len = I40E_PF_ATQLEN,
	.atq_bal = I40E_PF_ATQBAL,
	.atq_bah = I40E_PF_ATQBAH,
	.atq_len_enable = I40E_PF_ATQLEN_ATQENABLE_MASK,

	.arq_tail = I40E_PF_ARQT,
	.arq_tail_mask = I40E_PF_ARQT_ARQT_MASK,
	.arq_head = I40E_PF_ARQH,
	.arq_head_mask = I40E_PF_ARQH_ARQH_MASK,
	.arq_len = I40E_PF_ARQLEN,
	.arq_bal = I40E_PF_ARQBAL,
	.arq_bah = I40E_PF_ARQBAH,
	.arq_len_enable = I40E_PF_ARQLEN_ARQENABLE_MASK,
};

#define ixl_rd(_s, _r) \
	bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
#define ixl_wr(_s, _r, _v) \
	bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
#define ixl_barrier(_s, _r, _l, _o) \
	bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
#define ixl_intr_enable(_s) \
	ixl_wr((_s), I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_INTENA_MASK | \
	    I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | \
	    (IXL_NOITR << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT))

#define ixl_nqueues(_sc) (1 << (_sc)->sc_nqueues)

#ifdef __LP64__
#define ixl_dmamem_hi(_ixm) (uint32_t)(IXL_DMA_DVA(_ixm) >> 32)
#else
#define ixl_dmamem_hi(_ixm) 0
#endif

#define ixl_dmamem_lo(_ixm) (uint32_t)IXL_DMA_DVA(_ixm)

static inline void
ixl_aq_dva(struct ixl_aq_desc *iaq, bus_addr_t addr)
{
#ifdef __LP64__
	htolem32(&iaq->iaq_param[2], addr >> 32);
#else
	iaq->iaq_param[2] = htole32(0);
#endif
	htolem32(&iaq->iaq_param[3], addr);
}

#if _BYTE_ORDER == _BIG_ENDIAN
#define HTOLE16(_x) (uint16_t)(((_x) & 0xff) << 8 | ((_x) & 0xff00) >> 8)
#else
#define HTOLE16(_x) (_x)
#endif
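/*
 * unlike htole16(), HTOLE16() swaps at compile time, so it can be used
 * where a constant expression is required (e.g. static initializers).
 */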

static struct rwlock ixl_sff_lock = RWLOCK_INITIALIZER("ixlsff");

/* deal with differences between chips */

struct ixl_chip {
	uint64_t ic_rss_hena;
	uint32_t (*ic_rd_ctl)(struct ixl_softc *, uint32_t);
	void (*ic_wr_ctl)(struct ixl_softc *, uint32_t,
		    uint32_t);

	int (*ic_set_rss_key)(struct ixl_softc *,
		    const struct ixl_rss_key *);
	int (*ic_set_rss_lut)(struct ixl_softc *,
		    const struct ixl_rss_lut_128 *);
};

static inline uint64_t
ixl_rss_hena(struct ixl_softc *sc)
{
	return (sc->sc_chip->ic_rss_hena);
}

static inline uint32_t
ixl_rd_ctl(struct ixl_softc *sc, uint32_t r)
{
	return ((*sc->sc_chip->ic_rd_ctl)(sc, r));
}

static inline void
ixl_wr_ctl(struct ixl_softc *sc, uint32_t r, uint32_t v)
{
	(*sc->sc_chip->ic_wr_ctl)(sc, r, v);
}

static inline int
ixl_set_rss_key(struct ixl_softc *sc, const struct ixl_rss_key *rsskey)
{
	return ((*sc->sc_chip->ic_set_rss_key)(sc, rsskey));
}

static inline int
ixl_set_rss_lut(struct ixl_softc *sc, const struct ixl_rss_lut_128 *lut)
{
	return ((*sc->sc_chip->ic_set_rss_lut)(sc, lut));
}

/* 710 chip specifics */

static uint32_t ixl_710_rd_ctl(struct ixl_softc *, uint32_t);
static void ixl_710_wr_ctl(struct ixl_softc *, uint32_t, uint32_t);
static int ixl_710_set_rss_key(struct ixl_softc *,
		    const struct ixl_rss_key *);
static int ixl_710_set_rss_lut(struct ixl_softc *,
		    const struct ixl_rss_lut_128 *);

static const struct ixl_chip ixl_710 = {
	.ic_rss_hena = IXL_RSS_HENA_BASE_710,
	.ic_rd_ctl = ixl_710_rd_ctl,
	.ic_wr_ctl = ixl_710_wr_ctl,
	.ic_set_rss_key = ixl_710_set_rss_key,
	.ic_set_rss_lut = ixl_710_set_rss_lut,
};

/* 722 chip specifics */

static uint32_t ixl_722_rd_ctl(struct ixl_softc *, uint32_t);
static void ixl_722_wr_ctl(struct ixl_softc *, uint32_t, uint32_t);
static int ixl_722_set_rss_key(struct ixl_softc *,
		    const struct ixl_rss_key *);
static int ixl_722_set_rss_lut(struct ixl_softc *,
		    const struct ixl_rss_lut_128 *);

static const struct ixl_chip ixl_722 = {
	.ic_rss_hena = IXL_RSS_HENA_BASE_722,
	.ic_rd_ctl = ixl_722_rd_ctl,
	.ic_wr_ctl = ixl_722_wr_ctl,
	.ic_set_rss_key = ixl_722_set_rss_key,
	.ic_set_rss_lut = ixl_722_set_rss_lut,
};

1615 /*
1616  * 710 chips running an older firmware/API use the same ctl ops as
1617  * 722 chips; or, put the other way around, 722 chips use the same
1618  * ctl ops as 710 chips did in early firmware/API versions.
1619  */
1620
1621 static const struct ixl_chip ixl_710_decrepit = {
1622 .ic_rss_hena = IXL_RSS_HENA_BASE_710,
1623 .ic_rd_ctl = ixl_722_rd_ctl,
1624 .ic_wr_ctl = ixl_722_wr_ctl,
1625 .ic_set_rss_key = ixl_710_set_rss_key,
1626 .ic_set_rss_lut = ixl_710_set_rss_lut,
1627 };
1628
1629 /* driver code */
1630
1631 struct ixl_device {
1632 const struct ixl_chip *id_chip;
1633 pci_vendor_id_t id_vid;
1634 pci_product_id_t id_pid;
1635 };
1636
1637 static const struct ixl_device ixl_devices[] = {
1638 { &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_SFP },
1639 { &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_SFP_2 },
1640 { &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_40G_BP },
1641 { &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_BP, },
1642 { &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_1 },
1643 { &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_QSFP_2 },
1644 { &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_QSFP },
1645 { &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_BASET },
1646 { &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_1 },
1647 { &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_20G_BP_2 },
1648 { &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_T4_10G },
1649 { &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_BP },
1650 { &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XXV710_25G_SFP28, },
1651 { &ixl_710, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X710_10G_T, },
1652 { &ixl_722, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_KX },
1653 { &ixl_722, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_QSFP },
1654 { &ixl_722, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_SFP_1 },
1655 { &ixl_722, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_1G },
1656 { &ixl_722, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_T },
1657 { &ixl_722, PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_10G_SFP_2 },
1658 };
1659
1660 static const struct ixl_device *
1661 ixl_device_lookup(struct pci_attach_args *pa)
1662 {
1663 pci_vendor_id_t vid = PCI_VENDOR(pa->pa_id);
1664 pci_product_id_t pid = PCI_PRODUCT(pa->pa_id);
1665 const struct ixl_device *id;
1666 unsigned int i;
1667
1668 for (i = 0; i < nitems(ixl_devices); i++) {
1669 id = &ixl_devices[i];
1670 if (id->id_vid == vid && id->id_pid == pid)
1671 return (id);
1672 }
1673
1674 return (NULL);
1675 }
1676
1677 static int
1678 ixl_match(struct device *parent, void *match, void *aux)
1679 {
1680 return (ixl_device_lookup(aux) != NULL);
1681 }
1682
1683 void
1684 ixl_attach(struct device *parent, struct device *self, void *aux)
1685 {
1686 struct ixl_softc *sc = (struct ixl_softc *)self;
1687 struct ifnet *ifp = &sc->sc_ac.ac_if;
1688 struct pci_attach_args *pa = aux;
1689 pcireg_t memtype;
1690 uint32_t port, ari, func;
1691 uint64_t phy_types = 0;
1692 unsigned int nqueues, i;
1693 int tries;
1694
1695 rw_init(&sc->sc_cfg_lock, "ixlcfg");
1696
1697 sc->sc_chip = ixl_device_lookup(pa)->id_chip;
1698 sc->sc_pc = pa->pa_pc;
1699 sc->sc_tag = pa->pa_tag;
1700 sc->sc_dmat = pa->pa_dmat;
1701 sc->sc_aq_regs = &ixl_pf_aq_regs;
1702
1703 sc->sc_nqueues = 0; /* 1 << 0 is 1 queue */
1704 sc->sc_tx_ring_ndescs = 1024;
1705 sc->sc_rx_ring_ndescs = 1024;
1706
1707 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, IXL_PCIREG);
1708 if (pci_mapreg_map(pa, IXL_PCIREG, memtype, 0,
1709 &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
1710 printf(": unable to map registers\n");
1711 return;
1712 }
1713
1714 sc->sc_base_queue = (ixl_rd(sc, I40E_PFLAN_QALLOC) &
1715 I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
1716 I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
1717
1718 ixl_clear_hw(sc);
1719 if (ixl_pf_reset(sc) == -1) {
1720 /* error printed by ixl_pf_reset */
1721 goto unmap;
1722 }
1723
1724 port = ixl_rd(sc, I40E_PFGEN_PORTNUM);
1725 port &= I40E_PFGEN_PORTNUM_PORT_NUM_MASK;
1726 port >>= I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT;
1727 sc->sc_port = port;
1728 printf(": port %u", port);
1729
1730 ari = ixl_rd(sc, I40E_GLPCI_CAPSUP);
1731 ari &= I40E_GLPCI_CAPSUP_ARI_EN_MASK;
1732 ari >>= I40E_GLPCI_CAPSUP_ARI_EN_SHIFT;
1733
1734 func = ixl_rd(sc, I40E_PF_FUNC_RID);
1735 sc->sc_pf_id = func & (ari ? 0xff : 0x7);
1736
1737 /* initialise the adminq */
1738
1739 mtx_init(&sc->sc_atq_mtx, IPL_NET);
1740
1741 if (ixl_dmamem_alloc(sc, &sc->sc_atq,
1742 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1743 printf("\n" "%s: unable to allocate atq\n", DEVNAME(sc));
1744 goto unmap;
1745 }
1746
1747 SIMPLEQ_INIT(&sc->sc_arq_idle);
1748 SIMPLEQ_INIT(&sc->sc_arq_live);
1749 if_rxr_init(&sc->sc_arq_ring, 2, IXL_AQ_NUM - 1);
1750 task_set(&sc->sc_arq_task, ixl_arq, sc);
1751 sc->sc_arq_cons = 0;
1752 sc->sc_arq_prod = 0;
1753
1754 if (ixl_dmamem_alloc(sc, &sc->sc_arq,
1755 sizeof(struct ixl_aq_desc) * IXL_AQ_NUM, IXL_AQ_ALIGN) != 0) {
1756 printf("\n" "%s: unable to allocate arq\n", DEVNAME(sc));
1757 goto free_atq;
1758 }
1759
1760 if (!ixl_arq_fill(sc)) {
1761 printf("\n" "%s: unable to fill arq descriptors\n",
1762 DEVNAME(sc));
1763 goto free_arq;
1764 }
1765
1766 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
1767 0, IXL_DMA_LEN(&sc->sc_atq),
1768 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1769
1770 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
1771 0, IXL_DMA_LEN(&sc->sc_arq),
1772 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
1773
1774 for (tries = 0; tries < 10; tries++) {
1775 int rv;
1776
1777 sc->sc_atq_cons = 0;
1778 sc->sc_atq_prod = 0;
1779
1780 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
1781 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
1782 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
1783 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
1784
1785 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
1786
1787 ixl_wr(sc, sc->sc_aq_regs->atq_bal,
1788 ixl_dmamem_lo(&sc->sc_atq));
1789 ixl_wr(sc, sc->sc_aq_regs->atq_bah,
1790 ixl_dmamem_hi(&sc->sc_atq));
1791 ixl_wr(sc, sc->sc_aq_regs->atq_len,
1792 sc->sc_aq_regs->atq_len_enable | IXL_AQ_NUM);
1793
1794 ixl_wr(sc, sc->sc_aq_regs->arq_bal,
1795 ixl_dmamem_lo(&sc->sc_arq));
1796 ixl_wr(sc, sc->sc_aq_regs->arq_bah,
1797 ixl_dmamem_hi(&sc->sc_arq));
1798 ixl_wr(sc, sc->sc_aq_regs->arq_len,
1799 sc->sc_aq_regs->arq_len_enable | IXL_AQ_NUM);
1800
1801 rv = ixl_get_version(sc);
1802 if (rv == 0)
1803 break;
1804 if (rv != ETIMEDOUT) {
1805 printf(", unable to get firmware version\n");
1806 goto shutdown;
1807 }
1808
1809 delaymsec(100);
1810 }
1811
1812 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
1813
1814 if (ixl_pxe_clear(sc) != 0) {
1815 /* error printed by ixl_pxe_clear */
1816 goto shutdown;
1817 }
1818
1819 if (ixl_get_mac(sc) != 0) {
1820 /* error printed by ixl_get_mac */
1821 goto shutdown;
1822 }
1823
1824 if (pci_intr_map_msix(pa, 0, &sc->sc_ih) == 0) {
1825 int nmsix = pci_intr_msix_count(pa);
1826 if (nmsix > 1) { /* we used 1 (the 0th) for the adminq */
1827 nmsix--;
1828
1829 sc->sc_intrmap = intrmap_create(&sc->sc_dev,
1830 nmsix, IXL_MAX_VECTORS, INTRMAP_POWEROF2);
1831 nqueues = intrmap_count(sc->sc_intrmap);
1832 KASSERT(nqueues > 0);
1833 KASSERT(powerof2(nqueues));
1834 sc->sc_nqueues = fls(nqueues) - 1;
1835 }
1836 } else {
1837 if (pci_intr_map_msi(pa, &sc->sc_ih) != 0 &&
1838 pci_intr_map(pa, &sc->sc_ih) != 0) {
1839 printf(", unable to map interrupt\n");
1840 goto shutdown;
1841 }
1842 }
1843
1844 nqueues = ixl_nqueues(sc);
1845
1846 printf(", %s, %d queue%s, address %s\n",
1847 pci_intr_string(sc->sc_pc, sc->sc_ih), nqueues,
1848 (nqueues > 1 ? "s" : ""),
1849 ether_sprintf(sc->sc_ac.ac_enaddr));
1850
1851 if (ixl_hmc(sc) != 0) {
1852 /* error printed by ixl_hmc */
1853 goto shutdown;
1854 }
1855
1856 if (ixl_lldp_shut(sc) != 0) {
1857 /* error printed by ixl_lldp_shut */
1858 goto free_hmc;
1859 }
1860
1861 if (ixl_phy_mask_ints(sc) != 0) {
1862 /* error printed by ixl_phy_mask_ints */
1863 goto free_hmc;
1864 }
1865
1866 if (ixl_restart_an(sc) != 0) {
1867 /* error printed by ixl_restart_an */
1868 goto free_hmc;
1869 }
1870
1871 if (ixl_get_switch_config(sc) != 0) {
1872 /* error printed by ixl_get_switch_config */
1873 goto free_hmc;
1874 }
1875
1876 if (ixl_get_phy_types(sc, &phy_types) != 0) {
1877 /* error printed by ixl_get_phy_abilities */
1878 goto free_hmc;
1879 }
1880
1881 mtx_init(&sc->sc_link_state_mtx, IPL_NET);
1882 if (ixl_get_link_status(sc) != 0) {
1883 /* error printed by ixl_get_link_status */
1884 goto free_hmc;
1885 }
1886
1887 if (ixl_dmamem_alloc(sc, &sc->sc_scratch,
1888 sizeof(struct ixl_aq_vsi_data), 8) != 0) {
1889 printf("%s: unable to allocate scratch buffer\n", DEVNAME(sc));
1890 goto free_hmc;
1891 }
1892
1893 if (ixl_get_vsi(sc) != 0) {
1894 /* error printed by ixl_get_vsi */
1895 goto free_hmc;
1896 }
1897
1898 if (ixl_set_vsi(sc) != 0) {
1899 /* error printed by ixl_set_vsi */
1900 goto free_scratch;
1901 }
1902
1903 sc->sc_ihc = pci_intr_establish(sc->sc_pc, sc->sc_ih,
1904 IPL_NET | IPL_MPSAFE, ixl_intr0, sc, DEVNAME(sc));
1905 if (sc->sc_ihc == NULL) {
1906 printf("%s: unable to establish interrupt handler\n",
1907 DEVNAME(sc));
1908 goto free_scratch;
1909 }
1910
1911 sc->sc_vectors = mallocarray(sizeof(*sc->sc_vectors), nqueues,
1912 M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
1913 if (sc->sc_vectors == NULL) {
1914 printf("%s: unable to allocate vectors\n", DEVNAME(sc));
1915 goto free_scratch;
1916 }
1917
1918 for (i = 0; i < nqueues; i++) {
1919 struct ixl_vector *iv = &sc->sc_vectors[i];
1920 iv->iv_sc = sc;
1921 iv->iv_qid = i;
1922 snprintf(iv->iv_name, sizeof(iv->iv_name),
1923 "%s:%u", DEVNAME(sc), i); /* truncated? */
1924 }
1925
1926 if (sc->sc_intrmap) {
1927 for (i = 0; i < nqueues; i++) {
1928 struct ixl_vector *iv = &sc->sc_vectors[i];
1929 pci_intr_handle_t ih;
1930 int v = i + 1; /* 0 is used for adminq */
1931
1932 if (pci_intr_map_msix(pa, v, &ih)) {
1933 printf("%s: unable to map msi-x vector %d\n",
1934 DEVNAME(sc), v);
1935 goto free_vectors;
1936 }
1937
1938 iv->iv_ihc = pci_intr_establish_cpu(sc->sc_pc, ih,
1939 IPL_NET | IPL_MPSAFE,
1940 intrmap_cpu(sc->sc_intrmap, i),
1941 ixl_intr_vector, iv, iv->iv_name);
1942 if (iv->iv_ihc == NULL) {
1943 printf("%s: unable to establish interrupt %d\n",
1944 DEVNAME(sc), v);
1945 goto free_vectors;
1946 }
1947
1948 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i),
1949 I40E_PFINT_DYN_CTLN_INTENA_MASK |
1950 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1951 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
1952 }
1953 }
1954
1955 /* fixup the chip ops for older fw releases */
1956 if (sc->sc_chip == &ixl_710 &&
1957 sc->sc_api_major == 1 && sc->sc_api_minor < 5)
1958 sc->sc_chip = &ixl_710_decrepit;
1959
1960 ifp->if_softc = sc;
1961 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1962 ifp->if_xflags = IFXF_MPSAFE;
1963 ifp->if_ioctl = ixl_ioctl;
1964 ifp->if_qstart = ixl_start;
1965 ifp->if_watchdog = ixl_watchdog;
1966 ifp->if_hardmtu = IXL_HARDMTU;
1967 strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
1968 ifq_init_maxlen(&ifp->if_snd, sc->sc_tx_ring_ndescs);
1969
1970 ifp->if_capabilities = IFCAP_VLAN_MTU;
1971 #if NVLAN > 0
1972 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
1973 #endif
1974 ifp->if_capabilities |= IFCAP_CSUM_IPv4 |
1975 IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4 |
1976 IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6;
1977 ifp->if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6;
1978
1979 ifmedia_init(&sc->sc_media, 0, ixl_media_change, ixl_media_status);
1980
1981 ixl_media_add(sc, phy_types);
1982 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
1983 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
1984
1985 if_attach(ifp);
1986 ether_ifattach(ifp);
1987
1988 if_attach_queues(ifp, nqueues);
1989 if_attach_iqueues(ifp, nqueues);
1990
1991 task_set(&sc->sc_link_state_task, ixl_link_state_update, sc);
1992 ixl_wr(sc, I40E_PFINT_ICR0_ENA,
1993 I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK |
1994 I40E_PFINT_ICR0_ENA_ADMINQ_MASK);
1995 ixl_wr(sc, I40E_PFINT_STAT_CTL0,
1996 IXL_NOITR << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);
1997
1998 /* remove default mac filter and replace it so we can see vlans */
1999 ixl_remove_macvlan(sc, sc->sc_ac.ac_enaddr, 0, 0);
2000 ixl_remove_macvlan(sc, sc->sc_ac.ac_enaddr, 0,
2001 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
2002 ixl_add_macvlan(sc, sc->sc_ac.ac_enaddr, 0,
2003 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
2004 ixl_add_macvlan(sc, etherbroadcastaddr, 0,
2005 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
2006 memcpy(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
2007
2008 ixl_intr_enable(sc);
2009
2010 #if NKSTAT > 0
2011 ixl_kstat_attach(sc);
2012 #endif
2013
2014 return;
2015 free_vectors:
2016 if (sc->sc_intrmap != NULL) {
2017 for (i = 0; i < nqueues; i++) {
2018 struct ixl_vector *iv = &sc->sc_vectors[i];
2019 if (iv->iv_ihc == NULL)
2020 continue;
2021 pci_intr_disestablish(sc->sc_pc, iv->iv_ihc);
2022 }
2023 }
2024 free(sc->sc_vectors, M_DEVBUF, nqueues * sizeof(*sc->sc_vectors));
2025 free_scratch:
2026 ixl_dmamem_free(sc, &sc->sc_scratch);
2027 free_hmc:
2028 ixl_hmc_free(sc);
2029 shutdown:
2030 ixl_wr(sc, sc->sc_aq_regs->atq_head, 0);
2031 ixl_wr(sc, sc->sc_aq_regs->arq_head, 0);
2032 ixl_wr(sc, sc->sc_aq_regs->atq_tail, 0);
2033 ixl_wr(sc, sc->sc_aq_regs->arq_tail, 0);
2034
2035 ixl_wr(sc, sc->sc_aq_regs->atq_bal, 0);
2036 ixl_wr(sc, sc->sc_aq_regs->atq_bah, 0);
2037 ixl_wr(sc, sc->sc_aq_regs->atq_len, 0);
2038
2039 ixl_wr(sc, sc->sc_aq_regs->arq_bal, 0);
2040 ixl_wr(sc, sc->sc_aq_regs->arq_bah, 0);
2041 ixl_wr(sc, sc->sc_aq_regs->arq_len, 0);
2042
2043 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
2044 0, IXL_DMA_LEN(&sc->sc_arq),
2045 BUS_DMASYNC_POSTREAD);
2046 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
2047 0, IXL_DMA_LEN(&sc->sc_atq),
2048 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
2049
2050 ixl_arq_unfill(sc);
2051
2052 free_arq:
2053 ixl_dmamem_free(sc, &sc->sc_arq);
2054 free_atq:
2055 ixl_dmamem_free(sc, &sc->sc_atq);
2056 unmap:
2057 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
2058 sc->sc_mems = 0;
2059
2060 if (sc->sc_intrmap != NULL)
2061 intrmap_destroy(sc->sc_intrmap);
2062 }
2063
2064 static void
2065 ixl_media_add(struct ixl_softc *sc, uint64_t phy_types)
2066 {
2067 struct ifmedia *ifm = &sc->sc_media;
2068 const struct ixl_phy_type *itype;
2069 unsigned int i;
2070
2071 for (i = 0; i < nitems(ixl_phy_type_map); i++) {
2072 itype = &ixl_phy_type_map[i];
2073
2074 if (ISSET(phy_types, itype->phy_type))
2075 ifmedia_add(ifm, IFM_ETHER | itype->ifm_type, 0, NULL);
2076 }
2077 }
2078
2079 static int
2080 ixl_media_change(struct ifnet *ifp)
2081 {
2082 /* ignore? */
2083 return (EOPNOTSUPP);
2084 }
2085
2086 static void
2087 ixl_media_status(struct ifnet *ifp, struct ifmediareq *ifm)
2088 {
2089 struct ixl_softc *sc = ifp->if_softc;
2090
2091 KERNEL_ASSERT_LOCKED();
2092
2093 mtx_enter(&sc->sc_link_state_mtx);
2094 ifm->ifm_status = sc->sc_media_status;
2095 ifm->ifm_active = sc->sc_media_active;
2096 mtx_leave(&sc->sc_link_state_mtx);
2097 }
2098
2099 static void
2100 ixl_watchdog(struct ifnet *ifp)
2101 {
2102
2103 }
2104
2105 int
2106 ixl_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
2107 {
2108 struct ixl_softc *sc = (struct ixl_softc *)ifp->if_softc;
2109 struct ifreq *ifr = (struct ifreq *)data;
2110 uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
2111 int aqerror, error = 0;
2112
2113 switch (cmd) {
2114 case SIOCSIFADDR:
2115 ifp->if_flags |= IFF_UP;
2116 /* FALLTHROUGH */
2117
2118 case SIOCSIFFLAGS:
2119 if (ISSET(ifp->if_flags, IFF_UP)) {
2120 if (ISSET(ifp->if_flags, IFF_RUNNING))
2121 error = ENETRESET;
2122 else
2123 error = ixl_up(sc);
2124 } else {
2125 if (ISSET(ifp->if_flags, IFF_RUNNING))
2126 error = ixl_down(sc);
2127 }
2128 break;
2129
2130 case SIOCGIFMEDIA:
2131 case SIOCSIFMEDIA:
2132 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
2133 break;
2134
2135 case SIOCGIFRXR:
2136 error = ixl_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
2137 break;
2138
2139 case SIOCADDMULTI:
2140 if (ether_addmulti(ifr, &sc->sc_ac) == ENETRESET) {
2141 error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
2142 if (error != 0)
2143 return (error);
2144
2145 aqerror = ixl_add_macvlan(sc, addrlo, 0,
2146 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
2147 if (aqerror == IXL_AQ_RC_ENOSPC) {
2148 ether_delmulti(ifr, &sc->sc_ac);
2149 error = ENOSPC;
2150 }
2151
2152 if (sc->sc_ac.ac_multirangecnt > 0) {
2153 SET(ifp->if_flags, IFF_ALLMULTI);
2154 error = ENETRESET;
2155 }
2156 }
2157 break;
2158
2159 case SIOCDELMULTI:
2160 if (ether_delmulti(ifr, &sc->sc_ac) == ENETRESET) {
2161 error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
2162 if (error != 0)
2163 return (error);
2164
2165 ixl_remove_macvlan(sc, addrlo, 0,
2166 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
2167
2168 if (ISSET(ifp->if_flags, IFF_ALLMULTI) &&
2169 sc->sc_ac.ac_multirangecnt == 0) {
2170 CLR(ifp->if_flags, IFF_ALLMULTI);
2171 error = ENETRESET;
2172 }
2173 }
2174 break;
2175
2176 case SIOCGIFSFFPAGE:
2177 error = rw_enter(&ixl_sff_lock, RW_WRITE|RW_INTR);
2178 if (error != 0)
2179 break;
2180
2181 error = ixl_get_sffpage(sc, (struct if_sffpage *)data);
2182 rw_exit(&ixl_sff_lock);
2183 break;
2184
2185 default:
2186 error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
2187 break;
2188 }
2189
2190 if (error == ENETRESET)
2191 error = ixl_iff(sc);
2192
2193 return (error);
2194 }
2195
2196 static inline void *
2197 ixl_hmc_kva(struct ixl_softc *sc, unsigned int type, unsigned int i)
2198 {
2199 uint8_t *kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
2200 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
2201
2202 if (i >= e->hmc_count)
2203 return (NULL);
2204
2205 kva += e->hmc_base;
2206 kva += i * e->hmc_size;
2207
2208 return (kva);
2209 }
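/*
 * Worked example (illustrative, values made up): HMC objects of one
 * type are laid out back to back, so entry i lives at
 * hmc_base + i * hmc_size.  With hmc_base == 0x2000 and
 * hmc_size == 128, queue 3 of that type sits at kva + 0x2000 + 384.
 */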
2210
2211 static inline size_t
2212 ixl_hmc_len(struct ixl_softc *sc, unsigned int type)
2213 {
2214 struct ixl_hmc_entry *e = &sc->sc_hmc_entries[type];
2215
2216 return (e->hmc_size);
2217 }
2218
2219 static int
2220 ixl_configure_rss(struct ixl_softc *sc)
2221 {
2222 struct ixl_rss_key rsskey;
2223 struct ixl_rss_lut_128 lut;
2224 uint8_t *lute = (uint8_t *)&lut;
2225 uint64_t rss_hena;
2226 unsigned int i, nqueues;
2227 int error;
2228
2229 #if 0
2230 /* if we want to do a 512 entry LUT, do this. */
2231 uint32_t v = ixl_rd_ctl(sc, I40E_PFQF_CTL_0);
2232 SET(v, I40E_PFQF_CTL_0_HASHLUTSIZE_MASK);
2233 ixl_wr_ctl(sc, I40E_PFQF_CTL_0, v);
2234 #endif
2235
2236 stoeplitz_to_key(&rsskey, sizeof(rsskey));
2237
2238 nqueues = ixl_nqueues(sc);
2239 for (i = 0; i < sizeof(lut); i++) {
2240 /*
2241 * ixl must have a power of 2 rings, so using mod
2242 * to populate the table is fine.
2243 */
2244 lute[i] = i % nqueues;
2245 }
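/*
 * Worked example (illustrative): with 4 queues the LUT is filled as
 * 0,1,2,3,0,1,2,3,... so RSS hash buckets are spread evenly, and
 * because nqueues is a power of two, i % nqueues can never select a
 * queue that does not exist.
 */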
2246
2247 error = ixl_set_rss_key(sc, &rsskey);
2248 if (error != 0)
2249 return (error);
2250
2251 rss_hena = (uint64_t)ixl_rd_ctl(sc, I40E_PFQF_HENA(0));
2252 rss_hena |= (uint64_t)ixl_rd_ctl(sc, I40E_PFQF_HENA(1)) << 32;
2253 rss_hena |= ixl_rss_hena(sc);
2254 ixl_wr_ctl(sc, I40E_PFQF_HENA(0), rss_hena);
2255 ixl_wr_ctl(sc, I40E_PFQF_HENA(1), rss_hena >> 32);
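/*
 * Descriptive note (not in the original source): the enabled-hash set
 * is a 64-bit value exposed through two 32-bit registers, so it is
 * read back, OR'ed with the chip baseline from ixl_rss_hena(), and
 * rewritten 32 bits at a time.
 */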
2256
2257 error = ixl_set_rss_lut(sc, &lut);
2258 if (error != 0)
2259 return (error);
2260
2261 /* nothing to clean up :( */
2262
2263 return (0);
2264 }
2265
2266 static int
2267 ixl_up(struct ixl_softc *sc)
2268 {
2269 struct ifnet *ifp = &sc->sc_ac.ac_if;
2270 struct ifqueue *ifq;
2271 struct ifiqueue *ifiq;
2272 struct ixl_vector *iv;
2273 struct ixl_rx_ring *rxr;
2274 struct ixl_tx_ring *txr;
2275 unsigned int nqueues, i;
2276 uint32_t reg;
2277 int rv = ENOMEM;
2278
2279 nqueues = ixl_nqueues(sc);
2280
2281 rw_enter_write(&sc->sc_cfg_lock);
2282 if (sc->sc_dead) {
2283 rw_exit_write(&sc->sc_cfg_lock);
2284 return (ENXIO);
2285 }
2286
2287 /* allocation is the only thing that can fail, so do it up front */
2288 for (i = 0; i < nqueues; i++) {
2289 rxr = ixl_rxr_alloc(sc, i);
2290 if (rxr == NULL)
2291 goto free;
2292
2293 txr = ixl_txr_alloc(sc, i);
2294 if (txr == NULL) {
2295 ixl_rxr_free(sc, rxr);
2296 goto free;
2297 }
2298
2299 /* wire everything together */
2300 iv = &sc->sc_vectors[i];
2301 iv->iv_rxr = rxr;
2302 iv->iv_txr = txr;
2303
2304 ifq = ifp->if_ifqs[i];
2305 ifq->ifq_softc = txr;
2306 txr->txr_ifq = ifq;
2307
2308 ifiq = ifp->if_iqs[i];
2309 ifiq->ifiq_softc = rxr;
2310 rxr->rxr_ifiq = ifiq;
2311 }
2312
2313 /* XXX wait 50ms from completion of last RX queue disable */
2314
2315 for (i = 0; i < nqueues; i++) {
2316 iv = &sc->sc_vectors[i];
2317 rxr = iv->iv_rxr;
2318 txr = iv->iv_txr;
2319
2320 ixl_txr_qdis(sc, txr, 1);
2321
2322 ixl_rxr_config(sc, rxr);
2323 ixl_txr_config(sc, txr);
2324
2325 ixl_wr(sc, I40E_QTX_CTL(i), I40E_QTX_CTL_PF_QUEUE |
2326 (sc->sc_pf_id << I40E_QTX_CTL_PF_INDX_SHIFT));
2327
2328 ixl_wr(sc, rxr->rxr_tail, 0);
2329 ixl_rxfill(sc, rxr);
2330
2331 reg = ixl_rd(sc, I40E_QRX_ENA(i));
2332 SET(reg, I40E_QRX_ENA_QENA_REQ_MASK);
2333 ixl_wr(sc, I40E_QRX_ENA(i), reg);
2334
2335 reg = ixl_rd(sc, I40E_QTX_ENA(i));
2336 SET(reg, I40E_QTX_ENA_QENA_REQ_MASK);
2337 ixl_wr(sc, I40E_QTX_ENA(i), reg);
2338 }
2339
2340 for (i = 0; i < nqueues; i++) {
2341 iv = &sc->sc_vectors[i];
2342 rxr = iv->iv_rxr;
2343 txr = iv->iv_txr;
2344
2345 if (ixl_rxr_enabled(sc, rxr) != 0)
2346 goto down;
2347
2348 if (ixl_txr_enabled(sc, txr) != 0)
2349 goto down;
2350 }
2351
2352 ixl_configure_rss(sc);
2353
2354 SET(ifp->if_flags, IFF_RUNNING);
2355
2356 if (sc->sc_intrmap == NULL) {
2357 ixl_wr(sc, I40E_PFINT_LNKLST0,
2358 (I40E_INTR_NOTX_QUEUE <<
2359 I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
2360 (I40E_QUEUE_TYPE_RX <<
2361 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
2362
2363 ixl_wr(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE),
2364 (I40E_INTR_NOTX_INTR << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2365 (I40E_ITR_INDEX_RX << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2366 (I40E_INTR_NOTX_RX_QUEUE <<
2367 I40E_QINT_RQCTL_MSIX0_INDX_SHIFT) |
2368 (I40E_INTR_NOTX_QUEUE << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2369 (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
2370 I40E_QINT_RQCTL_CAUSE_ENA_MASK);
2371
2372 ixl_wr(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE),
2373 (I40E_INTR_NOTX_INTR << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2374 (I40E_ITR_INDEX_TX << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2375 (I40E_INTR_NOTX_TX_QUEUE <<
2376 I40E_QINT_TQCTL_MSIX0_INDX_SHIFT) |
2377 (I40E_QUEUE_TYPE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
2378 (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) |
2379 I40E_QINT_TQCTL_CAUSE_ENA_MASK);
2380 } else {
2381 /* vector 0 has no queues */
2382 ixl_wr(sc, I40E_PFINT_LNKLST0,
2383 I40E_QUEUE_TYPE_EOL <<
2384 I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT);
2385
2386 /* queue n is mapped to vector n+1 */
2387 for (i = 0; i < nqueues; i++) {
2388 /* LNKLSTN(i) configures vector i+1 */
2389 ixl_wr(sc, I40E_PFINT_LNKLSTN(i),
2390 (i << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT) |
2391 (I40E_QUEUE_TYPE_RX <<
2392 I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT));
2393 ixl_wr(sc, I40E_QINT_RQCTL(i),
2394 ((i+1) << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
2395 (I40E_ITR_INDEX_RX <<
2396 I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
2397 (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
2398 (I40E_QUEUE_TYPE_TX <<
2399 I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
2400 I40E_QINT_RQCTL_CAUSE_ENA_MASK);
2401 ixl_wr(sc, I40E_QINT_TQCTL(i),
2402 ((i+1) << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
2403 (I40E_ITR_INDEX_TX <<
2404 I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
2405 (I40E_QUEUE_TYPE_EOL <<
2406 I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
2407 (I40E_QUEUE_TYPE_RX <<
2408 I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT) |
2409 I40E_QINT_TQCTL_CAUSE_ENA_MASK);
2410
2411 ixl_wr(sc, I40E_PFINT_ITRN(0, i), 0x7a);
2412 ixl_wr(sc, I40E_PFINT_ITRN(1, i), 0x7a);
2413 ixl_wr(sc, I40E_PFINT_ITRN(2, i), 0);
2414 }
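/*
 * Illustrative mapping (not in the original source): with 4 queues,
 * queue 0 -> vector 1, queue 1 -> vector 2, queue 2 -> vector 3 and
 * queue 3 -> vector 4, while vector 0 stays dedicated to the adminq
 * and link state events.
 */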
2415 }
2416
2417 ixl_wr(sc, I40E_PFINT_ITR0(0), 0x7a);
2418 ixl_wr(sc, I40E_PFINT_ITR0(1), 0x7a);
2419 ixl_wr(sc, I40E_PFINT_ITR0(2), 0);
2420
2421 rw_exit_write(&sc->sc_cfg_lock);
2422
2423 return (ENETRESET);
2424
2425 free:
2426 for (i = 0; i < nqueues; i++) {
2427 iv = &sc->sc_vectors[i];
2428 rxr = iv->iv_rxr;
2429 txr = iv->iv_txr;
2430
2431 if (rxr == NULL) {
2432 /*
2433 * tx and rx get set at the same time, so if one
2434 * is NULL, the other is too.
2435 */
2436 continue;
2437 }
2438
2439 ixl_txr_free(sc, txr);
2440 ixl_rxr_free(sc, rxr);
2441 }
2442 rw_exit_write(&sc->sc_cfg_lock);
2443 return (rv);
2444 down:
2445 rw_exit_write(&sc->sc_cfg_lock);
2446 ixl_down(sc);
2447 return (ETIMEDOUT);
2448 }
2449
2450 static int
2451 ixl_iff(struct ixl_softc *sc)
2452 {
2453 struct ifnet *ifp = &sc->sc_ac.ac_if;
2454 struct ixl_atq iatq;
2455 struct ixl_aq_desc *iaq;
2456 struct ixl_aq_vsi_promisc_param *param;
2457
2458 if (!ISSET(ifp->if_flags, IFF_RUNNING))
2459 return (0);
2460
2461 memset(&iatq, 0, sizeof(iatq));
2462
2463 iaq = &iatq.iatq_desc;
2464 iaq->iaq_opcode = htole16(IXL_AQ_OP_SET_VSI_PROMISC);
2465
2466 param = (struct ixl_aq_vsi_promisc_param *)&iaq->iaq_param;
2467 param->flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2468 IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2469 if (ISSET(ifp->if_flags, IFF_PROMISC)) {
2470 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
2471 IXL_AQ_VSI_PROMISC_FLAG_MCAST);
2472 } else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
2473 param->flags |= htole16(IXL_AQ_VSI_PROMISC_FLAG_MCAST);
2474 }
2475 param->valid_flags = htole16(IXL_AQ_VSI_PROMISC_FLAG_UCAST |
2476 IXL_AQ_VSI_PROMISC_FLAG_MCAST | IXL_AQ_VSI_PROMISC_FLAG_BCAST |
2477 IXL_AQ_VSI_PROMISC_FLAG_VLAN);
2478 param->seid = sc->sc_seid;
2479
2480 ixl_atq_exec(sc, &iatq, "ixliff");
2481
2482 if (iaq->iaq_retval != htole16(IXL_AQ_RC_OK))
2483 return (EIO);
2484
2485 if (memcmp(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN) != 0) {
2486 ixl_remove_macvlan(sc, sc->sc_enaddr, 0,
2487 IXL_AQ_OP_REMOVE_MACVLAN_IGNORE_VLAN);
2488 ixl_add_macvlan(sc, sc->sc_ac.ac_enaddr, 0,
2489 IXL_AQ_OP_ADD_MACVLAN_IGNORE_VLAN);
2490 memcpy(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
2491 }
2492 return (0);
2493 }
2494
2495 static int
2496 ixl_down(struct ixl_softc *sc)
2497 {
2498 struct ifnet *ifp = &sc->sc_ac.ac_if;
2499 struct ixl_vector *iv;
2500 struct ixl_rx_ring *rxr;
2501 struct ixl_tx_ring *txr;
2502 unsigned int nqueues, i;
2503 uint32_t reg;
2504 int error = 0;
2505
2506 nqueues = ixl_nqueues(sc);
2507
2508 rw_enter_write(&sc->sc_cfg_lock);
2509
2510 CLR(ifp->if_flags, IFF_RUNNING);
2511
2512 NET_UNLOCK();
2513
2514 /* mask interrupts */
2515 reg = ixl_rd(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE));
2516 CLR(reg, I40E_QINT_RQCTL_CAUSE_ENA_MASK);
2517 ixl_wr(sc, I40E_QINT_RQCTL(I40E_INTR_NOTX_QUEUE), reg);
2518
2519 reg = ixl_rd(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE));
2520 CLR(reg, I40E_QINT_TQCTL_CAUSE_ENA_MASK);
2521 ixl_wr(sc, I40E_QINT_TQCTL(I40E_INTR_NOTX_QUEUE), reg);
2522
2523 ixl_wr(sc, I40E_PFINT_LNKLST0, I40E_QUEUE_TYPE_EOL);
2524
2525 /* make sure no hw generated work is still in flight */
2526 intr_barrier(sc->sc_ihc);
2527 if (sc->sc_intrmap != NULL) {
2528 for (i = 0; i < nqueues; i++) {
2529 iv = &sc->sc_vectors[i];
2530 rxr = iv->iv_rxr;
2531 txr = iv->iv_txr;
2532
2533 ixl_txr_qdis(sc, txr, 0);
2534
2535 ifq_barrier(txr->txr_ifq);
2536
2537 timeout_del_barrier(&rxr->rxr_refill);
2538
2539 intr_barrier(iv->iv_ihc);
2540 }
2541 }
2542
2543 /* XXX wait at least 400 usec for all tx queues in one go */
2544 delay(500);
2545
2546 for (i = 0; i < nqueues; i++) {
2547 reg = ixl_rd(sc, I40E_QTX_ENA(i));
2548 CLR(reg, I40E_QTX_ENA_QENA_REQ_MASK);
2549 ixl_wr(sc, I40E_QTX_ENA(i), reg);
2550
2551 reg = ixl_rd(sc, I40E_QRX_ENA(i));
2552 CLR(reg, I40E_QRX_ENA_QENA_REQ_MASK);
2553 ixl_wr(sc, I40E_QRX_ENA(i), reg);
2554 }
2555
2556 for (i = 0; i < nqueues; i++) {
2557 iv = &sc->sc_vectors[i];
2558 rxr = iv->iv_rxr;
2559 txr = iv->iv_txr;
2560
2561 if (ixl_txr_disabled(sc, txr) != 0)
2562 goto die;
2563
2564 if (ixl_rxr_disabled(sc, rxr) != 0)
2565 goto die;
2566 }
2567
2568 for (i = 0; i < nqueues; i++) {
2569 iv = &sc->sc_vectors[i];
2570 rxr = iv->iv_rxr;
2571 txr = iv->iv_txr;
2572
2573 ixl_txr_unconfig(sc, txr);
2574 ixl_rxr_unconfig(sc, rxr);
2575
2576 ixl_txr_clean(sc, txr);
2577 ixl_rxr_clean(sc, rxr);
2578
2579 ixl_txr_free(sc, txr);
2580 ixl_rxr_free(sc, rxr);
2581
2582 ifp->if_iqs[i]->ifiq_softc = NULL;
2583 ifp->if_ifqs[i]->ifq_softc = NULL;
2584 }
2585
2586 out:
2587 rw_exit_write(&sc->sc_cfg_lock);
2588 NET_LOCK();
2589 return (error);
2590 die:
2591 sc->sc_dead = 1;
2592 log(LOG_CRIT, "%s: failed to shut down rings", DEVNAME(sc));
2593 error = ETIMEDOUT;
2594 goto out;
2595 }
2596
2597 static struct ixl_tx_ring *
2598 ixl_txr_alloc(struct ixl_softc *sc, unsigned int qid)
2599 {
2600 struct ixl_tx_ring *txr;
2601 struct ixl_tx_map *maps, *txm;
2602 unsigned int i;
2603
2604 txr = malloc(sizeof(*txr), M_DEVBUF, M_WAITOK|M_CANFAIL);
2605 if (txr == NULL)
2606 return (NULL);
2607
2608 maps = mallocarray(sizeof(*maps),
2609 sc->sc_tx_ring_ndescs, M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
2610 if (maps == NULL)
2611 goto free;
2612
2613 if (ixl_dmamem_alloc(sc, &txr->txr_mem,
2614 sizeof(struct ixl_tx_desc) * sc->sc_tx_ring_ndescs,
2615 IXL_TX_QUEUE_ALIGN) != 0)
2616 goto freemap;
2617
2618 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2619 txm = &maps[i];
2620
2621 if (bus_dmamap_create(sc->sc_dmat,
2622 MAXMCLBYTES, IXL_TX_PKT_DESCS, IXL_MAX_DMA_SEG_SIZE, 0,
2623 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
2624 &txm->txm_map) != 0)
2625 goto uncreate;
2626
2627 txm->txm_eop = -1;
2628 txm->txm_m = NULL;
2629 }
2630
2631 txr->txr_cons = txr->txr_prod = 0;
2632 txr->txr_maps = maps;
2633
2634 txr->txr_tail = I40E_QTX_TAIL(qid);
2635 txr->txr_qid = qid;
2636
2637 return (txr);
2638
2639 uncreate:
2640 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2641 txm = &maps[i];
2642
2643 if (txm->txm_map == NULL)
2644 continue;
2645
2646 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2647 }
2648
2649 ixl_dmamem_free(sc, &txr->txr_mem);
2650 freemap:
2651 free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_tx_ring_ndescs);
2652 free:
2653 free(txr, M_DEVBUF, sizeof(*txr));
2654 return (NULL);
2655 }
2656
2657 static void
2658 ixl_txr_qdis(struct ixl_softc *sc, struct ixl_tx_ring *txr, int enable)
2659 {
2660 unsigned int qid;
2661 bus_size_t reg;
2662 uint32_t r;
2663
2664 qid = txr->txr_qid + sc->sc_base_queue;
2665 reg = I40E_GLLAN_TXPRE_QDIS(qid / 128);
2666 qid %= 128;
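/*
 * Worked example (illustrative): each TXPRE_QDIS register covers 128
 * queues, so global queue 130 (e.g. base_queue 128 + txr_qid 2) uses
 * I40E_GLLAN_TXPRE_QDIS(1) with an in-register index of 2.
 */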
2667
2668 r = ixl_rd(sc, reg);
2669 CLR(r, I40E_GLLAN_TXPRE_QDIS_QINDX_MASK);
2670 SET(r, qid << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
2671 SET(r, enable ? I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK :
2672 I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK);
2673 ixl_wr(sc, reg, r);
2674 }
2675
2676 static void
2677 ixl_txr_config(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2678 {
2679 struct ixl_hmc_txq txq;
2680 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(&sc->sc_scratch);
2681 void *hmc;
2682
2683 memset(&txq, 0, sizeof(txq));
2684 txq.head = htole16(0);
2685 txq.new_context = 1;
2686 htolem64(&txq.base,
2687 IXL_DMA_DVA(&txr->txr_mem) / IXL_HMC_TXQ_BASE_UNIT);
2688 txq.head_wb_ena = IXL_HMC_TXQ_DESC_WB;
2689 htolem16(&txq.qlen, sc->sc_tx_ring_ndescs);
2690 txq.tphrdesc_ena = 0;
2691 txq.tphrpacket_ena = 0;
2692 txq.tphwdesc_ena = 0;
2693 txq.rdylist = data->qs_handle[0];
2694
2695 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2696 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2697 ixl_hmc_pack(hmc, &txq, ixl_hmc_pack_txq, nitems(ixl_hmc_pack_txq));
2698 }
2699
2700 static void
2701 ixl_txr_unconfig(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2702 {
2703 void *hmc;
2704
2705 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_TX, txr->txr_qid);
2706 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_TX));
2707 }
2708
2709 static void
2710 ixl_txr_clean(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2711 {
2712 struct ixl_tx_map *maps, *txm;
2713 bus_dmamap_t map;
2714 unsigned int i;
2715
2716 maps = txr->txr_maps;
2717 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2718 txm = &maps[i];
2719
2720 if (txm->txm_m == NULL)
2721 continue;
2722
2723 map = txm->txm_map;
2724 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
2725 BUS_DMASYNC_POSTWRITE);
2726 bus_dmamap_unload(sc->sc_dmat, map);
2727
2728 m_freem(txm->txm_m);
2729 txm->txm_m = NULL;
2730 }
2731 }
2732
2733 static int
2734 ixl_txr_enabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2735 {
2736 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2737 uint32_t reg;
2738 int i;
2739
2740 for (i = 0; i < 10; i++) {
2741 reg = ixl_rd(sc, ena);
2742 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK))
2743 return (0);
2744
2745 delaymsec(10);
2746 }
2747
2748 return (ETIMEDOUT);
2749 }
2750
2751 static int
2752 ixl_txr_disabled(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2753 {
2754 bus_size_t ena = I40E_QTX_ENA(txr->txr_qid);
2755 uint32_t reg;
2756 int i;
2757
2758 for (i = 0; i < 20; i++) {
2759 reg = ixl_rd(sc, ena);
2760 if (ISSET(reg, I40E_QTX_ENA_QENA_STAT_MASK) == 0)
2761 return (0);
2762
2763 delaymsec(10);
2764 }
2765
2766 return (ETIMEDOUT);
2767 }
2768
2769 static void
2770 ixl_txr_free(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2771 {
2772 struct ixl_tx_map *maps, *txm;
2773 unsigned int i;
2774
2775 maps = txr->txr_maps;
2776 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
2777 txm = &maps[i];
2778
2779 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
2780 }
2781
2782 ixl_dmamem_free(sc, &txr->txr_mem);
2783 free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_tx_ring_ndescs);
2784 free(txr, M_DEVBUF, sizeof(*txr));
2785 }
2786
2787 static inline int
2788 ixl_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m)
2789 {
2790 int error;
2791
2792 error = bus_dmamap_load_mbuf(dmat, map, m,
2793 BUS_DMA_STREAMING | BUS_DMA_NOWAIT);
2794 if (error != EFBIG)
2795 return (error);
2796
2797 error = m_defrag(m, M_DONTWAIT);
2798 if (error != 0)
2799 return (error);
2800
2801 return (bus_dmamap_load_mbuf(dmat, map, m,
2802 BUS_DMA_STREAMING | BUS_DMA_NOWAIT));
2803 }
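/*
 * Usage note (not in the original source): a chain with more segments
 * than the map allows (IXL_TX_PKT_DESCS, see ixl_txr_alloc()) makes
 * the first load fail with EFBIG; m_defrag() then copies the chain
 * into a single mbuf so the retry needs only one segment.  Any other
 * load error is returned unchanged.
 */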
2804
2805 static uint64_t
2806 ixl_tx_setup_offload(struct mbuf *m0, struct ixl_tx_ring *txr,
2807 unsigned int prod)
2808 {
2809 struct ether_extracted ext;
2810 uint64_t hlen;
2811 uint64_t offload = 0;
2812
2813 #if NVLAN > 0
2814 if (ISSET(m0->m_flags, M_VLANTAG)) {
2815 uint64_t vtag = m0->m_pkthdr.ether_vtag;
2816 offload |= IXL_TX_DESC_CMD_IL2TAG1;
2817 offload |= vtag << IXL_TX_DESC_L2TAG1_SHIFT;
2818 }
2819 #endif
2820
2821 if (!ISSET(m0->m_pkthdr.csum_flags,
2822 M_IPV4_CSUM_OUT|M_TCP_CSUM_OUT|M_UDP_CSUM_OUT|M_TCP_TSO))
2823 return (offload);
2824
2825 ether_extract_headers(m0, &ext);
2826
2827 if (ext.ip4) {
2828 offload |= ISSET(m0->m_pkthdr.csum_flags, M_IPV4_CSUM_OUT) ?
2829 IXL_TX_DESC_CMD_IIPT_IPV4_CSUM :
2830 IXL_TX_DESC_CMD_IIPT_IPV4;
2831 #ifdef INET6
2832 } else if (ext.ip6) {
2833 offload |= IXL_TX_DESC_CMD_IIPT_IPV6;
2834 #endif
2835 } else {
2836 panic("CSUM_OUT set for non-IP packet");
2837 /* NOTREACHED */
2838 }
2839 hlen = ext.iphlen;
2840
2841 offload |= (ETHER_HDR_LEN >> 1) << IXL_TX_DESC_MACLEN_SHIFT;
2842 offload |= (hlen >> 2) << IXL_TX_DESC_IPLEN_SHIFT;
2843
2844 if (ext.tcp && ISSET(m0->m_pkthdr.csum_flags, M_TCP_CSUM_OUT)) {
2845 offload |= IXL_TX_DESC_CMD_L4T_EOFT_TCP;
2846 offload |= (uint64_t)(ext.tcphlen >> 2)
2847 << IXL_TX_DESC_L4LEN_SHIFT;
2848 } else if (ext.udp && ISSET(m0->m_pkthdr.csum_flags, M_UDP_CSUM_OUT)) {
2849 offload |= IXL_TX_DESC_CMD_L4T_EOFT_UDP;
2850 offload |= (uint64_t)(sizeof(*ext.udp) >> 2)
2851 << IXL_TX_DESC_L4LEN_SHIFT;
2852 }
2853
2854 if (ISSET(m0->m_pkthdr.csum_flags, M_TCP_TSO)) {
2855 if (ext.tcp && m0->m_pkthdr.ph_mss > 0) {
2856 struct ixl_tx_desc *ring, *txd;
2857 uint64_t cmd = 0, paylen, outlen;
2858
2859 hlen += ext.tcphlen;
2860
2861 /*
2862  * The MSS must not be set lower than 64 or
2863  * higher than 9668 bytes.
2864  */
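/*
 * Worked example (illustrative): ph_mss == 40 is clamped up to 64,
 * ph_mss == 16000 is clamped down to 9668, and a typical 1460 passes
 * through unchanged.  Further down, (paylen + outlen - 1) / outlen is
 * a ceiling division, e.g. paylen 29200 at MSS 1460 accounts for 20
 * TSO packets, 29201 bytes for 21.
 */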
2865 outlen = MIN(9668, MAX(64, m0->m_pkthdr.ph_mss));
2866 paylen = m0->m_pkthdr.len - ETHER_HDR_LEN - hlen;
2867
2868 ring = IXL_DMA_KVA(&txr->txr_mem);
2869 txd = &ring[prod];
2870
2871 cmd |= IXL_TX_DESC_DTYPE_CONTEXT;
2872 cmd |= IXL_TX_CTX_DESC_CMD_TSO;
2873 cmd |= paylen << IXL_TX_CTX_DESC_TLEN_SHIFT;
2874 cmd |= outlen << IXL_TX_CTX_DESC_MSS_SHIFT;
2875
2876 htolem64(&txd->addr, 0);
2877 htolem64(&txd->cmd, cmd);
2878
2879 tcpstat_add(tcps_outpkttso,
2880 (paylen + outlen - 1) / outlen);
2881 } else
2882 tcpstat_inc(tcps_outbadtso);
2883 }
2884
2885 return (offload);
2886 }
2887
2888 static void
2889 ixl_start(struct ifqueue *ifq)
2890 {
2891 struct ifnet *ifp = ifq->ifq_if;
2892 struct ixl_softc *sc = ifp->if_softc;
2893 struct ixl_tx_ring *txr = ifq->ifq_softc;
2894 struct ixl_tx_desc *ring, *txd;
2895 struct ixl_tx_map *txm;
2896 bus_dmamap_t map;
2897 struct mbuf *m;
2898 uint64_t cmd;
2899 unsigned int prod, free, last, i;
2900 unsigned int mask;
2901 int post = 0;
2902 uint64_t offload;
2903 #if NBPFILTER > 0
2904 caddr_t if_bpf;
2905 #endif
2906
2907 if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
2908 ifq_purge(ifq);
2909 return;
2910 }
2911
2912 prod = txr->txr_prod;
2913 free = txr->txr_cons;
2914 if (free <= prod)
2915 free += sc->sc_tx_ring_ndescs;
2916 free -= prod;
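/*
 * Worked example (illustrative): with 1024 descriptors, prod == 1000
 * and cons == 10 yields free = 10 + 1024 - 1000 = 34 slots before the
 * producer would catch up with the consumer.
 */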
2917
2918 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2919 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);
2920
2921 ring = IXL_DMA_KVA(&txr->txr_mem);
2922 mask = sc->sc_tx_ring_ndescs - 1;
2923
2924 for (;;) {
2925 /* We need one extra descriptor for TSO packets. */
2926 if (free <= (IXL_TX_PKT_DESCS + 1)) {
2927 ifq_set_oactive(ifq);
2928 break;
2929 }
2930
2931 m = ifq_dequeue(ifq);
2932 if (m == NULL)
2933 break;
2934
2935 offload = ixl_tx_setup_offload(m, txr, prod);
2936
2937 txm = &txr->txr_maps[prod];
2938 map = txm->txm_map;
2939
2940 if (ISSET(m->m_pkthdr.csum_flags, M_TCP_TSO)) {
2941 prod++;
2942 prod &= mask;
2943 free--;
2944 }
2945
2946 if (ixl_load_mbuf(sc->sc_dmat, map, m) != 0) {
2947 ifq->ifq_errors++;
2948 m_freem(m);
2949 continue;
2950 }
2951
2952 bus_dmamap_sync(sc->sc_dmat, map, 0,
2953 map->dm_mapsize, BUS_DMASYNC_PREWRITE);
2954
2955 for (i = 0; i < map->dm_nsegs; i++) {
2956 txd = &ring[prod];
2957
2958 cmd = (uint64_t)map->dm_segs[i].ds_len <<
2959 IXL_TX_DESC_BSIZE_SHIFT;
2960 cmd |= IXL_TX_DESC_DTYPE_DATA | IXL_TX_DESC_CMD_ICRC;
2961 cmd |= offload;
2962
2963 htolem64(&txd->addr, map->dm_segs[i].ds_addr);
2964 htolem64(&txd->cmd, cmd);
2965
2966 last = prod;
2967
2968 prod++;
2969 prod &= mask;
2970 }
2971 cmd |= IXL_TX_DESC_CMD_EOP | IXL_TX_DESC_CMD_RS;
2972 htolem64(&txd->cmd, cmd);
2973
2974 txm->txm_m = m;
2975 txm->txm_eop = last;
2976
2977 #if NBPFILTER > 0
2978 if_bpf = ifp->if_bpf;
2979 if (if_bpf)
2980 bpf_mtap_ether(if_bpf, m, BPF_DIRECTION_OUT);
2981 #endif
2982
2983 free -= i;
2984 post = 1;
2985 }
2986
2987 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
2988 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);
2989
2990 if (post) {
2991 txr->txr_prod = prod;
2992 ixl_wr(sc, txr->txr_tail, prod);
2993 }
2994 }
2995
2996 static int
2997 ixl_txeof(struct ixl_softc *sc, struct ixl_tx_ring *txr)
2998 {
2999 struct ifqueue *ifq = txr->txr_ifq;
3000 struct ixl_tx_desc *ring, *txd;
3001 struct ixl_tx_map *txm;
3002 bus_dmamap_t map;
3003 unsigned int cons, prod, last;
3004 unsigned int mask;
3005 uint64_t dtype;
3006 int done = 0;
3007
3008 prod = txr->txr_prod;
3009 cons = txr->txr_cons;
3010
3011 if (cons == prod)
3012 return (0);
3013
3014 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
3015 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);
3016
3017 ring = IXL_DMA_KVA(&txr->txr_mem);
3018 mask = sc->sc_tx_ring_ndescs - 1;
3019
3020 do {
3021 txm = &txr->txr_maps[cons];
3022 last = txm->txm_eop;
3023 txd = &ring[last];
3024
3025 dtype = txd->cmd & htole64(IXL_TX_DESC_DTYPE_MASK);
3026 if (dtype != htole64(IXL_TX_DESC_DTYPE_DONE))
3027 break;
3028
3029 map = txm->txm_map;
3030
3031 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3032 BUS_DMASYNC_POSTWRITE);
3033 bus_dmamap_unload(sc->sc_dmat, map);
3034 m_freem(txm->txm_m);
3035
3036 txm->txm_m = NULL;
3037 txm->txm_eop = -1;
3038
3039 cons = last + 1;
3040 cons &= mask;
3041
3042 done = 1;
3043 } while (cons != prod);
3044
3045 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&txr->txr_mem),
3046 0, IXL_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);
3047
3048 txr->txr_cons = cons;
3049
3050 //ixl_enable(sc, txr->txr_msix);
3051
3052 if (ifq_is_oactive(ifq))
3053 ifq_restart(ifq);
3054
3055 return (done);
3056 }
3057
3058 static struct ixl_rx_ring *
3059 ixl_rxr_alloc(struct ixl_softc *sc, unsigned int qid)
3060 {
3061 struct ixl_rx_ring *rxr;
3062 struct ixl_rx_map *maps, *rxm;
3063 unsigned int i;
3064
3065 rxr = malloc(sizeof(*rxr), M_DEVBUF, M_WAITOK|M_CANFAIL);
3066 if (rxr == NULL)
3067 return (NULL);
3068
3069 maps = mallocarray(sizeof(*maps),
3070 sc->sc_rx_ring_ndescs, M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
3071 if (maps == NULL)
3072 goto free;
3073
3074 if (ixl_dmamem_alloc(sc, &rxr->rxr_mem,
3075 sizeof(struct ixl_rx_rd_desc_16) * sc->sc_rx_ring_ndescs,
3076 IXL_RX_QUEUE_ALIGN) != 0)
3077 goto freemap;
3078
3079 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3080 rxm = &maps[i];
3081
3082 if (bus_dmamap_create(sc->sc_dmat,
3083 IXL_HARDMTU, 1, IXL_HARDMTU, 0,
3084 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
3085 &rxm->rxm_map) != 0)
3086 goto uncreate;
3087
3088 rxm->rxm_m = NULL;
3089 }
3090
3091 rxr->rxr_sc = sc;
3092 if_rxr_init(&rxr->rxr_acct, 17, sc->sc_rx_ring_ndescs - 1);
3093 timeout_set(&rxr->rxr_refill, ixl_rxrefill, rxr);
3094 rxr->rxr_cons = rxr->rxr_prod = 0;
3095 rxr->rxr_m_head = NULL;
3096 rxr->rxr_m_tail = &rxr->rxr_m_head;
3097 rxr->rxr_maps = maps;
3098
3099 rxr->rxr_tail = I40E_QRX_TAIL(qid);
3100 rxr->rxr_qid = qid;
3101
3102 return (rxr);
3103
3104 uncreate:
3105 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3106 rxm = &maps[i];
3107
3108 if (rxm->rxm_map == NULL)
3109 continue;
3110
3111 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
3112 }
3113
3114 ixl_dmamem_free(sc, &rxr->rxr_mem);
3115 freemap:
3116 free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_rx_ring_ndescs);
3117 free:
3118 free(rxr, M_DEVBUF, sizeof(*rxr));
3119 return (NULL);
3120 }
3121
3122 static void
3123 ixl_rxr_clean(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3124 {
3125 struct ixl_rx_map *maps, *rxm;
3126 bus_dmamap_t map;
3127 unsigned int i;
3128
3129 timeout_del_barrier(&rxr->rxr_refill);
3130
3131 maps = rxr->rxr_maps;
3132 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3133 rxm = &maps[i];
3134
3135 if (rxm->rxm_m == NULL)
3136 continue;
3137
3138 map = rxm->rxm_map;
3139 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3140 BUS_DMASYNC_POSTWRITE);
3141 bus_dmamap_unload(sc->sc_dmat, map);
3142
3143 m_freem(rxm->rxm_m);
3144 rxm->rxm_m = NULL;
3145 }
3146
3147 m_freem(rxr->rxr_m_head);
3148 rxr->rxr_m_head = NULL;
3149 rxr->rxr_m_tail = &rxr->rxr_m_head;
3150
3151 rxr->rxr_prod = rxr->rxr_cons = 0;
3152 }
3153
3154 static int
3155 ixl_rxr_enabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3156 {
3157 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
3158 uint32_t reg;
3159 int i;
3160
3161 for (i = 0; i < 10; i++) {
3162 reg = ixl_rd(sc, ena);
3163 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK))
3164 return (0);
3165
3166 delaymsec(10);
3167 }
3168
3169 return (ETIMEDOUT);
3170 }
3171
3172 static int
3173 ixl_rxr_disabled(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3174 {
3175 bus_size_t ena = I40E_QRX_ENA(rxr->rxr_qid);
3176 uint32_t reg;
3177 int i;
3178
3179 for (i = 0; i < 20; i++) {
3180 reg = ixl_rd(sc, ena);
3181 if (ISSET(reg, I40E_QRX_ENA_QENA_STAT_MASK) == 0)
3182 return (0);
3183
3184 delaymsec(10);
3185 }
3186
3187 return (ETIMEDOUT);
3188 }
3189
3190 static void
3191 ixl_rxr_config(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3192 {
3193 struct ixl_hmc_rxq rxq;
3194 void *hmc;
3195
3196 memset(&rxq, 0, sizeof(rxq));
3197
3198 rxq.head = htole16(0);
3199 htolem64(&rxq.base,
3200 IXL_DMA_DVA(&rxr->rxr_mem) / IXL_HMC_RXQ_BASE_UNIT);
3201 htolem16(&rxq.qlen, sc->sc_rx_ring_ndescs);
3202 rxq.dbuff = htole16(MCLBYTES / IXL_HMC_RXQ_DBUFF_UNIT);
3203 rxq.hbuff = 0;
3204 rxq.dtype = IXL_HMC_RXQ_DTYPE_NOSPLIT;
3205 rxq.dsize = IXL_HMC_RXQ_DSIZE_16;
3206 rxq.crcstrip = 1;
3207 rxq.l2tsel = IXL_HMC_RXQ_L2TSEL_1ST_TAG_TO_L2TAG1;
3208 rxq.showiv = 0;
3209 rxq.rxmax = htole16(IXL_HARDMTU);
3210 rxq.tphrdesc_ena = 0;
3211 rxq.tphwdesc_ena = 0;
3212 rxq.tphdata_ena = 0;
3213 rxq.tphhead_ena = 0;
3214 rxq.lrxqthresh = 0;
3215 rxq.prefena = 1;
3216
3217 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
3218 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
3219 ixl_hmc_pack(hmc, &rxq, ixl_hmc_pack_rxq, nitems(ixl_hmc_pack_rxq));
3220 }
3221
3222 static void
3223 ixl_rxr_unconfig(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3224 {
3225 void *hmc;
3226
3227 hmc = ixl_hmc_kva(sc, IXL_HMC_LAN_RX, rxr->rxr_qid);
3228 memset(hmc, 0, ixl_hmc_len(sc, IXL_HMC_LAN_RX));
3229 }
3230
3231 static void
3232 ixl_rxr_free(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3233 {
3234 struct ixl_rx_map *maps, *rxm;
3235 unsigned int i;
3236
3237 maps = rxr->rxr_maps;
3238 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
3239 rxm = &maps[i];
3240
3241 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
3242 }
3243
3244 ixl_dmamem_free(sc, &rxr->rxr_mem);
3245 free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_rx_ring_ndescs);
3246 free(rxr, M_DEVBUF, sizeof(*rxr));
3247 }
3248
3249 static int
3250 ixl_rxeof(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3251 {
3252 struct ifiqueue *ifiq = rxr->rxr_ifiq;
3253 struct ifnet *ifp = &sc->sc_ac.ac_if;
3254 struct ixl_rx_wb_desc_16 *ring, *rxd;
3255 struct ixl_rx_map *rxm;
3256 bus_dmamap_t map;
3257 unsigned int cons, prod;
3258 struct mbuf_list ml = MBUF_LIST_INITIALIZER();
3259 struct mbuf *m;
3260 uint64_t word;
3261 unsigned int len;
3262 unsigned int mask;
3263 int done = 0;
3264
3265 prod = rxr->rxr_prod;
3266 cons = rxr->rxr_cons;
3267
3268 if (cons == prod)
3269 return (0);
3270
3271 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
3272 0, IXL_DMA_LEN(&rxr->rxr_mem),
3273 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3274
3275 ring = IXL_DMA_KVA(&rxr->rxr_mem);
3276 mask = sc->sc_rx_ring_ndescs - 1;
3277
3278 do {
3279 rxd = &ring[cons];
3280
3281 word = lemtoh64(&rxd->qword1);
3282 if (!ISSET(word, IXL_RX_DESC_DD))
3283 break;
3284
3285 if_rxr_put(&rxr->rxr_acct, 1);
3286
3287 rxm = &rxr->rxr_maps[cons];
3288
3289 map = rxm->rxm_map;
3290 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3291 BUS_DMASYNC_POSTREAD);
3292 bus_dmamap_unload(sc->sc_dmat, map);
3293
3294 m = rxm->rxm_m;
3295 rxm->rxm_m = NULL;
3296
3297 len = (word & IXL_RX_DESC_PLEN_MASK) >> IXL_RX_DESC_PLEN_SHIFT;
3298 m->m_len = len;
3299 m->m_pkthdr.len = 0;
3300
3301 m->m_next = NULL;
3302 *rxr->rxr_m_tail = m;
3303 rxr->rxr_m_tail = &m->m_next;
3304
3305 m = rxr->rxr_m_head;
3306 m->m_pkthdr.len += len;
3307
3308 if (ISSET(word, IXL_RX_DESC_EOP)) {
3309 if (!ISSET(word,
3310 IXL_RX_DESC_RXE | IXL_RX_DESC_OVERSIZE)) {
3311 if ((word & IXL_RX_DESC_FLTSTAT_MASK) ==
3312 IXL_RX_DESC_FLTSTAT_RSS) {
3313 m->m_pkthdr.ph_flowid =
3314 lemtoh32(&rxd->filter_status);
3315 m->m_pkthdr.csum_flags |= M_FLOWID;
3316 }
3317
3318 #if NVLAN > 0
3319 if (ISSET(word, IXL_RX_DESC_L2TAG1P)) {
3320 m->m_pkthdr.ether_vtag =
3321 lemtoh16(&rxd->l2tag1);
3322 SET(m->m_flags, M_VLANTAG);
3323 }
3324 #endif
3325
3326 ixl_rx_checksum(m, word);
3327 ml_enqueue(&ml, m);
3328 } else {
3329 ifp->if_ierrors++; /* XXX */
3330 m_freem(m);
3331 }
3332
3333 rxr->rxr_m_head = NULL;
3334 rxr->rxr_m_tail = &rxr->rxr_m_head;
3335 }
3336
3337 cons++;
3338 cons &= mask;
3339
3340 done = 1;
3341 } while (cons != prod);
3342
3343 if (done) {
3344 rxr->rxr_cons = cons;
3345 if (ifiq_input(ifiq, &ml))
3346 if_rxr_livelocked(&rxr->rxr_acct);
3347 ixl_rxfill(sc, rxr);
3348 }
3349
3350 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&rxr->rxr_mem),
3351 0, IXL_DMA_LEN(&rxr->rxr_mem),
3352 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3353
3354 return (done);
3355 }
3356
3357 static void
3358 ixl_rxfill(struct ixl_softc *sc, struct ixl_rx_ring *rxr)
3359 {
3360 struct ixl_rx_rd_desc_16 *ring, *rxd;
3361 struct ixl_rx_map *rxm;
3362 bus_dmamap_t map;
3363 struct mbuf *m;
3364 unsigned int prod;
3365 unsigned int slots;
3366 unsigned int mask;
3367 int post = 0;
3368
3369 slots = if_rxr_get(&rxr->rxr_acct, sc->sc_rx_ring_ndescs);
3370 if (slots == 0)
3371 return;
3372
3373 prod = rxr->rxr_prod;
3374
3375 ring = IXL_DMA_KVA(&rxr->rxr_mem);
3376 mask = sc->sc_rx_ring_ndescs - 1;
3377
3378 do {
3379 rxm = &rxr->rxr_maps[prod];
3380
3381 m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES + ETHER_ALIGN);
3382 if (m == NULL)
3383 break;
3384 m->m_data += (m->m_ext.ext_size - (MCLBYTES + ETHER_ALIGN));
3385 m->m_len = m->m_pkthdr.len = MCLBYTES + ETHER_ALIGN;
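/*
 * Descriptive note (not in the original source): pushing the data
 * pointer to the tail of the cluster leaves exactly
 * MCLBYTES + ETHER_ALIGN bytes of room; assuming the cluster ends on
 * a 4-byte boundary, the 2-byte ETHER_ALIGN shim leaves the IP header
 * 4-byte aligned after the 14-byte Ethernet header.
 */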
3386
3387 map = rxm->rxm_map;
3388
3389 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
3390 BUS_DMA_NOWAIT) != 0) {
3391 m_freem(m);
3392 break;
3393 }
3394
3395 rxm->rxm_m = m;
3396
3397 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
3398 BUS_DMASYNC_PREREAD);
3399
3400 rxd = &ring[prod];
3401
3402 htolem64(&rxd->paddr, map->dm_segs[0].ds_addr);
3403 rxd->haddr = htole64(0);
3404
3405 prod++;
3406 prod &= mask;
3407
3408 post = 1;
3409 } while (--slots);
3410
3411 if_rxr_put(&rxr->rxr_acct, slots);
3412
3413 if (if_rxr_inuse(&rxr->rxr_acct) == 0)
3414 timeout_add(&rxr->rxr_refill, 1);
3415 else if (post) {
3416 rxr->rxr_prod = prod;
3417 ixl_wr(sc, rxr->rxr_tail, prod);
3418 }
3419 }
3420
3421 void
3422 ixl_rxrefill(void *arg)
3423 {
3424 struct ixl_rx_ring *rxr = arg;
3425 struct ixl_softc *sc = rxr->rxr_sc;
3426
3427 ixl_rxfill(sc, rxr);
3428 }
3429
3430 static int
3431 ixl_rxrinfo(struct ixl_softc *sc, struct if_rxrinfo *ifri)
3432 {
3433 struct ifnet *ifp = &sc->sc_ac.ac_if;
3434 struct if_rxring_info *ifr;
3435 struct ixl_rx_ring *ring;
3436 int i, rv;
3437
3438 if (!ISSET(ifp->if_flags, IFF_RUNNING))
3439 return (ENOTTY);
3440
3441 ifr = mallocarray(sizeof(*ifr), ixl_nqueues(sc), M_TEMP,
3442 M_WAITOK|M_CANFAIL|M_ZERO);
3443 if (ifr == NULL)
3444 return (ENOMEM);
3445
3446 for (i = 0; i < ixl_nqueues(sc); i++) {
3447 ring = ifp->if_iqs[i]->ifiq_softc;
3448 ifr[i].ifr_size = MCLBYTES;
3449 snprintf(ifr[i].ifr_name, sizeof(ifr[i].ifr_name), "%d", i);
3450 ifr[i].ifr_info = ring->rxr_acct;
3451 }
3452
3453 rv = if_rxr_info_ioctl(ifri, ixl_nqueues(sc), ifr);
3454 free(ifr, M_TEMP, ixl_nqueues(sc) * sizeof(*ifr));
3455
3456 return (rv);
3457 }
3458
3459 static void
3460 ixl_rx_checksum(struct mbuf *m, uint64_t word)
3461 {
3462 if (!ISSET(word, IXL_RX_DESC_L3L4P))
3463 return;
3464
3465 if (ISSET(word, IXL_RX_DESC_IPE))
3466 return;
3467
3468 m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
3469
3470 if (ISSET(word, IXL_RX_DESC_L4E))
3471 return;
3472
3473 m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
3474 }
3475
3476 static int
3477 ixl_intr0(void *xsc)
3478 {
3479 struct ixl_softc *sc = xsc;
3480 struct ifnet *ifp = &sc->sc_ac.ac_if;
3481 uint32_t icr;
3482 int rv = 0;
3483
3484 ixl_intr_enable(sc);
3485 icr = ixl_rd(sc, I40E_PFINT_ICR0);
3486
3487 if (ISSET(icr, I40E_PFINT_ICR0_ADMINQ_MASK)) {
3488 ixl_atq_done(sc);
3489 task_add(systq, &sc->sc_arq_task);
3490 rv = 1;
3491 }
3492
3493 if (ISSET(icr, I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK)) {
3494 task_add(systq, &sc->sc_link_state_task);
3495 rv = 1;
3496 }
3497
3498 if (ISSET(ifp->if_flags, IFF_RUNNING)) {
3499 struct ixl_vector *iv = sc->sc_vectors;
3500 if (ISSET(icr, I40E_INTR_NOTX_RX_MASK))
3501 rv |= ixl_rxeof(sc, iv->iv_rxr);
3502 if (ISSET(icr, I40E_INTR_NOTX_TX_MASK))
3503 rv |= ixl_txeof(sc, iv->iv_txr);
3504 }
3505
3506 return (rv);
3507 }
3508
3509 static int
3510 ixl_intr_vector(void *v)
3511 {
3512 struct ixl_vector *iv = v;
3513 struct ixl_softc *sc = iv->iv_sc;
3514 struct ifnet *ifp = &sc->sc_ac.ac_if;
3515 int rv = 0;
3516
3517 if (ISSET(ifp->if_flags, IFF_RUNNING)) {
3518 rv |= ixl_rxeof(sc, iv->iv_rxr);
3519 rv |= ixl_txeof(sc, iv->iv_txr);
3520 }
3521
3522 ixl_wr(sc, I40E_PFINT_DYN_CTLN(iv->iv_qid),
3523 I40E_PFINT_DYN_CTLN_INTENA_MASK |
3524 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
3525 (IXL_NOITR << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
3526
3527 return (rv);
3528 }
3529
3530 static void
3531 ixl_link_state_update_iaq(struct ixl_softc *sc, void *arg)
3532 {
3533 struct ifnet *ifp = &sc->sc_ac.ac_if;
3534 struct ixl_aq_desc *iaq = arg;
3535 uint16_t retval;
3536 int link_state;
3537 int change = 0;
3538
3539 retval = lemtoh16(&iaq->iaq_retval);
3540 if (retval != IXL_AQ_RC_OK) {
3541 printf("%s: LINK STATUS error %u\n", DEVNAME(sc), retval);
3542 return;
3543 }
3544
3545 link_state = ixl_set_link_status(sc, iaq);
3546 mtx_enter(&sc->sc_link_state_mtx);
3547 if (ifp->if_link_state != link_state) {
3548 ifp->if_link_state = link_state;
3549 change = 1;
3550 }
3551 mtx_leave(&sc->sc_link_state_mtx);
3552
3553 if (change)
3554 if_link_state_change(ifp);
3555 }
3556
3557 static void
3558 ixl_link_state_update(void *xsc)
3559 {
3560 struct ixl_softc *sc = xsc;
3561 struct ixl_aq_desc *iaq;
3562 struct ixl_aq_link_param *param;
3563
3564 memset(&sc->sc_link_state_atq, 0, sizeof(sc->sc_link_state_atq));
3565 iaq = &sc->sc_link_state_atq.iatq_desc;
3566 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
3567 param = (struct ixl_aq_link_param *)iaq->iaq_param;
3568 param->notify = IXL_AQ_LINK_NOTIFY;
3569
3570 ixl_atq_set(&sc->sc_link_state_atq, ixl_link_state_update_iaq, iaq);
3571 ixl_atq_post(sc, &sc->sc_link_state_atq);
3572 }
3573
3574 #if 0
3575 static void
3576 ixl_aq_dump(const struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
3577 {
3578 printf("%s: flags %b opcode %04x\n", DEVNAME(sc),
3579 lemtoh16(&iaq->iaq_flags), IXL_AQ_FLAGS_FMT,
3580 lemtoh16(&iaq->iaq_opcode));
3581 printf("%s: datalen %u retval %u\n", DEVNAME(sc),
3582 lemtoh16(&iaq->iaq_datalen), lemtoh16(&iaq->iaq_retval));
3583 printf("%s: cookie %016llx\n", DEVNAME(sc), iaq->iaq_cookie);
3584 printf("%s: %08x %08x %08x %08x\n", DEVNAME(sc),
3585 lemtoh32(&iaq->iaq_param[0]), lemtoh32(&iaq->iaq_param[1]),
3586 lemtoh32(&iaq->iaq_param[2]), lemtoh32(&iaq->iaq_param[3]));
3587 }
3588 #endif
3589
3590 static void
3591 ixl_arq(void *xsc)
3592 {
3593 struct ixl_softc *sc = xsc;
3594 struct ixl_aq_desc *arq, *iaq;
3595 struct ixl_aq_buf *aqb;
3596 unsigned int cons = sc->sc_arq_cons;
3597 unsigned int prod;
3598 int done = 0;
3599
3600 prod = ixl_rd(sc, sc->sc_aq_regs->arq_head) &
3601 sc->sc_aq_regs->arq_head_mask;
3602
3603 if (cons == prod)
3604 goto done;
3605
3606 arq = IXL_DMA_KVA(&sc->sc_arq);
3607
3608 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3609 0, IXL_DMA_LEN(&sc->sc_arq),
3610 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3611
3612 do {
3613 iaq = &arq[cons];
3614
3615 aqb = SIMPLEQ_FIRST(&sc->sc_arq_live);
3616 SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_live, aqb_entry);
3617 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
3618 BUS_DMASYNC_POSTREAD);
3619
3620 switch (iaq->iaq_opcode) {
3621 case HTOLE16(IXL_AQ_OP_PHY_LINK_STATUS):
3622 ixl_link_state_update_iaq(sc, iaq);
3623 break;
3624 }
3625
3626 memset(iaq, 0, sizeof(*iaq));
3627 SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry);
3628 if_rxr_put(&sc->sc_arq_ring, 1);
3629
3630 cons++;
3631 cons &= IXL_AQ_MASK;
3632
3633 done = 1;
3634 } while (cons != prod);
3635
3636 if (done && ixl_arq_fill(sc))
3637 ixl_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
3638
3639 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_arq),
3640 0, IXL_DMA_LEN(&sc->sc_arq),
3641 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3642
3643 sc->sc_arq_cons = cons;
3644
3645 done:
3646 ixl_intr_enable(sc);
3647 }
3648
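/*
 * Admin transmit queue (ATQ) plumbing. An ixl_atq carries a command
 * descriptor plus a completion callback: ixl_atq_set() records the
 * callback, and ixl_atq_post() copies the descriptor into the ring,
 * stashing the ixl_atq pointer in the descriptor cookie so the
 * completion path can find the request again.
 */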
3649 static void
3650 ixl_atq_set(struct ixl_atq *iatq,
3651 void (*fn)(struct ixl_softc *, void *), void *arg)
3652 {
3653 iatq->iatq_fn = fn;
3654 iatq->iatq_arg = arg;
3655 }
3656
3657 static void
3658 ixl_atq_post(struct ixl_softc *sc, struct ixl_atq *iatq)
3659 {
3660 struct ixl_aq_desc *atq, *slot;
3661 unsigned int prod;
3662
3663 mtx_enter(&sc->sc_atq_mtx);
3664
3665 atq = IXL_DMA_KVA(&sc->sc_atq);
3666 prod = sc->sc_atq_prod;
3667 slot = atq + prod;
3668
3669 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3670 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3671
3672 *slot = iatq->iatq_desc;
3673 slot->iaq_cookie = (uint64_t)iatq;
3674
3675 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3676 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3677
3678 prod++;
3679 prod &= IXL_AQ_MASK;
3680 sc->sc_atq_prod = prod;
3681 ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);
3682
3683 mtx_leave(&sc->sc_atq_mtx);
3684 }
3685
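/*
 * Harvest completed ATQ descriptors: a slot is finished once the
 * hardware has set IXL_AQ_DD. The cookie planted by ixl_atq_post()
 * points back at the ixl_atq, whose callback runs with the completed
 * descriptor copied into iatq_desc.
 */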
3686 static void
3687 ixl_atq_done(struct ixl_softc *sc)
3688 {
3689 struct ixl_aq_desc *atq, *slot;
3690 struct ixl_atq *iatq;
3691 unsigned int cons;
3692 unsigned int prod;
3693
3694 mtx_enter(&sc->sc_atq_mtx);
3695
3696 prod = sc->sc_atq_prod;
3697 cons = sc->sc_atq_cons;
3698
3699 if (prod == cons) {
3700 mtx_leave(&sc->sc_atq_mtx);
3701 return;
3702 }
3703
3704 atq = IXL_DMA_KVA(&sc->sc_atq);
3705
3706 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3707 0, IXL_DMA_LEN(&sc->sc_atq),
3708 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
3709
3710 do {
3711 slot = &atq[cons];
3712 if (!ISSET(slot->iaq_flags, htole16(IXL_AQ_DD)))
3713 break;
3714
3715 KASSERT(slot->iaq_cookie != 0);
3716 iatq = (struct ixl_atq *)slot->iaq_cookie;
3717 iatq->iatq_desc = *slot;
3718
3719 memset(slot, 0, sizeof(*slot));
3720
3721 (*iatq->iatq_fn)(sc, iatq->iatq_arg);
3722
3723 cons++;
3724 cons &= IXL_AQ_MASK;
3725 } while (cons != prod);
3726
3727 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3728 0, IXL_DMA_LEN(&sc->sc_atq),
3729 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
3730
3731 sc->sc_atq_cons = cons;
3732
3733 mtx_leave(&sc->sc_atq_mtx);
3734 }
3735
3736 static void
3737 ixl_wakeup(struct ixl_softc *sc, void *arg)
3738 {
3739 struct cond *c = arg;
3740
3741 cond_signal(c);
3742 }
3743
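/*
 * Synchronous ATQ command: post the descriptor with ixl_wakeup() as
 * the completion callback and sleep on a condition variable until
 * ixl_atq_done() fires it. Callers must be able to sleep.
 */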
3744 static void
3745 ixl_atq_exec(struct ixl_softc *sc, struct ixl_atq *iatq, const char *wmesg)
3746 {
3747 struct cond c = COND_INITIALIZER();
3748
3749 KASSERT(iatq->iatq_desc.iaq_cookie == 0);
3750
3751 ixl_atq_set(iatq, ixl_wakeup, &c);
3752 ixl_atq_post(sc, iatq);
3753
3754 cond_wait(&c, wmesg);
3755 }
3756
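/*
 * Polled ATQ command for contexts where sleeping is not an option
 * (e.g. during attach, before interrupts are set up): busy-wait in
 * 1ms steps, up to tm milliseconds, for the hardware to consume the
 * descriptor, then copy the completion back into *iaq.
 */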
3757 static int
3758 ixl_atq_poll(struct ixl_softc *sc, struct ixl_aq_desc *iaq, unsigned int tm)
3759 {
3760 struct ixl_aq_desc *atq, *slot;
3761 unsigned int prod;
3762 unsigned int t = 0;
3763
3764 mtx_enter(&sc->sc_atq_mtx);
3765
3766 atq = IXL_DMA_KVA(&sc->sc_atq);
3767 prod = sc->sc_atq_prod;
3768 slot = atq + prod;
3769
3770 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3771 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);
3772
3773 *slot = *iaq;
3774 slot->iaq_flags |= htole16(IXL_AQ_SI);
3775
3776 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3777 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);
3778
3779 prod++;
3780 prod &= IXL_AQ_MASK;
3781 sc->sc_atq_prod = prod;
3782 ixl_wr(sc, sc->sc_aq_regs->atq_tail, prod);
3783
3784 while (ixl_rd(sc, sc->sc_aq_regs->atq_head) != prod) {
3785 delaymsec(1);
3786
3787 if (t++ > tm) {
3788 mtx_leave(&sc->sc_atq_mtx);
3789 return (ETIMEDOUT);
3790 }
3791 }
3792
3793 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3794 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTREAD);
3795 *iaq = *slot;
3796 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_atq),
3797 0, IXL_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREREAD);
3798
3799 sc->sc_atq_cons = prod;
3800
3801 mtx_leave(&sc->sc_atq_mtx);
3802 return (0);
3803 }
3804
3805 static int
3806 ixl_get_version(struct ixl_softc *sc)
3807 {
3808 struct ixl_aq_desc iaq;
3809 uint32_t fwbuild, fwver, apiver;
3810
3811 memset(&iaq, 0, sizeof(iaq));
3812 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VERSION);
3813
3814 if (ixl_atq_poll(sc, &iaq, 2000) != 0)
3815 return (ETIMEDOUT);
3816 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK))
3817 return (EIO);
3818
3819 fwbuild = lemtoh32(&iaq.iaq_param[1]);
3820 fwver = lemtoh32(&iaq.iaq_param[2]);
3821 apiver = lemtoh32(&iaq.iaq_param[3]);
3822
3823 sc->sc_api_major = apiver & 0xffff;
3824 sc->sc_api_minor = (apiver >> 16) & 0xffff;
3825
3826 printf(", FW %hu.%hu.%05u API %hu.%hu", (uint16_t)fwver,
3827 (uint16_t)(fwver >> 16), fwbuild,
3828 sc->sc_api_major, sc->sc_api_minor);
3829
3830 return (0);
3831 }
3832
3833 static int
3834 ixl_pxe_clear(struct ixl_softc *sc)
3835 {
3836 struct ixl_aq_desc iaq;
3837
3838 memset(&iaq, 0, sizeof(iaq));
3839 iaq.iaq_opcode = htole16(IXL_AQ_OP_CLEAR_PXE_MODE);
3840 iaq.iaq_param[0] = htole32(0x2);
3841
3842 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3843 printf(", CLEAR PXE MODE timeout\n");
3844 return (-1);
3845 }
3846
3847 switch (iaq.iaq_retval) {
3848 case HTOLE16(IXL_AQ_RC_OK):
3849 case HTOLE16(IXL_AQ_RC_EEXIST):
3850 break;
3851 default:
3852 printf(", CLEAR PXE MODE error\n");
3853 return (-1);
3854 }
3855
3856 return (0);
3857 }
3858
3859 static int
3860 ixl_lldp_shut(struct ixl_softc *sc)
3861 {
3862 struct ixl_aq_desc iaq;
3863
3864 memset(&iaq, 0, sizeof(iaq));
3865 iaq.iaq_opcode = htole16(IXL_AQ_OP_LLDP_STOP_AGENT);
3866 iaq.iaq_param[0] = htole32(IXL_LLDP_SHUTDOWN);
3867
3868 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
3869 printf(", STOP LLDP AGENT timeout\n");
3870 return (-1);
3871 }
3872
3873 switch (iaq.iaq_retval) {
3874 case HTOLE16(IXL_AQ_RC_EMODE):
3875 case HTOLE16(IXL_AQ_RC_EPERM):
3876 /* ignore silently */
3877 default:
3878 break;
3879 }
3880
3881 return (0);
3882 }
3883
3884 static int
3885 ixl_get_mac(struct ixl_softc *sc)
3886 {
3887 struct ixl_dmamem idm;
3888 struct ixl_aq_desc iaq;
3889 struct ixl_aq_mac_addresses *addrs;
3890 int rv;
3891
3892 #ifdef __sparc64__
3893 if (OF_getprop(PCITAG_NODE(sc->sc_tag), "local-mac-address",
3894 sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN) == ETHER_ADDR_LEN)
3895 return (0);
3896 #endif
3897
3898 if (ixl_dmamem_alloc(sc, &idm, sizeof(*addrs), 0) != 0) {
3899 printf(", unable to allocate mac addresses\n");
3900 return (-1);
3901 }
3902
3903 memset(&iaq, 0, sizeof(iaq));
3904 iaq.iaq_flags = htole16(IXL_AQ_BUF);
3905 iaq.iaq_opcode = htole16(IXL_AQ_OP_MAC_ADDRESS_READ);
3906 iaq.iaq_datalen = htole16(sizeof(*addrs));
3907 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
3908
3909 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3910 BUS_DMASYNC_PREREAD);
3911
3912 rv = ixl_atq_poll(sc, &iaq, 250);
3913
3914 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3915 BUS_DMASYNC_POSTREAD);
3916
3917 if (rv != 0) {
3918 printf(", MAC ADDRESS READ timeout\n");
3919 rv = -1;
3920 goto done;
3921 }
3922 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3923 printf(", MAC ADDRESS READ error\n");
3924 rv = -1;
3925 goto done;
3926 }
3927
3928 addrs = IXL_DMA_KVA(&idm);
3929 if (!ISSET(iaq.iaq_param[0], htole32(IXL_AQ_MAC_PORT_VALID))) {
3930 printf(", port address is not valid\n");
3931 goto done;
3932 }
3933
3934 memcpy(sc->sc_ac.ac_enaddr, addrs->port, ETHER_ADDR_LEN);
3935 rv = 0;
3936
3937 done:
3938 ixl_dmamem_free(sc, &idm);
3939 return (rv);
3940 }
3941
3942 static int
3943 ixl_get_switch_config(struct ixl_softc *sc)
3944 {
3945 struct ixl_dmamem idm;
3946 struct ixl_aq_desc iaq;
3947 struct ixl_aq_switch_config *hdr;
3948 struct ixl_aq_switch_config_element *elms, *elm;
3949 unsigned int nelm;
3950 int rv;
3951
3952 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
3953 printf("%s: unable to allocate switch config buffer\n",
3954 DEVNAME(sc));
3955 return (-1);
3956 }
3957
3958 memset(&iaq, 0, sizeof(iaq));
3959 iaq.iaq_flags = htole16(IXL_AQ_BUF |
3960 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
3961 iaq.iaq_opcode = htole16(IXL_AQ_OP_SWITCH_GET_CONFIG);
3962 iaq.iaq_datalen = htole16(IXL_AQ_BUFLEN);
3963 ixl_aq_dva(&iaq, IXL_DMA_DVA(&idm));
3964
3965 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3966 BUS_DMASYNC_PREREAD);
3967
3968 rv = ixl_atq_poll(sc, &iaq, 250);
3969
3970 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&idm), 0, IXL_DMA_LEN(&idm),
3971 BUS_DMASYNC_POSTREAD);
3972
3973 if (rv != 0) {
3974 printf("%s: GET SWITCH CONFIG timeout\n", DEVNAME(sc));
3975 rv = -1;
3976 goto done;
3977 }
3978 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
3979 printf("%s: GET SWITCH CONFIG error\n", DEVNAME(sc));
3980 rv = -1;
3981 goto done;
3982 }
3983
3984 hdr = IXL_DMA_KVA(&idm);
3985 elms = (struct ixl_aq_switch_config_element *)(hdr + 1);
3986
3987 nelm = lemtoh16(&hdr->num_reported);
3988 if (nelm < 1) {
3989 printf("%s: no switch config available\n", DEVNAME(sc));
3990 rv = -1;
3991 goto done;
3992 }
3993
3994 #if 0
3995 for (i = 0; i < nelm; i++) {
3996 elm = &elms[i];
3997
3998 printf("%s: type %x revision %u seid %04x\n", DEVNAME(sc),
3999 elm->type, elm->revision, lemtoh16(&elm->seid));
4000 printf("%s: uplink %04x downlink %04x\n", DEVNAME(sc),
4001 lemtoh16(&elm->uplink_seid),
4002 lemtoh16(&elm->downlink_seid));
4003 printf("%s: conntype %x scheduler %04x extra %04x\n",
4004 DEVNAME(sc), elm->connection_type,
4005 lemtoh16(&elm->scheduler_id),
4006 lemtoh16(&elm->element_info));
4007 }
4008 #endif
4009
4010 elm = &elms[0];
4011
4012 sc->sc_uplink_seid = elm->uplink_seid;
4013 sc->sc_downlink_seid = elm->downlink_seid;
4014 sc->sc_seid = elm->seid;
4015
4016 if ((sc->sc_uplink_seid == htole16(0)) !=
4017 (sc->sc_downlink_seid == htole16(0))) {
4018 printf("%s: SEIDs are misconfigured\n", DEVNAME(sc));
4019 rv = -1;
4020 goto done;
4021 }
4022
4023 done:
4024 ixl_dmamem_free(sc, &idm);
4025 return (rv);
4026 }
4027
4028 static int
4029 ixl_phy_mask_ints(struct ixl_softc *sc)
4030 {
4031 struct ixl_aq_desc iaq;
4032
4033 memset(&iaq, 0, sizeof(iaq));
4034 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_EVENT_MASK);
4035 iaq.iaq_param[2] = htole32(IXL_AQ_PHY_EV_MASK &
4036 ~(IXL_AQ_PHY_EV_LINK_UPDOWN | IXL_AQ_PHY_EV_MODULE_QUAL_FAIL |
4037 IXL_AQ_PHY_EV_MEDIA_NA));
4038
4039 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4040 printf("%s: SET PHY EVENT MASK timeout\n", DEVNAME(sc));
4041 return (-1);
4042 }
4043 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4044 printf("%s: SET PHY EVENT MASK error\n", DEVNAME(sc));
4045 return (-1);
4046 }
4047
4048 return (0);
4049 }
4050
4051 static int
4052 ixl_get_phy_abilities(struct ixl_softc *sc, struct ixl_dmamem *idm)
4053 {
4054 struct ixl_aq_desc iaq;
4055 int rv;
4056
4057 memset(&iaq, 0, sizeof(iaq));
4058 iaq.iaq_flags = htole16(IXL_AQ_BUF |
4059 (IXL_DMA_LEN(idm) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4060 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_ABILITIES);
4061 htolem16(&iaq.iaq_datalen, IXL_DMA_LEN(idm));
4062 iaq.iaq_param[0] = htole32(IXL_AQ_PHY_REPORT_INIT);
4063 ixl_aq_dva(&iaq, IXL_DMA_DVA(idm));
4064
4065 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
4066 BUS_DMASYNC_PREREAD);
4067
4068 rv = ixl_atq_poll(sc, &iaq, 250);
4069
4070 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(idm), 0, IXL_DMA_LEN(idm),
4071 BUS_DMASYNC_POSTREAD);
4072
4073 if (rv != 0)
4074 return (-1);
4075
4076 return (lemtoh16(&iaq.iaq_retval));
4077 }
4078
4079 static int
4080 ixl_get_phy_types(struct ixl_softc *sc, uint64_t *phy_types_ptr)
4081 {
4082 struct ixl_dmamem idm;
4083 struct ixl_aq_phy_abilities *phy;
4084 uint64_t phy_types;
4085 int rv;
4086
4087 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0) {
4088 printf("%s: unable to allocate phy abilities buffer\n",
4089 DEVNAME(sc));
4090 return (-1);
4091 }
4092
4093 rv = ixl_get_phy_abilities(sc, &idm);
4094 switch (rv) {
4095 case -1:
4096 printf("%s: GET PHY ABILITIES timeout\n", DEVNAME(sc));
4097 goto err;
4098 case IXL_AQ_RC_OK:
4099 break;
4100 case IXL_AQ_RC_EIO:
4101 /* API is too old to handle this command */
4102 phy_types = 0;
4103 goto done;
4104 default:
4105 printf("%s: GET PHY ABILITIES error %u\n", DEVNAME(sc), rv);
4106 goto err;
4107 }
4108
4109 phy = IXL_DMA_KVA(&idm);
4110
4111 phy_types = lemtoh32(&phy->phy_type);
4112 phy_types |= (uint64_t)phy->phy_type_ext << 32;
4113
4114 done:
4115 *phy_types_ptr = phy_types;
4116
4117 rv = 0;
4118
4119 err:
4120 ixl_dmamem_free(sc, &idm);
4121 return (rv);
4122 }
4123
4124 /*
4125 * this returns -2 on software/driver failure, -1 for problems
4126 * talking to the hardware, or the sff module type.
4127 */
4128
4129 static int
4130 ixl_get_module_type(struct ixl_softc *sc)
4131 {
4132 struct ixl_dmamem idm;
4133 struct ixl_aq_phy_abilities *phy;
4134 int rv;
4135
4136 if (ixl_dmamem_alloc(sc, &idm, IXL_AQ_BUFLEN, 0) != 0)
4137 return (-2);
4138
4139 rv = ixl_get_phy_abilities(sc, &idm);
4140 if (rv != IXL_AQ_RC_OK) {
4141 rv = -1;
4142 goto done;
4143 }
4144
4145 phy = IXL_DMA_KVA(&idm);
4146
4147 rv = phy->module_type[0];
4148
4149 done:
4150 ixl_dmamem_free(sc, &idm);
4151 return (rv);
4152 }
4153
4154 static int
4155 ixl_get_link_status(struct ixl_softc *sc)
4156 {
4157 struct ixl_aq_desc iaq;
4158 struct ixl_aq_link_param *param;
4159
4160 memset(&iaq, 0, sizeof(iaq));
4161 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_LINK_STATUS);
4162 param = (struct ixl_aq_link_param *)iaq.iaq_param;
4163 param->notify = IXL_AQ_LINK_NOTIFY;
4164
4165 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4166 printf("%s: GET LINK STATUS timeout\n", DEVNAME(sc));
4167 return (-1);
4168 }
4169 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4170 printf("%s: GET LINK STATUS error\n", DEVNAME(sc));
4171 return (0);
4172 }
4173
4174 sc->sc_ac.ac_if.if_link_state = ixl_set_link_status(sc, &iaq);
4175
4176 return (0);
4177 }
4178
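/*
 * SFF module EEPROM access. SFP modules select the upper page via
 * byte 127 of the 0xA0 EEPROM, so the sfp ops save and restore that
 * byte around a dump; QSFP modules are addressed by page directly,
 * so the qsfp ops pass sff_page through as the device address.
 */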
4179 struct ixl_sff_ops {
4180 int (*open)(struct ixl_softc *sc, struct if_sffpage *, uint8_t *);
4181 int (*get)(struct ixl_softc *sc, struct if_sffpage *, size_t);
4182 int (*close)(struct ixl_softc *sc, struct if_sffpage *, uint8_t);
4183 };
4184
4185 static int
4186 ixl_sfp_open(struct ixl_softc *sc, struct if_sffpage *sff, uint8_t *page)
4187 {
4188 int error;
4189
4190 if (sff->sff_addr != IFSFF_ADDR_EEPROM)
4191 return (0);
4192
4193 error = ixl_sff_get_byte(sc, IFSFF_ADDR_EEPROM, 127, page);
4194 if (error != 0)
4195 return (error);
4196 if (*page == sff->sff_page)
4197 return (0);
4198 error = ixl_sff_set_byte(sc, IFSFF_ADDR_EEPROM, 127, sff->sff_page);
4199 if (error != 0)
4200 return (error);
4201
4202 return (0);
4203 }
4204
4205 static int
4206 ixl_sfp_get(struct ixl_softc *sc, struct if_sffpage *sff, size_t i)
4207 {
4208 return (ixl_sff_get_byte(sc, sff->sff_addr, i, &sff->sff_data[i]));
4209 }
4210
4211 static int
4212 ixl_sfp_close(struct ixl_softc *sc, struct if_sffpage *sff, uint8_t page)
4213 {
4214 int error;
4215
4216 if (sff->sff_addr != IFSFF_ADDR_EEPROM)
4217 return (0);
4218
4219 if (page == sff->sff_page)
4220 return (0);
4221
4222 error = ixl_sff_set_byte(sc, IFSFF_ADDR_EEPROM, 127, page);
4223 if (error != 0)
4224 return (error);
4225
4226 return (0);
4227 }
4228
4229 static const struct ixl_sff_ops ixl_sfp_ops = {
4230 ixl_sfp_open,
4231 ixl_sfp_get,
4232 ixl_sfp_close,
4233 };
4234
4235 static int
4236 ixl_qsfp_open(struct ixl_softc *sc, struct if_sffpage *sff, uint8_t *page)
4237 {
4238 if (sff->sff_addr != IFSFF_ADDR_EEPROM)
4239 return (EIO);
4240
4241 return (0);
4242 }
4243
4244 static int
4245 ixl_qsfp_get(struct ixl_softc *sc, struct if_sffpage *sff, size_t i)
4246 {
4247 return (ixl_sff_get_byte(sc, sff->sff_page, i, &sff->sff_data[i]));
4248 }
4249
4250 static int
4251 ixl_qsfp_close(struct ixl_softc *sc, struct if_sffpage *sff, uint8_t page)
4252 {
4253 return (0);
4254 }
4255
4256 static const struct ixl_sff_ops ixl_qsfp_ops = {
4257 ixl_qsfp_open,
4258 ixl_qsfp_get,
4259 ixl_qsfp_close,
4260 };
4261
4262 static int
4263 ixl_get_sffpage(struct ixl_softc *sc, struct if_sffpage *sff)
4264 {
4265 const struct ixl_sff_ops *ops;
4266 uint8_t page;
4267 size_t i;
4268 int error;
4269
4270 switch (ixl_get_module_type(sc)) {
4271 case -2:
4272 return (ENOMEM);
4273 case -1:
4274 return (ENXIO);
4275 case IXL_SFF8024_ID_SFP:
4276 ops = &ixl_sfp_ops;
4277 break;
4278 case IXL_SFF8024_ID_QSFP:
4279 case IXL_SFF8024_ID_QSFP_PLUS:
4280 case IXL_SFF8024_ID_QSFP28:
4281 ops = &ixl_qsfp_ops;
4282 break;
4283 default:
4284 return (EOPNOTSUPP);
4285 }
4286
4287 error = (*ops->open)(sc, sff, &page);
4288 if (error != 0)
4289 return (error);
4290
4291 for (i = 0; i < sizeof(sff->sff_data); i++) {
4292 error = (*ops->get)(sc, sff, i);
4293 if (error != 0)
4294 return (error);
4295 }
4296
4297 error = (*ops->close)(sc, sff, page);
4298
4299 return (error);
4300 }
4301
4302 static int
4303 ixl_sff_get_byte(struct ixl_softc *sc, uint8_t dev, uint32_t reg, uint8_t *p)
4304 {
4305 struct ixl_atq iatq;
4306 struct ixl_aq_desc *iaq;
4307 struct ixl_aq_phy_reg_access *param;
4308
4309 memset(&iatq, 0, sizeof(iatq));
4310 iaq = &iatq.iatq_desc;
4311 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_GET_REGISTER);
4312 param = (struct ixl_aq_phy_reg_access *)iaq->iaq_param;
4313 param->phy_iface = IXL_AQ_PHY_IF_MODULE;
4314 param->dev_addr = dev;
4315 htolem32(¶m->reg, reg);
4316
4317 ixl_atq_exec(sc, &iatq, "ixlsffget");
4318
4319 if (ISSET(sc->sc_ac.ac_if.if_flags, IFF_DEBUG)) {
4320 printf("%s: %s(dev 0x%02x, reg 0x%02x) -> %04x\n",
4321 DEVNAME(sc), __func__,
4322 dev, reg, lemtoh16(&iaq->iaq_retval));
4323 }
4324
4325 switch (iaq->iaq_retval) {
4326 case htole16(IXL_AQ_RC_OK):
4327 break;
4328 case htole16(IXL_AQ_RC_EBUSY):
4329 return (EBUSY);
4330 case htole16(IXL_AQ_RC_ESRCH):
4331 return (ENODEV);
4332 case htole16(IXL_AQ_RC_EIO):
4333 case htole16(IXL_AQ_RC_EINVAL):
4334 default:
4335 return (EIO);
4336 }
4337
4338 *p = lemtoh32(&param->val);
4339
4340 return (0);
4341 }
4342
4343 static int
4344 ixl_sff_set_byte(struct ixl_softc *sc, uint8_t dev, uint32_t reg, uint8_t v)
4345 {
4346 struct ixl_atq iatq;
4347 struct ixl_aq_desc *iaq;
4348 struct ixl_aq_phy_reg_access *param;
4349
4350 memset(&iatq, 0, sizeof(iatq));
4351 iaq = &iatq.iatq_desc;
4352 iaq->iaq_opcode = htole16(IXL_AQ_OP_PHY_SET_REGISTER);
4353 param = (struct ixl_aq_phy_reg_access *)iaq->iaq_param;
4354 param->phy_iface = IXL_AQ_PHY_IF_MODULE;
4355 param->dev_addr = dev;
4356 htolem32(&param->reg, reg);
4357 htolem32(&param->val, v);
4358
4359 ixl_atq_exec(sc, &iatq, "ixlsffset");
4360
4361 if (ISSET(sc->sc_ac.ac_if.if_flags, IFF_DEBUG)) {
4362 printf("%s: %s(dev 0x%02x, reg 0x%02x, val 0x%02x) -> %04x\n",
4363 DEVNAME(sc), __func__,
4364 dev, reg, v, lemtoh16(&iaq->iaq_retval));
4365 }
4366
4367 switch (iaq->iaq_retval) {
4368 case htole16(IXL_AQ_RC_OK):
4369 break;
4370 case htole16(IXL_AQ_RC_EBUSY):
4371 return (EBUSY);
4372 case htole16(IXL_AQ_RC_ESRCH):
4373 return (ENODEV);
4374 case htole16(IXL_AQ_RC_EIO):
4375 case htole16(IXL_AQ_RC_EINVAL):
4376 default:
4377 return (EIO);
4378 }
4379
4380 return (0);
4381 }
4382
4383 static int
4384 ixl_get_vsi(struct ixl_softc *sc)
4385 {
4386 struct ixl_dmamem *vsi = &sc->sc_scratch;
4387 struct ixl_aq_desc iaq;
4388 struct ixl_aq_vsi_param *param;
4389 struct ixl_aq_vsi_reply *reply;
4390 int rv;
4391
4392 /* grumble, vsi info isn't "known" at compile time */
4393
4394 memset(&iaq, 0, sizeof(iaq));
4395 htolem16(&iaq.iaq_flags, IXL_AQ_BUF |
4396 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4397 iaq.iaq_opcode = htole16(IXL_AQ_OP_GET_VSI_PARAMS);
4398 htolem16(&iaq.iaq_datalen, IXL_DMA_LEN(vsi));
4399 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4400
4401 param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4402 param->uplink_seid = sc->sc_seid;
4403
4404 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4405 BUS_DMASYNC_PREREAD);
4406
4407 rv = ixl_atq_poll(sc, &iaq, 250);
4408
4409 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4410 BUS_DMASYNC_POSTREAD);
4411
4412 if (rv != 0) {
4413 printf("%s: GET VSI timeout\n", DEVNAME(sc));
4414 return (-1);
4415 }
4416
4417 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4418 printf("%s: GET VSI error %u\n", DEVNAME(sc),
4419 lemtoh16(&iaq.iaq_retval));
4420 return (-1);
4421 }
4422
4423 reply = (struct ixl_aq_vsi_reply *)iaq.iaq_param;
4424 sc->sc_vsi_number = reply->vsi_number;
4425
4426 return (0);
4427 }
4428
4429 static int
4430 ixl_set_vsi(struct ixl_softc *sc)
4431 {
4432 struct ixl_dmamem *vsi = &sc->sc_scratch;
4433 struct ixl_aq_desc iaq;
4434 struct ixl_aq_vsi_param *param;
4435 struct ixl_aq_vsi_data *data = IXL_DMA_KVA(vsi);
4436 int rv;
4437
4438 data->valid_sections = htole16(IXL_AQ_VSI_VALID_QUEUE_MAP |
4439 IXL_AQ_VSI_VALID_VLAN);
4440
4441 CLR(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_MASK));
4442 SET(data->mapping_flags, htole16(IXL_AQ_VSI_QUE_MAP_CONTIG));
4443 data->queue_mapping[0] = htole16(0);
4444 data->tc_mapping[0] = htole16((0 << IXL_AQ_VSI_TC_Q_OFFSET_SHIFT) |
4445 (sc->sc_nqueues << IXL_AQ_VSI_TC_Q_NUMBER_SHIFT));
4446
4447 CLR(data->port_vlan_flags,
4448 htole16(IXL_AQ_VSI_PVLAN_MODE_MASK | IXL_AQ_VSI_PVLAN_EMOD_MASK));
4449 SET(data->port_vlan_flags, htole16(IXL_AQ_VSI_PVLAN_MODE_ALL |
4450 IXL_AQ_VSI_PVLAN_EMOD_STR_BOTH));
4451
4452 /* grumble, vsi info isn't "known" at compile time */
4453
4454 memset(&iaq, 0, sizeof(iaq));
4455 htolem16(&iaq.iaq_flags, IXL_AQ_BUF | IXL_AQ_RD |
4456 (IXL_DMA_LEN(vsi) > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4457 iaq.iaq_opcode = htole16(IXL_AQ_OP_UPD_VSI_PARAMS);
4458 htolem16(&iaq.iaq_datalen, IXL_DMA_LEN(vsi));
4459 ixl_aq_dva(&iaq, IXL_DMA_DVA(vsi));
4460
4461 param = (struct ixl_aq_vsi_param *)iaq.iaq_param;
4462 param->uplink_seid = sc->sc_seid;
4463
4464 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4465 BUS_DMASYNC_PREWRITE);
4466
4467 rv = ixl_atq_poll(sc, &iaq, 250);
4468
4469 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(vsi), 0, IXL_DMA_LEN(vsi),
4470 BUS_DMASYNC_POSTWRITE);
4471
4472 if (rv != 0) {
4473 printf("%s: UPDATE VSI timeout\n", DEVNAME(sc));
4474 return (-1);
4475 }
4476
4477 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4478 printf("%s: UPDATE VSI error %u\n", DEVNAME(sc),
4479 lemtoh16(&iaq.iaq_retval));
4480 return (-1);
4481 }
4482
4483 return (0);
4484 }
4485
4486 static const struct ixl_phy_type *
4487 ixl_search_phy_type(uint8_t phy_type)
4488 {
4489 const struct ixl_phy_type *itype;
4490 uint64_t mask;
4491 unsigned int i;
4492
4493 if (phy_type >= 64)
4494 return (NULL);
4495
4496 mask = 1ULL << phy_type;
4497
4498 for (i = 0; i < nitems(ixl_phy_type_map); i++) {
4499 itype = &ixl_phy_type_map[i];
4500
4501 if (ISSET(itype->phy_type, mask))
4502 return (itype);
4503 }
4504
4505 return (NULL);
4506 }
4507
4508 static uint64_t
4509 ixl_search_link_speed(uint8_t link_speed)
4510 {
4511 const struct ixl_speed_type *type;
4512 unsigned int i;
4513
4514 for (i = 0; i < nitems(ixl_speed_type_map); i++) {
4515 type = &ixl_speed_type_map[i];
4516
4517 if (ISSET(type->dev_speed, link_speed))
4518 return (type->net_speed);
4519 }
4520
4521 return (0);
4522 }
4523
4524 static int
4525 ixl_set_link_status(struct ixl_softc *sc, const struct ixl_aq_desc *iaq)
4526 {
4527 const struct ixl_aq_link_status *status;
4528 const struct ixl_phy_type *itype;
4529 uint64_t ifm_active = IFM_ETHER;
4530 uint64_t ifm_status = IFM_AVALID;
4531 int link_state = LINK_STATE_DOWN;
4532 uint64_t baudrate = 0;
4533
4534 status = (const struct ixl_aq_link_status *)iaq->iaq_param;
4535 if (!ISSET(status->link_info, IXL_AQ_LINK_UP_FUNCTION))
4536 goto done;
4537
4538 ifm_active |= IFM_FDX;
4539 ifm_status |= IFM_ACTIVE;
4540 link_state = LINK_STATE_FULL_DUPLEX;
4541
4542 itype = ixl_search_phy_type(status->phy_type);
4543 if (itype != NULL)
4544 ifm_active |= itype->ifm_type;
4545
4546 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_TX))
4547 ifm_active |= IFM_ETH_TXPAUSE;
4548 if (ISSET(status->an_info, IXL_AQ_LINK_PAUSE_RX))
4549 ifm_active |= IFM_ETH_RXPAUSE;
4550
4551 baudrate = ixl_search_link_speed(status->link_speed);
4552
4553 done:
4554 mtx_enter(&sc->sc_link_state_mtx);
4555 sc->sc_media_active = ifm_active;
4556 sc->sc_media_status = ifm_status;
4557 sc->sc_ac.ac_if.if_baudrate = baudrate;
4558 mtx_leave(&sc->sc_link_state_mtx);
4559
4560 return (link_state);
4561 }
4562
4563 static int
4564 ixl_restart_an(struct ixl_softc *sc)
4565 {
4566 struct ixl_aq_desc iaq;
4567
4568 memset(&iaq, 0, sizeof(iaq));
4569 iaq.iaq_opcode = htole16(IXL_AQ_OP_PHY_RESTART_AN);
4570 iaq.iaq_param[0] =
4571 htole32(IXL_AQ_PHY_RESTART_AN | IXL_AQ_PHY_LINK_ENABLE);
4572
4573 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4574 printf("%s: RESTART AN timeout\n", DEVNAME(sc));
4575 return (-1);
4576 }
4577 if (iaq.iaq_retval != htole16(IXL_AQ_RC_OK)) {
4578 printf("%s: RESTART AN error\n", DEVNAME(sc));
4579 return (-1);
4580 }
4581
4582 return (0);
4583 }
4584
4585 static int
4586 ixl_add_macvlan(struct ixl_softc *sc, uint8_t *macaddr, uint16_t vlan, uint16_t flags)
4587 {
4588 struct ixl_aq_desc iaq;
4589 struct ixl_aq_add_macvlan *param;
4590 struct ixl_aq_add_macvlan_elem *elem;
4591
4592 memset(&iaq, 0, sizeof(iaq));
4593 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4594 iaq.iaq_opcode = htole16(IXL_AQ_OP_ADD_MACVLAN);
4595 iaq.iaq_datalen = htole16(sizeof(*elem));
4596 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
4597
4598 param = (struct ixl_aq_add_macvlan *)&iaq.iaq_param;
4599 param->num_addrs = htole16(1);
4600 param->seid0 = htole16(0x8000) | sc->sc_seid;
4601 param->seid1 = 0;
4602 param->seid2 = 0;
4603
4604 elem = IXL_DMA_KVA(&sc->sc_scratch);
4605 memset(elem, 0, sizeof(*elem));
4606 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
4607 elem->flags = htole16(IXL_AQ_OP_ADD_MACVLAN_PERFECT_MATCH | flags);
4608 elem->vlan = htole16(vlan);
4609
4610 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4611 printf("%s: ADD_MACVLAN timeout\n", DEVNAME(sc));
4612 return (IXL_AQ_RC_EINVAL);
4613 }
4614
4615 return (letoh16(iaq.iaq_retval));
4616 }
4617
4618 static int
4619 ixl_remove_macvlan(struct ixl_softc *sc, uint8_t *macaddr, uint16_t vlan, uint16_t flags)
4620 {
4621 struct ixl_aq_desc iaq;
4622 struct ixl_aq_remove_macvlan *param;
4623 struct ixl_aq_remove_macvlan_elem *elem;
4624
4625 memset(&iaq, 0, sizeof(iaq));
4626 iaq.iaq_flags = htole16(IXL_AQ_BUF | IXL_AQ_RD);
4627 iaq.iaq_opcode = htole16(IXL_AQ_OP_REMOVE_MACVLAN);
4628 iaq.iaq_datalen = htole16(sizeof(*elem));
4629 ixl_aq_dva(&iaq, IXL_DMA_DVA(&sc->sc_scratch));
4630
4631 param = (struct ixl_aq_remove_macvlan *)&iaq.iaq_param;
4632 param->num_addrs = htole16(1);
4633 param->seid0 = htole16(0x8000) | sc->sc_seid;
4634 param->seid1 = 0;
4635 param->seid2 = 0;
4636
4637 elem = IXL_DMA_KVA(&sc->sc_scratch);
4638 memset(elem, 0, sizeof(*elem));
4639 memcpy(elem->macaddr, macaddr, ETHER_ADDR_LEN);
4640 elem->flags = htole16(IXL_AQ_OP_REMOVE_MACVLAN_PERFECT_MATCH | flags);
4641 elem->vlan = htole16(vlan);
4642
4643 if (ixl_atq_poll(sc, &iaq, 250) != 0) {
4644 printf("%s: REMOVE_MACVLAN timeout\n", DEVNAME(sc));
4645 return (IXL_AQ_RC_EINVAL);
4646 }
4647
4648 return (letoh16(iaq.iaq_retval));
4649 }
4650
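/*
 * Set up the Host Memory Cache (HMC): size the LAN tx/rx (and unused
 * FCoE) object regions, back them with pages in sc_hmc_pd, point the
 * segment descriptor pages in sc_hmc_sd at those pages, and program
 * the segment table and the per-object base/count registers.
 */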
4651 static int
4652 ixl_hmc(struct ixl_softc *sc)
4653 {
4654 struct {
4655 uint32_t count;
4656 uint32_t minsize;
4657 bus_size_t maxcnt;
4658 bus_size_t setoff;
4659 bus_size_t setcnt;
4660 } regs[] = {
4661 {
4662 0,
4663 IXL_HMC_TXQ_MINSIZE,
4664 I40E_GLHMC_LANTXOBJSZ,
4665 I40E_GLHMC_LANTXBASE(sc->sc_pf_id),
4666 I40E_GLHMC_LANTXCNT(sc->sc_pf_id),
4667 },
4668 {
4669 0,
4670 IXL_HMC_RXQ_MINSIZE,
4671 I40E_GLHMC_LANRXOBJSZ,
4672 I40E_GLHMC_LANRXBASE(sc->sc_pf_id),
4673 I40E_GLHMC_LANRXCNT(sc->sc_pf_id),
4674 },
4675 {
4676 0,
4677 0,
4678 I40E_GLHMC_FCOEMAX,
4679 I40E_GLHMC_FCOEDDPBASE(sc->sc_pf_id),
4680 I40E_GLHMC_FCOEDDPCNT(sc->sc_pf_id),
4681 },
4682 {
4683 0,
4684 0,
4685 I40E_GLHMC_FCOEFMAX,
4686 I40E_GLHMC_FCOEFBASE(sc->sc_pf_id),
4687 I40E_GLHMC_FCOEFCNT(sc->sc_pf_id),
4688 },
4689 };
4690 struct ixl_hmc_entry *e;
4691 uint64_t size, dva;
4692 uint8_t *kva;
4693 uint64_t *sdpage;
4694 unsigned int i;
4695 int npages, tables;
4696
4697 CTASSERT(nitems(regs) <= nitems(sc->sc_hmc_entries));
4698
4699 regs[IXL_HMC_LAN_TX].count = regs[IXL_HMC_LAN_RX].count =
4700 ixl_rd(sc, I40E_GLHMC_LANQMAX);
4701
4702 size = 0;
4703 for (i = 0; i < nitems(regs); i++) {
4704 e = &sc->sc_hmc_entries[i];
4705
4706 e->hmc_count = regs[i].count;
4707 e->hmc_size = 1U << ixl_rd(sc, regs[i].maxcnt);
4708 e->hmc_base = size;
4709
4710 if ((e->hmc_size * 8) < regs[i].minsize) {
4711 printf("%s: kernel hmc entry is too big\n",
4712 DEVNAME(sc));
4713 return (-1);
4714 }
4715
4716 size += roundup(e->hmc_size * e->hmc_count, IXL_HMC_ROUNDUP);
4717 }
4718 size = roundup(size, IXL_HMC_PGSIZE);
4719 npages = size / IXL_HMC_PGSIZE;
4720
4721 tables = roundup(size, IXL_HMC_L2SZ) / IXL_HMC_L2SZ;
4722
4723 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_pd, size, IXL_HMC_PGSIZE) != 0) {
4724 printf("%s: unable to allocate hmc pd memory\n", DEVNAME(sc));
4725 return (-1);
4726 }
4727
4728 if (ixl_dmamem_alloc(sc, &sc->sc_hmc_sd, tables * IXL_HMC_PGSIZE,
4729 IXL_HMC_PGSIZE) != 0) {
4730 printf("%s: unable to allocate hmc sd memory\n", DEVNAME(sc));
4731 ixl_dmamem_free(sc, &sc->sc_hmc_pd);
4732 return (-1);
4733 }
4734
4735 kva = IXL_DMA_KVA(&sc->sc_hmc_pd);
4736 memset(kva, 0, IXL_DMA_LEN(&sc->sc_hmc_pd));
4737
4738 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_pd),
4739 0, IXL_DMA_LEN(&sc->sc_hmc_pd),
4740 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4741
4742 dva = IXL_DMA_DVA(&sc->sc_hmc_pd);
4743 sdpage = IXL_DMA_KVA(&sc->sc_hmc_sd);
4744 for (i = 0; i < npages; i++) {
4745 htolem64(sdpage++, dva | IXL_HMC_PDVALID);
4746
4747 dva += IXL_HMC_PGSIZE;
4748 }
4749
4750 bus_dmamap_sync(sc->sc_dmat, IXL_DMA_MAP(&sc->sc_hmc_sd),
4751 0, IXL_DMA_LEN(&sc->sc_hmc_sd),
4752 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
4753
4754 dva = IXL_DMA_DVA(&sc->sc_hmc_sd);
4755 for (i = 0; i < tables; i++) {
4756 uint32_t count;
4757
4758 KASSERT(npages >= 0);
4759
4760 count = (npages > IXL_HMC_PGS) ? IXL_HMC_PGS : npages;
4761
4762 ixl_wr(sc, I40E_PFHMC_SDDATAHIGH, dva >> 32);
4763 ixl_wr(sc, I40E_PFHMC_SDDATALOW, dva |
4764 (count << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
4765 (1U << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT));
4766 ixl_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);
4767 ixl_wr(sc, I40E_PFHMC_SDCMD,
4768 (1U << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | i);
4769
4770 npages -= IXL_HMC_PGS;
4771 dva += IXL_HMC_PGSIZE;
4772 }
4773
4774 for (i = 0; i < nitems(regs); i++) {
4775 e = &sc->sc_hmc_entries[i];
4776
4777 ixl_wr(sc, regs[i].setoff, e->hmc_base / IXL_HMC_ROUNDUP);
4778 ixl_wr(sc, regs[i].setcnt, e->hmc_count);
4779 }
4780
4781 return (0);
4782 }
4783
4784 static void
4785 ixl_hmc_free(struct ixl_softc *sc)
4786 {
4787 ixl_dmamem_free(sc, &sc->sc_hmc_sd);
4788 ixl_dmamem_free(sc, &sc->sc_hmc_pd);
4789 }
4790
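/*
 * Pack a host-order context structure into the bit-exact layout the
 * hardware expects: for each field, copy pack->width bits from
 * src + pack->offset into the destination starting at bit pack->lsb,
 * handling the unaligned head, the whole bytes, and the partial
 * tail. Callers hand in a field table describing a hardware context
 * (e.g. the tx/rx queue context packings defined earlier in this
 * file).
 */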
4791 static void
4792 ixl_hmc_pack(void *d, const void *s, const struct ixl_hmc_pack *packing,
4793 unsigned int npacking)
4794 {
4795 uint8_t *dst = d;
4796 const uint8_t *src = s;
4797 unsigned int i;
4798
4799 for (i = 0; i < npacking; i++) {
4800 const struct ixl_hmc_pack *pack = &packing[i];
4801 unsigned int offset = pack->lsb / 8;
4802 unsigned int align = pack->lsb % 8;
4803 const uint8_t *in = src + pack->offset;
4804 uint8_t *out = dst + offset;
4805 int width = pack->width;
4806 unsigned int inbits = 0;
4807
4808 if (align) {
4809 inbits = (*in++) << align;
4810 *out++ |= (inbits & 0xff);
4811 inbits >>= 8;
4812
4813 width -= 8 - align;
4814 }
4815
4816 while (width >= 8) {
4817 inbits |= (*in++) << align;
4818 *out++ = (inbits & 0xff);
4819 inbits >>= 8;
4820
4821 width -= 8;
4822 }
4823
4824 if (width > 0) {
4825 inbits |= (*in) << align;
4826 *out |= (inbits & ((1 << width) - 1));
4827 }
4828 }
4829 }
4830
4831 static struct ixl_aq_buf *
4832 ixl_aqb_alloc(struct ixl_softc *sc)
4833 {
4834 struct ixl_aq_buf *aqb;
4835
4836 aqb = malloc(sizeof(*aqb), M_DEVBUF, M_WAITOK);
4837 if (aqb == NULL)
4838 return (NULL);
4839
4840 aqb->aqb_data = dma_alloc(IXL_AQ_BUFLEN, PR_WAITOK);
4841 if (aqb->aqb_data == NULL)
4842 goto free;
4843
4844 if (bus_dmamap_create(sc->sc_dmat, IXL_AQ_BUFLEN, 1,
4845 IXL_AQ_BUFLEN, 0,
4846 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
4847 &aqb->aqb_map) != 0)
4848 goto dma_free;
4849
4850 if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data,
4851 IXL_AQ_BUFLEN, NULL, BUS_DMA_WAITOK) != 0)
4852 goto destroy;
4853
4854 return (aqb);
4855
4856 destroy:
4857 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
4858 dma_free:
4859 dma_free(aqb->aqb_data, IXL_AQ_BUFLEN);
4860 free:
4861 free(aqb, M_DEVBUF, sizeof(*aqb));
4862
4863 return (NULL);
4864 }
4865
4866 static void
4867 ixl_aqb_free(struct ixl_softc *sc, struct ixl_aq_buf *aqb)
4868 {
4869 bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map);
4870 bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
4871 dma_free(aqb->aqb_data, IXL_AQ_BUFLEN);
4872 free(aqb, M_DEVBUF, sizeof(*aqb));
4873 }
4874
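/*
 * Replenish the admin receive queue: take buffers from the idle list
 * (allocating more if it runs dry), attach one to each free slot
 * with IXL_AQ_BUF set, and account for the slots via the if_rxr
 * ring. Returns nonzero if the tail register should be updated.
 */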
4875 static int
4876 ixl_arq_fill(struct ixl_softc *sc)
4877 {
4878 struct ixl_aq_buf *aqb;
4879 struct ixl_aq_desc *arq, *iaq;
4880 unsigned int prod = sc->sc_arq_prod;
4881 unsigned int n;
4882 int post = 0;
4883
4884 n = if_rxr_get(&sc->sc_arq_ring, IXL_AQ_NUM);
4885 arq = IXL_DMA_KVA(&sc->sc_arq);
4886
4887 while (n > 0) {
4888 aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle);
4889 if (aqb != NULL)
4890 SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_idle, aqb_entry);
4891 else if ((aqb = ixl_aqb_alloc(sc)) == NULL)
4892 break;
4893
4894 memset(aqb->aqb_data, 0, IXL_AQ_BUFLEN);
4895
4896 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
4897 BUS_DMASYNC_PREREAD);
4898
4899 iaq = &arq[prod];
4900 iaq->iaq_flags = htole16(IXL_AQ_BUF |
4901 (IXL_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IXL_AQ_LB : 0));
4902 iaq->iaq_opcode = 0;
4903 iaq->iaq_datalen = htole16(IXL_AQ_BUFLEN);
4904 iaq->iaq_retval = 0;
4905 iaq->iaq_cookie = 0;
4906 iaq->iaq_param[0] = 0;
4907 iaq->iaq_param[1] = 0;
4908 ixl_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr);
4909
4910 SIMPLEQ_INSERT_TAIL(&sc->sc_arq_live, aqb, aqb_entry);
4911
4912 prod++;
4913 prod &= IXL_AQ_MASK;
4914
4915 post = 1;
4916
4917 n--;
4918 }
4919
4920 if_rxr_put(&sc->sc_arq_ring, n);
4921 sc->sc_arq_prod = prod;
4922
4923 return (post);
4924 }
4925
4926 static void
4927 ixl_arq_unfill(struct ixl_softc *sc)
4928 {
4929 struct ixl_aq_buf *aqb;
4930
4931 while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_live)) != NULL) {
4932 SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_live, aqb_entry);
4933
4934 bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IXL_AQ_BUFLEN,
4935 BUS_DMASYNC_POSTREAD);
4936 ixl_aqb_free(sc, aqb);
4937 }
4938 }
4939
4940 static void
4941 ixl_clear_hw(struct ixl_softc *sc)
4942 {
4943 uint32_t num_queues, base_queue;
4944 uint32_t num_pf_int;
4945 uint32_t num_vf_int;
4946 uint32_t num_vfs;
4947 uint32_t i, j;
4948 uint32_t val;
4949
4950 /* get number of interrupts, queues, and vfs */
4951 val = ixl_rd(sc, I40E_GLPCI_CNF2);
4952 num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
4953 I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
4954 num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
4955 I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
4956
4957 val = ixl_rd(sc, I40E_PFLAN_QALLOC);
4958 base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
4959 I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
4960 j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
4961 I40E_PFLAN_QALLOC_LASTQ_SHIFT;
4962 if (val & I40E_PFLAN_QALLOC_VALID_MASK)
4963 num_queues = (j - base_queue) + 1;
4964 else
4965 num_queues = 0;
4966
4967 val = ixl_rd(sc, I40E_PF_VT_PFALLOC);
4968 i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
4969 I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
4970 j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
4971 I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
4972 if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
4973 num_vfs = (j - i) + 1;
4974 else
4975 num_vfs = 0;
4976
4977 /* stop all the interrupts */
4978 ixl_wr(sc, I40E_PFINT_ICR0_ENA, 0);
4979 val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
4980 for (i = 0; i < num_pf_int - 2; i++)
4981 ixl_wr(sc, I40E_PFINT_DYN_CTLN(i), val);
4982
4983 /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
4984 val = I40E_QUEUE_TYPE_EOL << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4985 ixl_wr(sc, I40E_PFINT_LNKLST0, val);
4986 for (i = 0; i < num_pf_int - 2; i++)
4987 ixl_wr(sc, I40E_PFINT_LNKLSTN(i), val);
4988 val = I40E_QUEUE_TYPE_EOL << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
4989 for (i = 0; i < num_vfs; i++)
4990 ixl_wr(sc, I40E_VPINT_LNKLST0(i), val);
4991 for (i = 0; i < num_vf_int - 2; i++)
4992 ixl_wr(sc, I40E_VPINT_LNKLSTN(i), val);
4993
4994 /* warn the HW of the coming Tx disables */
4995 for (i = 0; i < num_queues; i++) {
4996 uint32_t abs_queue_idx = base_queue + i;
4997 uint32_t reg_block = 0;
4998
4999 if (abs_queue_idx >= 128) {
5000 reg_block = abs_queue_idx / 128;
5001 abs_queue_idx %= 128;
5002 }
5003
5004 val = ixl_rd(sc, I40E_GLLAN_TXPRE_QDIS(reg_block));
5005 val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
5006 val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
5007 val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
5008
5009 ixl_wr(sc, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
5010 }
5011 delaymsec(400);
5012
5013 /* stop all the queues */
5014 for (i = 0; i < num_queues; i++) {
5015 ixl_wr(sc, I40E_QINT_TQCTL(i), 0);
5016 ixl_wr(sc, I40E_QTX_ENA(i), 0);
5017 ixl_wr(sc, I40E_QINT_RQCTL(i), 0);
5018 ixl_wr(sc, I40E_QRX_ENA(i), 0);
5019 }
5020
5021 /* short wait for all queue disables to settle */
5022 delaymsec(50);
5023 }
5024
5025 static int
5026 ixl_pf_reset(struct ixl_softc *sc)
5027 {
5028 uint32_t cnt = 0;
5029 uint32_t cnt1 = 0;
5030 uint32_t reg = 0;
5031 uint32_t grst_del;
5032
5033 /*
5034 * Poll for Global Reset steady state in case of recent GRST.
5035 * The grst delay value is in 100ms units, and we'll wait a
5036 * couple counts longer to be sure we don't just miss the end.
5037 */
5038 grst_del = ixl_rd(sc, I40E_GLGEN_RSTCTL);
5039 grst_del &= I40E_GLGEN_RSTCTL_GRSTDEL_MASK;
5040 grst_del >>= I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
5041 grst_del += 10;
5042
5043 for (cnt = 0; cnt < grst_del; cnt++) {
5044 reg = ixl_rd(sc, I40E_GLGEN_RSTAT);
5045 if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
5046 break;
5047 delaymsec(100);
5048 }
5049 if (reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
5050 printf(", Global reset polling failed to complete\n");
5051 return (-1);
5052 }
5053
5054 /* Now Wait for the FW to be ready */
5055 for (cnt1 = 0; cnt1 < I40E_PF_RESET_WAIT_COUNT; cnt1++) {
5056 reg = ixl_rd(sc, I40E_GLNVM_ULD);
5057 reg &= (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5058 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK);
5059 if (reg == (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5060 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))
5061 break;
5062
5063 delaymsec(10);
5064 }
5065 if (!(reg & (I40E_GLNVM_ULD_CONF_CORE_DONE_MASK |
5066 I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK))) {
5067 printf(", wait for FW Reset complete timed out "
5068 "(I40E_GLNVM_ULD = 0x%x)\n", reg);
5069 return (-1);
5070 }
5071
5072 /*
5073 * If there was a Global Reset in progress when we got here,
5074 * we don't need to do the PF Reset
5075 */
5076 if (cnt == 0) {
5077 reg = ixl_rd(sc, I40E_PFGEN_CTRL);
5078 ixl_wr(sc, I40E_PFGEN_CTRL, reg | I40E_PFGEN_CTRL_PFSWR_MASK);
5079 for (cnt = 0; cnt < I40E_PF_RESET_WAIT_COUNT; cnt++) {
5080 reg = ixl_rd(sc, I40E_PFGEN_CTRL);
5081 if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
5082 break;
5083 delaymsec(1);
5084 }
5085 if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
5086 printf(", PF reset polling failed to complete"
5087 "(I40E_PFGEN_CTRL= 0x%x)\n", reg);
5088 return (-1);
5089 }
5090 }
5091
5092 return (0);
5093 }
5094
5095 static uint32_t
5096 ixl_710_rd_ctl(struct ixl_softc *sc, uint32_t r)
5097 {
5098 struct ixl_atq iatq;
5099 struct ixl_aq_desc *iaq;
5100 uint16_t retval;
5101
5102 memset(&iatq, 0, sizeof(iatq));
5103 iaq = &iatq.iatq_desc;
5104 iaq->iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_READ);
5105 htolem32(&iaq->iaq_param[1], r);
5106
5107 ixl_atq_exec(sc, &iatq, "ixl710rd");
5108
5109 retval = lemtoh16(&iaq->iaq_retval);
5110 if (retval != IXL_AQ_RC_OK) {
5111 printf("%s: %s failed (%u)\n", DEVNAME(sc), __func__, retval);
5112 return (~0U);
5113 }
5114
5115 return (lemtoh32(&iaq->iaq_param[3]));
5116 }
5117
5118 static void
5119 ixl_710_wr_ctl(struct ixl_softc *sc, uint32_t r, uint32_t v)
5120 {
5121 struct ixl_atq iatq;
5122 struct ixl_aq_desc *iaq;
5123 uint16_t retval;
5124
5125 memset(&iatq, 0, sizeof(iatq));
5126 iaq = &iatq.iatq_desc;
5127 iaq->iaq_opcode = htole16(IXL_AQ_OP_RX_CTL_WRITE);
5128 htolem32(&iaq->iaq_param[1], r);
5129 htolem32(&iaq->iaq_param[3], v);
5130
5131 ixl_atq_exec(sc, &iatq, "ixl710wr");
5132
5133 retval = lemtoh16(&iaq->iaq_retval);
5134 if (retval != IXL_AQ_RC_OK) {
5135 printf("%s: %s %08x=%08x failed (%u)\n",
5136 DEVNAME(sc), __func__, r, v, retval);
5137 }
5138 }
5139
5140 static int
5141 ixl_710_set_rss_key(struct ixl_softc *sc, const struct ixl_rss_key *rsskey)
5142 {
5143 unsigned int i;
5144
5145 for (i = 0; i < nitems(rsskey->key); i++)
5146 ixl_wr_ctl(sc, I40E_PFQF_HKEY(i), rsskey->key[i]);
5147
5148 return (0);
5149 }
5150
5151 static int
5152 ixl_710_set_rss_lut(struct ixl_softc *sc, const struct ixl_rss_lut_128 *lut)
5153 {
5154 unsigned int i;
5155
5156 for (i = 0; i < nitems(lut->entries); i++)
5157 ixl_wr(sc, I40E_PFQF_HLUT(i), lut->entries[i]);
5158
5159 return (0);
5160 }
5161
5162 static uint32_t
5163 ixl_722_rd_ctl(struct ixl_softc *sc, uint32_t r)
5164 {
5165 return (ixl_rd(sc, r));
5166 }
5167
5168 static void
5169 ixl_722_wr_ctl(struct ixl_softc *sc, uint32_t r, uint32_t v)
5170 {
5171 ixl_wr(sc, r, v);
5172 }
5173
5174 static int
5175 ixl_722_set_rss_key(struct ixl_softc *sc, const struct ixl_rss_key *rsskey)
5176 {
5177 /* XXX */
5178
5179 return (0);
5180 }
5181
5182 static int
5183 ixl_722_set_rss_lut(struct ixl_softc *sc, const struct ixl_rss_lut_128 *lut)
5184 {
5185 /* XXX */
5186
5187 return (0);
5188 }
5189
5190 static int
5191 ixl_dmamem_alloc(struct ixl_softc *sc, struct ixl_dmamem *ixm,
5192 bus_size_t size, u_int align)
5193 {
5194 ixm->ixm_size = size;
5195
5196 if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1,
5197 ixm->ixm_size, 0,
5198 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
5199 &ixm->ixm_map) != 0)
5200 return (1);
5201 if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size,
5202 align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
5203 BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
5204 goto destroy;
5205 if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
5206 ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
5207 goto free;
5208 if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva,
5209 ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
5210 goto unmap;
5211
5212 return (0);
5213 unmap:
5214 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
5215 free:
5216 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
5217 destroy:
5218 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
5219 return (1);
5220 }
5221
5222 static void
5223 ixl_dmamem_free(struct ixl_softc *sc, struct ixl_dmamem *ixm)
5224 {
5225 bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map);
5226 bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
5227 bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
5228 bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
5229 }
5230
5231 #if NKSTAT > 0
5232
5233 CTASSERT(KSTAT_KV_U_NONE <= 0xffU);
5234 CTASSERT(KSTAT_KV_U_PACKETS <= 0xffU);
5235 CTASSERT(KSTAT_KV_U_BYTES <= 0xffU);
5236
5237 struct ixl_counter {
5238 const char *c_name;
5239 uint32_t c_base;
5240 uint8_t c_width;
5241 uint8_t c_type;
5242 };
5243
5244 const struct ixl_counter ixl_port_counters[] = {
5245 /* GORC */
5246 { "rx bytes", 0x00300000, 48, KSTAT_KV_U_BYTES },
5247 /* MLFC */
5248 { "mac local errs", 0x00300020, 32, KSTAT_KV_U_NONE },
5249 /* MRFC */
5250 { "mac remote errs", 0x00300040, 32, KSTAT_KV_U_NONE },
5251 /* MSPDC */
5252 { "mac short", 0x00300060, 32, KSTAT_KV_U_PACKETS },
5253 /* CRCERRS */
5254 { "crc errs", 0x00300080, 32, KSTAT_KV_U_PACKETS },
5255 /* RLEC */
5256 { "rx len errs", 0x003000a0, 32, KSTAT_KV_U_PACKETS },
5257 /* ERRBC */
5258 { "byte errs", 0x003000c0, 32, KSTAT_KV_U_PACKETS },
5259 /* ILLERRC */
5260 { "illegal byte", 0x003000d0, 32, KSTAT_KV_U_PACKETS },
5261 /* RUC */
5262 { "rx undersize", 0x00300100, 32, KSTAT_KV_U_PACKETS },
5263 /* ROC */
5264 { "rx oversize", 0x00300120, 32, KSTAT_KV_U_PACKETS },
5265 /* LXONRXCNT */
5266 { "rx link xon", 0x00300140, 32, KSTAT_KV_U_PACKETS },
5267 /* LXOFFRXCNT */
5268 { "rx link xoff", 0x00300160, 32, KSTAT_KV_U_PACKETS },
5269
5270 /* Priority XON Received Count */
5271 /* Priority XOFF Received Count */
5272 /* Priority XON to XOFF Count */
5273
5274 /* PRC64 */
5275 { "rx 64B", 0x00300480, 48, KSTAT_KV_U_PACKETS },
5276 /* PRC127 */
5277 { "rx 65-127B", 0x003004A0, 48, KSTAT_KV_U_PACKETS },
5278 /* PRC255 */
5279 { "rx 128-255B", 0x003004C0, 48, KSTAT_KV_U_PACKETS },
5280 /* PRC511 */
5281 { "rx 256-511B", 0x003004E0, 48, KSTAT_KV_U_PACKETS },
5282 /* PRC1023 */
5283 { "rx 512-1023B", 0x00300500, 48, KSTAT_KV_U_PACKETS },
5284 /* PRC1522 */
5285 { "rx 1024-1522B", 0x00300520, 48, KSTAT_KV_U_PACKETS },
5286 /* PRC9522 */
5287 { "rx 1523-9522B", 0x00300540, 48, KSTAT_KV_U_PACKETS },
5288 /* ROC */
5289 { "rx fragment", 0x00300560, 32, KSTAT_KV_U_PACKETS },
5290 /* RJC */
5291 { "rx jabber", 0x00300580, 32, KSTAT_KV_U_PACKETS },
5292 /* UPRC */
5293 { "rx ucasts", 0x003005a0, 48, KSTAT_KV_U_PACKETS },
5294 /* MPRC */
5295 { "rx mcasts", 0x003005c0, 48, KSTAT_KV_U_PACKETS },
5296 /* BPRC */
5297 { "rx bcasts", 0x003005e0, 48, KSTAT_KV_U_PACKETS },
5298 /* RDPC */
5299 { "rx discards", 0x00300600, 32, KSTAT_KV_U_PACKETS },
5300 /* LDPC */
5301 { "rx lo discards", 0x00300620, 32, KSTAT_KV_U_PACKETS },
5302 /* RUPP */
5303 { "rx no dest", 0x00300660, 32, KSTAT_KV_U_PACKETS },
5304
5305 /* GOTC */
5306 { "tx bytes", 0x00300680, 48, KSTAT_KV_U_BYTES },
5307 /* PTC64 */
5308 { "tx 64B", 0x003006A0, 48, KSTAT_KV_U_PACKETS },
5309 /* PTC127 */
5310 { "tx 65-127B", 0x003006C0, 48, KSTAT_KV_U_PACKETS },
5311 /* PTC255 */
5312 { "tx 128-255B", 0x003006E0, 48, KSTAT_KV_U_PACKETS },
5313 /* PTC511 */
5314 { "tx 256-511B", 0x00300700, 48, KSTAT_KV_U_PACKETS },
5315 /* PTC1023 */
5316 { "tx 512-1023B", 0x00300720, 48, KSTAT_KV_U_PACKETS },
5317 /* PTC1522 */
5318 { "tx 1024-1522B", 0x00300740, 48, KSTAT_KV_U_PACKETS },
5319 /* PTC9522 */
5320 { "tx 1523-9522B", 0x00300760, 48, KSTAT_KV_U_PACKETS },
5321
5322 /* Priority XON Transmitted Count */
5323 /* Priority XOFF Transmitted Count */
5324
5325 /* LXONTXC */
5326 { "tx link xon", 0x00300980, 48, KSTAT_KV_U_PACKETS },
5327 /* LXOFFTXC */
5328 { "tx link xoff", 0x003009a0, 48, KSTAT_KV_U_PACKETS },
5329 /* UPTC */
5330 { "tx ucasts", 0x003009c0, 48, KSTAT_KV_U_PACKETS },
5331 /* MPTC */
5332 { "tx mcasts", 0x003009e0, 48, KSTAT_KV_U_PACKETS },
5333 /* BPTC */
5334 { "tx bcasts", 0x00300a00, 48, KSTAT_KV_U_PACKETS },
5335 /* TDOLD */
5336 { "tx link down", 0x00300a20, 48, KSTAT_KV_U_PACKETS },
5337 };
5338
5339 const struct ixl_counter ixl_vsi_counters[] = {
5340 /* VSI RDPC */
5341 { "rx discards", 0x00310000, 32, KSTAT_KV_U_PACKETS },
5342 /* VSI GOTC */
5343 { "tx bytes", 0x00328000, 48, KSTAT_KV_U_BYTES },
5344 /* VSI UPTC */
5345 { "tx ucasts", 0x0033c000, 48, KSTAT_KV_U_PACKETS },
5346 /* VSI MPTC */
5347 { "tx mcasts", 0x0033cc00, 48, KSTAT_KV_U_PACKETS },
5348 /* VSI BPTC */
5349 { "tx bcasts", 0x0033d800, 48, KSTAT_KV_U_PACKETS },
5350 /* VSI TEPC */
5351 { "tx errs", 0x00344000, 48, KSTAT_KV_U_PACKETS },
5352 /* VSI TDPC */
5353 { "tx discards", 0x00348000, 48, KSTAT_KV_U_PACKETS },
5354 /* VSI GORC */
5355 { "rx bytes", 0x00358000, 48, KSTAT_KV_U_BYTES },
5356 /* VSI UPRC */
5357 { "rx ucasts", 0x0036c000, 48, KSTAT_KV_U_PACKETS },
5358 /* VSI MPRC */
5359 { "rx mcasts", 0x0036cc00, 48, KSTAT_KV_U_PACKETS },
5360 /* VSI BPRC */
5361 { "rx bcasts", 0x0036d800, 48, KSTAT_KV_U_PACKETS },
5362 /* VSI RUPP */
5363 { "rx noproto", 0x0036e400, 32, KSTAT_KV_U_PACKETS },
5364 };
5365
5366 struct ixl_counter_state {
5367 const struct ixl_counter
5368 *counters;
5369 uint64_t *values;
5370 size_t n;
5371 uint32_t index;
5372 unsigned int gen;
5373 };
5374
5375 static void
5376 ixl_rd_counters(struct ixl_softc *sc, const struct ixl_counter_state *state,
5377 uint64_t *vs)
5378 {
5379 const struct ixl_counter *c;
5380 bus_addr_t r;
5381 uint64_t v;
5382 size_t i;
5383
5384 for (i = 0; i < state->n; i++) {
5385 c = &state->counters[i];
5386
5387 r = c->c_base + (state->index * 8);
5388
5389 if (c->c_width == 32)
5390 v = bus_space_read_4(sc->sc_memt, sc->sc_memh, r);
5391 else
5392 v = bus_space_read_8(sc->sc_memt, sc->sc_memh, r);
5393
5394 vs[i] = v;
5395 }
5396 }
5397
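/*
 * Snapshot the hardware counters and accumulate deltas. The counters
 * are only 32 or 48 bits wide and wrap, so two generations of raw
 * readings are kept in state->values; each read adds the wrap-
 * corrected difference to the 64-bit kstat values.
 */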
5398 static int
5399 ixl_kstat_read(struct kstat *ks)
5400 {
5401 struct ixl_softc *sc = ks->ks_softc;
5402 struct kstat_kv *kvs = ks->ks_data;
5403 struct ixl_counter_state *state = ks->ks_ptr;
5404 unsigned int gen = (state->gen++) & 1;
5405 uint64_t *ovs = state->values + (gen * state->n);
5406 uint64_t *nvs = state->values + (!gen * state->n);
5407 size_t i;
5408
5409 ixl_rd_counters(sc, state, nvs);
5410 getnanouptime(&ks->ks_updated);
5411
5412 for (i = 0; i < state->n; i++) {
5413 const struct ixl_counter *c = &state->counters[i];
5414 uint64_t n = nvs[i], o = ovs[i];
5415
5416 if (c->c_width < 64) {
5417 if (n < o)
5418 n += (1ULL << c->c_width);
5419 }
5420
5421 kstat_kv_u64(&kvs[i]) += (n - o);
5422 }
5423
5424 return (0);
5425 }
5426
5427 static void
5428 ixl_kstat_tick(void *arg)
5429 {
5430 struct ixl_softc *sc = arg;
5431
5432 timeout_add_sec(&sc->sc_kstat_tmo, 4);
5433
5434 mtx_enter(&sc->sc_kstat_mtx);
5435
5436 ixl_kstat_read(sc->sc_port_kstat);
5437 ixl_kstat_read(sc->sc_vsi_kstat);
5438
5439 mtx_leave(&sc->sc_kstat_mtx);
5440 }
5441
5442 static struct kstat *
5443 ixl_kstat_create(struct ixl_softc *sc, const char *name,
5444 const struct ixl_counter *counters, size_t n, uint32_t index)
5445 {
5446 struct kstat *ks;
5447 struct kstat_kv *kvs;
5448 struct ixl_counter_state *state;
5449 const struct ixl_counter *c;
5450 unsigned int i;
5451
5452 ks = kstat_create(DEVNAME(sc), 0, name, 0, KSTAT_T_KV, 0);
5453 if (ks == NULL) {
5454 /* unable to create kstats */
5455 return (NULL);
5456 }
5457
5458 kvs = mallocarray(n, sizeof(*kvs), M_DEVBUF, M_WAITOK|M_ZERO);
5459 for (i = 0; i < n; i++) {
5460 c = &counters[i];
5461
5462 kstat_kv_unit_init(&kvs[i], c->c_name,
5463 KSTAT_KV_T_COUNTER64, c->c_type);
5464 }
5465
5466 ks->ks_data = kvs;
5467 ks->ks_datalen = n * sizeof(*kvs);
5468 ks->ks_read = ixl_kstat_read;
5469
5470 state = malloc(sizeof(*state), M_DEVBUF, M_WAITOK|M_ZERO);
5471 state->counters = counters;
5472 state->n = n;
5473 state->values = mallocarray(n * 2, sizeof(*state->values),
5474 M_DEVBUF, M_WAITOK|M_ZERO);
5475 state->index = index;
5476 ks->ks_ptr = state;
5477
5478 kstat_set_mutex(ks, &sc->sc_kstat_mtx);
5479 ks->ks_softc = sc;
5480 kstat_install(ks);
5481
5482 /* fetch a baseline */
5483 ixl_rd_counters(sc, state, state->values);
5484
5485 return (ks);
5486 }
5487
5488 static void
ixl_kstat_attach(struct ixl_softc * sc)5489 ixl_kstat_attach(struct ixl_softc *sc)
5490 {
5491 mtx_init(&sc->sc_kstat_mtx, IPL_SOFTCLOCK);
5492 timeout_set(&sc->sc_kstat_tmo, ixl_kstat_tick, sc);
5493
5494 sc->sc_port_kstat = ixl_kstat_create(sc, "ixl-port",
5495 ixl_port_counters, nitems(ixl_port_counters), sc->sc_port);
5496 sc->sc_vsi_kstat = ixl_kstat_create(sc, "ixl-vsi",
5497 ixl_vsi_counters, nitems(ixl_vsi_counters),
5498 lemtoh16(&sc->sc_vsi_number));
5499
5500 /* ixl counters go up even when the interface is down */
5501 timeout_add_sec(&sc->sc_kstat_tmo, 4);
5502 }
5503
5504 #endif /* NKSTAT > 0 */
5505