1 /* $OpenBSD: if_iavf.c,v 1.18 2024/11/27 02:40:53 yasuoka Exp $ */
2
3 /*
4 * Copyright (c) 2013-2015, Intel Corporation
5 * All rights reserved.
6  *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * 1. Redistributions of source code must retain the above copyright notice,
11 * this list of conditions and the following disclaimer.
12 *
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * 3. Neither the name of the Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
35 * Copyright (c) 2016,2017 David Gwynne <dlg@openbsd.org>
36 * Copyright (c) 2019 Jonathan Matthew <jmatthew@openbsd.org>
37 *
38 * Permission to use, copy, modify, and distribute this software for any
39 * purpose with or without fee is hereby granted, provided that the above
40 * copyright notice and this permission notice appear in all copies.
41 *
42 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
43 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
44 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
45 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
46 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
47 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
48 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
49 */
50
51 #include "bpfilter.h"
52 #include "vlan.h"
53
54 #include <sys/param.h>
55 #include <sys/systm.h>
56 #include <sys/proc.h>
57 #include <sys/sockio.h>
58 #include <sys/mbuf.h>
59 #include <sys/socket.h>
60 #include <sys/device.h>
61 #include <sys/pool.h>
62 #include <sys/queue.h>
63 #include <sys/timeout.h>
64 #include <sys/task.h>
65 #include <sys/syslog.h>
66
67 #include <machine/bus.h>
68 #include <machine/intr.h>
69
70 #include <net/if.h>
71 #include <net/if_media.h>
72
73 #if NBPFILTER > 0
74 #include <net/bpf.h>
75 #endif
76
77 #include <netinet/in.h>
78 #include <netinet/if_ether.h>
79 #include <netinet/udp.h>
80
81 #include <dev/pci/pcireg.h>
82 #include <dev/pci/pcivar.h>
83 #include <dev/pci/pcidevs.h>
84
85 #define I40E_MASK(mask, shift) ((mask) << (shift))
86 #define I40E_AQ_LARGE_BUF 512
87
88 #define IAVF_REG_VFR 0xdeadbeef
89
90 #define IAVF_VFR_INPROGRESS 0
91 #define IAVF_VFR_COMPLETED 1
92 #define IAVF_VFR_VFACTIVE 2
93
94 #include <dev/pci/if_ixlreg.h>
95
96 struct iavf_aq_desc {
97 uint16_t iaq_flags;
98 #define IAVF_AQ_DD (1U << 0)
99 #define IAVF_AQ_CMP (1U << 1)
100 #define IAVF_AQ_ERR (1U << 2)
101 #define IAVF_AQ_VFE (1U << 3)
102 #define IAVF_AQ_LB (1U << 9)
103 #define IAVF_AQ_RD (1U << 10)
104 #define IAVF_AQ_VFC (1U << 11)
105 #define IAVF_AQ_BUF (1U << 12)
106 #define IAVF_AQ_SI (1U << 13)
107 #define IAVF_AQ_EI (1U << 14)
108 #define IAVF_AQ_FE (1U << 15)
109
110 #define IAVF_AQ_FLAGS_FMT "\020" "\020FE" "\017EI" "\016SI" "\015BUF" \
111 "\014VFC" "\013DB" "\012LB" "\004VFE" \
112 "\003ERR" "\002CMP" "\001DD"
113
114 uint16_t iaq_opcode;
115
116 uint16_t iaq_datalen;
117 uint16_t iaq_retval;
118
119 uint32_t iaq_vc_opcode;
120 uint32_t iaq_vc_retval;
121
122 uint32_t iaq_param[4];
123 /* iaq_vfid iaq_param[0] */
124 /* iaq_data_hi iaq_param[2] */
125 /* iaq_data_lo iaq_param[3] */
126 } __packed __aligned(8);
127
128 /* aq commands */
129 #define IAVF_AQ_OP_SEND_TO_PF 0x0801
130 #define IAVF_AQ_OP_MSG_FROM_PF 0x0802
131 #define IAVF_AQ_OP_SHUTDOWN 0x0803
132
133 /* virt channel messages */
134 #define IAVF_VC_OP_VERSION 1
135 #define IAVF_VC_OP_RESET_VF 2
136 #define IAVF_VC_OP_GET_VF_RESOURCES 3
137 #define IAVF_VC_OP_CONFIG_TX_QUEUE 4
138 #define IAVF_VC_OP_CONFIG_RX_QUEUE 5
139 #define IAVF_VC_OP_CONFIG_VSI_QUEUES 6
140 #define IAVF_VC_OP_CONFIG_IRQ_MAP 7
141 #define IAVF_VC_OP_ENABLE_QUEUES 8
142 #define IAVF_VC_OP_DISABLE_QUEUES 9
143 #define IAVF_VC_OP_ADD_ETH_ADDR 10
144 #define IAVF_VC_OP_DEL_ETH_ADDR 11
145 #define IAVF_VC_OP_ADD_VLAN 12
146 #define IAVF_VC_OP_DEL_VLAN 13
147 #define IAVF_VC_OP_CONFIG_PROMISC 14
148 #define IAVF_VC_OP_GET_STATS 15
149 #define IAVF_VC_OP_EVENT 17
150 #define IAVF_VC_OP_GET_RSS_HENA_CAPS 25
151 #define IAVF_VC_OP_SET_RSS_HENA 26
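152
/*
 * Sending a virtchnl message: the message rides as the payload of an
 * admin queue command.  A minimal sketch of the pattern used by
 * iavf_config_hena() and friends below ("msglen" stands in for the
 * size of the message placed in the scratch buffer):
 *
 *	struct iavf_aq_desc iaq;
 *
 *	memset(&iaq, 0, sizeof(iaq));
 *	iaq.iaq_flags = htole16(IAVF_AQ_BUF | IAVF_AQ_RD);
 *	iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
 *	iaq.iaq_vc_opcode = htole32(IAVF_VC_OP_GET_STATS);
 *	iaq.iaq_datalen = htole16(msglen);
 *	iavf_aq_dva(&iaq, IAVF_DMA_DVA(&sc->sc_scratch));
 *	iavf_atq_post(sc, &iaq);
 */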
152
153 /* virt channel response codes */
154 #define IAVF_VC_RC_SUCCESS 0
155 #define IAVF_VC_RC_ERR_PARAM -5
156 #define IAVF_VC_RC_ERR_OPCODE -38
157 #define IAVF_VC_RC_ERR_CQP_COMPL -39
158 #define IAVF_VC_RC_ERR_VF_ID -40
159 #define IAVF_VC_RC_ERR_NOT_SUP -64
160
161 /* virt channel events */
162 #define IAVF_VC_EVENT_LINK_CHANGE 1
163 #define IAVF_VC_EVENT_RESET_IMPENDING 2
164 #define IAVF_VC_EVENT_PF_DRIVER_CLOSE 3
165
166 /* virt channel offloads */
167 #define IAVF_VC_OFFLOAD_L2 0x00000001
168 #define IAVF_VC_OFFLOAD_IWARP 0x00000002
169 #define IAVF_VC_OFFLOAD_RSVD 0x00000004
170 #define IAVF_VC_OFFLOAD_RSS_AQ 0x00000008
171 #define IAVF_VC_OFFLOAD_RSS_REG 0x00000010
172 #define IAVF_VC_OFFLOAD_WB_ON_ITR 0x00000020
173 #define IAVF_VC_OFFLOAD_VLAN 0x00010000
174 #define IAVF_VC_OFFLOAD_RX_POLLING 0x00020000
175 #define IAVF_VC_OFFLOAD_RSS_PCTYPE_V2 0x00040000
176 #define IAVF_VC_OFFLOAD_RSS_PF 0x00080000
177 #define IAVF_VC_OFFLOAD_ENCAP 0x00100000
178 #define IAVF_VC_OFFLOAD_ENCAP_CSUM 0x00200000
179 #define IAVF_VC_OFFLOAD_RX_ENCAP_CSUM 0x00400000
180
181 /* link speeds */
182 #define IAVF_VC_LINK_SPEED_100MB 0x1
183 #define IAVF_VC_LINK_SPEED_1000MB 0x2
184 #define IAVF_VC_LINK_SPEED_10GB 0x3
185 #define IAVF_VC_LINK_SPEED_40GB 0x4
186 #define IAVF_VC_LINK_SPEED_20GB 0x5
187 #define IAVF_VC_LINK_SPEED_25GB 0x6
188
189 struct iavf_link_speed {
190 uint64_t baudrate;
191 uint64_t media;
192 };
193
194 static const struct iavf_link_speed iavf_link_speeds[] = {
195 { 0, 0 },
196 { IF_Mbps(100), IFM_100_TX },
197 { IF_Mbps(1000), IFM_1000_T },
198 { IF_Gbps(10), IFM_10G_T },
199 { IF_Gbps(40), IFM_40G_CR4 },
200 { IF_Gbps(20), IFM_20G_KR2 },
201 { IF_Gbps(25), IFM_25G_CR }
202 };
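203
/*
 * The PF reports link speed as one of the small enum values above,
 * which indexes this table.  An illustrative lookup (the PF is not
 * trusted, hence the bounds check; "link_speed" is the value taken
 * from a PF event):
 *
 *	if (link_speed < nitems(iavf_link_speeds))
 *		ifp->if_baudrate = iavf_link_speeds[link_speed].baudrate;
 */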
203
204
205 struct iavf_vc_version_info {
206 uint32_t major;
207 uint32_t minor;
208 } __packed;
209
210 struct iavf_vc_txq_info {
211 uint16_t vsi_id;
212 uint16_t queue_id;
213 uint16_t ring_len;
214 uint16_t headwb_ena; /* deprecated */
215 uint64_t dma_ring_addr;
216 uint64_t dma_headwb_addr; /* deprecated */
217 } __packed;
218
219 struct iavf_vc_rxq_info {
220 uint16_t vsi_id;
221 uint16_t queue_id;
222 uint32_t ring_len;
223 uint16_t hdr_size;
224 uint16_t splithdr_ena;
225 uint32_t databuf_size;
226 uint32_t max_pkt_size;
227 uint32_t pad1;
228 uint64_t dma_ring_addr;
229 uint32_t rx_split_pos;
230 uint32_t pad2;
231 } __packed;
232
233 struct iavf_vc_queue_pair_info {
234 struct iavf_vc_txq_info txq;
235 struct iavf_vc_rxq_info rxq;
236 } __packed;
237
238 struct iavf_vc_queue_config_info {
239 uint16_t vsi_id;
240 uint16_t num_queue_pairs;
241 uint32_t pad;
242 struct iavf_vc_queue_pair_info qpair[1];
243 } __packed;
244
245 struct iavf_vc_vector_map {
246 uint16_t vsi_id;
247 uint16_t vector_id;
248 uint16_t rxq_map;
249 uint16_t txq_map;
250 uint16_t rxitr_idx;
251 uint16_t txitr_idx;
252 } __packed;
253
254 struct iavf_vc_irq_map_info {
255 uint16_t num_vectors;
256 struct iavf_vc_vector_map vecmap[1];
257 } __packed;
258
259 struct iavf_vc_queue_select {
260 uint16_t vsi_id;
261 uint16_t pad;
262 uint32_t rx_queues;
263 uint32_t tx_queues;
264 } __packed;
265
266 struct iavf_vc_vsi_resource {
267 uint16_t vsi_id;
268 uint16_t num_queue_pairs;
269 uint32_t vsi_type;
270 uint16_t qset_handle;
271 uint8_t default_mac[ETHER_ADDR_LEN];
272 } __packed;
273
274 struct iavf_vc_vf_resource {
275 uint16_t num_vsis;
276 uint16_t num_qp;
277 uint16_t max_vectors;
278 uint16_t max_mtu;
279 uint32_t offload_flags;
280 uint32_t rss_key_size;
281 uint32_t rss_lut_size;
282 struct iavf_vc_vsi_resource vsi_res[1];
283 } __packed;
284
285 struct iavf_vc_eth_addr {
286 uint8_t addr[ETHER_ADDR_LEN];
287 uint8_t pad[2];
288 } __packed;
289
290 struct iavf_vc_eth_addr_list {
291 uint16_t vsi_id;
292 uint16_t num_elements;
293 struct iavf_vc_eth_addr list[1];
294 } __packed;
295
296 struct iavf_vc_vlan_list {
297 uint16_t vsi_id;
298 uint16_t num_elements;
299 uint16_t vlan_id[1];
300 } __packed;
301
302 struct iavf_vc_promisc_info {
303 uint16_t vsi_id;
304 uint16_t flags;
305 #define IAVF_FLAG_VF_UNICAST_PROMISC 0x0001
306 #define IAVF_FLAG_VF_MULTICAST_PROMISC 0x0002
307 } __packed;
308
309 struct iavf_vc_pf_event {
310 uint32_t event;
311 uint32_t link_speed;
312 uint8_t link_status;
313 uint8_t pad[3];
314 uint32_t severity;
315 } __packed;
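316
/*
 * A sketch, assuming the event has already been copied out of an
 * admin receive queue buffer, of decoding a link change ("buf" and
 * "link" are illustrative locals):
 *
 *	struct iavf_vc_pf_event *ev = buf;
 *
 *	if (letoh32(ev->event) == IAVF_VC_EVENT_LINK_CHANGE)
 *		link = ev->link_status ?
 *		    LINK_STATE_UP : LINK_STATE_DOWN;
 */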
316
317 /* aq response codes */
318 #define IAVF_AQ_RC_OK 0 /* success */
319 #define IAVF_AQ_RC_EPERM 1 /* Operation not permitted */
320 #define IAVF_AQ_RC_ENOENT 2 /* No such element */
321 #define IAVF_AQ_RC_ESRCH 3 /* Bad opcode */
322 #define IAVF_AQ_RC_EINTR 4 /* operation interrupted */
323 #define IAVF_AQ_RC_EIO 5 /* I/O error */
324 #define IAVF_AQ_RC_ENXIO 6 /* No such resource */
325 #define IAVF_AQ_RC_E2BIG 7 /* Arg too long */
326 #define IAVF_AQ_RC_EAGAIN 8 /* Try again */
327 #define IAVF_AQ_RC_ENOMEM 9 /* Out of memory */
328 #define IAVF_AQ_RC_EACCES 10 /* Permission denied */
329 #define IAVF_AQ_RC_EFAULT 11 /* Bad address */
330 #define IAVF_AQ_RC_EBUSY 12 /* Device or resource busy */
331 #define IAVF_AQ_RC_EEXIST 13 /* object already exists */
332 #define IAVF_AQ_RC_EINVAL 14 /* invalid argument */
333 #define IAVF_AQ_RC_ENOTTY 15 /* not a typewriter */
334 #define IAVF_AQ_RC_ENOSPC 16 /* No space or alloc failure */
335 #define IAVF_AQ_RC_ENOSYS 17 /* function not implemented */
336 #define IAVF_AQ_RC_ERANGE 18 /* parameter out of range */
337 #define IAVF_AQ_RC_EFLUSHED 19 /* cmd flushed due to prev error */
338 #define IAVF_AQ_RC_BAD_ADDR 20 /* contains a bad pointer */
339 #define IAVF_AQ_RC_EMODE 21 /* not allowed in current mode */
340 #define IAVF_AQ_RC_EFBIG 22 /* file too large */
341
342 struct iavf_tx_desc {
343 uint64_t addr;
344 uint64_t cmd;
345 #define IAVF_TX_DESC_DTYPE_SHIFT 0
346 #define IAVF_TX_DESC_DTYPE_MASK (0xfULL << IAVF_TX_DESC_DTYPE_SHIFT)
347 #define IAVF_TX_DESC_DTYPE_DATA (0x0ULL << IAVF_TX_DESC_DTYPE_SHIFT)
348 #define IAVF_TX_DESC_DTYPE_NOP (0x1ULL << IAVF_TX_DESC_DTYPE_SHIFT)
349 #define IAVF_TX_DESC_DTYPE_CONTEXT (0x1ULL << IAVF_TX_DESC_DTYPE_SHIFT)
350 #define IAVF_TX_DESC_DTYPE_FCOE_CTX (0x2ULL << IAVF_TX_DESC_DTYPE_SHIFT)
351 #define IAVF_TX_DESC_DTYPE_FD (0x8ULL << IAVF_TX_DESC_DTYPE_SHIFT)
352 #define IAVF_TX_DESC_DTYPE_DDP_CTX (0x9ULL << IAVF_TX_DESC_DTYPE_SHIFT)
353 #define IAVF_TX_DESC_DTYPE_FLEX_DATA (0xbULL << IAVF_TX_DESC_DTYPE_SHIFT)
354 #define IAVF_TX_DESC_DTYPE_FLEX_CTX_1 (0xcULL << IAVF_TX_DESC_DTYPE_SHIFT)
355 #define IAVF_TX_DESC_DTYPE_FLEX_CTX_2 (0xdULL << IAVF_TX_DESC_DTYPE_SHIFT)
356 #define IAVF_TX_DESC_DTYPE_DONE (0xfULL << IAVF_TX_DESC_DTYPE_SHIFT)
357
358 #define IAVF_TX_DESC_CMD_SHIFT 4
359 #define IAVF_TX_DESC_CMD_MASK (0x3ffULL << IAVF_TX_DESC_CMD_SHIFT)
360 #define IAVF_TX_DESC_CMD_EOP (0x001 << IAVF_TX_DESC_CMD_SHIFT)
361 #define IAVF_TX_DESC_CMD_RS (0x002 << IAVF_TX_DESC_CMD_SHIFT)
362 #define IAVF_TX_DESC_CMD_ICRC (0x004 << IAVF_TX_DESC_CMD_SHIFT)
363 #define IAVF_TX_DESC_CMD_IL2TAG1 (0x008 << IAVF_TX_DESC_CMD_SHIFT)
364 #define IAVF_TX_DESC_CMD_DUMMY (0x010 << IAVF_TX_DESC_CMD_SHIFT)
365 #define IAVF_TX_DESC_CMD_IIPT_MASK (0x060 << IAVF_TX_DESC_CMD_SHIFT)
366 #define IAVF_TX_DESC_CMD_IIPT_NONIP (0x000 << IAVF_TX_DESC_CMD_SHIFT)
367 #define IAVF_TX_DESC_CMD_IIPT_IPV6 (0x020 << IAVF_TX_DESC_CMD_SHIFT)
368 #define IAVF_TX_DESC_CMD_IIPT_IPV4 (0x040 << IAVF_TX_DESC_CMD_SHIFT)
369 #define IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM (0x060 << IAVF_TX_DESC_CMD_SHIFT)
370 #define IAVF_TX_DESC_CMD_FCOET (0x080 << IAVF_TX_DESC_CMD_SHIFT)
371 #define IAVF_TX_DESC_CMD_L4T_EOFT_MASK (0x300 << IAVF_TX_DESC_CMD_SHIFT)
372 #define IAVF_TX_DESC_CMD_L4T_EOFT_UNK (0x000 << IAVF_TX_DESC_CMD_SHIFT)
373 #define IAVF_TX_DESC_CMD_L4T_EOFT_TCP (0x100 << IAVF_TX_DESC_CMD_SHIFT)
374 #define IAVF_TX_DESC_CMD_L4T_EOFT_SCTP (0x200 << IAVF_TX_DESC_CMD_SHIFT)
375 #define IAVF_TX_DESC_CMD_L4T_EOFT_UDP (0x300 << IAVF_TX_DESC_CMD_SHIFT)
376
377 #define IAVF_TX_DESC_MACLEN_SHIFT 16
378 #define IAVF_TX_DESC_MACLEN_MASK (0x7fULL << IAVF_TX_DESC_MACLEN_SHIFT)
379 #define IAVF_TX_DESC_IPLEN_SHIFT 23
380 #define IAVF_TX_DESC_IPLEN_MASK (0x7fULL << IAVF_TX_DESC_IPLEN_SHIFT)
381 #define IAVF_TX_DESC_L4LEN_SHIFT 30
382 #define IAVF_TX_DESC_L4LEN_MASK (0xfULL << IAVF_TX_DESC_L4LEN_SHIFT)
383 #define IAVF_TX_DESC_FCLEN_SHIFT 30
384 #define IAVF_TX_DESC_FCLEN_MASK (0xfULL << IAVF_TX_DESC_FCLEN_SHIFT)
385
386 #define IAVF_TX_DESC_BSIZE_SHIFT 34
387 #define IAVF_TX_DESC_BSIZE_MAX 0x3fffULL
388 #define IAVF_TX_DESC_BSIZE_MASK \
389 (IAVF_TX_DESC_BSIZE_MAX << IAVF_TX_DESC_BSIZE_SHIFT)
390
391 #define IAVF_TX_DESC_L2TAG1_SHIFT 48
392 #define IAVF_TX_DESC_L2TAG1_MASK (0xffffULL << IAVF_TX_DESC_L2TAG1_SHIFT)
393 } __packed __aligned(16);
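394
/*
 * Illustrative use of the descriptor fields above, mirroring what
 * iavf_start() does for each DMA segment ("seglen", "segaddr" and
 * "txd" are stand-in locals):
 *
 *	cmd = (uint64_t)seglen << IAVF_TX_DESC_BSIZE_SHIFT;
 *	cmd |= IAVF_TX_DESC_DTYPE_DATA | IAVF_TX_DESC_CMD_ICRC;
 *	cmd |= IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_RS;  (final seg only)
 *	htolem64(&txd->addr, segaddr);
 *	htolem64(&txd->cmd, cmd);
 */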
394
395 struct iavf_rx_rd_desc_16 {
396 uint64_t paddr; /* packet addr */
397 uint64_t haddr; /* header addr */
398 } __packed __aligned(16);
399
400 struct iavf_rx_rd_desc_32 {
401 uint64_t paddr; /* packet addr */
402 uint64_t haddr; /* header addr */
403 uint64_t _reserved1;
404 uint64_t _reserved2;
405 } __packed __aligned(16);
406
407 struct iavf_rx_wb_desc_16 {
408 uint64_t qword0;
409 #define IAVF_RX_DESC_L2TAG1_SHIFT 16
410 #define IAVF_RX_DESC_L2TAG1_MASK (0xffff << IAVF_RX_DESC_L2TAG1_SHIFT)
411 uint64_t qword1;
412 #define IAVF_RX_DESC_DD (1 << 0)
413 #define IAVF_RX_DESC_EOP (1 << 1)
414 #define IAVF_RX_DESC_L2TAG1P (1 << 2)
415 #define IAVF_RX_DESC_L3L4P (1 << 3)
416 #define IAVF_RX_DESC_CRCP (1 << 4)
417 #define IAVF_RX_DESC_TSYNINDX_SHIFT 5 /* TSYNINDX */
418 #define IAVF_RX_DESC_TSYNINDX_MASK (7 << IAVF_RX_DESC_TSYNINDX_SHIFT)
419 #define IAVF_RX_DESC_UMB_SHIFT 9
420 #define IAVF_RX_DESC_UMB_MASK (0x3 << IAVF_RX_DESC_UMB_SHIFT)
421 #define IAVF_RX_DESC_UMB_UCAST (0x0 << IAVF_RX_DESC_UMB_SHIFT)
422 #define IAVF_RX_DESC_UMB_MCAST (0x1 << IAVF_RX_DESC_UMB_SHIFT)
423 #define IAVF_RX_DESC_UMB_BCAST (0x2 << IAVF_RX_DESC_UMB_SHIFT)
424 #define IAVF_RX_DESC_UMB_MIRROR (0x3 << IAVF_RX_DESC_UMB_SHIFT)
425 #define IAVF_RX_DESC_FLM (1 << 11)
426 #define IAVF_RX_DESC_FLTSTAT_SHIFT 12
427 #define IAVF_RX_DESC_FLTSTAT_MASK (0x3 << IAVF_RX_DESC_FLTSTAT_SHIFT)
428 #define IAVF_RX_DESC_FLTSTAT_NODATA (0x0 << IAVF_RX_DESC_FLTSTAT_SHIFT)
429 #define IAVF_RX_DESC_FLTSTAT_FDFILTID (0x1 << IAVF_RX_DESC_FLTSTAT_SHIFT)
430 #define IAVF_RX_DESC_FLTSTAT_RSS (0x3 << IAVF_RX_DESC_FLTSTAT_SHIFT)
431 #define IAVF_RX_DESC_LPBK (1 << 14)
432 #define IAVF_RX_DESC_IPV6EXTADD (1 << 15)
433 #define IAVF_RX_DESC_INT_UDP_0 (1 << 18)
434
435 #define IAVF_RX_DESC_RXE (1 << 19)
436 #define IAVF_RX_DESC_HBO (1 << 21)
437 #define IAVF_RX_DESC_IPE (1 << 22)
438 #define IAVF_RX_DESC_L4E (1 << 23)
439 #define IAVF_RX_DESC_EIPE (1 << 24)
440 #define IAVF_RX_DESC_OVERSIZE (1 << 25)
441
442 #define IAVF_RX_DESC_PTYPE_SHIFT 30
443 #define IAVF_RX_DESC_PTYPE_MASK (0xffULL << IAVF_RX_DESC_PTYPE_SHIFT)
444
445 #define IAVF_RX_DESC_PLEN_SHIFT 38
446 #define IAVF_RX_DESC_PLEN_MASK (0x3fffULL << IAVF_RX_DESC_PLEN_SHIFT)
447 #define IAVF_RX_DESC_HLEN_SHIFT 42
448 #define IAVF_RX_DESC_HLEN_MASK (0x7ffULL << IAVF_RX_DESC_HLEN_SHIFT)
449 } __packed __aligned(16);
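450
/*
 * Sketch of parsing a write-back descriptor, as iavf_rxeof() does:
 * once the DD bit is set, qword1 carries the status, error and
 * length fields ("rxd", "word", "len" and "eop" are illustrative
 * locals):
 *
 *	word = lemtoh64(&rxd->qword1);
 *	if (ISSET(word, IAVF_RX_DESC_DD)) {
 *		len = (word & IAVF_RX_DESC_PLEN_MASK) >>
 *		    IAVF_RX_DESC_PLEN_SHIFT;
 *		eop = ISSET(word, IAVF_RX_DESC_EOP);
 *	}
 */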
450
451 struct iavf_rx_wb_desc_32 {
452 uint64_t qword0;
453 uint64_t qword1;
454 uint64_t qword2;
455 uint64_t qword3;
456 } __packed __aligned(16);
457
458
459 #define IAVF_VF_MAJOR 1
460 #define IAVF_VF_MINOR 1
461
462 #define IAVF_TX_PKT_DESCS 8
463 #define IAVF_TX_QUEUE_ALIGN 128
464 #define IAVF_RX_QUEUE_ALIGN 128
465
466 #define IAVF_HARDMTU 9712 /* 9726 - ETHER_HDR_LEN */
467
468 #define IAVF_PCIREG PCI_MAPREG_START
469
470 #define IAVF_ITR0 0x0
471 #define IAVF_ITR1 0x1
472 #define IAVF_ITR2 0x2
473 #define IAVF_NOITR 0x3
474
475 #define IAVF_AQ_NUM 256
476 #define IAVF_AQ_MASK (IAVF_AQ_NUM - 1)
477 #define IAVF_AQ_ALIGN 64 /* lol */
478 #define IAVF_AQ_BUFLEN 4096
479
480 struct iavf_aq_regs {
481 bus_size_t atq_tail;
482 bus_size_t atq_head;
483 bus_size_t atq_len;
484 bus_size_t atq_bal;
485 bus_size_t atq_bah;
486
487 bus_size_t arq_tail;
488 bus_size_t arq_head;
489 bus_size_t arq_len;
490 bus_size_t arq_bal;
491 bus_size_t arq_bah;
492
493 uint32_t atq_len_enable;
494 uint32_t atq_tail_mask;
495 uint32_t atq_head_mask;
496
497 uint32_t arq_len_enable;
498 uint32_t arq_tail_mask;
499 uint32_t arq_head_mask;
500 };
501
502 struct iavf_aq_buf {
503 SIMPLEQ_ENTRY(iavf_aq_buf)
504 aqb_entry;
505 void *aqb_data;
506 bus_dmamap_t aqb_map;
507 };
508 SIMPLEQ_HEAD(iavf_aq_bufs, iavf_aq_buf);
509
510 struct iavf_dmamem {
511 bus_dmamap_t ixm_map;
512 bus_dma_segment_t ixm_seg;
513 int ixm_nsegs;
514 size_t ixm_size;
515 caddr_t ixm_kva;
516 };
517 #define IAVF_DMA_MAP(_ixm) ((_ixm)->ixm_map)
518 #define IAVF_DMA_DVA(_ixm) ((_ixm)->ixm_map->dm_segs[0].ds_addr)
519 #define IAVF_DMA_KVA(_ixm) ((void *)(_ixm)->ixm_kva)
520 #define IAVF_DMA_LEN(_ixm) ((_ixm)->ixm_size)
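521
/*
 * Typical use of the wrappers above (a sketch; iavf_dmamem_alloc()
 * is defined later in this file): allocate, initialize through the
 * KVA, sync the map, then hand the DVA to the hardware.
 *
 *	struct iavf_dmamem idm;
 *
 *	if (iavf_dmamem_alloc(sc, &idm, len, IAVF_AQ_ALIGN) == 0) {
 *		memset(IAVF_DMA_KVA(&idm), 0, IAVF_DMA_LEN(&idm));
 *		bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&idm), 0,
 *		    IAVF_DMA_LEN(&idm), BUS_DMASYNC_PREWRITE);
 *	}
 */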
521
522 struct iavf_tx_map {
523 struct mbuf *txm_m;
524 bus_dmamap_t txm_map;
525 unsigned int txm_eop;
526 };
527
528 struct iavf_tx_ring {
529 unsigned int txr_prod;
530 unsigned int txr_cons;
531
532 struct iavf_tx_map *txr_maps;
533 struct iavf_dmamem txr_mem;
534
535 bus_size_t txr_tail;
536 unsigned int txr_qid;
537 };
538
539 struct iavf_rx_map {
540 struct mbuf *rxm_m;
541 bus_dmamap_t rxm_map;
542 };
543
544 struct iavf_rx_ring {
545 struct iavf_softc *rxr_sc;
546
547 struct if_rxring rxr_acct;
548 struct timeout rxr_refill;
549
550 unsigned int rxr_prod;
551 unsigned int rxr_cons;
552
553 struct iavf_rx_map *rxr_maps;
554 struct iavf_dmamem rxr_mem;
555
556 struct mbuf *rxr_m_head;
557 struct mbuf **rxr_m_tail;
558
559 bus_size_t rxr_tail;
560 unsigned int rxr_qid;
561 };
562
563 struct iavf_softc {
564 struct device sc_dev;
565 struct arpcom sc_ac;
566 struct ifmedia sc_media;
567 uint64_t sc_media_status;
568 uint64_t sc_media_active;
569
570 pci_chipset_tag_t sc_pc;
571 pci_intr_handle_t sc_ih;
572 void *sc_ihc;
573 pcitag_t sc_tag;
574
575 bus_dma_tag_t sc_dmat;
576 bus_space_tag_t sc_memt;
577 bus_space_handle_t sc_memh;
578 bus_size_t sc_mems;
579
580 uint32_t sc_major_ver;
581 uint32_t sc_minor_ver;
582
583 int sc_got_vf_resources;
584 int sc_got_irq_map;
585 uint32_t sc_vf_id;
586 uint16_t sc_vsi_id;
587 uint16_t sc_qset_handle;
588 unsigned int sc_base_queue;
589
590 struct cond sc_admin_cond;
591 int sc_admin_result;
592 struct timeout sc_admin_timeout;
593
594 struct iavf_dmamem sc_scratch;
595
596 const struct iavf_aq_regs *
597 sc_aq_regs;
598
599 struct mutex sc_atq_mtx;
600 struct iavf_dmamem sc_atq;
601 unsigned int sc_atq_prod;
602 unsigned int sc_atq_cons;
603
604 struct iavf_dmamem sc_arq;
605 struct iavf_aq_bufs sc_arq_idle;
606 struct iavf_aq_bufs sc_arq_live;
607 struct if_rxring sc_arq_ring;
608 unsigned int sc_arq_prod;
609 unsigned int sc_arq_cons;
610
611 struct task sc_reset_task;
612 int sc_resetting;
613
614 unsigned int sc_tx_ring_ndescs;
615 unsigned int sc_rx_ring_ndescs;
616 unsigned int sc_nqueues; /* 1 << sc_nqueues */
617
618 struct rwlock sc_cfg_lock;
619 unsigned int sc_dead;
620
621 uint8_t sc_enaddr[ETHER_ADDR_LEN];
622 };
623 #define DEVNAME(_sc) ((_sc)->sc_dev.dv_xname)
624
625 #define delaymsec(_ms) delay(1000 * (_ms))
626
627 static int iavf_dmamem_alloc(struct iavf_softc *, struct iavf_dmamem *,
628 bus_size_t, u_int);
629 static void iavf_dmamem_free(struct iavf_softc *, struct iavf_dmamem *);
630
631 static int iavf_arq_fill(struct iavf_softc *, int);
632 static void iavf_arq_unfill(struct iavf_softc *);
633 static void iavf_arq_timeout(void *);
634 static int iavf_arq_wait(struct iavf_softc *, int);
635
636 static int iavf_atq_post(struct iavf_softc *, struct iavf_aq_desc *);
637 static void iavf_atq_done(struct iavf_softc *);
638
639 static void iavf_init_admin_queue(struct iavf_softc *);
640
641 static int iavf_get_version(struct iavf_softc *);
642 static int iavf_get_vf_resources(struct iavf_softc *);
643 static int iavf_config_irq_map(struct iavf_softc *);
644
645 static int iavf_add_del_addr(struct iavf_softc *, uint8_t *, int);
646 static int iavf_process_arq(struct iavf_softc *, int);
647
648 static int iavf_match(struct device *, void *, void *);
649 static void iavf_attach(struct device *, struct device *, void *);
650
651 static int iavf_media_change(struct ifnet *);
652 static void iavf_media_status(struct ifnet *, struct ifmediareq *);
653 static void iavf_watchdog(struct ifnet *);
654 static int iavf_ioctl(struct ifnet *, u_long, caddr_t);
655 static void iavf_start(struct ifqueue *);
656 static int iavf_intr(void *);
657 static int iavf_up(struct iavf_softc *);
658 static int iavf_down(struct iavf_softc *);
659 static int iavf_iff(struct iavf_softc *);
660 static void iavf_reset(void *);
661
662 static struct iavf_tx_ring *
663 iavf_txr_alloc(struct iavf_softc *, unsigned int);
664 static void iavf_txr_clean(struct iavf_softc *, struct iavf_tx_ring *);
665 static void iavf_txr_free(struct iavf_softc *, struct iavf_tx_ring *);
666 static int iavf_txeof(struct iavf_softc *, struct ifqueue *);
667
668 static struct iavf_rx_ring *
669 iavf_rxr_alloc(struct iavf_softc *, unsigned int);
670 static void iavf_rxr_clean(struct iavf_softc *, struct iavf_rx_ring *);
671 static void iavf_rxr_free(struct iavf_softc *, struct iavf_rx_ring *);
672 static int iavf_rxeof(struct iavf_softc *, struct ifiqueue *);
673 static void iavf_rxfill(struct iavf_softc *, struct iavf_rx_ring *);
674 static void iavf_rxrefill(void *);
675 static int iavf_rxrinfo(struct iavf_softc *, struct if_rxrinfo *);
676
677 struct cfdriver iavf_cd = {
678 NULL,
679 "iavf",
680 DV_IFNET,
681 };
682
683 const struct cfattach iavf_ca = {
684 sizeof(struct iavf_softc),
685 iavf_match,
686 iavf_attach,
687 };
688
689 static const struct iavf_aq_regs iavf_aq_regs = {
690 .atq_tail = I40E_VF_ATQT1,
691 .atq_tail_mask = I40E_VF_ATQT1_ATQT_MASK,
692 .atq_head = I40E_VF_ATQH1,
693 .atq_head_mask = I40E_VF_ARQH1_ARQH_MASK,
694 .atq_len = I40E_VF_ATQLEN1,
695 .atq_bal = I40E_VF_ATQBAL1,
696 .atq_bah = I40E_VF_ATQBAH1,
697 .atq_len_enable = I40E_VF_ATQLEN1_ATQENABLE_MASK,
698
699 .arq_tail = I40E_VF_ARQT1,
700 .arq_tail_mask = I40E_VF_ARQT1_ARQT_MASK,
701 .arq_head = I40E_VF_ARQH1,
702 .arq_head_mask = I40E_VF_ARQH1_ARQH_MASK,
703 .arq_len = I40E_VF_ARQLEN1,
704 .arq_bal = I40E_VF_ARQBAL1,
705 .arq_bah = I40E_VF_ARQBAH1,
706 .arq_len_enable = I40E_VF_ARQLEN1_ARQENABLE_MASK,
707 };
708
709 #define iavf_rd(_s, _r) \
710 bus_space_read_4((_s)->sc_memt, (_s)->sc_memh, (_r))
711 #define iavf_wr(_s, _r, _v) \
712 bus_space_write_4((_s)->sc_memt, (_s)->sc_memh, (_r), (_v))
713 #define iavf_barrier(_s, _r, _l, _o) \
714 bus_space_barrier((_s)->sc_memt, (_s)->sc_memh, (_r), (_l), (_o))
715 #define iavf_intr_enable(_s) \
716 iavf_wr((_s), I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL0_INTENA_MASK | \
717 I40E_VFINT_DYN_CTL0_CLEARPBA_MASK | \
718 (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)); \
719 iavf_wr((_s), I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK)
720
721 #define iavf_nqueues(_sc) (1 << (_sc)->sc_nqueues)
722 #define iavf_allqueues(_sc) ((1 << ((_sc)->sc_nqueues+1)) - 1)
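723
/*
 * sc_nqueues holds the log2 of the queue count, so iavf_nqueues()
 * recovers the count and iavf_allqueues() builds the dense queue
 * bitmask used in queue_select messages.  For the single-queue
 * configuration used below (sc_nqueues == 0):
 *
 *	iavf_nqueues(sc)   == 1 << 0 == 1
 *	iavf_allqueues(sc) == (1 << 1) - 1 == 0x1
 */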
723
724 #ifdef __LP64__
725 #define iavf_dmamem_hi(_ixm) (uint32_t)(IAVF_DMA_DVA(_ixm) >> 32)
726 #else
727 #define iavf_dmamem_hi(_ixm) 0
728 #endif
729
730 #define iavf_dmamem_lo(_ixm) (uint32_t)IAVF_DMA_DVA(_ixm)
731
732 static inline void
733 iavf_aq_dva(struct iavf_aq_desc *iaq, bus_addr_t addr)
734 {
735 #ifdef __LP64__
736 htolem32(&iaq->iaq_param[2], addr >> 32);
737 #else
738 iaq->iaq_param[2] = htole32(0);
739 #endif
740 htolem32(&iaq->iaq_param[3], addr);
741 }
742
743 #if _BYTE_ORDER == _BIG_ENDIAN
744 #define HTOLE16(_x) (uint16_t)(((_x) & 0xff) << 8 | ((_x) & 0xff00) >> 8)
745 #else
746 #define HTOLE16(_x) (_x)
747 #endif
748
749 static const struct pci_matchid iavf_devices[] = {
750 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_VF },
751 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_XL710_VF_HV },
752 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X722_VF },
753 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_ADAPTIVE_VF },
754 };
755
756 static int
757 iavf_match(struct device *parent, void *match, void *aux)
758 {
759 return (pci_matchbyid(aux, iavf_devices, nitems(iavf_devices)));
760 }
761
762 void
763 iavf_attach(struct device *parent, struct device *self, void *aux)
764 {
765 struct iavf_softc *sc = (struct iavf_softc *)self;
766 struct ifnet *ifp = &sc->sc_ac.ac_if;
767 struct pci_attach_args *pa = aux;
768 pcireg_t memtype;
769 int tries;
770
771 rw_init(&sc->sc_cfg_lock, "iavfcfg");
772
773 sc->sc_pc = pa->pa_pc;
774 sc->sc_tag = pa->pa_tag;
775 sc->sc_dmat = pa->pa_dmat;
776 sc->sc_aq_regs = &iavf_aq_regs;
777
778 sc->sc_nqueues = 0; /* 1 << 0 is 1 queue */
779 sc->sc_tx_ring_ndescs = 1024;
780 sc->sc_rx_ring_ndescs = 1024;
781
782 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, IAVF_PCIREG);
783 if (pci_mapreg_map(pa, IAVF_PCIREG, memtype, 0,
784 &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems, 0)) {
785 printf(": unable to map registers\n");
786 return;
787 }
788
789 for (tries = 0; tries < 100; tries++) {
790 uint32_t reg;
791 reg = iavf_rd(sc, I40E_VFGEN_RSTAT) &
792 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
793 if (reg == IAVF_VFR_VFACTIVE ||
794 reg == IAVF_VFR_COMPLETED)
795 break;
796
797 delay(10000);
798 }
799 if (tries == 100) {
800 printf(": VF reset timed out\n");
801 return;
802 }
803 task_set(&sc->sc_reset_task, iavf_reset, sc);
804
805 mtx_init(&sc->sc_atq_mtx, IPL_NET);
806
807 if (iavf_dmamem_alloc(sc, &sc->sc_atq,
808 sizeof(struct iavf_aq_desc) * IAVF_AQ_NUM, IAVF_AQ_ALIGN) != 0) {
809 printf("\n" "%s: unable to allocate atq\n", DEVNAME(sc));
810 goto unmap;
811 }
812
813 SIMPLEQ_INIT(&sc->sc_arq_idle);
814 SIMPLEQ_INIT(&sc->sc_arq_live);
815 if_rxr_init(&sc->sc_arq_ring, 2, IAVF_AQ_NUM - 1);
816 sc->sc_arq_cons = 0;
817 sc->sc_arq_prod = 0;
818
819 if (iavf_dmamem_alloc(sc, &sc->sc_arq,
820 sizeof(struct iavf_aq_desc) * IAVF_AQ_NUM, IAVF_AQ_ALIGN) != 0) {
821 printf("\n" "%s: unable to allocate arq\n", DEVNAME(sc));
822 goto free_atq;
823 }
824
825 if (!iavf_arq_fill(sc, 0)) {
826 printf("\n" "%s: unable to fill arq descriptors\n",
827 DEVNAME(sc));
828 goto free_arq;
829 }
830 timeout_set(&sc->sc_admin_timeout, iavf_arq_timeout, sc);
831
832 if (iavf_dmamem_alloc(sc, &sc->sc_scratch, PAGE_SIZE, IAVF_AQ_ALIGN) != 0) {
833 printf("\n" "%s: unable to allocate scratch\n", DEVNAME(sc));
834 goto shutdown;
835 }
836
837 bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_atq),
838 0, IAVF_DMA_LEN(&sc->sc_atq),
839 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
840
841 bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_arq),
842 0, IAVF_DMA_LEN(&sc->sc_arq),
843 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
844
845 iavf_init_admin_queue(sc);
846
847 if (iavf_get_version(sc) != 0) {
848 printf(", unable to get VF interface version\n");
849 goto free_scratch;
850 }
851
852 if (iavf_get_vf_resources(sc) != 0) {
853 printf(", timed out waiting for VF resources\n");
854 goto free_scratch;
855 }
856
857 if (iavf_config_irq_map(sc) != 0) {
858 printf(", timeout waiting for IRQ map response");
859 goto free_scratch;
860 }
861
862 /* msix only? */
863 if (pci_intr_map_msix(pa, 0, &sc->sc_ih) != 0) {
864 printf(", unable to map interrupt\n");
865 goto free_scratch;
866 }
867
868 /* generate an address if the pf didn't give us one */
869 memcpy(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
870 if (memcmp(sc->sc_ac.ac_enaddr, etheranyaddr, ETHER_ADDR_LEN) == 0)
871 ether_fakeaddr(ifp);
872
873 printf(", %s, address %s\n", pci_intr_string(sc->sc_pc, sc->sc_ih),
874 ether_sprintf(sc->sc_ac.ac_enaddr));
875
876 sc->sc_ihc = pci_intr_establish(sc->sc_pc, sc->sc_ih,
877 IPL_NET | IPL_MPSAFE, iavf_intr, sc, DEVNAME(sc));
878 if (sc->sc_ihc == NULL) {
879 printf("%s: unable to establish interrupt handler\n",
880 DEVNAME(sc));
881 goto free_scratch;
882 }
883
884 ifp->if_softc = sc;
885 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
886 ifp->if_xflags = IFXF_MPSAFE;
887 ifp->if_ioctl = iavf_ioctl;
888 ifp->if_qstart = iavf_start;
889 ifp->if_watchdog = iavf_watchdog;
890 if (ifp->if_hardmtu == 0)
891 ifp->if_hardmtu = IAVF_HARDMTU;
892 strlcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
893 ifq_init_maxlen(&ifp->if_snd, sc->sc_tx_ring_ndescs);
894
895 ifp->if_capabilities = IFCAP_VLAN_MTU;
896 #if NVLAN > 0
897 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
898 #endif
899 ifp->if_capabilities |= IFCAP_CSUM_IPv4 |
900 IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4 |
901 IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6;
902
903 ifmedia_init(&sc->sc_media, 0, iavf_media_change, iavf_media_status);
904
905 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
906 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
907
908 if_attach(ifp);
909 ether_ifattach(ifp);
910
911 if_attach_queues(ifp, iavf_nqueues(sc));
912 if_attach_iqueues(ifp, iavf_nqueues(sc));
913
914 iavf_intr_enable(sc);
915
916 return;
917 free_scratch:
918 iavf_dmamem_free(sc, &sc->sc_scratch);
919 shutdown:
920 iavf_wr(sc, sc->sc_aq_regs->atq_head, 0);
921 iavf_wr(sc, sc->sc_aq_regs->arq_head, 0);
922 iavf_wr(sc, sc->sc_aq_regs->atq_tail, 0);
923 iavf_wr(sc, sc->sc_aq_regs->arq_tail, 0);
924
925 iavf_wr(sc, sc->sc_aq_regs->atq_bal, 0);
926 iavf_wr(sc, sc->sc_aq_regs->atq_bah, 0);
927 iavf_wr(sc, sc->sc_aq_regs->atq_len, 0);
928
929 iavf_wr(sc, sc->sc_aq_regs->arq_bal, 0);
930 iavf_wr(sc, sc->sc_aq_regs->arq_bah, 0);
931 iavf_wr(sc, sc->sc_aq_regs->arq_len, 0);
932
933 bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_arq),
934 0, IAVF_DMA_LEN(&sc->sc_arq),
935 BUS_DMASYNC_POSTREAD);
936 bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_atq),
937 0, IAVF_DMA_LEN(&sc->sc_atq),
938 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
939
940 iavf_arq_unfill(sc);
941 free_arq:
942 iavf_dmamem_free(sc, &sc->sc_arq);
943 free_atq:
944 iavf_dmamem_free(sc, &sc->sc_atq);
945 unmap:
946 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_mems);
947 sc->sc_mems = 0;
948 }
949
950 static int
951 iavf_media_change(struct ifnet *ifp)
952 {
953 return (EOPNOTSUPP);
954 }
955
956 static void
957 iavf_media_status(struct ifnet *ifp, struct ifmediareq *ifm)
958 {
959 struct iavf_softc *sc = ifp->if_softc;
960
961 KERNEL_ASSERT_LOCKED();
962
963 ifm->ifm_status = sc->sc_media_status;
964 ifm->ifm_active = sc->sc_media_active;
965 }
966
967 static void
968 iavf_watchdog(struct ifnet *ifp)
969 {
970
971 }
972
973 int
974 iavf_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
975 {
976 struct iavf_softc *sc = (struct iavf_softc *)ifp->if_softc;
977 struct ifreq *ifr = (struct ifreq *)data;
978 uint8_t addrhi[ETHER_ADDR_LEN], addrlo[ETHER_ADDR_LEN];
979 int /*aqerror,*/ error = 0;
980
981 switch (cmd) {
982 case SIOCSIFADDR:
983 ifp->if_flags |= IFF_UP;
984 /* FALLTHROUGH */
985
986 case SIOCSIFFLAGS:
987 if (ISSET(ifp->if_flags, IFF_UP)) {
988 if (ISSET(ifp->if_flags, IFF_RUNNING))
989 error = ENETRESET;
990 else
991 error = iavf_up(sc);
992 } else {
993 if (ISSET(ifp->if_flags, IFF_RUNNING))
994 error = iavf_down(sc);
995 }
996 break;
997
998 case SIOCGIFMEDIA:
999 case SIOCSIFMEDIA:
1000 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1001 break;
1002
1003 case SIOCGIFRXR:
1004 error = iavf_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
1005 break;
1006
1007 case SIOCADDMULTI:
1008 if (ether_addmulti(ifr, &sc->sc_ac) == ENETRESET) {
1009 error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
1010 if (error != 0)
1011 return (error);
1012
1013 iavf_add_del_addr(sc, addrlo, 1);
1014 /* check result i guess? */
1015
1016 if (sc->sc_ac.ac_multirangecnt > 0) {
1017 SET(ifp->if_flags, IFF_ALLMULTI);
1018 error = ENETRESET;
1019 }
1020 }
1021 break;
1022
1023 case SIOCDELMULTI:
1024 if (ether_delmulti(ifr, &sc->sc_ac) == ENETRESET) {
1025 error = ether_multiaddr(&ifr->ifr_addr, addrlo, addrhi);
1026 if (error != 0)
1027 return (error);
1028
1029 iavf_add_del_addr(sc, addrlo, 0);
1030
1031 if (ISSET(ifp->if_flags, IFF_ALLMULTI) &&
1032 sc->sc_ac.ac_multirangecnt == 0) {
1033 CLR(ifp->if_flags, IFF_ALLMULTI);
1034 error = ENETRESET;
1035 }
1036 }
1037 break;
1038
1039 default:
1040 error = ether_ioctl(ifp, &sc->sc_ac, cmd, data);
1041 break;
1042 }
1043
1044 if (error == ENETRESET)
1045 error = iavf_iff(sc);
1046
1047 return (error);
1048 }
1049
1050 static int
1051 iavf_config_vsi_queues(struct iavf_softc *sc)
1052 {
1053 struct ifnet *ifp = &sc->sc_ac.ac_if;
1054 struct iavf_aq_desc iaq;
1055 struct iavf_vc_queue_config_info *config;
1056 struct iavf_vc_txq_info *txq;
1057 struct iavf_vc_rxq_info *rxq;
1058 struct iavf_rx_ring *rxr;
1059 struct iavf_tx_ring *txr;
1060 int rv, i;
1061
1062 memset(&iaq, 0, sizeof(iaq));
1063 iaq.iaq_flags = htole16(IAVF_AQ_BUF | IAVF_AQ_RD);
1064 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
1065 iaq.iaq_vc_opcode = htole32(IAVF_VC_OP_CONFIG_VSI_QUEUES);
1066 iaq.iaq_datalen = htole16(sizeof(*config) +
1067 iavf_nqueues(sc) * sizeof(struct iavf_vc_queue_pair_info));
1068 iavf_aq_dva(&iaq, IAVF_DMA_DVA(&sc->sc_scratch));
1069
1070 config = IAVF_DMA_KVA(&sc->sc_scratch);
1071 config->vsi_id = htole16(sc->sc_vsi_id);
1072 config->num_queue_pairs = htole16(iavf_nqueues(sc));
1073
1074 for (i = 0; i < iavf_nqueues(sc); i++) {
1075 rxr = ifp->if_iqs[i]->ifiq_softc;
1076 txr = ifp->if_ifqs[i]->ifq_softc;
1077
1078 txq = &config->qpair[i].txq;
1079 txq->vsi_id = htole16(sc->sc_vsi_id);
1080 txq->queue_id = htole16(i);
1081 txq->ring_len = htole16(sc->sc_tx_ring_ndescs);
1082 txq->headwb_ena = 0;
1083 htolem64(&txq->dma_ring_addr, IAVF_DMA_DVA(&txr->txr_mem));
1084 txq->dma_headwb_addr = 0;
1085
1086 rxq = &config->qpair[i].rxq;
1087 rxq->vsi_id = htole16(sc->sc_vsi_id);
1088 rxq->queue_id = htole16(i);
1089 rxq->ring_len = htole32(sc->sc_rx_ring_ndescs);
1090 rxq->splithdr_ena = 0;
1091 rxq->databuf_size = htole32(MCLBYTES);
1092 rxq->max_pkt_size = htole32(IAVF_HARDMTU);
1093 htolem64(&rxq->dma_ring_addr, IAVF_DMA_DVA(&rxr->rxr_mem));
1094 rxq->rx_split_pos = 0;
1095 }
1096
1097 bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_scratch), 0,
1098 IAVF_DMA_LEN(&sc->sc_scratch),
1099 BUS_DMASYNC_PREWRITE);
1100
1101 iavf_atq_post(sc, &iaq);
1102 rv = iavf_arq_wait(sc, 250);
1103 if (rv != IAVF_VC_RC_SUCCESS) {
1104 printf("%s: CONFIG_VSI_QUEUES failed: %d\n", DEVNAME(sc), rv);
1105 return (1);
1106 }
1107
1108 return (0);
1109 }
1110
1111 static int
1112 iavf_config_hena(struct iavf_softc *sc)
1113 {
1114 struct iavf_aq_desc iaq;
1115 uint64_t *caps;
1116 int rv;
1117
1118 memset(&iaq, 0, sizeof(iaq));
1119 iaq.iaq_flags = htole16(IAVF_AQ_BUF | IAVF_AQ_RD);
1120 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
1121 iaq.iaq_vc_opcode = htole32(IAVF_VC_OP_SET_RSS_HENA);
1122 iaq.iaq_datalen = htole16(sizeof(*caps));
1123 iavf_aq_dva(&iaq, IAVF_DMA_DVA(&sc->sc_scratch));
1124
1125 caps = IAVF_DMA_KVA(&sc->sc_scratch);
1126 *caps = 0;
1127
1128 iavf_atq_post(sc, &iaq);
1129 rv = iavf_arq_wait(sc, 250);
1130 if (rv != IAVF_VC_RC_SUCCESS) {
1131 printf("%s: SET_RSS_HENA failed: %d\n", DEVNAME(sc), rv);
1132 return (1);
1133 }
1134
1135 caps = IAVF_DMA_KVA(&sc->sc_scratch);
1136
1137 return (0);
1138 }
1139
1140 static int
1141 iavf_queue_select(struct iavf_softc *sc, int opcode)
1142 {
1143 struct iavf_aq_desc iaq;
1144 struct iavf_vc_queue_select *qsel;
1145 int rv;
1146
1147 memset(&iaq, 0, sizeof(iaq));
1148 iaq.iaq_flags = htole16(IAVF_AQ_BUF | IAVF_AQ_RD);
1149 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
1150 iaq.iaq_vc_opcode = htole32(opcode);
1151 iaq.iaq_datalen = htole16(sizeof(*qsel));
1152 iavf_aq_dva(&iaq, IAVF_DMA_DVA(&sc->sc_scratch));
1153
1154 qsel = IAVF_DMA_KVA(&sc->sc_scratch);
1155 qsel->vsi_id = htole16(sc->sc_vsi_id);
1156 qsel->rx_queues = htole32(iavf_allqueues(sc));
1157 qsel->tx_queues = htole32(iavf_allqueues(sc));
1158
1159 bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_scratch), 0,
1160 IAVF_DMA_LEN(&sc->sc_scratch),
1161 BUS_DMASYNC_PREWRITE);
1162
1163 iavf_atq_post(sc, &iaq);
1164 rv = iavf_arq_wait(sc, 250);
1165 if (rv != IAVF_VC_RC_SUCCESS) {
1166 printf("%s: queue op %d failed: %d\n", DEVNAME(sc), opcode, rv);
1167 return (1);
1168 }
1169
1170 return (0);
1171 }
1172
1173 static int
1174 iavf_up(struct iavf_softc *sc)
1175 {
1176 struct ifnet *ifp = &sc->sc_ac.ac_if;
1177 struct iavf_rx_ring *rxr;
1178 struct iavf_tx_ring *txr;
1179 unsigned int nqueues, i;
1180 int rv = ENOMEM;
1181
1182 nqueues = iavf_nqueues(sc);
1183 KASSERT(nqueues == 1); /* XXX */
1184
1185 rw_enter_write(&sc->sc_cfg_lock);
1186 if (sc->sc_dead) {
1187 rw_exit_write(&sc->sc_cfg_lock);
1188 return (ENXIO);
1189 }
1190
1191 for (i = 0; i < nqueues; i++) {
1192 rxr = iavf_rxr_alloc(sc, i);
1193 if (rxr == NULL)
1194 goto free;
1195
1196 txr = iavf_txr_alloc(sc, i);
1197 if (txr == NULL) {
1198 iavf_rxr_free(sc, rxr);
1199 goto free;
1200 }
1201
1202 ifp->if_iqs[i]->ifiq_softc = rxr;
1203 ifp->if_ifqs[i]->ifq_softc = txr;
1204
1205 iavf_rxfill(sc, rxr);
1206 }
1207
1208 if (iavf_config_vsi_queues(sc) != 0)
1209 goto down;
1210
1211 if (iavf_config_hena(sc) != 0)
1212 goto down;
1213
1214 if (iavf_queue_select(sc, IAVF_VC_OP_ENABLE_QUEUES) != 0)
1215 goto down;
1216
1217 SET(ifp->if_flags, IFF_RUNNING);
1218
1219 iavf_wr(sc, I40E_VFINT_ITR01(0), 0x7a);
1220 iavf_wr(sc, I40E_VFINT_ITR01(1), 0x7a);
1221 iavf_wr(sc, I40E_VFINT_ITR01(2), 0);
1222
1223 rw_exit_write(&sc->sc_cfg_lock);
1224
1225 return (ENETRESET);
1226
1227 free:
1228 for (i = 0; i < nqueues; i++) {
1229 rxr = ifp->if_iqs[i]->ifiq_softc;
1230 txr = ifp->if_ifqs[i]->ifq_softc;
1231
1232 if (rxr == NULL) {
1233 /*
1234 * tx and rx get set at the same time, so if one
1235 * is NULL, the other is too.
1236 */
1237 continue;
1238 }
1239
1240 iavf_txr_free(sc, txr);
1241 iavf_rxr_free(sc, rxr);
1242 }
1243 rw_exit_write(&sc->sc_cfg_lock);
1244 return (rv);
1245 down:
1246 rw_exit_write(&sc->sc_cfg_lock);
1247 iavf_down(sc);
1248 return (ETIMEDOUT);
1249 }
1250
1251 static int
1252 iavf_config_promisc_mode(struct iavf_softc *sc, int unicast, int multicast)
1253 {
1254 struct iavf_aq_desc iaq;
1255 struct iavf_vc_promisc_info *promisc;
1256 int rv, flags;
1257
1258 memset(&iaq, 0, sizeof(iaq));
1259 iaq.iaq_flags = htole16(IAVF_AQ_BUF | IAVF_AQ_RD);
1260 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
1261 iaq.iaq_vc_opcode = htole32(IAVF_VC_OP_CONFIG_PROMISC);
1262 iaq.iaq_datalen = htole16(sizeof(*promisc));
1263 iavf_aq_dva(&iaq, IAVF_DMA_DVA(&sc->sc_scratch));
1264
1265 flags = 0;
1266 if (unicast)
1267 flags |= IAVF_FLAG_VF_UNICAST_PROMISC;
1268 if (multicast)
1269 flags |= IAVF_FLAG_VF_MULTICAST_PROMISC;
1270
1271 promisc = IAVF_DMA_KVA(&sc->sc_scratch);
1272 promisc->vsi_id = htole16(sc->sc_vsi_id);
1273 promisc->flags = htole16(flags);
1274
1275 bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_scratch), 0,
1276 IAVF_DMA_LEN(&sc->sc_scratch),
1277 BUS_DMASYNC_PREWRITE);
1278
1279 iavf_atq_post(sc, &iaq);
1280 rv = iavf_arq_wait(sc, 250);
1281 if (rv != IAVF_VC_RC_SUCCESS) {
1282 printf("%s: CONFIG_PROMISC_MODE failed: %d\n", DEVNAME(sc), rv);
1283 return (1);
1284 }
1285
1286 return (0);
1287 }
1288
1289 static int
1290 iavf_add_del_addr(struct iavf_softc *sc, uint8_t *addr, int add)
1291 {
1292 struct iavf_aq_desc iaq;
1293 struct iavf_vc_eth_addr_list *addrs;
1294 struct iavf_vc_eth_addr *vcaddr;
1295 int rv;
1296
1297 memset(&iaq, 0, sizeof(iaq));
1298 iaq.iaq_flags = htole16(IAVF_AQ_BUF | IAVF_AQ_RD);
1299 iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
1300 if (add)
1301 iaq.iaq_vc_opcode = htole32(IAVF_VC_OP_ADD_ETH_ADDR);
1302 else
1303 iaq.iaq_vc_opcode = htole32(IAVF_VC_OP_DEL_ETH_ADDR);
1304 iaq.iaq_datalen = htole16(sizeof(*addrs) + sizeof(*vcaddr));
1305 iavf_aq_dva(&iaq, IAVF_DMA_DVA(&sc->sc_scratch));
1306
1307 addrs = IAVF_DMA_KVA(&sc->sc_scratch);
1308 addrs->vsi_id = htole16(sc->sc_vsi_id);
1309 addrs->num_elements = htole16(1);
1310
1311 vcaddr = addrs->list;
1312 memcpy(vcaddr->addr, addr, ETHER_ADDR_LEN);
1313
1314 bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_scratch), 0,
1315 IAVF_DMA_LEN(&sc->sc_scratch),
1316 BUS_DMASYNC_PREWRITE);
1317
1318 iavf_atq_post(sc, &iaq);
1319 rv = iavf_arq_wait(sc, 250);
1320 if (rv != IAVF_VC_RC_SUCCESS) {
1321 printf("%s: ADD/DEL_ETH_ADDR failed: %d\n", DEVNAME(sc), rv);
1322 return (1);
1323 }
1324
1325 return (0);
1326 }
1327
1328 static int
1329 iavf_iff(struct iavf_softc *sc)
1330 {
1331 struct ifnet *ifp = &sc->sc_ac.ac_if;
1332 int unicast, multicast;
1333
1334 if (!ISSET(ifp->if_flags, IFF_RUNNING))
1335 return (0);
1336
1337 rw_enter_write(&sc->sc_cfg_lock);
1338
1339 unicast = 0;
1340 multicast = 0;
1341 if (ISSET(ifp->if_flags, IFF_PROMISC)) {
1342 unicast = 1;
1343 multicast = 1;
1344 } else if (ISSET(ifp->if_flags, IFF_ALLMULTI)) {
1345 multicast = 1;
1346 }
1347 iavf_config_promisc_mode(sc, unicast, multicast);
1348
1349 if (memcmp(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN) != 0) {
1350 if (memcmp(sc->sc_enaddr, etheranyaddr, ETHER_ADDR_LEN) != 0)
1351 iavf_add_del_addr(sc, sc->sc_enaddr, 0);
1352 memcpy(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
1353 iavf_add_del_addr(sc, sc->sc_enaddr, 1);
1354 }
1355
1356 rw_exit_write(&sc->sc_cfg_lock);
1357 return (0);
1358 }
1359
1360 static int
1361 iavf_down(struct iavf_softc *sc)
1362 {
1363 struct ifnet *ifp = &sc->sc_ac.ac_if;
1364 struct iavf_rx_ring *rxr;
1365 struct iavf_tx_ring *txr;
1366 unsigned int nqueues, i;
1367 uint32_t reg;
1368 int error = 0;
1369
1370 nqueues = iavf_nqueues(sc);
1371
1372 rw_enter_write(&sc->sc_cfg_lock);
1373
1374 CLR(ifp->if_flags, IFF_RUNNING);
1375
1376 NET_UNLOCK();
1377
1378 if (sc->sc_resetting == 0) {
1379 /* disable queues */
1380 if (iavf_queue_select(sc, IAVF_VC_OP_DISABLE_QUEUES) != 0)
1381 goto die;
1382 }
1383
1384 /* mask interrupts */
1385 reg = iavf_rd(sc, I40E_VFINT_DYN_CTL01);
1386 reg |= I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK |
1387 (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT);
1388 iavf_wr(sc, I40E_VFINT_DYN_CTL01, reg);
1389
1390 /* make sure no hw generated work is still in flight */
1391 intr_barrier(sc->sc_ihc);
1392 for (i = 0; i < nqueues; i++) {
1393 rxr = ifp->if_iqs[i]->ifiq_softc;
1394 txr = ifp->if_ifqs[i]->ifq_softc;
1395
1396 ifq_barrier(ifp->if_ifqs[i]);
1397
1398 timeout_del_barrier(&rxr->rxr_refill);
1399 }
1400
1401 for (i = 0; i < nqueues; i++) {
1402 rxr = ifp->if_iqs[i]->ifiq_softc;
1403 txr = ifp->if_ifqs[i]->ifq_softc;
1404
1405 iavf_txr_clean(sc, txr);
1406 iavf_rxr_clean(sc, rxr);
1407
1408 iavf_txr_free(sc, txr);
1409 iavf_rxr_free(sc, rxr);
1410
1411 ifp->if_iqs[i]->ifiq_softc = NULL;
1412 ifp->if_ifqs[i]->ifq_softc = NULL;
1413 }
1414
1415 /* unmask */
1416 reg = iavf_rd(sc, I40E_VFINT_DYN_CTL01);
1417 reg |= (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT);
1418 iavf_wr(sc, I40E_VFINT_DYN_CTL01, reg);
1419
1420 out:
1421 rw_exit_write(&sc->sc_cfg_lock);
1422 NET_LOCK();
1423 return (error);
1424 die:
1425 sc->sc_dead = 1;
1426 log(LOG_CRIT, "%s: failed to shut down rings", DEVNAME(sc));
1427 error = ETIMEDOUT;
1428 goto out;
1429 }
1430
1431 static void
1432 iavf_reset(void *xsc)
1433 {
1434 struct iavf_softc *sc = xsc;
1435 struct ifnet *ifp = &sc->sc_ac.ac_if;
1436 int tries, up, link_state;
1437
1438 NET_LOCK();
1439
1440 /* treat the reset as a loss of link */
1441 link_state = ifp->if_link_state;
1442 if (ifp->if_link_state != LINK_STATE_DOWN) {
1443 ifp->if_link_state = LINK_STATE_DOWN;
1444 if_link_state_change(ifp);
1445 }
1446
1447 up = 0;
1448 if (ISSET(ifp->if_flags, IFF_RUNNING)) {
1449 iavf_down(sc);
1450 up = 1;
1451 }
1452
1453 rw_enter_write(&sc->sc_cfg_lock);
1454
1455 sc->sc_major_ver = UINT_MAX;
1456 sc->sc_minor_ver = UINT_MAX;
1457 sc->sc_got_vf_resources = 0;
1458 sc->sc_got_irq_map = 0;
1459
1460 for (tries = 0; tries < 100; tries++) {
1461 uint32_t reg;
1462 reg = iavf_rd(sc, I40E_VFGEN_RSTAT) &
1463 I40E_VFGEN_RSTAT_VFR_STATE_MASK;
1464 if (reg == IAVF_VFR_VFACTIVE ||
1465 reg == IAVF_VFR_COMPLETED)
1466 break;
1467
1468 delay(10000);
1469 }
1470 if (tries == 100) {
1471 printf("%s: VF reset timed out\n", DEVNAME(sc));
1472 goto failed;
1473 }
1474
1475 iavf_arq_unfill(sc);
1476 sc->sc_arq_cons = 0;
1477 sc->sc_arq_prod = 0;
1478 if (!iavf_arq_fill(sc, 0)) {
1479 printf("\n" "%s: unable to fill arq descriptors\n",
1480 DEVNAME(sc));
1481 goto failed;
1482 }
1483
1484 iavf_init_admin_queue(sc);
1485
1486 if (iavf_get_version(sc) != 0) {
1487 printf("%s: unable to get VF interface version\n",
1488 DEVNAME(sc));
1489 goto failed;
1490 }
1491
1492 if (iavf_get_vf_resources(sc) != 0) {
1493 printf("%s: timed out waiting for VF resources\n",
1494 DEVNAME(sc));
1495 goto failed;
1496 }
1497
1498 if (iavf_config_irq_map(sc) != 0) {
1499 printf("%s: timed out configuring IRQ map\n", DEVNAME(sc));
1500 goto failed;
1501 }
1502
1503 /* do we need to re-add mac addresses here? */
1504
1505 sc->sc_resetting = 0;
1506 iavf_intr_enable(sc);
1507 rw_exit_write(&sc->sc_cfg_lock);
1508
1509 /* the PF-assigned MAC address might have changed */
1510 if ((memcmp(sc->sc_ac.ac_enaddr, etheranyaddr, ETHER_ADDR_LEN) != 0) &&
1511 (memcmp(sc->sc_ac.ac_enaddr, sc->sc_enaddr, ETHER_ADDR_LEN) != 0)) {
1512 memcpy(sc->sc_enaddr, sc->sc_ac.ac_enaddr, ETHER_ADDR_LEN);
1513 if_setlladdr(ifp, sc->sc_ac.ac_enaddr);
1514 ifnewlladdr(ifp);
1515 }
1516
1517 /* restore link state */
1518 if (link_state != LINK_STATE_DOWN) {
1519 ifp->if_link_state = link_state;
1520 if_link_state_change(ifp);
1521 }
1522
1523 if (up) {
1524 int i;
1525
1526 iavf_up(sc);
1527
1528 for (i = 0; i < iavf_nqueues(sc); i++) {
1529 if (ifq_is_oactive(ifp->if_ifqs[i]))
1530 ifq_restart(ifp->if_ifqs[i]);
1531 }
1532 }
1533
1534 NET_UNLOCK();
1535 return;
1536 failed:
1537 sc->sc_dead = 1;
1538 sc->sc_resetting = 0;
1539 rw_exit_write(&sc->sc_cfg_lock);
1540 NET_UNLOCK();
1541 }
1542
1543 static struct iavf_tx_ring *
1544 iavf_txr_alloc(struct iavf_softc *sc, unsigned int qid)
1545 {
1546 struct iavf_tx_ring *txr;
1547 struct iavf_tx_map *maps, *txm;
1548 unsigned int i;
1549
1550 txr = malloc(sizeof(*txr), M_DEVBUF, M_WAITOK|M_CANFAIL);
1551 if (txr == NULL)
1552 return (NULL);
1553
1554 maps = mallocarray(sizeof(*maps),
1555 sc->sc_tx_ring_ndescs, M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
1556 if (maps == NULL)
1557 goto free;
1558
1559 if (iavf_dmamem_alloc(sc, &txr->txr_mem,
1560 sizeof(struct iavf_tx_desc) * sc->sc_tx_ring_ndescs,
1561 IAVF_TX_QUEUE_ALIGN) != 0)
1562 goto freemap;
1563
1564 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
1565 txm = &maps[i];
1566
1567 if (bus_dmamap_create(sc->sc_dmat,
1568 IAVF_HARDMTU, IAVF_TX_PKT_DESCS, IAVF_HARDMTU, 0,
1569 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
1570 &txm->txm_map) != 0)
1571 goto uncreate;
1572
1573 txm->txm_eop = -1;
1574 txm->txm_m = NULL;
1575 }
1576
1577 txr->txr_cons = txr->txr_prod = 0;
1578 txr->txr_maps = maps;
1579
1580 txr->txr_tail = I40E_QTX_TAIL1(qid);
1581 txr->txr_qid = qid;
1582
1583 return (txr);
1584
1585 uncreate:
1586 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
1587 txm = &maps[i];
1588
1589 if (txm->txm_map == NULL)
1590 continue;
1591
1592 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
1593 }
1594
1595 iavf_dmamem_free(sc, &txr->txr_mem);
1596 freemap:
1597 free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_tx_ring_ndescs);
1598 free:
1599 free(txr, M_DEVBUF, sizeof(*txr));
1600 return (NULL);
1601 }
1602
1603 static void
1604 iavf_txr_clean(struct iavf_softc *sc, struct iavf_tx_ring *txr)
1605 {
1606 struct iavf_tx_map *maps, *txm;
1607 bus_dmamap_t map;
1608 unsigned int i;
1609
1610 maps = txr->txr_maps;
1611 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
1612 txm = &maps[i];
1613
1614 if (txm->txm_m == NULL)
1615 continue;
1616
1617 map = txm->txm_map;
1618 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1619 BUS_DMASYNC_POSTWRITE);
1620 bus_dmamap_unload(sc->sc_dmat, map);
1621
1622 m_freem(txm->txm_m);
1623 txm->txm_m = NULL;
1624 }
1625 }
1626
1627 static void
1628 iavf_txr_free(struct iavf_softc *sc, struct iavf_tx_ring *txr)
1629 {
1630 struct iavf_tx_map *maps, *txm;
1631 unsigned int i;
1632
1633 maps = txr->txr_maps;
1634 for (i = 0; i < sc->sc_tx_ring_ndescs; i++) {
1635 txm = &maps[i];
1636
1637 bus_dmamap_destroy(sc->sc_dmat, txm->txm_map);
1638 }
1639
1640 iavf_dmamem_free(sc, &txr->txr_mem);
1641 free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_tx_ring_ndescs);
1642 free(txr, M_DEVBUF, sizeof(*txr));
1643 }
1644
1645 static inline int
1646 iavf_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m)
1647 {
1648 int error;
1649
1650 error = bus_dmamap_load_mbuf(dmat, map, m,
1651 BUS_DMA_STREAMING | BUS_DMA_NOWAIT);
1652 if (error != EFBIG)
1653 return (error);
1654
1655 error = m_defrag(m, M_DONTWAIT);
1656 if (error != 0)
1657 return (error);
1658
1659 return (bus_dmamap_load_mbuf(dmat, map, m,
1660 BUS_DMA_STREAMING | BUS_DMA_NOWAIT));
1661 }
1662
1663 static uint64_t
1664 iavf_tx_offload(struct mbuf *m)
1665 {
1666 struct ether_extracted ext;
1667 uint64_t hlen;
1668 uint64_t offload = 0;
1669
1670 #if NVLAN > 0
1671 if (ISSET(m->m_flags, M_VLANTAG)) {
1672 uint64_t vtag = m->m_pkthdr.ether_vtag;
1673 offload |= IAVF_TX_DESC_CMD_IL2TAG1;
1674 offload |= vtag << IAVF_TX_DESC_L2TAG1_SHIFT;
1675 }
1676 #endif
1677
1678 if (!ISSET(m->m_pkthdr.csum_flags,
1679 M_IPV4_CSUM_OUT|M_TCP_CSUM_OUT|M_UDP_CSUM_OUT))
1680 return (offload);
1681
1682 ether_extract_headers(m, &ext);
1683
1684 if (ext.ip4) {
1685 offload |= ISSET(m->m_pkthdr.csum_flags, M_IPV4_CSUM_OUT) ?
1686 IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM :
1687 IAVF_TX_DESC_CMD_IIPT_IPV4;
1688 #ifdef INET6
1689 } else if (ext.ip6) {
1690 offload |= IAVF_TX_DESC_CMD_IIPT_IPV6;
1691 #endif
1692 } else {
1693 panic("CSUM_OUT set for non-IP packet");
1694 /* NOTREACHED */
1695 }
1696 hlen = ext.iphlen;
1697
1698 offload |= (ETHER_HDR_LEN >> 1) << IAVF_TX_DESC_MACLEN_SHIFT;
1699 offload |= (hlen >> 2) << IAVF_TX_DESC_IPLEN_SHIFT;
1700
1701 if (ext.tcp && ISSET(m->m_pkthdr.csum_flags, M_TCP_CSUM_OUT)) {
1702 offload |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
1703 offload |= (uint64_t)(ext.tcphlen >> 2)
1704 << IAVF_TX_DESC_L4LEN_SHIFT;
1705 } else if (ext.udp && ISSET(m->m_pkthdr.csum_flags, M_UDP_CSUM_OUT)) {
1706 offload |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
1707 offload |= (uint64_t)(sizeof(*ext.udp) >> 2)
1708 << IAVF_TX_DESC_L4LEN_SHIFT;
1709 }
1710
1711 return (offload);
1712 }
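1713
/*
 * Worked example (illustrative arithmetic): for a TCP/IPv4 packet
 * with 20 byte IP and TCP headers and both checksum flags set, the
 * function returns
 *
 *	IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM | IAVF_TX_DESC_CMD_L4T_EOFT_TCP
 *
 * with MACLEN = 14 >> 1 = 7 halfwords, IPLEN = 20 >> 2 = 5 dwords
 * and L4LEN = 20 >> 2 = 5 dwords packed into their fields.
 */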
1713
1714 static void
1715 iavf_start(struct ifqueue *ifq)
1716 {
1717 struct ifnet *ifp = ifq->ifq_if;
1718 struct iavf_softc *sc = ifp->if_softc;
1719 struct iavf_tx_ring *txr = ifq->ifq_softc;
1720 struct iavf_tx_desc *ring, *txd;
1721 struct iavf_tx_map *txm;
1722 bus_dmamap_t map;
1723 struct mbuf *m;
1724 uint64_t cmd;
1725 uint64_t offload;
1726 unsigned int prod, free, last, i;
1727 unsigned int mask;
1728 int post = 0;
1729 #if NBPFILTER > 0
1730 caddr_t if_bpf;
1731 #endif
1732
1733 if (!LINK_STATE_IS_UP(ifp->if_link_state)) {
1734 ifq_purge(ifq);
1735 return;
1736 }
1737
1738 prod = txr->txr_prod;
1739 free = txr->txr_cons;
1740 if (free <= prod)
1741 free += sc->sc_tx_ring_ndescs;
1742 free -= prod;
1743
1744 bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&txr->txr_mem),
1745 0, IAVF_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTWRITE);
1746
1747 ring = IAVF_DMA_KVA(&txr->txr_mem);
1748 mask = sc->sc_tx_ring_ndescs - 1;
1749
1750 for (;;) {
1751 if (free <= IAVF_TX_PKT_DESCS) {
1752 ifq_set_oactive(ifq);
1753 break;
1754 }
1755
1756 m = ifq_dequeue(ifq);
1757 if (m == NULL)
1758 break;
1759
1760 offload = iavf_tx_offload(m);
1761
1762 txm = &txr->txr_maps[prod];
1763 map = txm->txm_map;
1764
1765 if (iavf_load_mbuf(sc->sc_dmat, map, m) != 0) {
1766 ifq->ifq_errors++;
1767 m_freem(m);
1768 continue;
1769 }
1770
1771 bus_dmamap_sync(sc->sc_dmat, map, 0,
1772 map->dm_mapsize, BUS_DMASYNC_PREWRITE);
1773
1774 for (i = 0; i < map->dm_nsegs; i++) {
1775 txd = &ring[prod];
1776
1777 cmd = (uint64_t)map->dm_segs[i].ds_len <<
1778 IAVF_TX_DESC_BSIZE_SHIFT;
1779 cmd |= IAVF_TX_DESC_DTYPE_DATA | IAVF_TX_DESC_CMD_ICRC;
1780 cmd |= offload;
1781
1782 htolem64(&txd->addr, map->dm_segs[i].ds_addr);
1783 htolem64(&txd->cmd, cmd);
1784
1785 last = prod;
1786
1787 prod++;
1788 prod &= mask;
1789 }
1790 cmd |= IAVF_TX_DESC_CMD_EOP | IAVF_TX_DESC_CMD_RS;
1791 htolem64(&txd->cmd, cmd);
1792
1793 txm->txm_m = m;
1794 txm->txm_eop = last;
1795
1796 #if NBPFILTER > 0
1797 if_bpf = ifp->if_bpf;
1798 if (if_bpf)
1799 bpf_mtap_ether(if_bpf, m, BPF_DIRECTION_OUT);
1800 #endif
1801
1802 free -= i;
1803 post = 1;
1804 }
1805
1806 bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&txr->txr_mem),
1807 0, IAVF_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREWRITE);
1808
1809 if (post) {
1810 txr->txr_prod = prod;
1811 iavf_wr(sc, txr->txr_tail, prod);
1812 }
1813 }
1814
1815 static int
1816 iavf_txeof(struct iavf_softc *sc, struct ifqueue *ifq)
1817 {
1818 struct iavf_tx_ring *txr = ifq->ifq_softc;
1819 struct iavf_tx_desc *ring, *txd;
1820 struct iavf_tx_map *txm;
1821 bus_dmamap_t map;
1822 unsigned int cons, prod, last;
1823 unsigned int mask;
1824 uint64_t dtype;
1825 int done = 0;
1826
1827 prod = txr->txr_prod;
1828 cons = txr->txr_cons;
1829
1830 if (cons == prod)
1831 return (0);
1832
1833 bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&txr->txr_mem),
1834 0, IAVF_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_POSTREAD);
1835
1836 ring = IAVF_DMA_KVA(&txr->txr_mem);
1837 mask = sc->sc_tx_ring_ndescs - 1;
1838
1839 do {
1840 txm = &txr->txr_maps[cons];
1841 last = txm->txm_eop;
1842 txd = &ring[last];
1843
1844 dtype = txd->cmd & htole64(IAVF_TX_DESC_DTYPE_MASK);
1845 if (dtype != htole64(IAVF_TX_DESC_DTYPE_DONE))
1846 break;
1847
1848 map = txm->txm_map;
1849
1850 bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
1851 BUS_DMASYNC_POSTWRITE);
1852 bus_dmamap_unload(sc->sc_dmat, map);
1853 m_freem(txm->txm_m);
1854
1855 txm->txm_m = NULL;
1856 txm->txm_eop = -1;
1857
1858 cons = last + 1;
1859 cons &= mask;
1860
1861 done = 1;
1862 } while (cons != prod);
1863
1864 bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&txr->txr_mem),
1865 0, IAVF_DMA_LEN(&txr->txr_mem), BUS_DMASYNC_PREREAD);
1866
1867 txr->txr_cons = cons;
1868
1869 //ixl_enable(sc, txr->txr_msix);
1870
1871 if (ifq_is_oactive(ifq))
1872 ifq_restart(ifq);
1873
1874 return (done);
1875 }
1876
1877 static struct iavf_rx_ring *
1878 iavf_rxr_alloc(struct iavf_softc *sc, unsigned int qid)
1879 {
1880 struct iavf_rx_ring *rxr;
1881 struct iavf_rx_map *maps, *rxm;
1882 unsigned int i;
1883
1884 rxr = malloc(sizeof(*rxr), M_DEVBUF, M_WAITOK|M_CANFAIL);
1885 if (rxr == NULL)
1886 return (NULL);
1887
1888 maps = mallocarray(sizeof(*maps),
1889 sc->sc_rx_ring_ndescs, M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO);
1890 if (maps == NULL)
1891 goto free;
1892
1893 if (iavf_dmamem_alloc(sc, &rxr->rxr_mem,
1894 sizeof(struct iavf_rx_rd_desc_32) * sc->sc_rx_ring_ndescs,
1895 IAVF_RX_QUEUE_ALIGN) != 0)
1896 goto freemap;
1897
1898 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
1899 rxm = &maps[i];
1900
1901 if (bus_dmamap_create(sc->sc_dmat,
1902 IAVF_HARDMTU, 1, IAVF_HARDMTU, 0,
1903 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
1904 &rxm->rxm_map) != 0)
1905 goto uncreate;
1906
1907 rxm->rxm_m = NULL;
1908 }
1909
1910 rxr->rxr_sc = sc;
1911 if_rxr_init(&rxr->rxr_acct, 17, sc->sc_rx_ring_ndescs - 1);
1912 timeout_set(&rxr->rxr_refill, iavf_rxrefill, rxr);
1913 rxr->rxr_cons = rxr->rxr_prod = 0;
1914 rxr->rxr_m_head = NULL;
1915 rxr->rxr_m_tail = &rxr->rxr_m_head;
1916 rxr->rxr_maps = maps;
1917
1918 rxr->rxr_tail = I40E_QRX_TAIL1(qid);
1919 rxr->rxr_qid = qid;
1920
1921 return (rxr);
1922
1923 uncreate:
1924 for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
1925 rxm = &maps[i];
1926
1927 if (rxm->rxm_map == NULL)
1928 continue;
1929
1930 bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
1931 }
1932
1933 iavf_dmamem_free(sc, &rxr->rxr_mem);
1934 freemap:
1935 free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_rx_ring_ndescs);
1936 free:
1937 free(rxr, M_DEVBUF, sizeof(*rxr));
1938 return (NULL);
1939 }
1940
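/*
 * Release the mbufs currently attached to an rx ring, including any
 * partially assembled chain, and reset the ring indexes. The DMA maps
 * are kept for reuse; iavf_rxr_free() destroys them.
 */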
static void
iavf_rxr_clean(struct iavf_softc *sc, struct iavf_rx_ring *rxr)
{
	struct iavf_rx_map *maps, *rxm;
	bus_dmamap_t map;
	unsigned int i;

	timeout_del_barrier(&rxr->rxr_refill);

	maps = rxr->rxr_maps;
	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
		rxm = &maps[i];

		if (rxm->rxm_m == NULL)
			continue;

		map = rxm->rxm_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, map);

		m_freem(rxm->rxm_m);
		rxm->rxm_m = NULL;
	}

	m_freem(rxr->rxr_m_head);
	rxr->rxr_m_head = NULL;
	rxr->rxr_m_tail = &rxr->rxr_m_head;

	rxr->rxr_prod = rxr->rxr_cons = 0;
}

static void
iavf_rxr_free(struct iavf_softc *sc, struct iavf_rx_ring *rxr)
{
	struct iavf_rx_map *maps, *rxm;
	unsigned int i;

	maps = rxr->rxr_maps;
	for (i = 0; i < sc->sc_rx_ring_ndescs; i++) {
		rxm = &maps[i];

		bus_dmamap_destroy(sc->sc_dmat, rxm->rxm_map);
	}

	iavf_dmamem_free(sc, &rxr->rxr_mem);
	free(maps, M_DEVBUF, sizeof(*maps) * sc->sc_rx_ring_ndescs);
	free(rxr, M_DEVBUF, sizeof(*rxr));
}

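/*
 * Translate the receive descriptor's checksum status bits into mbuf
 * csum flags: L3L4P says the hardware parsed the headers, while IPE
 * and L4E flag IP and L4 checksum errors respectively.
 */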
static void
iavf_rx_checksum(struct mbuf *m, uint64_t word)
{
	if (!ISSET(word, IAVF_RX_DESC_L3L4P))
		return;

	if (ISSET(word, IAVF_RX_DESC_IPE))
		return;

	m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

	if (ISSET(word, IAVF_RX_DESC_L4E))
		return;

	m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK;
}

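/*
 * Receive completion: walk the rx ring collecting written-back
 * descriptors, chaining multi-descriptor packets together until EOP,
 * then hand completed packets (with vlan tag and checksum state) to
 * the interface input queue and refill the ring.
 */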
static int
iavf_rxeof(struct iavf_softc *sc, struct ifiqueue *ifiq)
{
	struct iavf_rx_ring *rxr = ifiq->ifiq_softc;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct iavf_rx_wb_desc_32 *ring, *rxd;
	struct iavf_rx_map *rxm;
	bus_dmamap_t map;
	unsigned int cons, prod;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mbuf *m;
	uint64_t word;
	uint16_t vlan;
	unsigned int len;
	unsigned int mask;
	int done = 0;

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return (0);

	prod = rxr->rxr_prod;
	cons = rxr->rxr_cons;

	if (cons == prod)
		return (0);

	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&rxr->rxr_mem),
	    0, IAVF_DMA_LEN(&rxr->rxr_mem),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	ring = IAVF_DMA_KVA(&rxr->rxr_mem);
	mask = sc->sc_rx_ring_ndescs - 1;

	do {
		rxd = &ring[cons];

		word = lemtoh64(&rxd->qword1);
		if (!ISSET(word, IAVF_RX_DESC_DD))
			break;

		if_rxr_put(&rxr->rxr_acct, 1);

		rxm = &rxr->rxr_maps[cons];

		map = rxm->rxm_map;
		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, map);

		m = rxm->rxm_m;
		rxm->rxm_m = NULL;

		len = (word & IAVF_RX_DESC_PLEN_MASK) >>
		    IAVF_RX_DESC_PLEN_SHIFT;
		m->m_len = len;
		m->m_pkthdr.len = 0;

		m->m_next = NULL;
		*rxr->rxr_m_tail = m;
		rxr->rxr_m_tail = &m->m_next;

		m = rxr->rxr_m_head;
		m->m_pkthdr.len += len;

		if (ISSET(word, IAVF_RX_DESC_EOP)) {
#if NVLAN > 0
			if (ISSET(word, IAVF_RX_DESC_L2TAG1P)) {
				vlan = (lemtoh64(&rxd->qword0) &
				    IAVF_RX_DESC_L2TAG1_MASK)
				    >> IAVF_RX_DESC_L2TAG1_SHIFT;
				m->m_pkthdr.ether_vtag = vlan;
				m->m_flags |= M_VLANTAG;
			}
#endif
			if (!ISSET(word,
			    IAVF_RX_DESC_RXE | IAVF_RX_DESC_OVERSIZE)) {
				iavf_rx_checksum(m, word);
				ml_enqueue(&ml, m);
			} else {
				ifp->if_ierrors++; /* XXX */
				m_freem(m);
			}

			rxr->rxr_m_head = NULL;
			rxr->rxr_m_tail = &rxr->rxr_m_head;
		}

		cons++;
		cons &= mask;

		done = 1;
	} while (cons != prod);

	if (done) {
		rxr->rxr_cons = cons;
		if (ifiq_input(ifiq, &ml))
			if_rxr_livelocked(&rxr->rxr_acct);
		iavf_rxfill(sc, rxr);
	}

	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&rxr->rxr_mem),
	    0, IAVF_DMA_LEN(&rxr->rxr_mem),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	return (done);
}

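/*
 * Fill empty rx ring slots with freshly allocated mbuf clusters and
 * post them to the hardware by writing the tail register. If no
 * clusters could be attached at all, retry later via the refill
 * timeout.
 */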
static void
iavf_rxfill(struct iavf_softc *sc, struct iavf_rx_ring *rxr)
{
	struct iavf_rx_rd_desc_32 *ring, *rxd;
	struct iavf_rx_map *rxm;
	bus_dmamap_t map;
	struct mbuf *m;
	unsigned int prod;
	unsigned int slots;
	unsigned int mask;
	int post = 0;

	slots = if_rxr_get(&rxr->rxr_acct, sc->sc_rx_ring_ndescs);
	if (slots == 0)
		return;

	prod = rxr->rxr_prod;

	ring = IAVF_DMA_KVA(&rxr->rxr_mem);
	mask = sc->sc_rx_ring_ndescs - 1;

	do {
		rxm = &rxr->rxr_maps[prod];

		m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES + ETHER_ALIGN);
		if (m == NULL)
			break;
		m->m_data += (m->m_ext.ext_size - (MCLBYTES + ETHER_ALIGN));
		m->m_len = m->m_pkthdr.len = MCLBYTES + ETHER_ALIGN;

		map = rxm->rxm_map;

		if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m,
		    BUS_DMA_NOWAIT) != 0) {
			m_freem(m);
			break;
		}

		rxm->rxm_m = m;

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREREAD);

		rxd = &ring[prod];

		htolem64(&rxd->paddr, map->dm_segs[0].ds_addr);
		rxd->haddr = htole64(0);

		prod++;
		prod &= mask;

		post = 1;
	} while (--slots);

	if_rxr_put(&rxr->rxr_acct, slots);

	if (if_rxr_inuse(&rxr->rxr_acct) == 0)
		timeout_add(&rxr->rxr_refill, 1);
	else if (post) {
		rxr->rxr_prod = prod;
		iavf_wr(sc, rxr->rxr_tail, prod);
	}
}

void
iavf_rxrefill(void *arg)
{
	struct iavf_rx_ring *rxr = arg;
	struct iavf_softc *sc = rxr->rxr_sc;

	iavf_rxfill(sc, rxr);
}

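/*
 * Report per-queue rx ring accounting for the SIOCGIFRXR ioctl.
 */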
static int
iavf_rxrinfo(struct iavf_softc *sc, struct if_rxrinfo *ifri)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct if_rxring_info *ifr;
	struct iavf_rx_ring *ring;
	int i, rv;

	if (!ISSET(ifp->if_flags, IFF_RUNNING))
		return (ENOTTY);

	ifr = mallocarray(sizeof(*ifr), iavf_nqueues(sc), M_TEMP,
	    M_WAITOK|M_CANFAIL|M_ZERO);
	if (ifr == NULL)
		return (ENOMEM);

	for (i = 0; i < iavf_nqueues(sc); i++) {
		ring = ifp->if_iqs[i]->ifiq_softc;
		ifr[i].ifr_size = MCLBYTES;
		ifr[i].ifr_info = ring->rxr_acct;
	}

	rv = if_rxr_info_ioctl(ifri, iavf_nqueues(sc), ifr);
	free(ifr, M_TEMP, iavf_nqueues(sc) * sizeof(*ifr));

	return (rv);
}

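/*
 * Interrupt handler. A read of ICR01 returning the IAVF_REG_VFR marker
 * means the register space has been taken over by a VF reset, so the
 * reset task is scheduled instead. Otherwise the admin queues and the
 * rx/tx rings are serviced according to the cause bits.
 */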
static int
iavf_intr(void *xsc)
{
	struct iavf_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	uint32_t icr, ena;
	int i, rv = 0;

	ena = iavf_rd(sc, I40E_VFINT_ICR0_ENA1);
	iavf_intr_enable(sc);
	icr = iavf_rd(sc, I40E_VFINT_ICR01);

	if (icr == IAVF_REG_VFR) {
		printf("%s: VF reset in progress\n", DEVNAME(sc));
		sc->sc_resetting = 1;
		task_add(systq, &sc->sc_reset_task);
		return (1);
	}

	if (ISSET(icr, I40E_VFINT_ICR01_ADMINQ_MASK)) {
		iavf_atq_done(sc);
		iavf_process_arq(sc, 0);
		rv = 1;
	}

	if (ISSET(icr, I40E_VFINT_ICR01_QUEUE_0_MASK)) {
		for (i = 0; i < iavf_nqueues(sc); i++) {
			rv |= iavf_rxeof(sc, ifp->if_iqs[i]);
			rv |= iavf_txeof(sc, ifp->if_ifqs[i]);
		}
	}

	return (rv);
}

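/*
 * Parse the GET_VF_RESOURCES reply: record the first (and only used)
 * VSI, the VF id and the MAC address the PF assigned us, and clamp the
 * hard MTU to what the PF advertises.
 */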
static void
iavf_process_vf_resources(struct iavf_softc *sc, struct iavf_aq_desc *desc,
    struct iavf_aq_buf *buf)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct iavf_vc_vf_resource *vf_res;
	struct iavf_vc_vsi_resource *vsi_res;
	int mtu;

	sc->sc_got_vf_resources = 1;

	vf_res = buf->aqb_data;
	if (letoh16(vf_res->num_vsis) == 0) {
		printf(", no VSI available\n");
		/* set vsi number to something */
		return;
	}

	mtu = letoh16(vf_res->max_mtu);
	if (mtu != 0)
		ifp->if_hardmtu = MIN(IAVF_HARDMTU, mtu);

	/* limit vectors to what we got here? */

	/* just take the first vsi */
	vsi_res = &vf_res->vsi_res[0];
	sc->sc_vsi_id = letoh16(vsi_res->vsi_id);
	sc->sc_qset_handle = letoh16(vsi_res->qset_handle);
	/* limit number of queues to what we got here */
	/* is vsi type interesting? */

	sc->sc_vf_id = letoh32(desc->iaq_param[0]);

	memcpy(sc->sc_ac.ac_enaddr, vsi_res->default_mac, ETHER_ADDR_LEN);

	if (sc->sc_resetting == 0)
		printf(", VF %d VSI %d", sc->sc_vf_id, sc->sc_vsi_id);
}

static const struct iavf_link_speed *
iavf_find_link_speed(struct iavf_softc *sc, uint32_t link_speed)
{
	int i;

	for (i = 0; i < nitems(iavf_link_speeds); i++) {
		if (link_speed & (1 << i))
			return (&iavf_link_speeds[i]);
	}

	return (NULL);
}

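/*
 * Handle an asynchronous event from the PF; only link state changes
 * are acted on, updating media status and baudrate.
 */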
static void
iavf_process_vc_event(struct iavf_softc *sc, struct iavf_aq_desc *desc,
    struct iavf_aq_buf *buf)
{
	struct iavf_vc_pf_event *event;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	const struct iavf_link_speed *speed;
	int link;

	event = buf->aqb_data;
	switch (event->event) {
	case IAVF_VC_EVENT_LINK_CHANGE:
		sc->sc_media_status = IFM_AVALID;
		sc->sc_media_active = IFM_ETHER;
		link = LINK_STATE_DOWN;
		if (event->link_status) {
			link = LINK_STATE_UP;
			sc->sc_media_status |= IFM_ACTIVE;

			ifp->if_baudrate = 0;
			speed = iavf_find_link_speed(sc, event->link_speed);
			if (speed != NULL) {
				sc->sc_media_active |= speed->media;
				ifp->if_baudrate = speed->baudrate;
			}
		}

		if (ifp->if_link_state != link) {
			ifp->if_link_state = link;
			if_link_state_change(ifp);
		}
		break;

	default:
		break;
	}
}

static void
iavf_process_irq_map(struct iavf_softc *sc, struct iavf_aq_desc *desc)
{
	if (letoh32(desc->iaq_vc_retval) != IAVF_VC_RC_SUCCESS) {
		printf("config irq map failed: %d\n",
		    letoh32(desc->iaq_vc_retval));
	}
	sc->sc_got_irq_map = 1;
}

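/*
 * Program the admin queue registers with the DMA addresses and sizes
 * of the ATQ and ARQ rings and set their enable bits.
 */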
static void
iavf_init_admin_queue(struct iavf_softc *sc)
{
	iavf_wr(sc, sc->sc_aq_regs->atq_head, 0);
	iavf_wr(sc, sc->sc_aq_regs->arq_head, 0);
	iavf_wr(sc, sc->sc_aq_regs->atq_tail, 0);

	iavf_barrier(sc, 0, sc->sc_mems, BUS_SPACE_BARRIER_WRITE);

	iavf_wr(sc, sc->sc_aq_regs->atq_bal,
	    iavf_dmamem_lo(&sc->sc_atq));
	iavf_wr(sc, sc->sc_aq_regs->atq_bah,
	    iavf_dmamem_hi(&sc->sc_atq));
	iavf_wr(sc, sc->sc_aq_regs->atq_len,
	    sc->sc_aq_regs->atq_len_enable | IAVF_AQ_NUM);

	iavf_wr(sc, sc->sc_aq_regs->arq_bal,
	    iavf_dmamem_lo(&sc->sc_arq));
	iavf_wr(sc, sc->sc_aq_regs->arq_bah,
	    iavf_dmamem_hi(&sc->sc_arq));
	iavf_wr(sc, sc->sc_aq_regs->arq_len,
	    sc->sc_aq_regs->arq_len_enable | IAVF_AQ_NUM);

	iavf_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);
}

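/*
 * Drain the admin receive queue: dispatch each virtchnl reply or event
 * to its handler, recycle the buffer onto the idle list, and optionally
 * refill the ring. Commands waited on via iavf_arq_wait() complete here
 * by signalling sc_admin_cond.
 */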
static int
iavf_process_arq(struct iavf_softc *sc, int fill)
{
	struct iavf_aq_desc *arq, *iaq;
	struct iavf_aq_buf *aqb;
	struct iavf_vc_version_info *ver;
	unsigned int cons = sc->sc_arq_cons;
	unsigned int prod;
	int done = 0;

	prod = iavf_rd(sc, sc->sc_aq_regs->arq_head) &
	    sc->sc_aq_regs->arq_head_mask;

	if (cons == prod)
		return (0);

	arq = IAVF_DMA_KVA(&sc->sc_arq);

	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_arq),
	    0, IAVF_DMA_LEN(&sc->sc_arq),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	do {
		iaq = &arq[cons];

		aqb = SIMPLEQ_FIRST(&sc->sc_arq_live);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_live, aqb_entry);
		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IAVF_AQ_BUFLEN,
		    BUS_DMASYNC_POSTREAD);

		switch (letoh32(iaq->iaq_vc_opcode)) {
		case IAVF_VC_OP_VERSION:
			ver = aqb->aqb_data;
			sc->sc_major_ver = letoh32(ver->major);
			sc->sc_minor_ver = letoh32(ver->minor);
			break;

		case IAVF_VC_OP_GET_VF_RESOURCES:
			iavf_process_vf_resources(sc, iaq, aqb);
			break;

		case IAVF_VC_OP_EVENT:
			iavf_process_vc_event(sc, iaq, aqb);
			break;

		case IAVF_VC_OP_CONFIG_IRQ_MAP:
			iavf_process_irq_map(sc, iaq);
			break;

		case IAVF_VC_OP_CONFIG_TX_QUEUE:
		case IAVF_VC_OP_CONFIG_RX_QUEUE:
		case IAVF_VC_OP_CONFIG_VSI_QUEUES:
		case IAVF_VC_OP_ENABLE_QUEUES:
		case IAVF_VC_OP_DISABLE_QUEUES:
		case IAVF_VC_OP_GET_RSS_HENA_CAPS:
		case IAVF_VC_OP_SET_RSS_HENA:
		case IAVF_VC_OP_ADD_ETH_ADDR:
		case IAVF_VC_OP_DEL_ETH_ADDR:
		case IAVF_VC_OP_CONFIG_PROMISC:
			sc->sc_admin_result = letoh32(iaq->iaq_vc_retval);
			cond_signal(&sc->sc_admin_cond);
			break;
		}

		memset(iaq, 0, sizeof(*iaq));
		SIMPLEQ_INSERT_TAIL(&sc->sc_arq_idle, aqb, aqb_entry);
		if_rxr_put(&sc->sc_arq_ring, 1);

		cons++;
		cons &= IAVF_AQ_MASK;

		done = 1;
	} while (cons != prod);

	if (fill)
		iavf_arq_fill(sc, 1);

	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_arq),
	    0, IAVF_DMA_LEN(&sc->sc_arq),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc->sc_arq_cons = cons;
	return (done);
}

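/*
 * Reclaim admin send queue slots whose descriptors the hardware has
 * marked done.
 */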
static void
iavf_atq_done(struct iavf_softc *sc)
{
	struct iavf_aq_desc *atq, *slot;
	unsigned int cons;
	unsigned int prod;

	mtx_enter(&sc->sc_atq_mtx);

	prod = sc->sc_atq_prod;
	cons = sc->sc_atq_cons;

	if (prod == cons) {
		mtx_leave(&sc->sc_atq_mtx);
		return;
	}

	atq = IAVF_DMA_KVA(&sc->sc_atq);

	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_atq),
	    0, IAVF_DMA_LEN(&sc->sc_atq),
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

	do {
		slot = &atq[cons];
		if (!ISSET(slot->iaq_flags, htole16(IAVF_AQ_DD)))
			break;

		memset(slot, 0, sizeof(*slot));

		cons++;
		cons &= IAVF_AQ_MASK;
	} while (cons != prod);

	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_atq),
	    0, IAVF_DMA_LEN(&sc->sc_atq),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	sc->sc_atq_cons = cons;

	mtx_leave(&sc->sc_atq_mtx);
}

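/*
 * Post a single descriptor to the admin send queue and bump the tail
 * register. Returns the new producer index.
 */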
static int
iavf_atq_post(struct iavf_softc *sc, struct iavf_aq_desc *iaq)
{
	struct iavf_aq_desc *atq, *slot;
	unsigned int prod;

	mtx_enter(&sc->sc_atq_mtx);

	atq = IAVF_DMA_KVA(&sc->sc_atq);
	prod = sc->sc_atq_prod;
	slot = atq + prod;

	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_atq),
	    0, IAVF_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_POSTWRITE);

	*slot = *iaq;
	slot->iaq_flags |= htole16(IAVF_AQ_SI);

	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_atq),
	    0, IAVF_DMA_LEN(&sc->sc_atq), BUS_DMASYNC_PREWRITE);

	prod++;
	prod &= IAVF_AQ_MASK;
	sc->sc_atq_prod = prod;
	iavf_wr(sc, sc->sc_aq_regs->atq_tail, prod);

	mtx_leave(&sc->sc_atq_mtx);

	return (prod);
}

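/*
 * Negotiate the virtchnl API version with the PF, polling the admin
 * receive queue for the reply rather than sleeping.
 */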
static int
iavf_get_version(struct iavf_softc *sc)
{
	struct iavf_aq_desc iaq;
	struct iavf_vc_version_info *ver;
	int tries;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_flags = htole16(IAVF_AQ_BUF | IAVF_AQ_RD);
	iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
	iaq.iaq_vc_opcode = htole32(IAVF_VC_OP_VERSION);
	iaq.iaq_datalen = htole16(sizeof(struct iavf_vc_version_info));
	iavf_aq_dva(&iaq, IAVF_DMA_DVA(&sc->sc_scratch));

	ver = IAVF_DMA_KVA(&sc->sc_scratch);
	ver->major = htole32(IAVF_VF_MAJOR);
	ver->minor = htole32(IAVF_VF_MINOR);
	sc->sc_major_ver = UINT_MAX;
	sc->sc_minor_ver = UINT_MAX;

	membar_sync();
	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_scratch), 0,
	    IAVF_DMA_LEN(&sc->sc_scratch), BUS_DMASYNC_PREREAD);

	iavf_atq_post(sc, &iaq);

	for (tries = 0; tries < 100; tries++) {
		iavf_process_arq(sc, 1);
		if (sc->sc_major_ver != -1)
			break;

		delaymsec(1);
	}
	if (tries == 100) {
		printf(", timeout waiting for VF version");
		return (1);
	}

	if (sc->sc_major_ver != IAVF_VF_MAJOR) {
		printf(", unsupported VF version %d", sc->sc_major_ver);
		return (1);
	}

	if (sc->sc_resetting == 0) {
		printf(", VF version %d.%d%s", sc->sc_major_ver,
		    sc->sc_minor_ver,
		    (sc->sc_minor_ver > IAVF_VF_MINOR) ?
		    " (minor mismatch)" : "");
	}

	return (0);
}

static int
iavf_get_vf_resources(struct iavf_softc *sc)
{
	struct iavf_aq_desc iaq;
	uint32_t *cap;
	int tries;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_flags = htole16(IAVF_AQ_BUF | IAVF_AQ_RD);
	iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
	iaq.iaq_vc_opcode = htole32(IAVF_VC_OP_GET_VF_RESOURCES);
	iavf_aq_dva(&iaq, IAVF_DMA_DVA(&sc->sc_scratch));

	if (sc->sc_minor_ver > 0) {
		iaq.iaq_datalen = htole16(sizeof(uint32_t));
		cap = IAVF_DMA_KVA(&sc->sc_scratch);
		*cap = htole32(IAVF_VC_OFFLOAD_L2 | IAVF_VC_OFFLOAD_VLAN |
		    IAVF_VC_OFFLOAD_RSS_PF);
	}

	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_scratch), 0,
	    IAVF_DMA_LEN(&sc->sc_scratch), BUS_DMASYNC_PREREAD);

	sc->sc_got_vf_resources = 0;
	iavf_atq_post(sc, &iaq);

	for (tries = 0; tries < 100; tries++) {
		iavf_process_arq(sc, 1);
		if (sc->sc_got_vf_resources != 0)
			return (0);

		delaymsec(1);
	}

	return (1);
}

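/*
 * Map all rx and tx queues to vector 0, the single interrupt vector
 * serviced by iavf_intr().
 */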
static int
iavf_config_irq_map(struct iavf_softc *sc)
{
	struct iavf_aq_desc iaq;
	struct iavf_vc_vector_map *vec;
	struct iavf_vc_irq_map_info *map;
	int tries;

	memset(&iaq, 0, sizeof(iaq));
	iaq.iaq_flags = htole16(IAVF_AQ_BUF | IAVF_AQ_RD);
	iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF);
	iaq.iaq_vc_opcode = htole32(IAVF_VC_OP_CONFIG_IRQ_MAP);
	iaq.iaq_datalen = htole16(sizeof(*map) + sizeof(*vec));
	iavf_aq_dva(&iaq, IAVF_DMA_DVA(&sc->sc_scratch));

	map = IAVF_DMA_KVA(&sc->sc_scratch);
	map->num_vectors = htole16(1);

	vec = map->vecmap;
	vec[0].vsi_id = htole16(sc->sc_vsi_id);
	vec[0].vector_id = 0;
	vec[0].rxq_map = htole16(iavf_allqueues(sc));
	vec[0].txq_map = htole16(iavf_allqueues(sc));
	vec[0].rxitr_idx = htole16(IAVF_NOITR);
	vec[0].txitr_idx = htole16(IAVF_NOITR);

	bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_scratch), 0,
	    IAVF_DMA_LEN(&sc->sc_scratch), BUS_DMASYNC_PREREAD);

	sc->sc_got_irq_map = 0;
	iavf_atq_post(sc, &iaq);

	for (tries = 0; tries < 100; tries++) {
		iavf_process_arq(sc, 1);
		if (sc->sc_got_irq_map != 0)
			return (0);

		delaymsec(1);
	}

	return (1);
}

static struct iavf_aq_buf *
iavf_aqb_alloc(struct iavf_softc *sc)
{
	struct iavf_aq_buf *aqb;

	aqb = malloc(sizeof(*aqb), M_DEVBUF, M_WAITOK);
	if (aqb == NULL)
		return (NULL);

	aqb->aqb_data = dma_alloc(IAVF_AQ_BUFLEN, PR_WAITOK);
	if (aqb->aqb_data == NULL)
		goto free;

	if (bus_dmamap_create(sc->sc_dmat, IAVF_AQ_BUFLEN, 1,
	    IAVF_AQ_BUFLEN, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
	    &aqb->aqb_map) != 0)
		goto dma_free;

	if (bus_dmamap_load(sc->sc_dmat, aqb->aqb_map, aqb->aqb_data,
	    IAVF_AQ_BUFLEN, NULL, BUS_DMA_WAITOK) != 0)
		goto destroy;

	return (aqb);

destroy:
	bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
dma_free:
	dma_free(aqb->aqb_data, IAVF_AQ_BUFLEN);
free:
	free(aqb, M_DEVBUF, sizeof(*aqb));

	return (NULL);
}

static void
iavf_aqb_free(struct iavf_softc *sc, struct iavf_aq_buf *aqb)
{
	bus_dmamap_unload(sc->sc_dmat, aqb->aqb_map);
	bus_dmamap_destroy(sc->sc_dmat, aqb->aqb_map);
	dma_free(aqb->aqb_data, IAVF_AQ_BUFLEN);
	free(aqb, M_DEVBUF, sizeof(*aqb));
}

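/*
 * Populate the admin receive queue with DMA-able buffers, reusing idle
 * buffers where possible, and optionally notify the hardware via the
 * tail register.
 */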
static int
iavf_arq_fill(struct iavf_softc *sc, int post)
{
	struct iavf_aq_buf *aqb;
	struct iavf_aq_desc *arq, *iaq;
	unsigned int prod = sc->sc_arq_prod;
	unsigned int n;
	int filled = 0;

	n = if_rxr_get(&sc->sc_arq_ring, IAVF_AQ_NUM);
	arq = IAVF_DMA_KVA(&sc->sc_arq);

	while (n > 0) {
		aqb = SIMPLEQ_FIRST(&sc->sc_arq_idle);
		if (aqb != NULL)
			SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_idle, aqb_entry);
		else if ((aqb = iavf_aqb_alloc(sc)) == NULL)
			break;

		memset(aqb->aqb_data, 0, IAVF_AQ_BUFLEN);

		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IAVF_AQ_BUFLEN,
		    BUS_DMASYNC_PREREAD);

		iaq = &arq[prod];
		iaq->iaq_flags = htole16(IAVF_AQ_BUF |
		    (IAVF_AQ_BUFLEN > I40E_AQ_LARGE_BUF ? IAVF_AQ_LB : 0));
		iaq->iaq_opcode = 0;
		iaq->iaq_datalen = htole16(IAVF_AQ_BUFLEN);
		iaq->iaq_retval = 0;
		iaq->iaq_vc_opcode = 0;
		iaq->iaq_vc_retval = 0;
		iaq->iaq_param[0] = 0;
		iaq->iaq_param[1] = 0;
		iavf_aq_dva(iaq, aqb->aqb_map->dm_segs[0].ds_addr);

		SIMPLEQ_INSERT_TAIL(&sc->sc_arq_live, aqb, aqb_entry);

		prod++;
		prod &= IAVF_AQ_MASK;

		filled = 1;

		n--;
	}

	if_rxr_put(&sc->sc_arq_ring, n);
	sc->sc_arq_prod = prod;

	if (filled && post)
		iavf_wr(sc, sc->sc_aq_regs->arq_tail, sc->sc_arq_prod);

	return (filled);
}

static void
iavf_arq_unfill(struct iavf_softc *sc)
{
	struct iavf_aq_buf *aqb;

	while ((aqb = SIMPLEQ_FIRST(&sc->sc_arq_live)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_arq_live, aqb_entry);

		bus_dmamap_sync(sc->sc_dmat, aqb->aqb_map, 0, IAVF_AQ_BUFLEN,
		    BUS_DMASYNC_POSTREAD);
		iavf_aqb_free(sc, aqb);
		if_rxr_put(&sc->sc_arq_ring, 1);
	}
}

static void
iavf_arq_timeout(void *xsc)
{
	struct iavf_softc *sc = xsc;

	sc->sc_admin_result = -1;
	cond_signal(&sc->sc_admin_cond);
}

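/*
 * Wait for iavf_process_arq() to signal completion of an admin command,
 * bounded by a timeout that reports failure as -1.
 */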
static int
iavf_arq_wait(struct iavf_softc *sc, int msec)
{
	cond_init(&sc->sc_admin_cond);

	timeout_add_msec(&sc->sc_admin_timeout, msec);

	cond_wait(&sc->sc_admin_cond, "iavfarq");
	timeout_del(&sc->sc_admin_timeout);

	iavf_arq_fill(sc, 1);
	return (sc->sc_admin_result);
}

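/*
 * Allocate a single physically contiguous DMA memory region and map it
 * into kernel virtual address space.
 */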
static int
iavf_dmamem_alloc(struct iavf_softc *sc, struct iavf_dmamem *ixm,
    bus_size_t size, u_int align)
{
	ixm->ixm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, ixm->ixm_size, 1,
	    ixm->ixm_size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
	    &ixm->ixm_map) != 0)
		return (1);
	if (bus_dmamem_alloc(sc->sc_dmat, ixm->ixm_size,
	    align, 0, &ixm->ixm_seg, 1, &ixm->ixm_nsegs,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO) != 0)
		goto destroy;
	if (bus_dmamem_map(sc->sc_dmat, &ixm->ixm_seg, ixm->ixm_nsegs,
	    ixm->ixm_size, &ixm->ixm_kva, BUS_DMA_WAITOK) != 0)
		goto free;
	if (bus_dmamap_load(sc->sc_dmat, ixm->ixm_map, ixm->ixm_kva,
	    ixm->ixm_size, NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	return (0);
unmap:
	bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
	return (1);
}

static void
iavf_dmamem_free(struct iavf_softc *sc, struct iavf_dmamem *ixm)
{
	bus_dmamap_unload(sc->sc_dmat, ixm->ixm_map);
	bus_dmamem_unmap(sc->sc_dmat, ixm->ixm_kva, ixm->ixm_size);
	bus_dmamem_free(sc->sc_dmat, &ixm->ixm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, ixm->ixm_map);
}