/******************************************************************************

  Copyright (c) 2013-2018, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "ixl_pf_iov.h"

/* Private functions */
static void	ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum, uint32_t val);
static void	ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg);
static void	ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg);

static int	ixl_vc_opcode_level(uint16_t opcode);

static int	ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr);

static int	ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
static int	ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
static void	ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf);
static void	ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi);
static void	ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf);
static int	ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf);
static void	ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
static void	ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
static void	ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, void *msg, uint16_t len);
static void	ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op);
static void	ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, const char *file, int line);
static void	ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void	ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void	ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static int	ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_txq_info *info);
static int	ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_rxq_info *info);
static void	ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void	ixl_vf_set_qctl(struct ixl_pf *pf, const struct virtchnl_vector_map *vector, enum i40e_queue_type cur_type, uint16_t cur_queue,
    enum i40e_queue_type *last_type, uint16_t *last_queue);
static void	ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, const struct virtchnl_vector_map *vector);
static void	ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void	ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void	ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void	ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void	ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static enum i40e_status_code	ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf);
static void	ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void	ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void	ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static void	ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
static int	ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues);
static int	ixl_config_pf_vsi_loopback(struct ixl_pf *pf, bool enable);

static int	ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);

/*
 * TODO: Move pieces of this into iflib and call the rest in a handler?
 *
 * e.g. ixl_if_iov_set_schema
 *
 * It's odd to do pci_iov_detach() there while doing pci_iov_attach()
 * in the driver.
 */
void
ixl_initialize_sriov(struct ixl_pf *pf)
{
	device_t dev = pf->dev;
	struct i40e_hw *hw = &pf->hw;
	nvlist_t	*pf_schema, *vf_schema;
	int		iov_error;

	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();
	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
	    IOV_SCHEMA_HASDEFAULT, TRUE);
	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	pci_iov_schema_add_uint16(vf_schema, "num-queues",
	    IOV_SCHEMA_HASDEFAULT,
	    max(1, min(hw->func_caps.num_msix_vectors_vf - 1, IAVF_MAX_QUEUES)));

	iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
	if (iov_error != 0) {
		device_printf(dev,
		    "Failed to initialize SR-IOV (error=%d)\n",
		    iov_error);
	} else
		device_printf(dev, "SR-IOV ready\n");

	pf->vc_debug_lvl = 1;
}

/*
 * Allocate the VSI for a VF.
 */
static int
ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
{
	device_t dev;
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	struct i40e_vsi_context vsi_ctx;
	int i;
	enum i40e_status_code code;

	hw = &pf->hw;
	vsi = &pf->vsi;
	dev = pf->dev;

	vsi_ctx.pf_num = hw->pf_id;
	vsi_ctx.uplink_seid = pf->veb_seid;
	vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
	vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
	vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;

	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));

	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	if (pf->enable_vf_loopback)
		vsi_ctx.info.switch_id =
		    htole16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);

	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
	vsi_ctx.info.sec_flags = 0;
	if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
		vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;

	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
	    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

	vsi_ctx.info.valid_sections |=
	    htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
	vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);

	/* XXX: Only scattered allocation is supported for VFs right now */
	for (i = 0; i < vf->qtag.num_active; i++)
		vsi_ctx.info.queue_mapping[i] = vf->qtag.qidx[i];
	for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
		vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);

	vsi_ctx.info.tc_mapping[0] = htole16(
	    (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
	    ((fls(vf->qtag.num_allocated) - 1) << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));

	code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
	if (code != I40E_SUCCESS)
		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
	vf->vsi.seid = vsi_ctx.seid;
	vf->vsi.vsi_num = vsi_ctx.vsi_number;
	vf->vsi.num_rx_queues = vf->qtag.num_active;
	vf->vsi.num_tx_queues = vf->qtag.num_active;

	code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
	if (code != I40E_SUCCESS)
		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));

	code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
	if (code != I40E_SUCCESS) {
		device_printf(dev, "Failed to disable BW limit: %d\n",
		    ixl_adminq_err_to_errno(hw->aq.asq_last_status));
		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
	}

	memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
	return (0);
}

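/*
 * Create and configure a VF's VSI, then install its broadcast and
 * MAC/VLAN filters.
 */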
static int
ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_hw *hw;
	int error;

	hw = &pf->hw;
	vf->vsi.flags |= IXL_FLAGS_IS_VF;

	error = ixl_vf_alloc_vsi(pf, vf);
	if (error != 0)
		return (error);

	vf->vsi.dev = pf->dev;

	ixl_init_filters(&vf->vsi);
	/* Let VF receive broadcast Ethernet frames */
	error = i40e_aq_set_vsi_broadcast(hw, vf->vsi.seid, TRUE, NULL);
	if (error)
		device_printf(pf->dev, "Error configuring VF VSI for broadcast promiscuous\n");
	/* Re-add VF's MAC/VLAN filters to its VSI */
	ixl_reconfigure_filters(&vf->vsi);

	return (0);
}

static void
ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
    uint32_t val)
{
	uint32_t qtable;
	int index, shift;

	/*
	 * Two queues are mapped in a single register, so we have to do some
	 * gymnastics to convert the queue number into a register index and
	 * shift.
	 */
	index = qnum / 2;
	shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;

	qtable = i40e_read_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
	qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
	qtable |= val << shift;
	i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
}

static void
ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_hw *hw;
	uint32_t qtable;
	int i;

	hw = &pf->hw;

	/*
	 * Contiguous mappings aren't actually supported by the hardware,
	 * so we have to use non-contiguous mappings.
	 */
	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
	    I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);

	/* Enable LAN traffic on this VF */
	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
	    I40E_VPLAN_MAPENA_TXRX_ENA_MASK);

	/* Program index of each VF queue into PF queue space
	 * (This is only needed if QTABLE is enabled) */
	for (i = 0; i < vf->vsi.num_tx_queues; i++) {
		qtable = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i) <<
		    I40E_VPLAN_QTABLE_QINDEX_SHIFT;

		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
	}
	for (; i < IXL_MAX_VSI_QUEUES; i++)
		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num),
		    I40E_VPLAN_QTABLE_QINDEX_MASK);

	/* Map queues allocated to VF to its VSI;
	 * This mapping matches the VF-wide mapping since the VF
	 * is only given a single VSI */
	for (i = 0; i < vf->vsi.num_tx_queues; i++)
		ixl_vf_map_vsi_queue(hw, vf, i,
		    ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i));

	/* Set rest of VSI queues as unused. */
	for (; i < IXL_MAX_VSI_QUEUES; i++)
		ixl_vf_map_vsi_queue(hw, vf, i,
		    I40E_VSILAN_QTABLE_QINDEX_0_MASK);

	ixl_flush(hw);
}

static void
ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
{
	struct i40e_hw *hw;

	hw = &pf->hw;

	if (vsi->seid == 0)
		return;

	i40e_aq_delete_element(hw, vsi->seid, NULL);
}

static void
ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
{

	wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
	ixl_flush(hw);
}

static void
ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
{

	wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
	    I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
	ixl_flush(hw);
}

static void
ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_hw *hw;
	uint32_t vfint_reg, vpint_reg;
	int i;

	hw = &pf->hw;

	ixl_vf_vsi_release(pf, &vf->vsi);

	/* Index 0 has a special register. */
	ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));

	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
		vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i, vf->vf_num);
		ixl_vf_disable_queue_intr(hw, vfint_reg);
	}

	/* Index 0 has a special register. */
	ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));

	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
		vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
		ixl_vf_unregister_intr(hw, vpint_reg);
	}

	vf->vsi.num_tx_queues = 0;
	vf->vsi.num_rx_queues = 0;
}

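/*
 * Use the PF_PCI_CIAA/CIAD indirect config-space interface to poll the
 * VF's Device Status register until its pending PCIe transactions have
 * drained, or time out.
 */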
static int
ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_hw *hw;
	int i;
	uint16_t global_vf_num;
	uint32_t ciad;

	hw = &pf->hw;
	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;

	wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
	    (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
		ciad = rd32(hw, I40E_PF_PCI_CIAD);
		if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
			return (0);
		DELAY(1);
	}

	return (ETIMEDOUT);
}

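/*
 * Trigger a software reset of the given VF and rebuild its resources.
 */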
static void
ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_hw *hw;
	uint32_t vfrtrig;

	hw = &pf->hw;

	ixl_dbg_iov(pf, "Resetting VF-%d\n", vf->vf_num);

	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
	vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
	ixl_flush(hw);

	ixl_reinit_vf(pf, vf);

	ixl_dbg_iov(pf, "Resetting VF-%d done.\n", vf->vf_num);
}

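/*
 * Finish a VF reset: wait for the hardware to report the reset done,
 * then tear down and rebuild the VF's VSI, queues, and interrupts.
 */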
static void
ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_hw *hw;
	uint32_t vfrstat, vfrtrig;
	int i, error;

	hw = &pf->hw;

	error = ixl_flush_pcie(pf, vf);
	if (error != 0)
		device_printf(pf->dev,
		    "Timed out waiting for PCIe activity to stop on VF-%d\n",
		    vf->vf_num);

	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
		DELAY(10);

		vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
		if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
			break;
	}

	if (i == IXL_VF_RESET_TIMEOUT)
		device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);

	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_COMPLETED);

	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
	vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);

	if (vf->vsi.seid != 0)
		ixl_disable_rings(pf, &vf->vsi, &vf->qtag);
	ixl_pf_qmgr_clear_queue_flags(&vf->qtag);

	ixl_vf_release_resources(pf, vf);
	ixl_vf_setup_vsi(pf, vf);
	ixl_vf_map_queues(pf, vf);

	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_VFACTIVE);
	ixl_flush(hw);
}

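/*
 * Return the debug level for a virtchnl opcode; GET_STATS is polled
 * frequently by VFs, so it is only logged at a higher debug setting.
 */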
static int
ixl_vc_opcode_level(uint16_t opcode)
{
	switch (opcode) {
	case VIRTCHNL_OP_GET_STATS:
		return (10);
	default:
		return (5);
	}
}

static void
ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
    enum i40e_status_code status, void *msg, uint16_t len)
{
	struct i40e_hw *hw;
	int global_vf_id;

	hw = &pf->hw;
	global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;

	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
	    "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
	    ixl_vc_opcode_str(op), op, status, vf->vf_num);

	i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
}

static void
ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
{

	ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
}

static void
ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
    enum i40e_status_code status, const char *file, int line)
{

	I40E_VC_DEBUG(pf, 1,
	    "Sending NACK (op=%s[%d], err=%s[%d]) to VF-%d from %s:%d\n",
	    ixl_vc_opcode_str(op), op, i40e_stat_str(&pf->hw, status),
	    status, vf->vf_num, file, line);
	ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
}

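/*
 * Handle VIRTCHNL_OP_VERSION: record the API version the VF speaks and
 * reply with the version the PF will use for this VF.
 */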
static void
ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_version_info *recv_vf_version;
	device_t dev = pf->dev;

	recv_vf_version = (struct virtchnl_version_info *)msg;

	/* VFs running the 1.0 API expect to get 1.0 back */
	if (VF_IS_V10(recv_vf_version)) {
		vf->version.major = 1;
		vf->version.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
	} else {
		vf->version.major = VIRTCHNL_VERSION_MAJOR;
		vf->version.minor = VIRTCHNL_VERSION_MINOR;

		if ((recv_vf_version->major != VIRTCHNL_VERSION_MAJOR) ||
		    (recv_vf_version->minor != VIRTCHNL_VERSION_MINOR))
			device_printf(dev,
			    "%s: VF-%d requested version (%d.%d) differs from PF version (%d.%d)\n",
			    __func__, vf->vf_num,
			    recv_vf_version->major, recv_vf_version->minor,
			    VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR);
	}

	ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_VERSION, I40E_SUCCESS,
	    &vf->version, sizeof(vf->version));
}

static void
ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	ixl_reset_vf(pf, vf);

	/* No response to a reset message. */
}

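/*
 * Handle VIRTCHNL_OP_GET_VF_RESOURCES: report the VF's VSI, queue,
 * vector, and RSS sizing, plus the capabilities negotiated for its
 * API version.
 */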
static void
ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_vf_resource reply;

	bzero(&reply, sizeof(reply));

	if (vf->version.minor == VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
		reply.vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2 |
		    VIRTCHNL_VF_OFFLOAD_RSS_REG |
		    VIRTCHNL_VF_OFFLOAD_VLAN;
	else
		/* Force VF RSS setup by PF in 1.1+ VFs */
		reply.vf_cap_flags = *(u32 *)msg & (
		    VIRTCHNL_VF_OFFLOAD_L2 |
		    VIRTCHNL_VF_OFFLOAD_RSS_PF |
		    VIRTCHNL_VF_OFFLOAD_VLAN);

	reply.num_vsis = 1;
	reply.num_queue_pairs = vf->vsi.num_tx_queues;
	reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
	reply.rss_key_size = 52;
	reply.rss_lut_size = 64;
	reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
	reply.vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
	reply.vsi_res[0].num_queue_pairs = vf->vsi.num_tx_queues;
	memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);

	ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_VF_RESOURCES,
	    I40E_SUCCESS, &reply, sizeof(reply));
}

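/*
 * Program the HMC TX queue context for one VF queue and associate the
 * queue with its VF in QTX_CTL.
 */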
static int
ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
    struct virtchnl_txq_info *info)
{
	struct i40e_hw *hw;
	struct i40e_hmc_obj_txq txq;
	uint16_t global_queue_num, global_vf_num;
	enum i40e_status_code status;
	uint32_t qtx_ctl;

	hw = &pf->hw;
	global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
	bzero(&txq, sizeof(txq));

	DDPRINTF(pf->dev, "VF %d: PF TX queue %d / VF TX queue %d (Global VF %d)\n",
	    vf->vf_num, global_queue_num, info->queue_id, global_vf_num);

	status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
	if (status != I40E_SUCCESS)
		return (EINVAL);

	txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;

	txq.head_wb_ena = info->headwb_enabled;
	txq.head_wb_addr = info->dma_headwb_addr;
	txq.qlen = info->ring_len;
	txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
	txq.rdylist_act = 0;

	status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
	if (status != I40E_SUCCESS)
		return (EINVAL);

	qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
	    (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
	    (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
	wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
	ixl_flush(hw);

	ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, true);

	return (0);
}

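/*
 * Validate the VF-supplied RX queue parameters, then program the HMC
 * RX queue context for one VF queue.
 */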
static int
ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
    struct virtchnl_rxq_info *info)
{
	struct i40e_hw *hw;
	struct i40e_hmc_obj_rxq rxq;
	uint16_t global_queue_num;
	enum i40e_status_code status;

	hw = &pf->hw;
	global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
	bzero(&rxq, sizeof(rxq));

	DDPRINTF(pf->dev, "VF %d: PF RX queue %d / VF RX queue %d\n",
	    vf->vf_num, global_queue_num, info->queue_id);

	if (info->databuffer_size > IXL_VF_MAX_BUFFER)
		return (EINVAL);

	if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
	    info->max_pkt_size < ETHER_MIN_LEN)
		return (EINVAL);

	if (info->splithdr_enabled) {
		if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
			return (EINVAL);

		rxq.hsplit_0 = info->rx_split_pos &
		    (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
		rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;

		rxq.dtype = 2;
	}

	status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
	if (status != I40E_SUCCESS)
		return (EINVAL);

	rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
	rxq.qlen = info->ring_len;

	rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;

	rxq.dsize = 1;
	rxq.crcstrip = 1;
	rxq.l2tsel = 1;

	rxq.rxmax = info->max_pkt_size;
	rxq.tphrdesc_ena = 1;
	rxq.tphwdesc_ena = 1;
	rxq.tphdata_ena = 1;
	rxq.tphhead_ena = 1;
	rxq.lrxqthresh = 1;
	rxq.prefena = 1;

	status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
	if (status != I40E_SUCCESS)
		return (EINVAL);

	ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, false);

	return (0);
}

static void
ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_vsi_queue_config_info *info;
	struct virtchnl_queue_pair_info *pair;
	int i;

	info = msg;
	if (info->num_queue_pairs == 0 || info->num_queue_pairs > vf->vsi.num_tx_queues) {
		device_printf(pf->dev, "VF %d: invalid # of qpairs (msg has %d, VSI has %d)\n",
		    vf->vf_num, info->num_queue_pairs, vf->vsi.num_tx_queues);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
		    I40E_ERR_PARAM);
		return;
	}

	if (info->vsi_id != vf->vsi.vsi_num) {
		device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
		    vf->vf_num, info->vsi_id, vf->vsi.vsi_num);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
		    I40E_ERR_PARAM);
		return;
	}

	for (i = 0; i < info->num_queue_pairs; i++) {
		pair = &info->qpair[i];

		if (pair->txq.vsi_id != vf->vsi.vsi_num ||
		    pair->rxq.vsi_id != vf->vsi.vsi_num ||
		    pair->txq.queue_id != pair->rxq.queue_id ||
		    pair->txq.queue_id >= vf->vsi.num_tx_queues) {

			i40e_send_vf_nack(pf, vf,
			    VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
			return;
		}

		if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
			i40e_send_vf_nack(pf, vf,
			    VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
			return;
		}

		if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
			i40e_send_vf_nack(pf, vf,
			    VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
			return;
		}
	}

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES);
}

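/*
 * Write one queue's QINT_RQCTL/TQCTL register, chaining it to the
 * previously programmed entry; callers use *last_type and *last_queue
 * to thread queues into a vector's interrupt linked list.
 */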
static void
ixl_vf_set_qctl(struct ixl_pf *pf,
    const struct virtchnl_vector_map *vector,
    enum i40e_queue_type cur_type, uint16_t cur_queue,
    enum i40e_queue_type *last_type, uint16_t *last_queue)
{
	uint32_t offset, qctl;
	uint16_t itr_indx;

	if (cur_type == I40E_QUEUE_TYPE_RX) {
		offset = I40E_QINT_RQCTL(cur_queue);
		itr_indx = vector->rxitr_idx;
	} else {
		offset = I40E_QINT_TQCTL(cur_queue);
		itr_indx = vector->txitr_idx;
	}

	qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
	    (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
	    (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
	    I40E_QINT_RQCTL_CAUSE_ENA_MASK |
	    (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));

	wr32(&pf->hw, offset, qctl);

	*last_type = cur_type;
	*last_queue = cur_queue;
}

static void
ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
    const struct virtchnl_vector_map *vector)
{
	struct i40e_hw *hw;
	u_int qindex;
	enum i40e_queue_type type, last_type;
	uint32_t lnklst_reg;
	uint16_t rxq_map, txq_map, cur_queue, last_queue;

	hw = &pf->hw;

	rxq_map = vector->rxq_map;
	txq_map = vector->txq_map;

	last_queue = IXL_END_OF_INTR_LNKLST;
	last_type = I40E_QUEUE_TYPE_RX;

	/*
	 * The datasheet says to optimize performance, RX queues and TX queues
	 * should be interleaved in the interrupt linked list, so we process
	 * both at once here.
	 */
	while ((rxq_map != 0) || (txq_map != 0)) {
		if (txq_map != 0) {
			qindex = ffs(txq_map) - 1;
			type = I40E_QUEUE_TYPE_TX;
			cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
			ixl_vf_set_qctl(pf, vector, type, cur_queue,
			    &last_type, &last_queue);
			txq_map &= ~(1 << qindex);
		}

		if (rxq_map != 0) {
			qindex = ffs(rxq_map) - 1;
			type = I40E_QUEUE_TYPE_RX;
			cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
			ixl_vf_set_qctl(pf, vector, type, cur_queue,
			    &last_type, &last_queue);
			rxq_map &= ~(1 << qindex);
		}
	}

	if (vector->vector_id == 0)
		lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
	else
		lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
		    vf->vf_num);
	wr32(hw, lnklst_reg,
	    (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
	    (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));

	ixl_flush(hw);
}

static void
ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_irq_map_info *map;
	struct virtchnl_vector_map *vector;
	struct i40e_hw *hw;
	int i, largest_txq, largest_rxq;

	hw = &pf->hw;
	map = msg;

	for (i = 0; i < map->num_vectors; i++) {
		vector = &map->vecmap[i];

		if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
		    vector->vsi_id != vf->vsi.vsi_num) {
			i40e_send_vf_nack(pf, vf,
			    VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
			return;
		}

		if (vector->rxq_map != 0) {
			largest_rxq = fls(vector->rxq_map) - 1;
			if (largest_rxq >= vf->vsi.num_rx_queues) {
				i40e_send_vf_nack(pf, vf,
				    VIRTCHNL_OP_CONFIG_IRQ_MAP,
				    I40E_ERR_PARAM);
				return;
			}
		}

		if (vector->txq_map != 0) {
			largest_txq = fls(vector->txq_map) - 1;
			if (largest_txq >= vf->vsi.num_tx_queues) {
				i40e_send_vf_nack(pf, vf,
				    VIRTCHNL_OP_CONFIG_IRQ_MAP,
				    I40E_ERR_PARAM);
				return;
			}
		}

		if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
		    vector->txitr_idx > IXL_MAX_ITR_IDX) {
			i40e_send_vf_nack(pf, vf,
			    VIRTCHNL_OP_CONFIG_IRQ_MAP,
			    I40E_ERR_PARAM);
			return;
		}

		ixl_vf_config_vector(pf, vf, vector);
	}

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP);
}

static void
ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_queue_select *select;
	int error = 0;

	select = msg;

	if (select->vsi_id != vf->vsi.vsi_num ||
	    select->rx_queues == 0 || select->tx_queues == 0) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
		    I40E_ERR_PARAM);
		return;
	}

	/* Enable TX rings selected by the VF */
	for (int i = 0; i < 32; i++) {
		if ((1 << i) & select->tx_queues) {
			/* Warn if queue is out of VF allocation range */
			if (i >= vf->vsi.num_tx_queues) {
				device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
				    vf->vf_num, i);
				break;
			}
			/* Skip this queue if it hasn't been configured */
			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
				continue;
			/* Warn if this queue is already marked as enabled */
			if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true))
				ixl_dbg_iov(pf, "VF %d: TX ring %d is already enabled!\n",
				    vf->vf_num, i);

			error = ixl_enable_tx_ring(pf, &vf->qtag, i);
			if (error)
				break;
			else
				ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, true);
		}
	}

	/* Enable RX rings selected by the VF */
	for (int i = 0; i < 32; i++) {
		if ((1 << i) & select->rx_queues) {
			/* Warn if queue is out of VF allocation range */
			if (i >= vf->vsi.num_rx_queues) {
				device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
				    vf->vf_num, i);
				break;
			}
			/* Skip this queue if it hasn't been configured */
			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
				continue;
			/* Warn if this queue is already marked as enabled */
			if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false))
				ixl_dbg_iov(pf, "VF %d: RX ring %d is already enabled!\n",
				    vf->vf_num, i);
			error = ixl_enable_rx_ring(pf, &vf->qtag, i);
			if (error)
				break;
			else
				ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, false);
		}
	}

	if (error) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
		    I40E_ERR_TIMEOUT);
		return;
	}

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES);
}

static void
ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
    void *msg, uint16_t msg_size)
{
	struct virtchnl_queue_select *select;
	int error = 0;

	select = msg;

	if (select->vsi_id != vf->vsi.vsi_num ||
	    select->rx_queues == 0 || select->tx_queues == 0) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
		    I40E_ERR_PARAM);
		return;
	}

	/* Disable TX rings selected by the VF */
	for (int i = 0; i < 32; i++) {
		if ((1 << i) & select->tx_queues) {
			/* Warn if queue is out of VF allocation range */
			if (i >= vf->vsi.num_tx_queues) {
				device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
				    vf->vf_num, i);
				break;
			}
			/* Skip this queue if it hasn't been configured */
			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
				continue;
			/* Warn if this queue is already marked as disabled */
			if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true)) {
				ixl_dbg_iov(pf, "VF %d: TX ring %d is already disabled!\n",
				    vf->vf_num, i);
				continue;
			}
			error = ixl_disable_tx_ring(pf, &vf->qtag, i);
			if (error)
				break;
			else
				ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, true);
		}
	}

	/* Disable RX rings selected by the VF */
	for (int i = 0; i < 32; i++) {
		if ((1 << i) & select->rx_queues) {
			/* Warn if queue is out of VF allocation range */
			if (i >= vf->vsi.num_rx_queues) {
				device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
				    vf->vf_num, i);
				break;
			}
			/* Skip this queue if it hasn't been configured */
			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
				continue;
			/* Warn if this queue is already marked as disabled */
			if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false)) {
				ixl_dbg_iov(pf, "VF %d: RX ring %d is already disabled!\n",
				    vf->vf_num, i);
				continue;
			}
			error = ixl_disable_rx_ring(pf, &vf->qtag, i);
			if (error)
				break;
			else
				ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, false);
		}
	}

	if (error) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
		    I40E_ERR_TIMEOUT);
		return;
	}

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES);
}

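/*
 * Check whether the VF is allowed to add a filter for the given MAC
 * address.
 */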
static int
ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
{

	if (ETHER_IS_ZERO(addr) || ETHER_IS_BROADCAST(addr))
		return (EINVAL);

	/*
	 * If the VF is not allowed to change its MAC address, don't let it
	 * set a MAC filter for an address that is not a multicast address and
	 * is not its assigned MAC.
	 */
	if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
	    !(ETHER_IS_MULTICAST(addr) || ixl_ether_is_equal(addr, vf->mac)))
		return (EPERM);

	return (0);
}

static void
ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_ether_addr_list *addr_list;
	struct virtchnl_ether_addr *addr;
	struct ixl_vsi *vsi;
	int i;

	vsi = &vf->vsi;
	addr_list = msg;

	if (addr_list->vsi_id != vsi->vsi_num) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR,
		    I40E_ERR_PARAM);
		return;
	}

	for (i = 0; i < addr_list->num_elements; i++) {
		if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
			i40e_send_vf_nack(pf, vf,
			    VIRTCHNL_OP_ADD_ETH_ADDR, I40E_ERR_PARAM);
			return;
		}
	}

	for (i = 0; i < addr_list->num_elements; i++) {
		addr = &addr_list->list[i];
		ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
	}

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR);
}

static void
ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_ether_addr_list *addr_list;
	struct virtchnl_ether_addr *addr;
	struct ixl_vsi *vsi;
	int i;

	vsi = &vf->vsi;
	addr_list = msg;

	if (addr_list->vsi_id != vsi->vsi_num) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR,
		    I40E_ERR_PARAM);
		return;
	}

	for (i = 0; i < addr_list->num_elements; i++) {
		addr = &addr_list->list[i];
		if (ETHER_IS_ZERO(addr->addr) || ETHER_IS_BROADCAST(addr->addr)) {
			i40e_send_vf_nack(pf, vf,
			    VIRTCHNL_OP_DEL_ETH_ADDR, I40E_ERR_PARAM);
			return;
		}
	}

	for (i = 0; i < addr_list->num_elements; i++) {
		addr = &addr_list->list[i];
		ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
	}

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR);
}

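/*
 * Update the VF's VSI so that received VLAN tags are stripped into the
 * RX descriptor.
 */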
static enum i40e_status_code
ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct i40e_vsi_context vsi_ctx;

	vsi_ctx.seid = vf->vsi.seid;

	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
	    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
	return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
}

static void
ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_vlan_filter_list *filter_list;
	enum i40e_status_code code;
	int i;

	filter_list = msg;

	if (filter_list->vsi_id != vf->vsi.vsi_num) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
		    I40E_ERR_PARAM);
		return;
	}

	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
		    I40E_ERR_PARAM);
		return;
	}

	for (i = 0; i < filter_list->num_elements; i++) {
		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
			i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
			    I40E_ERR_PARAM);
			return;
		}
	}

	code = ixl_vf_enable_vlan_strip(pf, vf);
	if (code != I40E_SUCCESS) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
		    I40E_ERR_PARAM);
		return;
	}

	for (i = 0; i < filter_list->num_elements; i++)
		ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_VLAN);
}

static void
ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_vlan_filter_list *filter_list;
	int i;

	filter_list = msg;

	if (filter_list->vsi_id != vf->vsi.vsi_num) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
		    I40E_ERR_PARAM);
		return;
	}

	for (i = 0; i < filter_list->num_elements; i++) {
		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
			i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
			    I40E_ERR_PARAM);
			return;
		}
	}

	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
		    I40E_ERR_PARAM);
		return;
	}

	for (i = 0; i < filter_list->num_elements; i++)
		ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_VLAN);
}

static void
ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
    void *msg, uint16_t msg_size)
{
	struct virtchnl_promisc_info *info;
	struct i40e_hw *hw = &pf->hw;
	enum i40e_status_code code;

	if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
		/*
		 * Do the same thing as the Linux PF driver -- lie to the VF
		 */
		ixl_send_vf_ack(pf, vf,
		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
		return;
	}

	info = msg;
	if (info->vsi_id != vf->vsi.vsi_num) {
		i40e_send_vf_nack(pf, vf,
		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
		return;
	}

	code = i40e_aq_set_vsi_unicast_promiscuous(hw, vf->vsi.seid,
	    info->flags & FLAG_VF_UNICAST_PROMISC, NULL, TRUE);
	if (code != I40E_SUCCESS) {
		device_printf(pf->dev, "i40e_aq_set_vsi_unicast_promiscuous (seid %d) failed: status %s,"
		    " error %s\n", vf->vsi.seid, i40e_stat_str(hw, code),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		i40e_send_vf_nack(pf, vf,
		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
		return;
	}

	code = i40e_aq_set_vsi_multicast_promiscuous(hw, vf->vsi.seid,
	    info->flags & FLAG_VF_MULTICAST_PROMISC, NULL);
	if (code != I40E_SUCCESS) {
		device_printf(pf->dev, "i40e_aq_set_vsi_multicast_promiscuous (seid %d) failed: status %s,"
		    " error %s\n", vf->vsi.seid, i40e_stat_str(hw, code),
		    i40e_aq_str(hw, hw->aq.asq_last_status));
		i40e_send_vf_nack(pf, vf,
		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
		return;
	}

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
}

static void
ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct virtchnl_queue_select *queue;

	queue = msg;
	if (queue->vsi_id != vf->vsi.vsi_num) {
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_STATS,
		    I40E_ERR_PARAM);
		return;
	}

	ixl_update_eth_stats(&vf->vsi);

	ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_STATS,
	    I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
}

static void
ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct i40e_hw *hw;
	struct virtchnl_rss_key *key;
	struct i40e_aqc_get_set_rss_key_data key_data;
	enum i40e_status_code status;

	hw = &pf->hw;

	key = msg;

	if (key->key_len > 52) {
		device_printf(pf->dev, "VF %d: Key size in msg (%d) is greater than max key size (%d)\n",
		    vf->vf_num, key->key_len, 52);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
		    I40E_ERR_PARAM);
		return;
	}

	if (key->vsi_id != vf->vsi.vsi_num) {
		device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
		    vf->vf_num, key->vsi_id, vf->vsi.vsi_num);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
		    I40E_ERR_PARAM);
		return;
	}

	/* Fill out hash using MAC-dependent method */
	if (hw->mac.type == I40E_MAC_X722) {
		bzero(&key_data, sizeof(key_data));
		if (key->key_len <= 40)
			bcopy(key->key, key_data.standard_rss_key, key->key_len);
		else {
			bcopy(key->key, key_data.standard_rss_key, 40);
			bcopy(&key->key[40], key_data.extended_hash_key, key->key_len - 40);
		}
		status = i40e_aq_set_rss_key(hw, vf->vsi.vsi_num, &key_data);
		if (status) {
			device_printf(pf->dev, "i40e_aq_set_rss_key status %s, error %s\n",
			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
			i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
			    I40E_ERR_ADMIN_QUEUE_ERROR);
			return;
		}
	} else {
		for (int i = 0; i < (key->key_len / 4); i++)
			i40e_write_rx_ctl(hw, I40E_VFQF_HKEY1(i, vf->vf_num), ((u32 *)key->key)[i]);
	}

	DDPRINTF(pf->dev, "VF %d: Programmed key starting with 0x%x ok!",
	    vf->vf_num, key->key[0]);

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY);
}

static void
ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct i40e_hw *hw;
	struct virtchnl_rss_lut *lut;
	enum i40e_status_code status;

	hw = &pf->hw;

	lut = msg;

	if (lut->lut_entries > 64) {
		device_printf(pf->dev, "VF %d: # of LUT entries in msg (%d) is greater than max (%d)\n",
		    vf->vf_num, lut->lut_entries, 64);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
		    I40E_ERR_PARAM);
		return;
	}

	if (lut->vsi_id != vf->vsi.vsi_num) {
		device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
		    vf->vf_num, lut->vsi_id, vf->vsi.vsi_num);
		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
		    I40E_ERR_PARAM);
		return;
	}

	/* Fill out LUT using MAC-dependent method */
	if (hw->mac.type == I40E_MAC_X722) {
		status = i40e_aq_set_rss_lut(hw, vf->vsi.vsi_num, false, lut->lut, lut->lut_entries);
		if (status) {
			device_printf(pf->dev, "i40e_aq_set_rss_lut status %s, error %s\n",
			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
			i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
			    I40E_ERR_ADMIN_QUEUE_ERROR);
			return;
		}
	} else {
		for (int i = 0; i < (lut->lut_entries / 4); i++)
			i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf->vf_num), ((u32 *)lut->lut)[i]);
	}

	DDPRINTF(pf->dev, "VF %d: Programmed LUT starting with 0x%x and length %d ok!",
	    vf->vf_num, lut->lut[0], lut->lut_entries);

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT);
}

static void
ixl_vf_set_rss_hena_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
    uint16_t msg_size)
{
	struct i40e_hw *hw;
	struct virtchnl_rss_hena *hena;

	hw = &pf->hw;
	hena = msg;

	/* Set HENA */
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_num), (u32)hena->hena);
	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_num), (u32)(hena->hena >> 32));

	DDPRINTF(pf->dev, "VF %d: Programmed HENA with 0x%016lx",
	    vf->vf_num, hena->hena);

	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_SET_RSS_HENA);
}

static void
ixl_notify_vf_link_state(struct ixl_pf *pf, struct ixl_vf *vf)
{
	struct virtchnl_pf_event event;
	struct i40e_hw *hw;

	hw = &pf->hw;
	event.event = VIRTCHNL_EVENT_LINK_CHANGE;
	event.severity = PF_EVENT_SEVERITY_INFO;
	event.event_data.link_event.link_status = pf->vsi.link_active;
	event.event_data.link_event.link_speed =
	    i40e_virtchnl_link_speed(hw->phy.link_info.link_speed);

	ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_EVENT, I40E_SUCCESS, &event,
	    sizeof(event));
}

void
ixl_broadcast_link_state(struct ixl_pf *pf)
{
	int i;

	for (i = 0; i < pf->num_vfs; i++)
		ixl_notify_vf_link_state(pf, &pf->vfs[i]);
}

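/*
 * Validate a virtchnl message received from a VF over the admin queue
 * and dispatch it to the matching handler.
 */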
void
ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
{
	device_t dev = pf->dev;
	struct ixl_vf *vf;
	uint16_t vf_num, msg_size;
	uint32_t opcode;
	void *msg;
	int err;

	vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
	opcode = le32toh(event->desc.cookie_high);

	if (vf_num >= pf->num_vfs) {
		device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
		return;
	}

	vf = &pf->vfs[vf_num];
	msg = event->msg_buf;
	msg_size = event->msg_len;

	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
	    "Got msg %s(%d) from%sVF-%d of size %d\n",
	    ixl_vc_opcode_str(opcode), opcode,
	    (vf->vf_flags & VF_FLAG_ENABLED) ? " " : " disabled ",
	    vf_num, msg_size);

	/* Perform basic checks on the msg */
	err = virtchnl_vc_validate_vf_msg(&vf->version, opcode, msg, msg_size);
	if (err) {
		device_printf(dev, "%s: Received invalid msg from VF-%d: opcode %d, len %d, error %d\n",
		    __func__, vf->vf_num, opcode, msg_size, err);
		i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_PARAM);
		return;
	}

	/* This must be a stray msg from a previously destroyed VF. */
	if (!(vf->vf_flags & VF_FLAG_ENABLED))
		return;

	switch (opcode) {
	case VIRTCHNL_OP_VERSION:
		ixl_vf_version_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_RESET_VF:
		ixl_vf_reset_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_GET_VF_RESOURCES:
		ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
		/* Notify VF of link state after it obtains queues, as this is
		 * the last thing it will do as part of initialization
		 */
		ixl_notify_vf_link_state(pf, vf);
		break;
	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
		ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_ENABLE_QUEUES:
		ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
		/* Notify VF of link state after it obtains queues, as this is
		 * the last thing it will do as part of initialization
		 */
		ixl_notify_vf_link_state(pf, vf);
		break;
	case VIRTCHNL_OP_DISABLE_QUEUES:
		ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_ADD_ETH_ADDR:
		ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_DEL_ETH_ADDR:
		ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_ADD_VLAN:
		ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_DEL_VLAN:
		ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
		ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_GET_STATS:
		ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_KEY:
		ixl_vf_config_rss_key_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_CONFIG_RSS_LUT:
		ixl_vf_config_rss_lut_msg(pf, vf, msg, msg_size);
		break;
	case VIRTCHNL_OP_SET_RSS_HENA:
		ixl_vf_set_rss_hena_msg(pf, vf, msg, msg_size);
		break;

	/* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
	case VIRTCHNL_OP_CONFIG_TX_QUEUE:
	case VIRTCHNL_OP_CONFIG_RX_QUEUE:
	default:
		i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
		break;
	}
}

/* Handle any VFs that have reset themselves via a Function Level Reset (FLR). */
void
ixl_handle_vflr(struct ixl_pf *pf)
{
	struct ixl_vf *vf;
	struct i40e_hw *hw;
	uint16_t global_vf_num;
	uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
	int i;

	hw = &pf->hw;

	ixl_dbg_iov(pf, "%s: begin\n", __func__);

	/* Re-enable VFLR interrupt cause so driver doesn't miss a
	 * reset interrupt for another VF */
	icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
	icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
	ixl_flush(hw);

	for (i = 0; i < pf->num_vfs; i++) {
		global_vf_num = hw->func_caps.vf_base_id + i;

		vf = &pf->vfs[i];
		if (!(vf->vf_flags & VF_FLAG_ENABLED))
			continue;

		vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
		vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
		vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
		if (vflrstat & vflrstat_mask) {
			wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
			    vflrstat_mask);

			ixl_dbg_iov(pf, "Reinitializing VF-%d\n", i);
			ixl_reinit_vf(pf, vf);
			ixl_dbg_iov(pf, "Reinitializing VF-%d done\n", i);
		}
	}
}

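/*
 * Translate an admin queue error code into the closest errno(2) value.
 */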
static int
ixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
{

	switch (err) {
	case I40E_AQ_RC_EPERM:
		return (EPERM);
	case I40E_AQ_RC_ENOENT:
		return (ENOENT);
	case I40E_AQ_RC_ESRCH:
		return (ESRCH);
	case I40E_AQ_RC_EINTR:
		return (EINTR);
	case I40E_AQ_RC_EIO:
		return (EIO);
	case I40E_AQ_RC_ENXIO:
		return (ENXIO);
	case I40E_AQ_RC_E2BIG:
		return (E2BIG);
	case I40E_AQ_RC_EAGAIN:
		return (EAGAIN);
	case I40E_AQ_RC_ENOMEM:
		return (ENOMEM);
	case I40E_AQ_RC_EACCES:
		return (EACCES);
	case I40E_AQ_RC_EFAULT:
		return (EFAULT);
	case I40E_AQ_RC_EBUSY:
		return (EBUSY);
	case I40E_AQ_RC_EEXIST:
		return (EEXIST);
	case I40E_AQ_RC_EINVAL:
		return (EINVAL);
	case I40E_AQ_RC_ENOTTY:
		return (ENOTTY);
	case I40E_AQ_RC_ENOSPC:
		return (ENOSPC);
	case I40E_AQ_RC_ENOSYS:
		return (ENOSYS);
	case I40E_AQ_RC_ERANGE:
		return (ERANGE);
	case I40E_AQ_RC_EFLUSHED:
		return (EINVAL);	/* No exact equivalent in errno.h */
	case I40E_AQ_RC_BAD_ADDR:
		return (EFAULT);
	case I40E_AQ_RC_EMODE:
		return (EPERM);
	case I40E_AQ_RC_EFBIG:
		return (EFBIG);
	default:
		return (EINVAL);
	}
}

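/*
 * Enable or disable loopback on the PF's main VSI. This appears to be
 * what allows VF traffic to be switched locally when the VEB is in use.
 */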
static int
ixl_config_pf_vsi_loopback(struct ixl_pf *pf, bool enable)
{
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_vsi_context	ctxt;
	int error;

	memset(&ctxt, 0, sizeof(ctxt));

	ctxt.seid = vsi->seid;
	if (pf->veb_seid != 0)
		ctxt.uplink_seid = pf->veb_seid;
	ctxt.pf_num = hw->pf_id;
	ctxt.connection_type = IXL_VSI_DATA_PORT;

	ctxt.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	ctxt.info.switch_id = (enable) ?
	    htole16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) : 0;

	/* error is set to 0 on success */
	error = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (error) {
		device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
		    " aq_error %d\n", error, hw->aq.asq_last_status);
	}

	return (error);
}

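/*
 * iflib entry point: prepare for num_vfs VFs by allocating per-VF state
 * and creating the VEB that will switch their traffic.
 */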
int
ixl_if_iov_init(if_ctx_t ctx, uint16_t num_vfs, const nvlist_t *params)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	device_t dev = iflib_get_dev(ctx);
	struct i40e_hw *hw;
	struct ixl_vsi *pf_vsi;
	enum i40e_status_code ret;
	int error;

	hw = &pf->hw;
	pf_vsi = &pf->vsi;

	pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
	    M_ZERO);
	if (pf->vfs == NULL) {
		error = ENOMEM;
		goto fail;
	}

	/*
	 * Add the VEB and ...
	 * - do nothing: VEPA mode
	 * - enable loopback mode on connected VSIs: VEB mode
	 */
	ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
	    1, FALSE, &pf->veb_seid, FALSE, NULL);
	if (ret != I40E_SUCCESS) {
		error = hw->aq.asq_last_status;
		device_printf(dev, "i40e_aq_add_veb failed; status %s error %s",
		    i40e_stat_str(hw, ret), i40e_aq_str(hw, error));
		goto fail;
	}
	if (pf->enable_vf_loopback)
		ixl_config_pf_vsi_loopback(pf, true);

	/*
	 * Adding a VEB brings back the default MAC filter(s). Remove them,
	 * and let the driver add the proper filters back.
	 */
	ixl_del_default_hw_filters(pf_vsi);
	ixl_reconfigure_filters(pf_vsi);

	pf->num_vfs = num_vfs;
	return (0);

fail:
	free(pf->vfs, M_IXL);
	pf->vfs = NULL;
	return (error);
}

void
ixl_if_iov_uninit(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct i40e_hw *hw;
	struct ixl_vsi *vsi;
	struct ifnet *ifp;
	struct ixl_vf *vfs;
	int i, num_vfs;

	hw = &pf->hw;
	vsi = &pf->vsi;
	ifp = vsi->ifp;

	for (i = 0; i < pf->num_vfs; i++) {
		if (pf->vfs[i].vsi.seid != 0)
			i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
		ixl_pf_qmgr_release(&pf->qmgr, &pf->vfs[i].qtag);
		ixl_free_filters(&pf->vfs[i].vsi.ftl);
		ixl_dbg_iov(pf, "VF %d: %d released\n",
		    i, pf->vfs[i].qtag.num_allocated);
		ixl_dbg_iov(pf, "Unallocated total: %d\n", ixl_pf_qmgr_get_num_free(&pf->qmgr));
	}

	if (pf->veb_seid != 0) {
		i40e_aq_delete_element(hw, pf->veb_seid, NULL);
		pf->veb_seid = 0;
	}
	/* Reset PF VSI loopback mode */
	if (pf->enable_vf_loopback)
		ixl_config_pf_vsi_loopback(pf, false);

	vfs = pf->vfs;
	num_vfs = pf->num_vfs;

	pf->vfs = NULL;
	pf->num_vfs = 0;

	/* sysctl_ctx_free might sleep, but this func is called w/ an sx lock */
	for (i = 0; i < num_vfs; i++)
		sysctl_ctx_free(&vfs[i].vsi.sysctl_ctx);
	free(vfs, M_IXL);
}

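/*
 * Reserve a validated and clamped number of PF queues for a VF's VSI,
 * using the scattered queue allocator.
 */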
static int
ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues)
{
	device_t dev = pf->dev;
	int error;

	/* Validate, and clamp value if invalid */
	if (num_queues < 1 || num_queues > IAVF_MAX_QUEUES)
		device_printf(dev, "Invalid num-queues (%d) for VF %d\n",
		    num_queues, vf->vf_num);
	if (num_queues < 1) {
		device_printf(dev, "Setting VF %d num-queues to 1\n", vf->vf_num);
		num_queues = 1;
	} else if (num_queues > IAVF_MAX_QUEUES) {
		device_printf(dev, "Setting VF %d num-queues to %d\n", vf->vf_num, IAVF_MAX_QUEUES);
		num_queues = IAVF_MAX_QUEUES;
	}
	error = ixl_pf_qmgr_alloc_scattered(&pf->qmgr, num_queues, &vf->qtag);
	if (error) {
		device_printf(dev, "Error allocating %d queues for VF %d's VSI\n",
		    num_queues, vf->vf_num);
		return (ENOSPC);
	}

	ixl_dbg_iov(pf, "VF %d: %d allocated, %d active\n",
	    vf->vf_num, vf->qtag.num_allocated, vf->qtag.num_active);
	ixl_dbg_iov(pf, "Unallocated total: %d\n", ixl_pf_qmgr_get_num_free(&pf->qmgr));

	return (0);
}

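/*
 * iflib entry point: create one VF from the pci_iov(4) parameters and
 * reset it so it is ready for its own driver to attach.
 */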
int
ixl_if_iov_vf_add(if_ctx_t ctx, uint16_t vfnum, const nvlist_t *params)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	char sysctl_name[IXL_QUEUE_NAME_LEN];
	struct ixl_vf *vf;
	const void *mac;
	size_t size;
	int error;
	int vf_num_queues;

	vf = &pf->vfs[vfnum];
	vf->vf_num = vfnum;
	vf->vsi.back = pf;
	vf->vf_flags = VF_FLAG_ENABLED;

	/* Reserve queue allocation from PF */
	vf_num_queues = nvlist_get_number(params, "num-queues");
	error = ixl_vf_reserve_queues(pf, vf, vf_num_queues);
	if (error != 0)
		goto out;

	error = ixl_vf_setup_vsi(pf, vf);
	if (error != 0)
		goto out;

	if (nvlist_exists_binary(params, "mac-addr")) {
		mac = nvlist_get_binary(params, "mac-addr", &size);
		bcopy(mac, vf->mac, ETHER_ADDR_LEN);

		if (nvlist_get_bool(params, "allow-set-mac"))
			vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
	} else
		/*
		 * If the administrator has not specified a MAC address then
		 * we must allow the VF to choose one.
		 */
		vf->vf_flags |= VF_FLAG_SET_MAC_CAP;

	if (nvlist_get_bool(params, "mac-anti-spoof"))
		vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;

	if (nvlist_get_bool(params, "allow-promisc"))
		vf->vf_flags |= VF_FLAG_PROMISC_CAP;

	vf->vf_flags |= VF_FLAG_VLAN_CAP;

	/* VF needs to be reset before it can be used */
	ixl_reset_vf(pf, vf);
out:
	if (error == 0) {
		snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
		ixl_vsi_add_sysctls(&vf->vsi, sysctl_name, false);
	}

	return (error);
}