xref: /freebsd/sys/dev/ixl/ixl_pf_iov.c (revision 148a8da8)
1 /******************************************************************************
2 
3   Copyright (c) 2013-2018, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #include "ixl_pf_iov.h"
36 
37 /* Private functions */
38 static void	ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum, uint32_t val);
39 static void	ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg);
40 static void	ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg);
41 
42 static bool	ixl_zero_mac(const uint8_t *addr);
43 static bool	ixl_bcast_mac(const uint8_t *addr);
44 
45 static int	ixl_vc_opcode_level(uint16_t opcode);
46 
47 static int	ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr);
48 
49 static int	ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
50 static int	ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
51 static void	ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf);
52 static void	ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi);
53 static void	ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf);
54 static int	ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf);
55 static void	ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
56 static void	ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
57 static void	ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, void *msg, uint16_t len);
58 static void	ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op);
59 static void	ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, const char *file, int line);
60 static void	ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
61 static void	ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
62 static void	ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
63 static int	ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_txq_info *info);
64 static int	ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_rxq_info *info);
65 static void	ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
66 static void	ixl_vf_set_qctl(struct ixl_pf *pf, const struct virtchnl_vector_map *vector, enum i40e_queue_type cur_type, uint16_t cur_queue,
67     enum i40e_queue_type *last_type, uint16_t *last_queue);
68 static void	ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, const struct virtchnl_vector_map *vector);
69 static void	ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
70 static void	ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
71 static void	ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
72 static void	ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
73 static void	ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
74 static enum i40e_status_code	ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf);
75 static void	ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
76 static void	ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
77 static void	ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
78 static void	ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
79 static int	ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues);
80 static int	ixl_config_pf_vsi_loopback(struct ixl_pf *pf, bool enable);
81 
82 static int	ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
83 
84 /*
85  * TODO: Move pieces of this into iflib and call the rest in a handler?
86  *
87  * e.g. ixl_if_iov_set_schema
88  *
89  * It's odd to do pci_iov_detach() there while doing pci_iov_attach()
90  * in the driver.
91  */
92 void
93 ixl_initialize_sriov(struct ixl_pf *pf)
94 {
95 	device_t dev = pf->dev;
96 	struct i40e_hw *hw = &pf->hw;
97 	nvlist_t	*pf_schema, *vf_schema;
98 	int		iov_error;
99 
100 	pf_schema = pci_iov_schema_alloc_node();
101 	vf_schema = pci_iov_schema_alloc_node();
102 	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
103 	pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
104 	    IOV_SCHEMA_HASDEFAULT, TRUE);
105 	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
106 	    IOV_SCHEMA_HASDEFAULT, FALSE);
107 	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
108 	    IOV_SCHEMA_HASDEFAULT, FALSE);
109 	pci_iov_schema_add_uint16(vf_schema, "num-queues",
110 	    IOV_SCHEMA_HASDEFAULT,
111 	    max(1, min(hw->func_caps.num_msix_vectors_vf - 1, IAVF_MAX_QUEUES)));
112 
113 	iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
114 	if (iov_error != 0) {
115 		device_printf(dev,
116 		    "Failed to initialize SR-IOV (error=%d)\n",
117 		    iov_error);
118 	} else
119 		device_printf(dev, "SR-IOV ready\n");
120 }
121 
122 
123 /*
124  * Allocate the VSI for a VF.
125  */
126 static int
127 ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
128 {
129 	device_t dev;
130 	struct i40e_hw *hw;
131 	struct ixl_vsi *vsi;
132 	struct i40e_vsi_context vsi_ctx;
133 	int i;
134 	enum i40e_status_code code;
135 
136 	hw = &pf->hw;
137 	vsi = &pf->vsi;
138 	dev = pf->dev;
139 
140 	vsi_ctx.pf_num = hw->pf_id;
141 	vsi_ctx.uplink_seid = pf->veb_seid;
142 	vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
143 	vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
144 	vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;
145 
146 	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
147 
148 	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
149 	if (pf->enable_vf_loopback)
150 		vsi_ctx.info.switch_id =
151 		   htole16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
152 
153 	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
154 	vsi_ctx.info.sec_flags = 0;
155 	if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
156 		vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
157 
158 	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
159 	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
160 	    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
161 
162 	vsi_ctx.info.valid_sections |=
163 	    htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
164 	vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
165 
166 	/* XXX: Only scattered allocation is supported for VFs right now */
167 	for (i = 0; i < vf->qtag.num_active; i++)
168 		vsi_ctx.info.queue_mapping[i] = vf->qtag.qidx[i];
169 	for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
170 		vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);
171 
172 	vsi_ctx.info.tc_mapping[0] = htole16(
173 	    (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
174 	    ((fls(vf->qtag.num_allocated) - 1) << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
175 
176 	code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
177 	if (code != I40E_SUCCESS)
178 		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
179 	vf->vsi.seid = vsi_ctx.seid;
180 	vf->vsi.vsi_num = vsi_ctx.vsi_number;
181 	vf->vsi.num_rx_queues = vf->qtag.num_active;
182 	vf->vsi.num_tx_queues = vf->qtag.num_active;
183 
184 	code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
185 	if (code != I40E_SUCCESS)
186 		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
187 
188 	code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
189 	if (code != I40E_SUCCESS) {
190 		device_printf(dev, "Failed to disable BW limit: %d\n",
191 		    ixl_adminq_err_to_errno(hw->aq.asq_last_status));
192 		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
193 	}
194 
195 	memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
196 	return (0);
197 }
198 
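/*
 * Create the VF's VSI, allow it to receive broadcast frames, and restore
 * any MAC/VLAN filters the VF had previously configured.
 */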
199 static int
200 ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
201 {
202 	struct i40e_hw *hw;
203 	int error;
204 
205 	hw = &pf->hw;
206 
207 	error = ixl_vf_alloc_vsi(pf, vf);
208 	if (error != 0)
209 		return (error);
210 
211 	/* Let VF receive broadcast Ethernet frames */
212 	error = i40e_aq_set_vsi_broadcast(hw, vf->vsi.seid, TRUE, NULL);
213 	if (error)
214 		device_printf(pf->dev, "Error configuring VF VSI for broadcast promiscuous\n");
215 	/* Re-add VF's MAC/VLAN filters to its VSI */
216 	ixl_reconfigure_filters(&vf->vsi);
217 	/* Reset the VSI's hardware filter counters */
218 	vf->vsi.hw_filters_add = 0;
219 	vf->vsi.hw_filters_del = 0;
220 
221 	return (0);
222 }
223 
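/*
 * Write one entry of the VSI queue-mapping table (VSILAN_QTABLE) for the
 * VF's VSI.
 */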
224 static void
225 ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
226     uint32_t val)
227 {
228 	uint32_t qtable;
229 	int index, shift;
230 
231 	/*
232 	 * Two queues are mapped in a single register, so we have to do some
233 	 * gymnastics to convert the queue number into a register index and
234 	 * shift.
235 	 */
236 	index = qnum / 2;
237 	shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;
238 
239 	qtable = i40e_read_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
240 	qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
241 	qtable |= val << shift;
242 	i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
243 }
244 
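/*
 * Program the VF's queue mappings: enable the VSI queue table, map each VF
 * queue to its PF queue, and mark the remaining entries as unused.
 */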
245 static void
246 ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
247 {
248 	struct i40e_hw *hw;
249 	uint32_t qtable;
250 	int i;
251 
252 	hw = &pf->hw;
253 
254 	/*
255 	 * Contiguous mappings aren't actually supported by the hardware,
256 	 * so we have to use non-contiguous mappings.
257 	 */
258 	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
259 	     I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
260 
261 	/* Enable LAN traffic on this VF */
262 	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
263 	    I40E_VPLAN_MAPENA_TXRX_ENA_MASK);
264 
265 	/* Program index of each VF queue into PF queue space
266 	 * (This is only needed if QTABLE is enabled) */
267 	for (i = 0; i < vf->vsi.num_tx_queues; i++) {
268 		qtable = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i) <<
269 		    I40E_VPLAN_QTABLE_QINDEX_SHIFT;
270 
271 		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
272 	}
273 	for (; i < IXL_MAX_VSI_QUEUES; i++)
274 		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num),
275 		    I40E_VPLAN_QTABLE_QINDEX_MASK);
276 
277 	/* Map queues allocated to VF to its VSI;
278 	 * This mapping matches the VF-wide mapping since the VF
279 	 * is only given a single VSI */
280 	for (i = 0; i < vf->vsi.num_tx_queues; i++)
281 		ixl_vf_map_vsi_queue(hw, vf, i,
282 		    ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i));
283 
284 	/* Set rest of VSI queues as unused. */
285 	for (; i < IXL_MAX_VSI_QUEUES; i++)
286 		ixl_vf_map_vsi_queue(hw, vf, i,
287 		    I40E_VSILAN_QTABLE_QINDEX_0_MASK);
288 
289 	ixl_flush(hw);
290 }
291 
292 static void
293 ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
294 {
295 	struct i40e_hw *hw;
296 
297 	hw = &pf->hw;
298 
299 	if (vsi->seid == 0)
300 		return;
301 
302 	i40e_aq_delete_element(hw, vsi->seid, NULL);
303 }
304 
305 static void
306 ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
307 {
308 
309 	wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
310 	ixl_flush(hw);
311 }
312 
313 static void
314 ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
315 {
316 
317 	wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
318 	    I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
319 	ixl_flush(hw);
320 }
321 
322 static void
323 ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
324 {
325 	struct i40e_hw *hw;
326 	uint32_t vfint_reg, vpint_reg;
327 	int i;
328 
329 	hw = &pf->hw;
330 
331 	ixl_vf_vsi_release(pf, &vf->vsi);
332 
333 	/* Index 0 has a special register. */
334 	ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));
335 
336 	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
337 		vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i, vf->vf_num);
338 		ixl_vf_disable_queue_intr(hw, vfint_reg);
339 	}
340 
341 	/* Index 0 has a special register. */
342 	ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));
343 
344 	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
345 		vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
346 		ixl_vf_unregister_intr(hw, vpint_reg);
347 	}
348 
349 	vf->vsi.num_tx_queues = 0;
350 	vf->vsi.num_rx_queues = 0;
351 }
352 
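/*
 * Wait for pending PCIe transactions from the VF to complete.
 * Returns 0 on success or ETIMEDOUT if they do not finish in time.
 */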
353 static int
354 ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
355 {
356 	struct i40e_hw *hw;
357 	int i;
358 	uint16_t global_vf_num;
359 	uint32_t ciad;
360 
361 	hw = &pf->hw;
362 	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
363 
364 	wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
365 	     (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
366 	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
367 		ciad = rd32(hw, I40E_PF_PCI_CIAD);
368 		if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
369 			return (0);
370 		DELAY(1);
371 	}
372 
373 	return (ETIMEDOUT);
374 }
375 
376 static void
377 ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
378 {
379 	struct i40e_hw *hw;
380 	uint32_t vfrtrig;
381 
382 	hw = &pf->hw;
383 
384 	ixl_dbg_iov(pf, "Resetting VF-%d\n", vf->vf_num);
385 
386 	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
387 	vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
388 	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
389 	ixl_flush(hw);
390 
391 	ixl_reinit_vf(pf, vf);
392 
393 	ixl_dbg_iov(pf, "Resetting VF-%d done.\n", vf->vf_num);
394 }
395 
396 static void
397 ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
398 {
399 	struct i40e_hw *hw;
400 	uint32_t vfrstat, vfrtrig;
401 	int i, error;
402 
403 	hw = &pf->hw;
404 
405 	error = ixl_flush_pcie(pf, vf);
406 	if (error != 0)
407 		device_printf(pf->dev,
408 		    "Timed out waiting for PCIe activity to stop on VF-%d\n",
409 		    vf->vf_num);
410 
411 	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
412 		DELAY(10);
413 
414 		vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
415 		if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
416 			break;
417 	}
418 
419 	if (i == IXL_VF_RESET_TIMEOUT)
420 		device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);
421 
422 	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_COMPLETED);
423 
424 	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
425 	vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
426 	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
427 
428 	if (vf->vsi.seid != 0)
429 		ixl_disable_rings(pf, &vf->vsi, &vf->qtag);
430 	ixl_pf_qmgr_clear_queue_flags(&vf->qtag);
431 
432 	ixl_vf_release_resources(pf, vf);
433 	ixl_vf_setup_vsi(pf, vf);
434 	ixl_vf_map_queues(pf, vf);
435 
436 	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_VFACTIVE);
437 	ixl_flush(hw);
438 }
439 
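/*
 * Return the debug verbosity level used when logging a given virtchnl
 * opcode; GET_STATS is frequent, so it is logged at a higher level.
 */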
440 static int
441 ixl_vc_opcode_level(uint16_t opcode)
442 {
443 	switch (opcode) {
444 	case VIRTCHNL_OP_GET_STATS:
445 		return (10);
446 	default:
447 		return (5);
448 	}
449 }
450 
451 static void
452 ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
453     enum i40e_status_code status, void *msg, uint16_t len)
454 {
455 	struct i40e_hw *hw;
456 	int global_vf_id;
457 
458 	hw = &pf->hw;
459 	global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;
460 
461 	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
462 	    "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
463 	    ixl_vc_opcode_str(op), op, status, vf->vf_num);
464 
465 	i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
466 }
467 
468 static void
469 ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
470 {
471 
472 	ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
473 }
474 
475 static void
476 ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
477     enum i40e_status_code status, const char *file, int line)
478 {
479 
480 	I40E_VC_DEBUG(pf, 1,
481 	    "Sending NACK (op=%s[%d], err=%s[%d]) to VF-%d from %s:%d\n",
482 	    ixl_vc_opcode_str(op), op, i40e_stat_str(&pf->hw, status),
483 	    status, vf->vf_num, file, line);
484 	ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
485 }
486 
487 static void
488 ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
489     uint16_t msg_size)
490 {
491 	struct virtchnl_version_info reply;
492 
493 	if (msg_size != sizeof(struct virtchnl_version_info)) {
494 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_VERSION,
495 		    I40E_ERR_PARAM);
496 		return;
497 	}
498 
499 	vf->version = ((struct virtchnl_version_info *)msg)->minor;
500 
501 	reply.major = VIRTCHNL_VERSION_MAJOR;
502 	reply.minor = VIRTCHNL_VERSION_MINOR;
503 	ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
504 	    sizeof(reply));
505 }
506 
507 static void
508 ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
509     uint16_t msg_size)
510 {
511 
512 	if (msg_size != 0) {
513 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_RESET_VF,
514 		    I40E_ERR_PARAM);
515 		return;
516 	}
517 
518 	ixl_reset_vf(pf, vf);
519 
520 	/* No response to a reset message. */
521 }
522 
523 static void
524 ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
525     uint16_t msg_size)
526 {
527 	struct virtchnl_vf_resource reply;
528 
529 	if ((vf->version == 0 && msg_size != 0) ||
530 	    (vf->version == 1 && msg_size != 4)) {
531 		device_printf(pf->dev, "Invalid GET_VF_RESOURCES message size"
532 		    " for VF version %d.%d\n", VIRTCHNL_VERSION_MAJOR,
533 		    vf->version);
534 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_VF_RESOURCES,
535 		    I40E_ERR_PARAM);
536 		return;
537 	}
538 
539 	bzero(&reply, sizeof(reply));
540 
541 	if (vf->version == VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
542 		reply.vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2 |
543 					 VIRTCHNL_VF_OFFLOAD_RSS_REG |
544 					 VIRTCHNL_VF_OFFLOAD_VLAN;
545 	else
546 		/* Force VF RSS setup by PF in 1.1+ VFs */
547 		reply.vf_cap_flags = *(u32 *)msg & (
548 					 VIRTCHNL_VF_OFFLOAD_L2 |
549 					 VIRTCHNL_VF_OFFLOAD_RSS_PF |
550 					 VIRTCHNL_VF_OFFLOAD_VLAN);
551 
552 	reply.num_vsis = 1;
553 	reply.num_queue_pairs = vf->vsi.num_tx_queues;
554 	reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
555 	reply.rss_key_size = 52;
556 	reply.rss_lut_size = 64;
557 	reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
558 	reply.vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
559 	reply.vsi_res[0].num_queue_pairs = vf->vsi.num_tx_queues;
560 	memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
561 
562 	ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_VF_RESOURCES,
563 	    I40E_SUCCESS, &reply, sizeof(reply));
564 }
565 
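/*
 * Program the HMC TX queue context for one VF queue and associate the
 * queue with the VF via the QTX_CTL register.
 */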
566 static int
567 ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
568     struct virtchnl_txq_info *info)
569 {
570 	struct i40e_hw *hw;
571 	struct i40e_hmc_obj_txq txq;
572 	uint16_t global_queue_num, global_vf_num;
573 	enum i40e_status_code status;
574 	uint32_t qtx_ctl;
575 
576 	hw = &pf->hw;
577 	global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
578 	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
579 	bzero(&txq, sizeof(txq));
580 
581 	DDPRINTF(pf->dev, "VF %d: PF TX queue %d / VF TX queue %d (Global VF %d)\n",
582 	    vf->vf_num, global_queue_num, info->queue_id, global_vf_num);
583 
584 	status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
585 	if (status != I40E_SUCCESS)
586 		return (EINVAL);
587 
588 	txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;
589 
590 	txq.head_wb_ena = info->headwb_enabled;
591 	txq.head_wb_addr = info->dma_headwb_addr;
592 	txq.qlen = info->ring_len;
593 	txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
594 	txq.rdylist_act = 0;
595 
596 	status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
597 	if (status != I40E_SUCCESS)
598 		return (EINVAL);
599 
600 	qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
601 	    (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
602 	    (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
603 	wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
604 	ixl_flush(hw);
605 
606 	ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, true);
607 
608 	return (0);
609 }
610 
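/*
 * Validate the VF's RX queue parameters and program the HMC RX queue
 * context for one VF queue.
 */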
611 static int
612 ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
613     struct virtchnl_rxq_info *info)
614 {
615 	struct i40e_hw *hw;
616 	struct i40e_hmc_obj_rxq rxq;
617 	uint16_t global_queue_num;
618 	enum i40e_status_code status;
619 
620 	hw = &pf->hw;
621 	global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
622 	bzero(&rxq, sizeof(rxq));
623 
624 	DDPRINTF(pf->dev, "VF %d: PF RX queue %d / VF RX queue %d\n",
625 	    vf->vf_num, global_queue_num, info->queue_id);
626 
627 	if (info->databuffer_size > IXL_VF_MAX_BUFFER)
628 		return (EINVAL);
629 
630 	if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
631 	    info->max_pkt_size < ETHER_MIN_LEN)
632 		return (EINVAL);
633 
634 	if (info->splithdr_enabled) {
635 		if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
636 			return (EINVAL);
637 
638 		rxq.hsplit_0 = info->rx_split_pos &
639 		    (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
640 		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
641 		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
642 		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
643 		rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
644 
645 		rxq.dtype = 2;
646 	}
647 
648 	status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
649 	if (status != I40E_SUCCESS)
650 		return (EINVAL);
651 
652 	rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
653 	rxq.qlen = info->ring_len;
654 
655 	rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
656 
657 	rxq.dsize = 1;
658 	rxq.crcstrip = 1;
659 	rxq.l2tsel = 1;
660 
661 	rxq.rxmax = info->max_pkt_size;
662 	rxq.tphrdesc_ena = 1;
663 	rxq.tphwdesc_ena = 1;
664 	rxq.tphdata_ena = 1;
665 	rxq.tphhead_ena = 1;
666 	rxq.lrxqthresh = 1;
667 	rxq.prefena = 1;
668 
669 	status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
670 	if (status != I40E_SUCCESS)
671 		return (EINVAL);
672 
673 	ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, false);
674 
675 	return (0);
676 }
677 
678 static void
679 ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
680     uint16_t msg_size)
681 {
682 	struct virtchnl_vsi_queue_config_info *info;
683 	struct virtchnl_queue_pair_info *pair;
684 	uint16_t expected_msg_size;
685 	int i;
686 
687 	if (msg_size < sizeof(*info)) {
688 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
689 		    I40E_ERR_PARAM);
690 		return;
691 	}
692 
693 	info = msg;
694 	if (info->num_queue_pairs == 0 || info->num_queue_pairs > vf->vsi.num_tx_queues) {
695 		device_printf(pf->dev, "VF %d: invalid # of qpairs (msg has %d, VSI has %d)\n",
696 		    vf->vf_num, info->num_queue_pairs, vf->vsi.num_tx_queues);
697 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
698 		    I40E_ERR_PARAM);
699 		return;
700 	}
701 
702 	expected_msg_size = sizeof(*info) + info->num_queue_pairs * sizeof(*pair);
703 	if (msg_size != expected_msg_size) {
704 		device_printf(pf->dev, "VF %d: size of recvd message (%d) does not match expected size (%d)\n",
705 		    vf->vf_num, msg_size, expected_msg_size);
706 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
707 		    I40E_ERR_PARAM);
708 		return;
709 	}
710 
711 	if (info->vsi_id != vf->vsi.vsi_num) {
712 		device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
713 		    vf->vf_num, info->vsi_id, vf->vsi.vsi_num);
714 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
715 		    I40E_ERR_PARAM);
716 		return;
717 	}
718 
719 	for (i = 0; i < info->num_queue_pairs; i++) {
720 		pair = &info->qpair[i];
721 
722 		if (pair->txq.vsi_id != vf->vsi.vsi_num ||
723 		    pair->rxq.vsi_id != vf->vsi.vsi_num ||
724 		    pair->txq.queue_id != pair->rxq.queue_id ||
725 		    pair->txq.queue_id >= vf->vsi.num_tx_queues) {
726 
727 			i40e_send_vf_nack(pf, vf,
728 			    VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
729 			return;
730 		}
731 
732 		if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
733 			i40e_send_vf_nack(pf, vf,
734 			    VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
735 			return;
736 		}
737 
738 		if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
739 			i40e_send_vf_nack(pf, vf,
740 			    VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
741 			return;
742 		}
743 	}
744 
745 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES);
746 }
747 
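/*
 * Write the interrupt cause control register for one queue, linking it into
 * the interrupt linked list after the previously programmed queue.
 */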
748 static void
749 ixl_vf_set_qctl(struct ixl_pf *pf,
750     const struct virtchnl_vector_map *vector,
751     enum i40e_queue_type cur_type, uint16_t cur_queue,
752     enum i40e_queue_type *last_type, uint16_t *last_queue)
753 {
754 	uint32_t offset, qctl;
755 	uint16_t itr_indx;
756 
757 	if (cur_type == I40E_QUEUE_TYPE_RX) {
758 		offset = I40E_QINT_RQCTL(cur_queue);
759 		itr_indx = vector->rxitr_idx;
760 	} else {
761 		offset = I40E_QINT_TQCTL(cur_queue);
762 		itr_indx = vector->txitr_idx;
763 	}
764 
765 	qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
766 	    (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
767 	    (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
768 	    I40E_QINT_RQCTL_CAUSE_ENA_MASK |
769 	    (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));
770 
771 	wr32(&pf->hw, offset, qctl);
772 
773 	*last_type = cur_type;
774 	*last_queue = cur_queue;
775 }
776 
777 static void
778 ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
779     const struct virtchnl_vector_map *vector)
780 {
781 	struct i40e_hw *hw;
782 	u_int qindex;
783 	enum i40e_queue_type type, last_type;
784 	uint32_t lnklst_reg;
785 	uint16_t rxq_map, txq_map, cur_queue, last_queue;
786 
787 	hw = &pf->hw;
788 
789 	rxq_map = vector->rxq_map;
790 	txq_map = vector->txq_map;
791 
792 	last_queue = IXL_END_OF_INTR_LNKLST;
793 	last_type = I40E_QUEUE_TYPE_RX;
794 
795 	/*
796 	 * The datasheet says to optimize performance, RX queues and TX queues
797 	 * should be interleaved in the interrupt linked list, so we process
798 	 * both at once here.
799 	 */
800 	while ((rxq_map != 0) || (txq_map != 0)) {
801 		if (txq_map != 0) {
802 			qindex = ffs(txq_map) - 1;
803 			type = I40E_QUEUE_TYPE_TX;
804 			cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
805 			ixl_vf_set_qctl(pf, vector, type, cur_queue,
806 			    &last_type, &last_queue);
807 			txq_map &= ~(1 << qindex);
808 		}
809 
810 		if (rxq_map != 0) {
811 			qindex = ffs(rxq_map) - 1;
812 			type = I40E_QUEUE_TYPE_RX;
813 			cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
814 			ixl_vf_set_qctl(pf, vector, type, cur_queue,
815 			    &last_type, &last_queue);
816 			rxq_map &= ~(1 << qindex);
817 		}
818 	}
819 
820 	if (vector->vector_id == 0)
821 		lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
822 	else
823 		lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
824 		    vf->vf_num);
825 	wr32(hw, lnklst_reg,
826 	    (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
827 	    (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
828 
829 	ixl_flush(hw);
830 }
831 
832 static void
833 ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
834     uint16_t msg_size)
835 {
836 	struct virtchnl_irq_map_info *map;
837 	struct virtchnl_vector_map *vector;
838 	struct i40e_hw *hw;
839 	int i, largest_txq, largest_rxq;
840 
841 	hw = &pf->hw;
842 
843 	if (msg_size < sizeof(*map)) {
844 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
845 		    I40E_ERR_PARAM);
846 		return;
847 	}
848 
849 	map = msg;
850 	if (map->num_vectors == 0) {
851 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
852 		    I40E_ERR_PARAM);
853 		return;
854 	}
855 
856 	if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
857 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
858 		    I40E_ERR_PARAM);
859 		return;
860 	}
861 
862 	for (i = 0; i < map->num_vectors; i++) {
863 		vector = &map->vecmap[i];
864 
865 		if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
866 		    vector->vsi_id != vf->vsi.vsi_num) {
867 			i40e_send_vf_nack(pf, vf,
868 			    VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
869 			return;
870 		}
871 
872 		if (vector->rxq_map != 0) {
873 			largest_rxq = fls(vector->rxq_map) - 1;
874 			if (largest_rxq >= vf->vsi.num_rx_queues) {
875 				i40e_send_vf_nack(pf, vf,
876 				    VIRTCHNL_OP_CONFIG_IRQ_MAP,
877 				    I40E_ERR_PARAM);
878 				return;
879 			}
880 		}
881 
882 		if (vector->txq_map != 0) {
883 			largest_txq = fls(vector->txq_map) - 1;
884 			if (largest_txq >= vf->vsi.num_tx_queues) {
885 				i40e_send_vf_nack(pf, vf,
886 				    VIRTCHNL_OP_CONFIG_IRQ_MAP,
887 				    I40E_ERR_PARAM);
888 				return;
889 			}
890 		}
891 
892 		if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
893 		    vector->txitr_idx > IXL_MAX_ITR_IDX) {
894 			i40e_send_vf_nack(pf, vf,
895 			    VIRTCHNL_OP_CONFIG_IRQ_MAP,
896 			    I40E_ERR_PARAM);
897 			return;
898 		}
899 
900 		ixl_vf_config_vector(pf, vf, vector);
901 	}
902 
903 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP);
904 }
905 
906 static void
907 ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
908     uint16_t msg_size)
909 {
910 	struct virtchnl_queue_select *select;
911 	int error = 0;
912 
913 	if (msg_size != sizeof(*select)) {
914 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
915 		    I40E_ERR_PARAM);
916 		return;
917 	}
918 
919 	select = msg;
920 	if (select->vsi_id != vf->vsi.vsi_num ||
921 	    select->rx_queues == 0 || select->tx_queues == 0) {
922 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
923 		    I40E_ERR_PARAM);
924 		return;
925 	}
926 
927 	/* Enable TX rings selected by the VF */
928 	for (int i = 0; i < 32; i++) {
929 		if ((1 << i) & select->tx_queues) {
930 			/* Warn if queue is out of VF allocation range */
931 			if (i >= vf->vsi.num_tx_queues) {
932 				device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
933 				    vf->vf_num, i);
934 				break;
935 			}
936 			/* Skip this queue if it hasn't been configured */
937 			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
938 				continue;
939 			/* Warn if this queue is already marked as enabled */
940 			if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true))
941 				ixl_dbg_iov(pf, "VF %d: TX ring %d is already enabled!\n",
942 				    vf->vf_num, i);
943 
944 			error = ixl_enable_tx_ring(pf, &vf->qtag, i);
945 			if (error)
946 				break;
947 			else
948 				ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, true);
949 		}
950 	}
951 
952 	/* Enable RX rings selected by the VF */
953 	for (int i = 0; i < 32; i++) {
954 		if ((1 << i) & select->rx_queues) {
955 			/* Warn if queue is out of VF allocation range */
956 			if (i >= vf->vsi.num_rx_queues) {
957 				device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
958 				    vf->vf_num, i);
959 				break;
960 			}
961 			/* Skip this queue if it hasn't been configured */
962 			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
963 				continue;
964 			/* Warn if this queue is already marked as enabled */
965 			if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false))
966 				ixl_dbg_iov(pf, "VF %d: RX ring %d is already enabled!\n",
967 				    vf->vf_num, i);
968 			error = ixl_enable_rx_ring(pf, &vf->qtag, i);
969 			if (error)
970 				break;
971 			else
972 				ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, false);
973 		}
974 	}
975 
976 	if (error) {
977 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
978 		    I40E_ERR_TIMEOUT);
979 		return;
980 	}
981 
982 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES);
983 }
984 
985 static void
986 ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
987     void *msg, uint16_t msg_size)
988 {
989 	struct virtchnl_queue_select *select;
990 	int error = 0;
991 
992 	if (msg_size != sizeof(*select)) {
993 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
994 		    I40E_ERR_PARAM);
995 		return;
996 	}
997 
998 	select = msg;
999 	if (select->vsi_id != vf->vsi.vsi_num ||
1000 	    select->rx_queues == 0 || select->tx_queues == 0) {
1001 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
1002 		    I40E_ERR_PARAM);
1003 		return;
1004 	}
1005 
1006 	/* Disable TX rings selected by the VF */
1007 	for (int i = 0; i < 32; i++) {
1008 		if ((1 << i) & select->tx_queues) {
1009 			/* Warn if queue is out of VF allocation range */
1010 			if (i >= vf->vsi.num_tx_queues) {
1011 				device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
1012 				    vf->vf_num, i);
1013 				break;
1014 			}
1015 			/* Skip this queue if it hasn't been configured */
1016 			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
1017 				continue;
1018 			/* Warn if this queue is already marked as disabled */
1019 			if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true)) {
1020 				ixl_dbg_iov(pf, "VF %d: TX ring %d is already disabled!\n",
1021 				    vf->vf_num, i);
1022 				continue;
1023 			}
1024 			error = ixl_disable_tx_ring(pf, &vf->qtag, i);
1025 			if (error)
1026 				break;
1027 			else
1028 				ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, true);
1029 		}
1030 	}
1031 
1032 	/* Disable RX rings selected by the VF */
1033 	for (int i = 0; i < 32; i++) {
1034 		if ((1 << i) & select->rx_queues) {
1035 			/* Warn if queue is out of VF allocation range */
1036 			if (i >= vf->vsi.num_rx_queues) {
1037 				device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
1038 				    vf->vf_num, i);
1039 				break;
1040 			}
1041 			/* Skip this queue if it hasn't been configured */
1042 			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
1043 				continue;
1044 			/* Warn if this queue is already marked as disabled */
1045 			if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false)) {
1046 				ixl_dbg_iov(pf, "VF %d: RX ring %d is already disabled!\n",
1047 				    vf->vf_num, i);
1048 				continue;
1049 			}
1050 			error = ixl_disable_rx_ring(pf, &vf->qtag, i);
1051 			if (error)
1052 				break;
1053 			else
1054 				ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, false);
1055 		}
1056 	}
1057 
1058 	if (error) {
1059 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
1060 		    I40E_ERR_TIMEOUT);
1061 		return;
1062 	}
1063 
1064 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES);
1065 }
1066 
1067 static bool
1068 ixl_zero_mac(const uint8_t *addr)
1069 {
1070 	uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
1071 
1072 	return (cmp_etheraddr(addr, zero));
1073 }
1074 
1075 static bool
1076 ixl_bcast_mac(const uint8_t *addr)
1077 {
1078 	static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
1079 	    {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
1080 
1081 	return (cmp_etheraddr(addr, ixl_bcast_addr));
1082 }
1083 
1084 static int
1085 ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
1086 {
1087 
1088 	if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
1089 		return (EINVAL);
1090 
1091 	/*
1092 	 * If the VF is not allowed to change its MAC address, don't let it
1093 	 * set a MAC filter for an address that is not a multicast address and
1094 	 * is not its assigned MAC.
1095 	 */
1096 	if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
1097 	    !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac)))
1098 		return (EPERM);
1099 
1100 	return (0);
1101 }
1102 
1103 static void
1104 ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1105     uint16_t msg_size)
1106 {
1107 	struct virtchnl_ether_addr_list *addr_list;
1108 	struct virtchnl_ether_addr *addr;
1109 	struct ixl_vsi *vsi;
1110 	int i;
1111 	size_t expected_size;
1112 
1113 	vsi = &vf->vsi;
1114 
1115 	if (msg_size < sizeof(*addr_list)) {
1116 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR,
1117 		    I40E_ERR_PARAM);
1118 		return;
1119 	}
1120 
1121 	addr_list = msg;
1122 	expected_size = sizeof(*addr_list) +
1123 	    addr_list->num_elements * sizeof(*addr);
1124 
1125 	if (addr_list->num_elements == 0 ||
1126 	    addr_list->vsi_id != vsi->vsi_num ||
1127 	    msg_size != expected_size) {
1128 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR,
1129 		    I40E_ERR_PARAM);
1130 		return;
1131 	}
1132 
1133 	for (i = 0; i < addr_list->num_elements; i++) {
1134 		if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
1135 			i40e_send_vf_nack(pf, vf,
1136 			    VIRTCHNL_OP_ADD_ETH_ADDR, I40E_ERR_PARAM);
1137 			return;
1138 		}
1139 	}
1140 
1141 	for (i = 0; i < addr_list->num_elements; i++) {
1142 		addr = &addr_list->list[i];
1143 		ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
1144 	}
1145 
1146 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR);
1147 }
1148 
1149 static void
1150 ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1151     uint16_t msg_size)
1152 {
1153 	struct virtchnl_ether_addr_list *addr_list;
1154 	struct virtchnl_ether_addr *addr;
1155 	size_t expected_size;
1156 	int i;
1157 
1158 	if (msg_size < sizeof(*addr_list)) {
1159 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR,
1160 		    I40E_ERR_PARAM);
1161 		return;
1162 	}
1163 
1164 	addr_list = msg;
1165 	expected_size = sizeof(*addr_list) +
1166 	    addr_list->num_elements * sizeof(*addr);
1167 
1168 	if (addr_list->num_elements == 0 ||
1169 	    addr_list->vsi_id != vf->vsi.vsi_num ||
1170 	    msg_size != expected_size) {
1171 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR,
1172 		    I40E_ERR_PARAM);
1173 		return;
1174 	}
1175 
1176 	for (i = 0; i < addr_list->num_elements; i++) {
1177 		addr = &addr_list->list[i];
1178 		if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
1179 			i40e_send_vf_nack(pf, vf,
1180 			    VIRTCHNL_OP_DEL_ETH_ADDR, I40E_ERR_PARAM);
1181 			return;
1182 		}
1183 	}
1184 
1185 	for (i = 0; i < addr_list->num_elements; i++) {
1186 		addr = &addr_list->list[i];
1187 		ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
1188 	}
1189 
1190 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR);
1191 }
1192 
1193 static enum i40e_status_code
1194 ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
1195 {
1196 	struct i40e_vsi_context vsi_ctx;
1197 
1198 	vsi_ctx.seid = vf->vsi.seid;
1199 
1200 	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
1201 	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
1202 	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1203 	    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
1204 	return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
1205 }
1206 
1207 static void
1208 ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1209     uint16_t msg_size)
1210 {
1211 	struct virtchnl_vlan_filter_list *filter_list;
1212 	enum i40e_status_code code;
1213 	size_t expected_size;
1214 	int i;
1215 
1216 	if (msg_size < sizeof(*filter_list)) {
1217 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1218 		    I40E_ERR_PARAM);
1219 		return;
1220 	}
1221 
1222 	filter_list = msg;
1223 	expected_size = sizeof(*filter_list) +
1224 	    filter_list->num_elements * sizeof(uint16_t);
1225 	if (filter_list->num_elements == 0 ||
1226 	    filter_list->vsi_id != vf->vsi.vsi_num ||
1227 	    msg_size != expected_size) {
1228 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1229 		    I40E_ERR_PARAM);
1230 		return;
1231 	}
1232 
1233 	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
1234 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1235 		    I40E_ERR_PARAM);
1236 		return;
1237 	}
1238 
1239 	for (i = 0; i < filter_list->num_elements; i++) {
1240 		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
1241 			i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1242 			    I40E_ERR_PARAM);
1243 			return;
1244 		}
1245 	}
1246 
1247 	code = ixl_vf_enable_vlan_strip(pf, vf);
1248 	if (code != I40E_SUCCESS) {
1249 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1250 		    I40E_ERR_PARAM);
		return;
1251 	}
1252 
1253 	for (i = 0; i < filter_list->num_elements; i++)
1254 		ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
1255 
1256 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_VLAN);
1257 }
1258 
1259 static void
1260 ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1261     uint16_t msg_size)
1262 {
1263 	struct virtchnl_vlan_filter_list *filter_list;
1264 	int i;
1265 	size_t expected_size;
1266 
1267 	if (msg_size < sizeof(*filter_list)) {
1268 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
1269 		    I40E_ERR_PARAM);
1270 		return;
1271 	}
1272 
1273 	filter_list = msg;
1274 	expected_size = sizeof(*filter_list) +
1275 	    filter_list->num_elements * sizeof(uint16_t);
1276 	if (filter_list->num_elements == 0 ||
1277 	    filter_list->vsi_id != vf->vsi.vsi_num ||
1278 	    msg_size != expected_size) {
1279 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
1280 		    I40E_ERR_PARAM);
1281 		return;
1282 	}
1283 
1284 	for (i = 0; i < filter_list->num_elements; i++) {
1285 		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
1286 			i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
1287 			    I40E_ERR_PARAM);
1288 			return;
1289 		}
1290 	}
1291 
1292 	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
1293 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
1294 		    I40E_ERR_PARAM);
1295 		return;
1296 	}
1297 
1298 	for (i = 0; i < filter_list->num_elements; i++)
1299 		ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
1300 
1301 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_VLAN);
1302 }
1303 
1304 static void
1305 ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
1306     void *msg, uint16_t msg_size)
1307 {
1308 	struct virtchnl_promisc_info *info;
1309 	struct i40e_hw *hw = &pf->hw;
1310 	enum i40e_status_code code;
1311 
1312 	if (msg_size != sizeof(*info)) {
1313 		i40e_send_vf_nack(pf, vf,
1314 		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
1315 		return;
1316 	}
1317 
1318 	if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
1319 		/*
1320 		 * Do the same thing as the Linux PF driver -- lie to the VF
1321 		 */
1322 		ixl_send_vf_ack(pf, vf,
1323 		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
1324 		return;
1325 	}
1326 
1327 	info = msg;
1328 	if (info->vsi_id != vf->vsi.vsi_num) {
1329 		i40e_send_vf_nack(pf, vf,
1330 		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
1331 		return;
1332 	}
1333 
1334 	code = i40e_aq_set_vsi_unicast_promiscuous(hw, vf->vsi.seid,
1335 	    info->flags & FLAG_VF_UNICAST_PROMISC, NULL, TRUE);
1336 	if (code != I40E_SUCCESS) {
1337 		device_printf(pf->dev, "i40e_aq_set_vsi_unicast_promiscuous (seid %d) failed: status %s,"
1338 		    " error %s\n", vf->vsi.seid, i40e_stat_str(hw, code),
1339 		    i40e_aq_str(hw, hw->aq.asq_last_status));
1340 		i40e_send_vf_nack(pf, vf,
1341 		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
1342 		return;
1343 	}
1344 
1345 	code = i40e_aq_set_vsi_multicast_promiscuous(hw, vf->vsi.seid,
1346 	    info->flags & FLAG_VF_MULTICAST_PROMISC, NULL);
1347 	if (code != I40E_SUCCESS) {
1348 		device_printf(pf->dev, "i40e_aq_set_vsi_multicast_promiscuous (seid %d) failed: status %s,"
1349 		    " error %s\n", vf->vsi.seid, i40e_stat_str(hw, code),
1350 		    i40e_aq_str(hw, hw->aq.asq_last_status));
1351 		i40e_send_vf_nack(pf, vf,
1352 		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
1353 		return;
1354 	}
1355 
1356 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
1357 }
1358 
1359 static void
1360 ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1361     uint16_t msg_size)
1362 {
1363 	struct virtchnl_queue_select *queue;
1364 
1365 	if (msg_size != sizeof(*queue)) {
1366 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_STATS,
1367 		    I40E_ERR_PARAM);
1368 		return;
1369 	}
1370 
1371 	queue = msg;
1372 	if (queue->vsi_id != vf->vsi.vsi_num) {
1373 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_STATS,
1374 		    I40E_ERR_PARAM);
1375 		return;
1376 	}
1377 
1378 	ixl_update_eth_stats(&vf->vsi);
1379 
1380 	ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_STATS,
1381 	    I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
1382 }
1383 
1384 static void
1385 ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1386     uint16_t msg_size)
1387 {
1388 	struct i40e_hw *hw;
1389 	struct virtchnl_rss_key *key;
1390 	struct i40e_aqc_get_set_rss_key_data key_data;
1391 	enum i40e_status_code status;
1392 
1393 	hw = &pf->hw;
1394 
1395 	if (msg_size < sizeof(*key)) {
1396 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
1397 		    I40E_ERR_PARAM);
1398 		return;
1399 	}
1400 
1401 	key = msg;
1402 
1403 	if (key->key_len > 52) {
1404 		device_printf(pf->dev, "VF %d: Key size in msg (%d) is greater than max key size (%d)\n",
1405 		    vf->vf_num, key->key_len, 52);
1406 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
1407 		    I40E_ERR_PARAM);
1408 		return;
1409 	}
1410 
1411 	if (key->vsi_id != vf->vsi.vsi_num) {
1412 		device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
1413 		    vf->vf_num, key->vsi_id, vf->vsi.vsi_num);
1414 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
1415 		    I40E_ERR_PARAM);
1416 		return;
1417 	}
1418 
1419 	/* Program the RSS hash key using the MAC-type-dependent method */
1420 	if (hw->mac.type == I40E_MAC_X722) {
1421 		bzero(&key_data, sizeof(key_data));
1422 		if (key->key_len <= 40)
1423 			bcopy(key->key, key_data.standard_rss_key, key->key_len);
1424 		else {
1425 			bcopy(key->key, key_data.standard_rss_key, 40);
1426 			bcopy(&key->key[40], key_data.extended_hash_key, key->key_len - 40);
1427 		}
1428 		status = i40e_aq_set_rss_key(hw, vf->vsi.vsi_num, &key_data);
1429 		if (status) {
1430 			device_printf(pf->dev, "i40e_aq_set_rss_key status %s, error %s\n",
1431 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
1432 			i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
1433 			    I40E_ERR_ADMIN_QUEUE_ERROR);
1434 			return;
1435 		}
1436 	} else {
1437 		for (int i = 0; i < (key->key_len / 4); i++)
1438 			i40e_write_rx_ctl(hw, I40E_VFQF_HKEY1(i, vf->vf_num), ((u32 *)key->key)[i]);
1439 	}
1440 
1441 	DDPRINTF(pf->dev, "VF %d: Programmed key starting with 0x%x ok!",
1442 	    vf->vf_num, key->key[0]);
1443 
1444 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY);
1445 }
1446 
1447 static void
1448 ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1449     uint16_t msg_size)
1450 {
1451 	struct i40e_hw *hw;
1452 	struct virtchnl_rss_lut *lut;
1453 	enum i40e_status_code status;
1454 
1455 	hw = &pf->hw;
1456 
1457 	if (msg_size < sizeof(*lut)) {
1458 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
1459 		    I40E_ERR_PARAM);
1460 		return;
1461 	}
1462 
1463 	lut = msg;
1464 
1465 	if (lut->lut_entries > 64) {
1466 		device_printf(pf->dev, "VF %d: # of LUT entries in msg (%d) is greater than max (%d)\n",
1467 		    vf->vf_num, lut->lut_entries, 64);
1468 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
1469 		    I40E_ERR_PARAM);
1470 		return;
1471 	}
1472 
1473 	if (lut->vsi_id != vf->vsi.vsi_num) {
1474 		device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
1475 		    vf->vf_num, lut->vsi_id, vf->vsi.vsi_num);
1476 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
1477 		    I40E_ERR_PARAM);
1478 		return;
1479 	}
1480 
1481 	/* Program the RSS LUT using the MAC-type-dependent method */
1482 	if (hw->mac.type == I40E_MAC_X722) {
1483 		status = i40e_aq_set_rss_lut(hw, vf->vsi.vsi_num, false, lut->lut, lut->lut_entries);
1484 		if (status) {
1485 			device_printf(pf->dev, "i40e_aq_set_rss_lut status %s, error %s\n",
1486 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
1487 			i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
1488 			    I40E_ERR_ADMIN_QUEUE_ERROR);
1489 			return;
1490 		}
1491 	} else {
1492 		for (int i = 0; i < (lut->lut_entries / 4); i++)
1493 			i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf->vf_num), ((u32 *)lut->lut)[i]);
1494 	}
1495 
1496 	DDPRINTF(pf->dev, "VF %d: Programmed LUT starting with 0x%x and length %d ok!",
1497 	    vf->vf_num, lut->lut[0], lut->lut_entries);
1498 
1499 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT);
1500 }
1501 
1502 static void
1503 ixl_vf_set_rss_hena_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1504     uint16_t msg_size)
1505 {
1506 	struct i40e_hw *hw;
1507 	struct virtchnl_rss_hena *hena;
1508 
1509 	hw = &pf->hw;
1510 
1511 	if (msg_size < sizeof(*hena)) {
1512 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_SET_RSS_HENA,
1513 		    I40E_ERR_PARAM);
1514 		return;
1515 	}
1516 
1517 	hena = msg;
1518 
1519 	/* Set HENA */
1520 	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_num), (u32)hena->hena);
1521 	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_num), (u32)(hena->hena >> 32));
1522 
1523 	DDPRINTF(pf->dev, "VF %d: Programmed HENA with 0x%016lx",
1524 	    vf->vf_num, hena->hena);
1525 
1526 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_SET_RSS_HENA);
1527 }
1528 
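/*
 * Send a link state change event to a single VF carrying the PF's current
 * link status and speed.
 */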
1529 static void
1530 ixl_notify_vf_link_state(struct ixl_pf *pf, struct ixl_vf *vf)
1531 {
1532 	struct virtchnl_pf_event event;
1533 	struct i40e_hw *hw;
1534 
1535 	hw = &pf->hw;
1536 	event.event = VIRTCHNL_EVENT_LINK_CHANGE;
1537 	event.severity = PF_EVENT_SEVERITY_INFO;
1538 	event.event_data.link_event.link_status = pf->vsi.link_active;
1539 	event.event_data.link_event.link_speed =
1540 		(enum virtchnl_link_speed)hw->phy.link_info.link_speed;
1541 
1542 	ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_EVENT, I40E_SUCCESS, &event,
1543 			sizeof(event));
1544 }
1545 
1546 void
1547 ixl_broadcast_link_state(struct ixl_pf *pf)
1548 {
1549 	int i;
1550 
1551 	for (i = 0; i < pf->num_vfs; i++)
1552 		ixl_notify_vf_link_state(pf, &pf->vfs[i]);
1553 }
1554 
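/*
 * Dispatch an incoming virtchnl message from a VF to the appropriate
 * handler based on its opcode.
 */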
1555 void
1556 ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
1557 {
1558 	struct ixl_vf *vf;
1559 	void *msg;
1560 	uint16_t vf_num, msg_size;
1561 	uint32_t opcode;
1562 
1563 	vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
1564 	opcode = le32toh(event->desc.cookie_high);
1565 
1566 	if (vf_num >= pf->num_vfs) {
1567 		device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
1568 		return;
1569 	}
1570 
1571 	vf = &pf->vfs[vf_num];
1572 	msg = event->msg_buf;
1573 	msg_size = event->msg_len;
1574 
1575 	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
1576 	    "Got msg %s(%d) from%sVF-%d of size %d\n",
1577 	    ixl_vc_opcode_str(opcode), opcode,
1578 	    (vf->vf_flags & VF_FLAG_ENABLED) ? " " : " disabled ",
1579 	    vf_num, msg_size);
1580 
1581 	/* This must be a stray msg from a previously destroyed VF. */
1582 	if (!(vf->vf_flags & VF_FLAG_ENABLED))
1583 		return;
1584 
1585 	switch (opcode) {
1586 	case VIRTCHNL_OP_VERSION:
1587 		ixl_vf_version_msg(pf, vf, msg, msg_size);
1588 		break;
1589 	case VIRTCHNL_OP_RESET_VF:
1590 		ixl_vf_reset_msg(pf, vf, msg, msg_size);
1591 		break;
1592 	case VIRTCHNL_OP_GET_VF_RESOURCES:
1593 		ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
1594 		/* Notify VF of link state after it obtains queues, as this is
1595 		/* Notify VF of link state after it obtains resources, as this is
1596 		 */
1597 		ixl_notify_vf_link_state(pf, vf);
1598 		break;
1599 	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
1600 		ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
1601 		break;
1602 	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
1603 		ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
1604 		break;
1605 	case VIRTCHNL_OP_ENABLE_QUEUES:
1606 		ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
1607 		/* Notify VF of link state after it obtains queues, as this is
1608 		 * the last thing it will do as part of initialization
1609 		 */
1610 		ixl_notify_vf_link_state(pf, vf);
1611 		break;
1612 	case VIRTCHNL_OP_DISABLE_QUEUES:
1613 		ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
1614 		break;
1615 	case VIRTCHNL_OP_ADD_ETH_ADDR:
1616 		ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
1617 		break;
1618 	case VIRTCHNL_OP_DEL_ETH_ADDR:
1619 		ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
1620 		break;
1621 	case VIRTCHNL_OP_ADD_VLAN:
1622 		ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
1623 		break;
1624 	case VIRTCHNL_OP_DEL_VLAN:
1625 		ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
1626 		break;
1627 	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
1628 		ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
1629 		break;
1630 	case VIRTCHNL_OP_GET_STATS:
1631 		ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
1632 		break;
1633 	case VIRTCHNL_OP_CONFIG_RSS_KEY:
1634 		ixl_vf_config_rss_key_msg(pf, vf, msg, msg_size);
1635 		break;
1636 	case VIRTCHNL_OP_CONFIG_RSS_LUT:
1637 		ixl_vf_config_rss_lut_msg(pf, vf, msg, msg_size);
1638 		break;
1639 	case VIRTCHNL_OP_SET_RSS_HENA:
1640 		ixl_vf_set_rss_hena_msg(pf, vf, msg, msg_size);
1641 		break;
1642 
1643 	/* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
1644 	case VIRTCHNL_OP_CONFIG_TX_QUEUE:
1645 	case VIRTCHNL_OP_CONFIG_RX_QUEUE:
1646 	default:
1647 		i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
1648 		break;
1649 	}
1650 }
1651 
1652 /* Handle any VFs that have reset themselves via a Function Level Reset (FLR). */
1653 void
1654 ixl_handle_vflr(struct ixl_pf *pf)
1655 {
1656 	struct ixl_vf *vf;
1657 	struct i40e_hw *hw;
1658 	uint16_t global_vf_num;
1659 	uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
1660 	int i;
1661 
1662 	hw = &pf->hw;
1663 
1664 	ixl_dbg_iov(pf, "%s: begin\n", __func__);
1665 
1666 	/* Re-enable the VFLR interrupt cause so the driver doesn't miss a
1667 	 * reset interrupt for another VF */
1668 	icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
1669 	icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
1670 	wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
1671 	ixl_flush(hw);
1672 
1673 	for (i = 0; i < pf->num_vfs; i++) {
1674 		global_vf_num = hw->func_caps.vf_base_id + i;
1675 
1676 		vf = &pf->vfs[i];
1677 		if (!(vf->vf_flags & VF_FLAG_ENABLED))
1678 			continue;
1679 
1680 		vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
1681 		vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
1682 		vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
1683 		if (vflrstat & vflrstat_mask) {
1684 			wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
1685 			    vflrstat_mask);
1686 
1687 			ixl_dbg_iov(pf, "Reinitializing VF-%d\n", i);
1688 			ixl_reinit_vf(pf, vf);
1689 			ixl_dbg_iov(pf, "Reinitializing VF-%d done\n", i);
1690 		}
1691 	}
1692 
1693 }
1694 
1695 static int
1696 ixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
1697 {
1698 
1699 	switch (err) {
1700 	case I40E_AQ_RC_EPERM:
1701 		return (EPERM);
1702 	case I40E_AQ_RC_ENOENT:
1703 		return (ENOENT);
1704 	case I40E_AQ_RC_ESRCH:
1705 		return (ESRCH);
1706 	case I40E_AQ_RC_EINTR:
1707 		return (EINTR);
1708 	case I40E_AQ_RC_EIO:
1709 		return (EIO);
1710 	case I40E_AQ_RC_ENXIO:
1711 		return (ENXIO);
1712 	case I40E_AQ_RC_E2BIG:
1713 		return (E2BIG);
1714 	case I40E_AQ_RC_EAGAIN:
1715 		return (EAGAIN);
1716 	case I40E_AQ_RC_ENOMEM:
1717 		return (ENOMEM);
1718 	case I40E_AQ_RC_EACCES:
1719 		return (EACCES);
1720 	case I40E_AQ_RC_EFAULT:
1721 		return (EFAULT);
1722 	case I40E_AQ_RC_EBUSY:
1723 		return (EBUSY);
1724 	case I40E_AQ_RC_EEXIST:
1725 		return (EEXIST);
1726 	case I40E_AQ_RC_EINVAL:
1727 		return (EINVAL);
1728 	case I40E_AQ_RC_ENOTTY:
1729 		return (ENOTTY);
1730 	case I40E_AQ_RC_ENOSPC:
1731 		return (ENOSPC);
1732 	case I40E_AQ_RC_ENOSYS:
1733 		return (ENOSYS);
1734 	case I40E_AQ_RC_ERANGE:
1735 		return (ERANGE);
1736 	case I40E_AQ_RC_EFLUSHED:
1737 		return (EINVAL);	/* No exact equivalent in errno.h */
1738 	case I40E_AQ_RC_BAD_ADDR:
1739 		return (EFAULT);
1740 	case I40E_AQ_RC_EMODE:
1741 		return (EPERM);
1742 	case I40E_AQ_RC_EFBIG:
1743 		return (EFBIG);
1744 	default:
1745 		return (EINVAL);
1746 	}
1747 }
1748 
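/*
 * Set or clear the "allow loopback" flag in the PF VSI's switch
 * configuration via an AQ update-VSI-parameters command.  Called when VFs
 * are created or destroyed and pf->enable_vf_loopback is set.
 */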
1749 static int
1750 ixl_config_pf_vsi_loopback(struct ixl_pf *pf, bool enable)
1751 {
1752 	struct i40e_hw *hw = &pf->hw;
1753 	device_t dev = pf->dev;
1754 	struct ixl_vsi *vsi = &pf->vsi;
1755 	struct i40e_vsi_context	ctxt;
1756 	int error;
1757 
1758 	memset(&ctxt, 0, sizeof(ctxt));
1759 
1760 	ctxt.seid = vsi->seid;
1761 	if (pf->veb_seid != 0)
1762 		ctxt.uplink_seid = pf->veb_seid;
1763 	ctxt.pf_num = hw->pf_id;
1764 	ctxt.connection_type = IXL_VSI_DATA_PORT;
1765 
1766 	ctxt.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
1767 	ctxt.info.switch_id = (enable) ?
1768 	    htole16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB) : 0;
1769 
1770 	/* error is set to 0 on success */
1771 	error = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
1772 	if (error) {
1773 		device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
1774 		    " aq_error %d\n", error, hw->aq.asq_last_status);
1775 	}
1776 
1777 	return (error);
1778 }
1779 
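/*
 * iflib SR-IOV initialization callback.  Allocate the per-VF state array,
 * add a VEB above the PF VSI (optionally enabling PF VSI loopback), and
 * restore the PF's MAC filters, since adding the VEB reinstates the default
 * hardware filters.
 */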
1780 int
1781 ixl_if_iov_init(if_ctx_t ctx, uint16_t num_vfs, const nvlist_t *params)
1782 {
1783 	struct ixl_pf *pf = iflib_get_softc(ctx);
1784 	device_t dev = iflib_get_dev(ctx);
1785 	struct i40e_hw *hw;
1786 	struct ixl_vsi *pf_vsi;
1787 	enum i40e_status_code ret;
1788 	int i, error;
1789 
1790 	hw = &pf->hw;
1791 	pf_vsi = &pf->vsi;
1792 
1793 	pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
1794 	    M_ZERO);
1795 	if (pf->vfs == NULL) {
1796 		error = ENOMEM;
1797 		goto fail;
1798 	}
1799 
1800 	for (i = 0; i < num_vfs; i++)
1801 		sysctl_ctx_init(&pf->vfs[i].ctx);
1802 
1803 	/*
1804 	 * Add the VEB, then either:
1805 	 * - do nothing: VEPA mode
1806 	 * - enable loopback mode on connected VSIs: VEB mode
1807 	 */
1808 	ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
1809 	    1, FALSE, &pf->veb_seid, FALSE, NULL);
1810 	if (ret != I40E_SUCCESS) {
1811 		error = ixl_adminq_err_to_errno(hw->aq.asq_last_status);
1812 		device_printf(dev, "i40e_aq_add_veb failed; status %s error %s\n",
1813 		    i40e_stat_str(hw, ret), i40e_aq_str(hw, hw->aq.asq_last_status));
1814 		goto fail;
1815 	}
1816 	if (pf->enable_vf_loopback)
1817 		ixl_config_pf_vsi_loopback(pf, true);
1818 
1819 	/*
1820 	 * Adding a VEB brings back the default MAC filter(s). Remove them,
1821 	 * and let the driver add the proper filters back.
1822 	 */
1823 	ixl_del_default_hw_filters(pf_vsi);
1824 	ixl_reconfigure_filters(pf_vsi);
1825 
1826 	pf->num_vfs = num_vfs;
1827 	return (0);
1828 
1829 fail:
1830 	free(pf->vfs, M_IXL);
1831 	pf->vfs = NULL;
1832 	return (error);
1833 }
1834 
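/*
 * iflib SR-IOV teardown callback.  Delete each VF's VSI, return its queues
 * to the PF queue manager and free its MAC filters, then remove the VEB,
 * restore the PF VSI's loopback setting, and free the VF array.  The
 * per-VF sysctl contexts are freed last because sysctl_ctx_free() can sleep.
 */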
1835 void
1836 ixl_if_iov_uninit(if_ctx_t ctx)
1837 {
1838 	struct ixl_pf *pf = iflib_get_softc(ctx);
1839 	struct i40e_hw *hw;
1840 	struct ixl_vsi *vsi;
1841 	struct ifnet *ifp;
1842 	struct ixl_vf *vfs;
1843 	int i, num_vfs;
1844 
1845 	hw = &pf->hw;
1846 	vsi = &pf->vsi;
1847 	ifp = vsi->ifp;
1848 
1849 	for (i = 0; i < pf->num_vfs; i++) {
1850 		if (pf->vfs[i].vsi.seid != 0)
1851 			i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
1852 		ixl_pf_qmgr_release(&pf->qmgr, &pf->vfs[i].qtag);
1853 		ixl_free_mac_filters(&pf->vfs[i].vsi);
1854 		ixl_dbg_iov(pf, "VF %d: %d released\n",
1855 		    i, pf->vfs[i].qtag.num_allocated);
1856 		ixl_dbg_iov(pf, "Unallocated total: %d\n", ixl_pf_qmgr_get_num_free(&pf->qmgr));
1857 	}
1858 
1859 	if (pf->veb_seid != 0) {
1860 		i40e_aq_delete_element(hw, pf->veb_seid, NULL);
1861 		pf->veb_seid = 0;
1862 	}
1863 	/* Reset PF VSI loopback mode */
1864 	if (pf->enable_vf_loopback)
1865 		ixl_config_pf_vsi_loopback(pf, false);
1866 
1867 	vfs = pf->vfs;
1868 	num_vfs = pf->num_vfs;
1869 
1870 	pf->vfs = NULL;
1871 	pf->num_vfs = 0;
1872 
1873 	/* sysctl_ctx_free might sleep, but this func is called w/ an sx lock */
1874 	for (i = 0; i < num_vfs; i++)
1875 		sysctl_ctx_free(&vfs[i].ctx);
1876 	free(vfs, M_IXL);
1877 }
1878 
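/*
 * Reserve num_queues queues for a VF from the PF's queue manager, clamping
 * the request to the range [1, IAVF_MAX_QUEUES].  Returns ENOSPC if the
 * allocation fails.
 */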
1879 static int
1880 ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues)
1881 {
1882 	device_t dev = pf->dev;
1883 	int error;
1884 
1885 	/* Validate, and clamp value if invalid */
1886 	if (num_queues < 1 || num_queues > IAVF_MAX_QUEUES)
1887 		device_printf(dev, "Invalid num-queues (%d) for VF %d\n",
1888 		    num_queues, vf->vf_num);
1889 	if (num_queues < 1) {
1890 		device_printf(dev, "Setting VF %d num-queues to 1\n", vf->vf_num);
1891 		num_queues = 1;
1892 	} else if (num_queues > IAVF_MAX_QUEUES) {
1893 		device_printf(dev, "Setting VF %d num-queues to %d\n", vf->vf_num, IAVF_MAX_QUEUES);
1894 		num_queues = IAVF_MAX_QUEUES;
1895 	}
1896 	error = ixl_pf_qmgr_alloc_scattered(&pf->qmgr, num_queues, &vf->qtag);
1897 	if (error) {
1898 		device_printf(dev, "Error allocating %d queues for VF %d's VSI\n",
1899 		    num_queues, vf->vf_num);
1900 		return (ENOSPC);
1901 	}
1902 
1903 	ixl_dbg_iov(pf, "VF %d: %d allocated, %d active\n",
1904 	    vf->vf_num, vf->qtag.num_allocated, vf->qtag.num_active);
1905 	ixl_dbg_iov(pf, "Unallocated total: %d\n", ixl_pf_qmgr_get_num_free(&pf->qmgr));
1906 
1907 	return (0);
1908 }
1909 
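/*
 * iflib callback invoked once per VF being created.  Reserve the VF's
 * queues, set up its VSI, apply the MAC address and capability flags from
 * the nvlist parameters, and reset the VF so it comes up in a clean state.
 */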
1910 int
1911 ixl_if_iov_vf_add(if_ctx_t ctx, uint16_t vfnum, const nvlist_t *params)
1912 {
1913 	struct ixl_pf *pf = iflib_get_softc(ctx);
1914 	device_t dev = pf->dev;
1915 	char sysctl_name[QUEUE_NAME_LEN];
1916 	struct ixl_vf *vf;
1917 	const void *mac;
1918 	size_t size;
1919 	int error;
1920 	int vf_num_queues;
1921 
1922 	vf = &pf->vfs[vfnum];
1923 	vf->vf_num = vfnum;
1924 	vf->vsi.back = pf;
1925 	vf->vf_flags = VF_FLAG_ENABLED;
1926 	SLIST_INIT(&vf->vsi.ftl);
1927 
1928 	/* Reserve queue allocation from PF */
1929 	vf_num_queues = nvlist_get_number(params, "num-queues");
1930 	error = ixl_vf_reserve_queues(pf, vf, vf_num_queues);
1931 	if (error != 0)
1932 		goto out;
1933 
1934 	error = ixl_vf_setup_vsi(pf, vf);
1935 	if (error != 0)
1936 		goto out;
1937 
1938 	if (nvlist_exists_binary(params, "mac-addr")) {
1939 		mac = nvlist_get_binary(params, "mac-addr", &size);
1940 		bcopy(mac, vf->mac, ETHER_ADDR_LEN);
1941 
1942 		if (nvlist_get_bool(params, "allow-set-mac"))
1943 			vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
1944 	} else
1945 		/*
1946 		 * If the administrator has not specified a MAC address then
1947 		 * we must allow the VF to choose one.
1948 		 */
1949 		vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
1950 
1951 	if (nvlist_get_bool(params, "mac-anti-spoof"))
1952 		vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
1953 
1954 	if (nvlist_get_bool(params, "allow-promisc"))
1955 		vf->vf_flags |= VF_FLAG_PROMISC_CAP;
1956 
1957 	vf->vf_flags |= VF_FLAG_VLAN_CAP;
1958 
1959 	/* VF needs to be reset before it can be used */
1960 	ixl_reset_vf(pf, vf);
1961 out:
1962 	if (error == 0) {
1963 		snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
1964 		ixl_add_vsi_sysctls(dev, &vf->vsi, &vf->ctx, sysctl_name);
1965 	}
1966 
1967 	return (error);
1968 }
1969 
1970