xref: /freebsd/sys/dev/ixl/ixl_pf_iov.c (revision b0b1dbdd)
1 /******************************************************************************
2 
3   Copyright (c) 2013-2015, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #include "ixl_pf_iov.h"
36 
37 /* Private functions */
38 static void	ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum, uint32_t val);
39 static void	ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg);
40 static void	ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg);
41 
42 static bool	ixl_zero_mac(const uint8_t *addr);
43 static bool	ixl_bcast_mac(const uint8_t *addr);
44 
45 static int	ixl_vc_opcode_level(uint16_t opcode);
46 
47 static int	ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr);
48 
49 static int	ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
50 static int	ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
51 static void	ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf);
52 static void	ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi);
53 static void	ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf);
54 static int	ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf);
55 static void	ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
56 static void	ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
57 static void	ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, void *msg, uint16_t len);
58 static void	ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op);
59 static void	ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, const char *file, int line);
60 static void	ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
61 static void	ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
62 static void	ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
63 static int	ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct i40e_virtchnl_txq_info *info);
64 static int	ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct i40e_virtchnl_rxq_info *info);
65 static void	ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
66 static void	ixl_vf_set_qctl(struct ixl_pf *pf, const struct i40e_virtchnl_vector_map *vector, enum i40e_queue_type cur_type, uint16_t cur_queue,
67     enum i40e_queue_type *last_type, uint16_t *last_queue);
68 static void	ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, const struct i40e_virtchnl_vector_map *vector);
69 static void	ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
70 static void	ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
71 static void	ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
72 static void	ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
73 static void	ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
74 static enum i40e_status_code	ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf);
75 static void	ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
76 static void	ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
77 static void	ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
78 static void	ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
79 static int	ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues);
80 
81 static int	ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
82 
83 void
84 ixl_initialize_sriov(struct ixl_pf *pf)
85 {
86 	device_t dev = pf->dev;
87 	struct i40e_hw *hw = &pf->hw;
88 	nvlist_t	*pf_schema, *vf_schema;
89 	int		iov_error;
90 
91 	/* SR-IOV is only supported when MSI-X is in use. */
92 	if (pf->msix <= 1)
93 		return;
94 
95 	pf_schema = pci_iov_schema_alloc_node();
96 	vf_schema = pci_iov_schema_alloc_node();
97 	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
98 	pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
99 	    IOV_SCHEMA_HASDEFAULT, TRUE);
100 	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
101 	    IOV_SCHEMA_HASDEFAULT, FALSE);
102 	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
103 	    IOV_SCHEMA_HASDEFAULT, FALSE);
104 	pci_iov_schema_add_uint16(vf_schema, "num-queues",
105 	    IOV_SCHEMA_HASDEFAULT,
106 	    max(1, min(hw->func_caps.num_msix_vectors_vf - 1, IXLV_MAX_QUEUES)));
107 
108 	iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
109 	if (iov_error != 0) {
110 		device_printf(dev,
111 		    "Failed to initialize SR-IOV (error=%d)\n",
112 		    iov_error);
113 	} else
114 		device_printf(dev, "SR-IOV ready\n");
115 
116 	pf->vc_debug_lvl = 1;
117 }
118 
119 /*
120  * Allocate the VSI for a VF.
121  */
122 static int
123 ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
124 {
125 	device_t dev;
126 	struct i40e_hw *hw;
127 	struct ixl_vsi *vsi;
128 	struct i40e_vsi_context vsi_ctx;
129 	int i;
130 	enum i40e_status_code code;
131 
132 	hw = &pf->hw;
133 	vsi = &pf->vsi;
134 	dev = pf->dev;
135 
136 	vsi_ctx.pf_num = hw->pf_id;
137 	vsi_ctx.uplink_seid = pf->veb_seid;
138 	vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
139 	vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
140 	vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;
141 
142 	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
143 
144 	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
145 	vsi_ctx.info.switch_id = htole16(0);
146 
147 	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
148 	vsi_ctx.info.sec_flags = 0;
149 	if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
150 		vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
151 
152 	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
153 	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
154 	    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
155 
156 	vsi_ctx.info.valid_sections |=
157 	    htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
158 	vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
159 
160 	/* ERJ: Only scattered allocation is supported for VFs right now */
161 	for (i = 0; i < vf->qtag.num_active; i++)
162 		vsi_ctx.info.queue_mapping[i] = vf->qtag.qidx[i];
163 	for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
164 		vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);
165 
166 	vsi_ctx.info.tc_mapping[0] = htole16(
167 	    (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
168 	    (bsrl(vf->qtag.num_allocated) << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
169 
170 	code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
171 	if (code != I40E_SUCCESS)
172 		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
173 	vf->vsi.seid = vsi_ctx.seid;
174 	vf->vsi.vsi_num = vsi_ctx.vsi_number;
175 	// vf->vsi.first_queue = vf->qtag.qidx[0];
176 	vf->vsi.num_queues = vf->qtag.num_active;
177 
178 	code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
179 	if (code != I40E_SUCCESS)
180 		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
181 
182 	code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
183 	if (code != I40E_SUCCESS) {
184 		device_printf(dev, "Failed to disable BW limit: %d\n",
185 		    ixl_adminq_err_to_errno(hw->aq.asq_last_status));
186 		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
187 	}
188 
189 	memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
190 	return (0);
191 }
192 
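/*
 * Set up the VF's VSI: allocate it, clear the hardware filter counters, and
 * install the default broadcast filter.
 */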
193 static int
194 ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
195 {
196 	struct i40e_hw *hw;
197 	int error;
198 
199 	hw = &pf->hw;
200 
201 	error = ixl_vf_alloc_vsi(pf, vf);
202 	if (error != 0)
203 		return (error);
204 
205 	vf->vsi.hw_filters_add = 0;
206 	vf->vsi.hw_filters_del = 0;
207 	ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
208 	ixl_reconfigure_filters(&vf->vsi);
209 
210 	return (0);
211 }
212 
213 static void
214 ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
215     uint32_t val)
216 {
217 	uint32_t qtable;
218 	int index, shift;
219 
220 	/*
221 	 * Two queues are mapped in a single register, so we have to do some
222 	 * gymnastics to convert the queue number into a register index and
223 	 * shift.
224 	 */
225 	index = qnum / 2;
226 	shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;
227 
228 	qtable = i40e_read_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
229 	qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
230 	qtable |= val << shift;
231 	i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
232 }
233 
234 static void
235 ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
236 {
237 	struct i40e_hw *hw;
238 	uint32_t qtable;
239 	int i;
240 
241 	hw = &pf->hw;
242 
243 	/*
244 	 * Contiguous mappings aren't actually supported by the hardware,
245 	 * so we have to use non-contiguous mappings.
246 	 */
247 	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
248 	     I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
249 
250 	/* Enable LAN traffic on this VF */
251 	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
252 	    I40E_VPLAN_MAPENA_TXRX_ENA_MASK);
253 
254 	/* Program index of each VF queue into PF queue space
255 	 * (This is only needed if QTABLE is enabled) */
256 	for (i = 0; i < vf->vsi.num_queues; i++) {
257 		qtable = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i) <<
258 		    I40E_VPLAN_QTABLE_QINDEX_SHIFT;
259 
260 		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
261 	}
262 	for (; i < IXL_MAX_VSI_QUEUES; i++)
263 		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num),
264 		    I40E_VPLAN_QTABLE_QINDEX_MASK);
265 
266 	/* Map queues allocated to VF to its VSI;
267 	 * This mapping matches the VF-wide mapping since the VF
268 	 * is only given a single VSI */
269 	for (i = 0; i < vf->vsi.num_queues; i++)
270 		ixl_vf_map_vsi_queue(hw, vf, i,
271 		    ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i));
272 
273 	/* Set rest of VSI queues as unused. */
274 	for (; i < IXL_MAX_VSI_QUEUES; i++)
275 		ixl_vf_map_vsi_queue(hw, vf, i,
276 		    I40E_VSILAN_QTABLE_QINDEX_0_MASK);
277 
278 	ixl_flush(hw);
279 }
280 
281 static void
282 ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
283 {
284 	struct i40e_hw *hw;
285 
286 	hw = &pf->hw;
287 
288 	if (vsi->seid == 0)
289 		return;
290 
291 	i40e_aq_delete_element(hw, vsi->seid, NULL);
292 }
293 
294 static void
295 ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
296 {
297 
298 	wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
299 	ixl_flush(hw);
300 }
301 
302 static void
303 ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
304 {
305 
306 	wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
307 	    I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
308 	ixl_flush(hw);
309 }
310 
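/*
 * Release the VF's VSI and disable all of its queue interrupts and
 * interrupt linked lists.
 */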
311 static void
312 ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
313 {
314 	struct i40e_hw *hw;
315 	uint32_t vfint_reg, vpint_reg;
316 	int i;
317 
318 	hw = &pf->hw;
319 
320 	ixl_vf_vsi_release(pf, &vf->vsi);
321 
322 	/* Index 0 has a special register. */
323 	ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));
324 
325 	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
326 		vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i, vf->vf_num);
327 		ixl_vf_disable_queue_intr(hw, vfint_reg);
328 	}
329 
330 	/* Index 0 has a special register. */
331 	ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));
332 
333 	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
334 		vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
335 		ixl_vf_unregister_intr(hw, vpint_reg);
336 	}
337 
338 	vf->vsi.num_queues = 0;
339 }
340 
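/*
 * Poll the VF's PCIe device status, via the PF's config-space indirect access
 * registers, until no transactions are pending or the reset timeout expires.
 */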
341 static int
342 ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
343 {
344 	struct i40e_hw *hw;
345 	int i;
346 	uint16_t global_vf_num;
347 	uint32_t ciad;
348 
349 	hw = &pf->hw;
350 	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
351 
352 	wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
353 	     (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
354 	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
355 		ciad = rd32(hw, I40E_PF_PCI_CIAD);
356 		if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
357 			return (0);
358 		DELAY(1);
359 	}
360 
361 	return (ETIMEDOUT);
362 }
363 
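/*
 * Trigger a software reset of the VF, then rebuild its resources.
 */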
364 static void
365 ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
366 {
367 	struct i40e_hw *hw;
368 	uint32_t vfrtrig;
369 
370 	hw = &pf->hw;
371 
372 	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
373 	vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
374 	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
375 	ixl_flush(hw);
376 
377 	ixl_reinit_vf(pf, vf);
378 }
379 
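/*
 * Finish a VF reset: wait for pending PCIe activity to stop and for the
 * hardware to report reset completion, then rebuild the VF's VSI and queue
 * mappings and mark the VF active.
 */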
380 static void
381 ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
382 {
383 	struct i40e_hw *hw;
384 	uint32_t vfrstat, vfrtrig;
385 	int i, error;
386 
387 	hw = &pf->hw;
388 
389 	error = ixl_flush_pcie(pf, vf);
390 	if (error != 0)
391 		device_printf(pf->dev,
392 		    "Timed out waiting for PCIe activity to stop on VF-%d\n",
393 		    vf->vf_num);
394 
395 	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
396 		DELAY(10);
397 
398 		vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
399 		if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
400 			break;
401 	}
402 
403 	if (i == IXL_VF_RESET_TIMEOUT)
404 		device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);
405 
406 	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED);
407 
408 	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
409 	vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
410 	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
411 
412 	if (vf->vsi.seid != 0)
413 		ixl_disable_rings(&vf->vsi);
414 
415 	ixl_vf_release_resources(pf, vf);
416 	ixl_vf_setup_vsi(pf, vf);
417 	ixl_vf_map_queues(pf, vf);
418 
419 	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_VFACTIVE);
420 	ixl_flush(hw);
421 }
422 
423 static int
424 ixl_vc_opcode_level(uint16_t opcode)
425 {
426 	switch (opcode) {
427 	case I40E_VIRTCHNL_OP_GET_STATS:
428 		return (10);
429 	default:
430 		return (5);
431 	}
432 }
433 
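/*
 * Send a virtual channel message to the VF over the PF admin queue.
 */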
434 static void
435 ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
436     enum i40e_status_code status, void *msg, uint16_t len)
437 {
438 	struct i40e_hw *hw;
439 	int global_vf_id;
440 
441 	hw = &pf->hw;
442 	global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;
443 
444 	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
445 	    "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
446 	    ixl_vc_opcode_str(op), op, status, vf->vf_num);
447 
448 	i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
449 }
450 
451 static void
452 ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
453 {
454 
455 	ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
456 }
457 
458 static void
459 ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
460     enum i40e_status_code status, const char *file, int line)
461 {
462 
463 	I40E_VC_DEBUG(pf, 1,
464 	    "Sending NACK (op=%s[%d], err=%s[%d]) to VF-%d from %s:%d\n",
465 	    ixl_vc_opcode_str(op), op, i40e_stat_str(&pf->hw, status),
466 	    status, vf->vf_num, file, line);
467 	ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
468 }
469 
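/*
 * Handle a VERSION request: record the VF's API minor version and reply with
 * the version supported by the PF.
 */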
470 static void
471 ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
472     uint16_t msg_size)
473 {
474 	struct i40e_virtchnl_version_info reply;
475 
476 	if (msg_size != sizeof(struct i40e_virtchnl_version_info)) {
477 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_VERSION,
478 		    I40E_ERR_PARAM);
479 		return;
480 	}
481 
482 	vf->version = ((struct i40e_virtchnl_version_info *)msg)->minor;
483 
484 	reply.major = I40E_VIRTCHNL_VERSION_MAJOR;
485 	reply.minor = I40E_VIRTCHNL_VERSION_MINOR;
486 	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
487 	    sizeof(reply));
488 }
489 
490 static void
491 ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
492     uint16_t msg_size)
493 {
494 
495 	if (msg_size != 0) {
496 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_RESET_VF,
497 		    I40E_ERR_PARAM);
498 		return;
499 	}
500 
501 	ixl_reset_vf(pf, vf);
502 
503 	/* No response to a reset message. */
504 }
505 
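/*
 * Handle a GET_VF_RESOURCES request: report the VF's VSI, queue, interrupt
 * vector, and offload capabilities.
 */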
506 static void
507 ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
508     uint16_t msg_size)
509 {
510 	struct i40e_virtchnl_vf_resource reply;
511 
512 	if ((vf->version == 0 && msg_size != 0) ||
513 	    (vf->version == 1 && msg_size != 4)) {
514 		device_printf(pf->dev, "Invalid GET_VF_RESOURCES message size,"
515 		    " for VF version %d.%d\n", I40E_VIRTCHNL_VERSION_MAJOR,
516 		    vf->version);
517 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
518 		    I40E_ERR_PARAM);
519 		return;
520 	}
521 
522 	bzero(&reply, sizeof(reply));
523 
524 	if (vf->version == I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
525 		reply.vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
526 					 I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
527 					 I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
528 	else
529 		/* Force VF RSS setup by PF in 1.1+ VFs */
530 		reply.vf_offload_flags = *(u32 *)msg & (
531 					 I40E_VIRTCHNL_VF_OFFLOAD_L2 |
532 					 I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF |
533 					 I40E_VIRTCHNL_VF_OFFLOAD_VLAN);
534 
535 	reply.num_vsis = 1;
536 	reply.num_queue_pairs = vf->vsi.num_queues;
537 	reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
538 	reply.rss_key_size = 52;
539 	reply.rss_lut_size = 64;
540 	reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
541 	reply.vsi_res[0].vsi_type = I40E_VSI_SRIOV;
542 	reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues;
543 	memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
544 
545 	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
546 	    I40E_SUCCESS, &reply, sizeof(reply));
547 }
548 
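/*
 * Program the HMC TX queue context for the PF queue backing the given VF
 * queue and associate that queue with the VF in QTX_CTL.
 */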
549 static int
550 ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
551     struct i40e_virtchnl_txq_info *info)
552 {
553 	struct i40e_hw *hw;
554 	struct i40e_hmc_obj_txq txq;
555 	uint16_t global_queue_num, global_vf_num;
556 	enum i40e_status_code status;
557 	uint32_t qtx_ctl;
558 
559 	hw = &pf->hw;
560 	global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
561 	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
562 	bzero(&txq, sizeof(txq));
563 
564 	DDPRINTF(pf->dev, "VF %d: PF TX queue %d / VF TX queue %d (Global VF %d)\n",
565 	    vf->vf_num, global_queue_num, info->queue_id, global_vf_num);
566 
567 	status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
568 	if (status != I40E_SUCCESS)
569 		return (EINVAL);
570 
571 	txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;
572 
573 	txq.head_wb_ena = info->headwb_enabled;
574 	txq.head_wb_addr = info->dma_headwb_addr;
575 	txq.qlen = info->ring_len;
576 	txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
577 	txq.rdylist_act = 0;
578 
579 	status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
580 	if (status != I40E_SUCCESS)
581 		return (EINVAL);
582 
583 	qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
584 	    (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
585 	    (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
586 	wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
587 	ixl_flush(hw);
588 
589 	ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, true);
590 
591 	return (0);
592 }
593 
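/*
 * Validate the VF's RX queue parameters and program the HMC RX queue context
 * for the PF queue backing the given VF queue.
 */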
594 static int
595 ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
596     struct i40e_virtchnl_rxq_info *info)
597 {
598 	struct i40e_hw *hw;
599 	struct i40e_hmc_obj_rxq rxq;
600 	uint16_t global_queue_num;
601 	enum i40e_status_code status;
602 
603 	hw = &pf->hw;
604 	global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
605 	bzero(&rxq, sizeof(rxq));
606 
607 	DDPRINTF(pf->dev, "VF %d: PF RX queue %d / VF RX queue %d\n",
608 	    vf->vf_num, global_queue_num, info->queue_id);
609 
610 	if (info->databuffer_size > IXL_VF_MAX_BUFFER)
611 		return (EINVAL);
612 
613 	if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
614 	    info->max_pkt_size < ETHER_MIN_LEN)
615 		return (EINVAL);
616 
617 	if (info->splithdr_enabled) {
618 		if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
619 			return (EINVAL);
620 
621 		rxq.hsplit_0 = info->rx_split_pos &
622 		    (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
623 		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
624 		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
625 		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
626 		rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
627 
628 		rxq.dtype = 2;
629 	}
630 
631 	status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
632 	if (status != I40E_SUCCESS)
633 		return (EINVAL);
634 
635 	rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
636 	rxq.qlen = info->ring_len;
637 
638 	rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
639 
640 	rxq.dsize = 1;
641 	rxq.crcstrip = 1;
642 	rxq.l2tsel = 1;
643 
644 	rxq.rxmax = info->max_pkt_size;
645 	rxq.tphrdesc_ena = 1;
646 	rxq.tphwdesc_ena = 1;
647 	rxq.tphdata_ena = 1;
648 	rxq.tphhead_ena = 1;
649 	rxq.lrxqthresh = 2;
650 	rxq.prefena = 1;
651 
652 	status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
653 	if (status != I40E_SUCCESS)
654 		return (EINVAL);
655 
656 	ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, false);
657 
658 	return (0);
659 }
660 
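/*
 * Handle a CONFIG_VSI_QUEUES request: validate each queue pair and program
 * its TX and RX queue contexts.
 */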
661 static void
662 ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
663     uint16_t msg_size)
664 {
665 	struct i40e_virtchnl_vsi_queue_config_info *info;
666 	struct i40e_virtchnl_queue_pair_info *pair;
667 	uint16_t expected_msg_size;
668 	int i;
669 
670 	if (msg_size < sizeof(*info)) {
671 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
672 		    I40E_ERR_PARAM);
673 		return;
674 	}
675 
676 	info = msg;
677 	if (info->num_queue_pairs == 0 || info->num_queue_pairs > vf->vsi.num_queues) {
678 		device_printf(pf->dev, "VF %d: invalid # of qpairs (msg has %d, VSI has %d)\n",
679 		    vf->vf_num, info->num_queue_pairs, vf->vsi.num_queues);
680 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
681 		    I40E_ERR_PARAM);
682 		return;
683 	}
684 
685 	expected_msg_size = sizeof(*info) + info->num_queue_pairs * sizeof(*pair);
686 	if (msg_size != expected_msg_size) {
687 		device_printf(pf->dev, "VF %d: size of recvd message (%d) does not match expected size (%d)\n",
688 		    vf->vf_num, msg_size, expected_msg_size);
689 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
690 		    I40E_ERR_PARAM);
691 		return;
692 	}
693 
694 	if (info->vsi_id != vf->vsi.vsi_num) {
695 		device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
696 		    vf->vf_num, info->vsi_id, vf->vsi.vsi_num);
697 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
698 		    I40E_ERR_PARAM);
699 		return;
700 	}
701 
702 	for (i = 0; i < info->num_queue_pairs; i++) {
703 		pair = &info->qpair[i];
704 
705 		if (pair->txq.vsi_id != vf->vsi.vsi_num ||
706 		    pair->rxq.vsi_id != vf->vsi.vsi_num ||
707 		    pair->txq.queue_id != pair->rxq.queue_id ||
708 		    pair->txq.queue_id >= vf->vsi.num_queues) {
709 
710 			i40e_send_vf_nack(pf, vf,
711 			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
712 			return;
713 		}
714 
715 		if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
716 			i40e_send_vf_nack(pf, vf,
717 			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
718 			return;
719 		}
720 
721 		if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
722 			i40e_send_vf_nack(pf, vf,
723 			    I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
724 			return;
725 		}
726 	}
727 
728 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES);
729 }
730 
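/*
 * Program the interrupt cause-control register for a single queue, linking it
 * to the given vector and to the previously programmed queue; the interrupt
 * linked list is built back to front.
 */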
731 static void
732 ixl_vf_set_qctl(struct ixl_pf *pf,
733     const struct i40e_virtchnl_vector_map *vector,
734     enum i40e_queue_type cur_type, uint16_t cur_queue,
735     enum i40e_queue_type *last_type, uint16_t *last_queue)
736 {
737 	uint32_t offset, qctl;
738 	uint16_t itr_indx;
739 
740 	if (cur_type == I40E_QUEUE_TYPE_RX) {
741 		offset = I40E_QINT_RQCTL(cur_queue);
742 		itr_indx = vector->rxitr_idx;
743 	} else {
744 		offset = I40E_QINT_TQCTL(cur_queue);
745 		itr_indx = vector->txitr_idx;
746 	}
747 
748 	qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
749 	    (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
750 	    (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
751 	    I40E_QINT_RQCTL_CAUSE_ENA_MASK |
752 	    (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));
753 
754 	wr32(&pf->hw, offset, qctl);
755 
756 	*last_type = cur_type;
757 	*last_queue = cur_queue;
758 }
759 
760 static void
761 ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
762     const struct i40e_virtchnl_vector_map *vector)
763 {
764 	struct i40e_hw *hw;
765 	u_int qindex;
766 	enum i40e_queue_type type, last_type;
767 	uint32_t lnklst_reg;
768 	uint16_t rxq_map, txq_map, cur_queue, last_queue;
769 
770 	hw = &pf->hw;
771 
772 	rxq_map = vector->rxq_map;
773 	txq_map = vector->txq_map;
774 
775 	last_queue = IXL_END_OF_INTR_LNKLST;
776 	last_type = I40E_QUEUE_TYPE_RX;
777 
778 	/*
779 	 * The datasheet says that, to optimize performance, RX queues and TX queues
780 	 * should be interleaved in the interrupt linked list, so we process
781 	 * both at once here.
782 	 */
783 	while ((rxq_map != 0) || (txq_map != 0)) {
784 		if (txq_map != 0) {
785 			qindex = ffs(txq_map) - 1;
786 			type = I40E_QUEUE_TYPE_TX;
787 			cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
788 			ixl_vf_set_qctl(pf, vector, type, cur_queue,
789 			    &last_type, &last_queue);
790 			txq_map &= ~(1 << qindex);
791 		}
792 
793 		if (rxq_map != 0) {
794 			qindex = ffs(rxq_map) - 1;
795 			type = I40E_QUEUE_TYPE_RX;
796 			cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
797 			ixl_vf_set_qctl(pf, vector, type, cur_queue,
798 			    &last_type, &last_queue);
799 			rxq_map &= ~(1 << qindex);
800 		}
801 	}
802 
803 	if (vector->vector_id == 0)
804 		lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
805 	else
806 		lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
807 		    vf->vf_num);
808 	wr32(hw, lnklst_reg,
809 	    (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
810 	    (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
811 
812 	ixl_flush(hw);
813 }
814 
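/*
 * Handle a CONFIG_IRQ_MAP request: validate each vector map and program the
 * VF's interrupt linked lists.
 */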
815 static void
816 ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
817     uint16_t msg_size)
818 {
819 	struct i40e_virtchnl_irq_map_info *map;
820 	struct i40e_virtchnl_vector_map *vector;
821 	struct i40e_hw *hw;
822 	int i, largest_txq, largest_rxq;
823 
824 	hw = &pf->hw;
825 
826 	if (msg_size < sizeof(*map)) {
827 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
828 		    I40E_ERR_PARAM);
829 		return;
830 	}
831 
832 	map = msg;
833 	if (map->num_vectors == 0) {
834 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
835 		    I40E_ERR_PARAM);
836 		return;
837 	}
838 
839 	if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
840 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
841 		    I40E_ERR_PARAM);
842 		return;
843 	}
844 
845 	for (i = 0; i < map->num_vectors; i++) {
846 		vector = &map->vecmap[i];
847 
848 		if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
849 		    vector->vsi_id != vf->vsi.vsi_num) {
850 			i40e_send_vf_nack(pf, vf,
851 			    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
852 			return;
853 		}
854 
855 		if (vector->rxq_map != 0) {
856 			largest_rxq = fls(vector->rxq_map) - 1;
857 			if (largest_rxq >= vf->vsi.num_queues) {
858 				i40e_send_vf_nack(pf, vf,
859 				    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
860 				    I40E_ERR_PARAM);
861 				return;
862 			}
863 		}
864 
865 		if (vector->txq_map != 0) {
866 			largest_txq = fls(vector->txq_map) - 1;
867 			if (largest_txq >= vf->vsi.num_queues) {
868 				i40e_send_vf_nack(pf, vf,
869 				    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
870 				    I40E_ERR_PARAM);
871 				return;
872 			}
873 		}
874 
875 		if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
876 		    vector->txitr_idx > IXL_MAX_ITR_IDX) {
877 			i40e_send_vf_nack(pf, vf,
878 			    I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
879 			    I40E_ERR_PARAM);
880 			return;
881 		}
882 
883 		ixl_vf_config_vector(pf, vf, vector);
884 	}
885 
886 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP);
887 }
888 
889 static void
890 ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
891     uint16_t msg_size)
892 {
893 	struct i40e_virtchnl_queue_select *select;
894 	int error = 0;
895 
896 	if (msg_size != sizeof(*select)) {
897 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
898 		    I40E_ERR_PARAM);
899 		return;
900 	}
901 
902 	select = msg;
903 	if (select->vsi_id != vf->vsi.vsi_num ||
904 	    select->rx_queues == 0 || select->tx_queues == 0) {
905 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
906 		    I40E_ERR_PARAM);
907 		return;
908 	}
909 
910 	/* Enable TX rings selected by the VF */
911 	for (int i = 0; i < 32; i++) {
912 		if ((1 << i) & select->tx_queues) {
913 			/* Warn if queue is out of VF allocation range */
914 			if (i >= vf->vsi.num_queues) {
915 				device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
916 				    vf->vf_num, i);
917 				break;
918 			}
919 			/* Skip this queue if it hasn't been configured */
920 			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
921 				continue;
922 			/* Warn if this queue is already marked as enabled */
923 			if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true))
924 				device_printf(pf->dev, "VF %d: TX ring %d is already enabled!\n",
925 				    vf->vf_num, i);
926 
927 			error = ixl_enable_tx_ring(pf, &vf->qtag, i);
928 			if (error)
929 				break;
930 			else
931 				ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, true);
932 		}
933 	}
934 
935 	/* Enable RX rings selected by the VF */
936 	for (int i = 0; i < 32; i++) {
937 		if ((1 << i) & select->rx_queues) {
938 			/* Warn if queue is out of VF allocation range */
939 			if (i >= vf->vsi.num_queues) {
940 				device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
941 				    vf->vf_num, i);
942 				break;
943 			}
944 			/* Skip this queue if it hasn't been configured */
945 			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
946 				continue;
947 			/* Warn if this queue is already marked as enabled */
948 			if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false))
949 				device_printf(pf->dev, "VF %d: RX ring %d is already enabled!\n",
950 				    vf->vf_num, i);
951 			error = ixl_enable_rx_ring(pf, &vf->qtag, i);
952 			if (error)
953 				break;
954 			else
955 				ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, false);
956 		}
957 	}
958 
959 	if (error) {
960 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
961 		    I40E_ERR_TIMEOUT);
962 		return;
963 	}
964 
965 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES);
966 }
967 
968 static void
969 ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
970     void *msg, uint16_t msg_size)
971 {
972 	struct i40e_virtchnl_queue_select *select;
973 	int error = 0;
974 
975 	if (msg_size != sizeof(*select)) {
976 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
977 		    I40E_ERR_PARAM);
978 		return;
979 	}
980 
981 	select = msg;
982 	if (select->vsi_id != vf->vsi.vsi_num ||
983 	    select->rx_queues == 0 || select->tx_queues == 0) {
984 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
985 		    I40E_ERR_PARAM);
986 		return;
987 	}
988 
989 	/* Disable TX rings selected by the VF */
990 	for (int i = 0; i < 32; i++) {
991 		if ((1 << i) & select->tx_queues) {
992 			/* Warn if queue is out of VF allocation range */
993 			if (i >= vf->vsi.num_queues) {
994 				device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
995 				    vf->vf_num, i);
996 				break;
997 			}
998 			/* Skip this queue if it hasn't been configured */
999 			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
1000 				continue;
1001 			/* Warn if this queue is already marked as disabled */
1002 			if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true)) {
1003 				device_printf(pf->dev, "VF %d: TX ring %d is already disabled!\n",
1004 				    vf->vf_num, i);
1005 				continue;
1006 			}
1007 			error = ixl_disable_tx_ring(pf, &vf->qtag, i);
1008 			if (error)
1009 				break;
1010 			else
1011 				ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, true);
1012 		}
1013 	}
1014 
1015 	/* Disable RX rings selected by the VF */
1016 	for (int i = 0; i < 32; i++) {
1017 		if ((1 << i) & select->rx_queues) {
1018 			/* Warn if queue is out of VF allocation range */
1019 			if (i >= vf->vsi.num_queues) {
1020 				device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
1021 				    vf->vf_num, i);
1022 				break;
1023 			}
1024 			/* Skip this queue if it hasn't been configured */
1025 			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
1026 				continue;
1027 			/* Warn if this queue is already marked as disabled */
1028 			if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false)) {
1029 				device_printf(pf->dev, "VF %d: RX ring %d is already disabled!\n",
1030 				    vf->vf_num, i);
1031 				continue;
1032 			}
1033 			error = ixl_disable_rx_ring(pf, &vf->qtag, i);
1034 			if (error)
1035 				break;
1036 			else
1037 				ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, false);
1038 		}
1039 	}
1040 
1041 	if (error) {
1042 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
1043 		    I40E_ERR_TIMEOUT);
1044 		return;
1045 	}
1046 
1047 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES);
1048 }
1049 
1050 static bool
1051 ixl_zero_mac(const uint8_t *addr)
1052 {
1053 	uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
1054 
1055 	return (cmp_etheraddr(addr, zero));
1056 }
1057 
1058 static bool
1059 ixl_bcast_mac(const uint8_t *addr)
1060 {
1061 
1062 	return (cmp_etheraddr(addr, ixl_bcast_addr));
1063 }
1064 
1065 static int
1066 ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
1067 {
1068 
1069 	if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
1070 		return (EINVAL);
1071 
1072 	/*
1073 	 * If the VF is not allowed to change its MAC address, don't let it
1074 	 * set a MAC filter for an address that is not a multicast address and
1075 	 * is not its assigned MAC.
1076 	 */
1077 	if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
1078 	    !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac)))
1079 		return (EPERM);
1080 
1081 	return (0);
1082 }
1083 
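/*
 * Handle an ADD_ETHER_ADDRESS request: validate each address and install the
 * corresponding MAC filters on the VF's VSI.
 */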
1084 static void
1085 ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1086     uint16_t msg_size)
1087 {
1088 	struct i40e_virtchnl_ether_addr_list *addr_list;
1089 	struct i40e_virtchnl_ether_addr *addr;
1090 	struct ixl_vsi *vsi;
1091 	int i;
1092 	size_t expected_size;
1093 
1094 	vsi = &vf->vsi;
1095 
1096 	if (msg_size < sizeof(*addr_list)) {
1097 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
1098 		    I40E_ERR_PARAM);
1099 		return;
1100 	}
1101 
1102 	addr_list = msg;
1103 	expected_size = sizeof(*addr_list) +
1104 	    addr_list->num_elements * sizeof(*addr);
1105 
1106 	if (addr_list->num_elements == 0 ||
1107 	    addr_list->vsi_id != vsi->vsi_num ||
1108 	    msg_size != expected_size) {
1109 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
1110 		    I40E_ERR_PARAM);
1111 		return;
1112 	}
1113 
1114 	for (i = 0; i < addr_list->num_elements; i++) {
1115 		if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
1116 			i40e_send_vf_nack(pf, vf,
1117 			    I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
1118 			return;
1119 		}
1120 	}
1121 
1122 	for (i = 0; i < addr_list->num_elements; i++) {
1123 		addr = &addr_list->list[i];
1124 		ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
1125 	}
1126 
1127 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS);
1128 }
1129 
1130 static void
1131 ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1132     uint16_t msg_size)
1133 {
1134 	struct i40e_virtchnl_ether_addr_list *addr_list;
1135 	struct i40e_virtchnl_ether_addr *addr;
1136 	size_t expected_size;
1137 	int i;
1138 
1139 	if (msg_size < sizeof(*addr_list)) {
1140 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
1141 		    I40E_ERR_PARAM);
1142 		return;
1143 	}
1144 
1145 	addr_list = msg;
1146 	expected_size = sizeof(*addr_list) +
1147 	    addr_list->num_elements * sizeof(*addr);
1148 
1149 	if (addr_list->num_elements == 0 ||
1150 	    addr_list->vsi_id != vf->vsi.vsi_num ||
1151 	    msg_size != expected_size) {
1152 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
1153 		    I40E_ERR_PARAM);
1154 		return;
1155 	}
1156 
1157 	for (i = 0; i < addr_list->num_elements; i++) {
1158 		addr = &addr_list->list[i];
1159 		if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
1160 			i40e_send_vf_nack(pf, vf,
1161 			    I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, I40E_ERR_PARAM);
1162 			return;
1163 		}
1164 	}
1165 
1166 	for (i = 0; i < addr_list->num_elements; i++) {
1167 		addr = &addr_list->list[i];
1168 		ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
1169 	}
1170 
1171 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS);
1172 }
1173 
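/*
 * Enable VLAN tag stripping on the VF's VSI.
 */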
1174 static enum i40e_status_code
1175 ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
1176 {
1177 	struct i40e_vsi_context vsi_ctx;
1178 
1179 	vsi_ctx.seid = vf->vsi.seid;
1180 
1181 	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
1182 	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
1183 	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1184 	    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
1185 	return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
1186 }
1187 
1188 static void
1189 ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1190     uint16_t msg_size)
1191 {
1192 	struct i40e_virtchnl_vlan_filter_list *filter_list;
1193 	enum i40e_status_code code;
1194 	size_t expected_size;
1195 	int i;
1196 
1197 	if (msg_size < sizeof(*filter_list)) {
1198 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
1199 		    I40E_ERR_PARAM);
1200 		return;
1201 	}
1202 
1203 	filter_list = msg;
1204 	expected_size = sizeof(*filter_list) +
1205 	    filter_list->num_elements * sizeof(uint16_t);
1206 	if (filter_list->num_elements == 0 ||
1207 	    filter_list->vsi_id != vf->vsi.vsi_num ||
1208 	    msg_size != expected_size) {
1209 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
1210 		    I40E_ERR_PARAM);
1211 		return;
1212 	}
1213 
1214 	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
1215 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
1216 		    I40E_ERR_PARAM);
1217 		return;
1218 	}
1219 
1220 	for (i = 0; i < filter_list->num_elements; i++) {
1221 		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
1222 			i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
1223 			    I40E_ERR_PARAM);
1224 			return;
1225 		}
1226 	}
1227 
1228 	code = ixl_vf_enable_vlan_strip(pf, vf);
1229 	if (code != I40E_SUCCESS) {
1230 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
1231 		    I40E_ERR_PARAM);
1232 	}
1233 
1234 	for (i = 0; i < filter_list->num_elements; i++)
1235 		ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
1236 
1237 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN);
1238 }
1239 
1240 static void
1241 ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1242     uint16_t msg_size)
1243 {
1244 	struct i40e_virtchnl_vlan_filter_list *filter_list;
1245 	int i;
1246 	size_t expected_size;
1247 
1248 	if (msg_size < sizeof(*filter_list)) {
1249 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
1250 		    I40E_ERR_PARAM);
1251 		return;
1252 	}
1253 
1254 	filter_list = msg;
1255 	expected_size = sizeof(*filter_list) +
1256 	    filter_list->num_elements * sizeof(uint16_t);
1257 	if (filter_list->num_elements == 0 ||
1258 	    filter_list->vsi_id != vf->vsi.vsi_num ||
1259 	    msg_size != expected_size) {
1260 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
1261 		    I40E_ERR_PARAM);
1262 		return;
1263 	}
1264 
1265 	for (i = 0; i < filter_list->num_elements; i++) {
1266 		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
1267 			i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
1268 			    I40E_ERR_PARAM);
1269 			return;
1270 		}
1271 	}
1272 
1273 	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
1274 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
1275 		    I40E_ERR_PARAM);
1276 		return;
1277 	}
1278 
1279 	for (i = 0; i < filter_list->num_elements; i++)
1280 		ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
1281 
1282 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN);
1283 }
1284 
1285 static void
1286 ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
1287     void *msg, uint16_t msg_size)
1288 {
1289 	struct i40e_virtchnl_promisc_info *info;
1290 	enum i40e_status_code code;
1291 
1292 	if (msg_size != sizeof(*info)) {
1293 		i40e_send_vf_nack(pf, vf,
1294 		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
1295 		return;
1296 	}
1297 
1298 	if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
1299 		i40e_send_vf_nack(pf, vf,
1300 		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
1301 		return;
1302 	}
1303 
1304 	info = msg;
1305 	if (info->vsi_id != vf->vsi.vsi_num) {
1306 		i40e_send_vf_nack(pf, vf,
1307 		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
1308 		return;
1309 	}
1310 
1311 	code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
1312 	    info->flags & I40E_FLAG_VF_UNICAST_PROMISC, NULL, TRUE);
1313 	if (code != I40E_SUCCESS) {
1314 		i40e_send_vf_nack(pf, vf,
1315 		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
1316 		return;
1317 	}
1318 
1319 	code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
1320 	    info->flags & I40E_FLAG_VF_MULTICAST_PROMISC, NULL);
1321 	if (code != I40E_SUCCESS) {
1322 		i40e_send_vf_nack(pf, vf,
1323 		    I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
1324 		return;
1325 	}
1326 
1327 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
1328 }
1329 
1330 static void
1331 ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1332     uint16_t msg_size)
1333 {
1334 	struct i40e_virtchnl_queue_select *queue;
1335 
1336 	if (msg_size != sizeof(*queue)) {
1337 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
1338 		    I40E_ERR_PARAM);
1339 		return;
1340 	}
1341 
1342 	queue = msg;
1343 	if (queue->vsi_id != vf->vsi.vsi_num) {
1344 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
1345 		    I40E_ERR_PARAM);
1346 		return;
1347 	}
1348 
1349 	ixl_update_eth_stats(&vf->vsi);
1350 
1351 	ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
1352 	    I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
1353 }
1354 
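/*
 * Handle a CONFIG_RSS_KEY request: program the VF's RSS hash key, using the
 * admin queue on X722 parts and the VFQF_HKEY registers otherwise.
 */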
1355 static void
1356 ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1357     uint16_t msg_size)
1358 {
1359 	struct i40e_hw *hw;
1360 	struct i40e_virtchnl_rss_key *key;
1361 	struct i40e_aqc_get_set_rss_key_data key_data;
1362 	enum i40e_status_code status;
1363 
1364 	hw = &pf->hw;
1365 
1366 	if (msg_size < sizeof(*key)) {
1367 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
1368 		    I40E_ERR_PARAM);
1369 		return;
1370 	}
1371 
1372 	key = msg;
1373 
1374 	if (key->key_len > 52) {
1375 		device_printf(pf->dev, "VF %d: Key size in msg (%d) is greater than max key size (%d)\n",
1376 		    vf->vf_num, key->key_len, 52);
1377 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
1378 		    I40E_ERR_PARAM);
1379 		return;
1380 	}
1381 
1382 	if (key->vsi_id != vf->vsi.vsi_num) {
1383 		device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
1384 		    vf->vf_num, key->vsi_id, vf->vsi.vsi_num);
1385 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
1386 		    I40E_ERR_PARAM);
1387 		return;
1388 	}
1389 
1390 	/* Fill out hash using MAC-dependent method */
1391 	if (hw->mac.type == I40E_MAC_X722) {
1392 		bzero(&key_data, sizeof(key_data));
1393 		if (key->key_len <= 40)
1394 			bcopy(key->key, key_data.standard_rss_key, key->key_len);
1395 		else {
1396 			bcopy(key->key, key_data.standard_rss_key, 40);
1397 			bcopy(&key->key[40], key_data.extended_hash_key, key->key_len - 40);
1398 		}
1399 		status = i40e_aq_set_rss_key(hw, vf->vsi.vsi_num, &key_data);
1400 		if (status) {
1401 			device_printf(pf->dev, "i40e_aq_set_rss_key status %s, error %s\n",
1402 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
1403 			i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
1404 			    I40E_ERR_ADMIN_QUEUE_ERROR);
1405 			return;
1406 		}
1407 	} else {
1408 		for (int i = 0; i < (key->key_len / 4); i++)
1409 			i40e_write_rx_ctl(hw, I40E_VFQF_HKEY1(i, IXL_GLOBAL_VF_NUM(hw, vf)), ((u32 *)key->key)[i]);
1410 	}
1411 
1412 	DDPRINTF(pf->dev, "VF %d: Programmed key starting with 0x%x ok!",
1413 	    vf->vf_num, key->key[0]);
1414 
1415 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY);
1416 }
1417 
1418 static void
1419 ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1420     uint16_t msg_size)
1421 {
1422 	struct i40e_hw *hw;
1423 	struct i40e_virtchnl_rss_lut *lut;
1424 	enum i40e_status_code status;
1425 
1426 	hw = &pf->hw;
1427 
1428 	if (msg_size < sizeof(*lut)) {
1429 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
1430 		    I40E_ERR_PARAM);
1431 		return;
1432 	}
1433 
1434 	lut = msg;
1435 
1436 	if (lut->lut_entries > 64) {
1437 		device_printf(pf->dev, "VF %d: # of LUT entries in msg (%d) is greater than max (%d)\n",
1438 		    vf->vf_num, lut->lut_entries, 64);
1439 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
1440 		    I40E_ERR_PARAM);
1441 		return;
1442 	}
1443 
1444 	if (lut->vsi_id != vf->vsi.vsi_num) {
1445 		device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
1446 		    vf->vf_num, lut->vsi_id, vf->vsi.vsi_num);
1447 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
1448 		    I40E_ERR_PARAM);
1449 		return;
1450 	}
1451 
1452 	/* Fill out LUT using MAC-dependent method */
1453 	if (hw->mac.type == I40E_MAC_X722) {
1454 		status = i40e_aq_set_rss_lut(hw, vf->vsi.vsi_num, false, lut->lut, lut->lut_entries);
1455 		if (status) {
1456 			device_printf(pf->dev, "i40e_aq_set_rss_lut status %s, error %s\n",
1457 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
1458 			i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
1459 			    I40E_ERR_ADMIN_QUEUE_ERROR);
1460 			return;
1461 		}
1462 	} else {
1463 		for (int i = 0; i < (lut->lut_entries / 4); i++)
1464 			i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, IXL_GLOBAL_VF_NUM(hw, vf)), ((u32 *)lut->lut)[i]);
1465 	}
1466 
1467 	DDPRINTF(pf->dev, "VF %d: Programmed LUT starting with 0x%x and length %d ok!",
1468 	    vf->vf_num, lut->lut[0], lut->lut_entries);
1469 
1470 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT);
1471 }
1472 
1473 static void
1474 ixl_vf_set_rss_hena_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1475     uint16_t msg_size)
1476 {
1477 	struct i40e_hw *hw;
1478 	struct i40e_virtchnl_rss_hena *hena;
1479 
1480 	hw = &pf->hw;
1481 
1482 	if (msg_size < sizeof(*hena)) {
1483 		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_SET_RSS_HENA,
1484 		    I40E_ERR_PARAM);
1485 		return;
1486 	}
1487 
1488 	hena = msg;
1489 
1490 	/* Set HENA */
1491 	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, IXL_GLOBAL_VF_NUM(hw, vf)), (u32)hena->hena);
1492 	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, IXL_GLOBAL_VF_NUM(hw, vf)), (u32)(hena->hena >> 32));
1493 
1494 	DDPRINTF(pf->dev, "VF %d: Programmed HENA with 0x%016lx",
1495 	    vf->vf_num, hena->hena);
1496 
1497 	ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_SET_RSS_HENA);
1498 }
1499 
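/*
 * Dispatch a virtual channel message received on the PF admin queue to the
 * handler for its opcode.
 */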
1500 void
1501 ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
1502 {
1503 	struct ixl_vf *vf;
1504 	void *msg;
1505 	uint16_t vf_num, msg_size;
1506 	uint32_t opcode;
1507 
1508 	vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
1509 	opcode = le32toh(event->desc.cookie_high);
1510 
1511 	if (vf_num >= pf->num_vfs) {
1512 		device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
1513 		return;
1514 	}
1515 
1516 	vf = &pf->vfs[vf_num];
1517 	msg = event->msg_buf;
1518 	msg_size = event->msg_len;
1519 
1520 	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
1521 	    "Got msg %s(%d) from%sVF-%d of size %d\n",
1522 	    ixl_vc_opcode_str(opcode), opcode,
1523 	    (vf->vf_flags & VF_FLAG_ENABLED) ? " " : " disabled ",
1524 	    vf_num, msg_size);
1525 
1526 	/* This must be a stray msg from a previously destroyed VF. */
1527 	if (!(vf->vf_flags & VF_FLAG_ENABLED))
1528 		return;
1529 
1530 	switch (opcode) {
1531 	case I40E_VIRTCHNL_OP_VERSION:
1532 		ixl_vf_version_msg(pf, vf, msg, msg_size);
1533 		break;
1534 	case I40E_VIRTCHNL_OP_RESET_VF:
1535 		ixl_vf_reset_msg(pf, vf, msg, msg_size);
1536 		break;
1537 	case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
1538 		ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
1539 		break;
1540 	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
1541 		ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
1542 		break;
1543 	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
1544 		ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
1545 		break;
1546 	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
1547 		ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
1548 		break;
1549 	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
1550 		ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
1551 		break;
1552 	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
1553 		ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
1554 		break;
1555 	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
1556 		ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
1557 		break;
1558 	case I40E_VIRTCHNL_OP_ADD_VLAN:
1559 		ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
1560 		break;
1561 	case I40E_VIRTCHNL_OP_DEL_VLAN:
1562 		ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
1563 		break;
1564 	case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
1565 		ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
1566 		break;
1567 	case I40E_VIRTCHNL_OP_GET_STATS:
1568 		ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
1569 		break;
1570 	case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
1571 		ixl_vf_config_rss_key_msg(pf, vf, msg, msg_size);
1572 		break;
1573 	case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
1574 		ixl_vf_config_rss_lut_msg(pf, vf, msg, msg_size);
1575 		break;
1576 	case I40E_VIRTCHNL_OP_SET_RSS_HENA:
1577 		ixl_vf_set_rss_hena_msg(pf, vf, msg, msg_size);
1578 		break;
1579 
1580 	/* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
1581 	case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
1582 	case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
1583 	default:
1584 		i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
1585 		break;
1586 	}
1587 }
1588 
1589 /* Handle any VFs that have reset themselves via a Function Level Reset (FLR). */
1590 void
1591 ixl_handle_vflr(void *arg, int pending)
1592 {
1593 	struct ixl_pf *pf;
1594 	struct ixl_vf *vf;
1595 	struct i40e_hw *hw;
1596 	uint16_t global_vf_num;
1597 	uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
1598 	int i;
1599 
1600 	pf = arg;
1601 	hw = &pf->hw;
1602 
1603 	IXL_PF_LOCK(pf);
1604 	for (i = 0; i < pf->num_vfs; i++) {
1605 		global_vf_num = hw->func_caps.vf_base_id + i;
1606 
1607 		vf = &pf->vfs[i];
1608 		if (!(vf->vf_flags & VF_FLAG_ENABLED))
1609 			continue;
1610 
1611 		vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
1612 		vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
1613 		vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
1614 		if (vflrstat & vflrstat_mask) {
1615 			wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
1616 			    vflrstat_mask);
1617 
1618 			ixl_reinit_vf(pf, vf);
1619 		}
1620 	}
1621 
1622 	icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
1623 	icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
1624 	wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
1625 	ixl_flush(hw);
1626 
1627 	IXL_PF_UNLOCK(pf);
1628 }
1629 
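/*
 * Translate an admin queue error code into the closest errno(2) value.
 */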
1630 static int
1631 ixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
1632 {
1633 
1634 	switch (err) {
1635 	case I40E_AQ_RC_EPERM:
1636 		return (EPERM);
1637 	case I40E_AQ_RC_ENOENT:
1638 		return (ENOENT);
1639 	case I40E_AQ_RC_ESRCH:
1640 		return (ESRCH);
1641 	case I40E_AQ_RC_EINTR:
1642 		return (EINTR);
1643 	case I40E_AQ_RC_EIO:
1644 		return (EIO);
1645 	case I40E_AQ_RC_ENXIO:
1646 		return (ENXIO);
1647 	case I40E_AQ_RC_E2BIG:
1648 		return (E2BIG);
1649 	case I40E_AQ_RC_EAGAIN:
1650 		return (EAGAIN);
1651 	case I40E_AQ_RC_ENOMEM:
1652 		return (ENOMEM);
1653 	case I40E_AQ_RC_EACCES:
1654 		return (EACCES);
1655 	case I40E_AQ_RC_EFAULT:
1656 		return (EFAULT);
1657 	case I40E_AQ_RC_EBUSY:
1658 		return (EBUSY);
1659 	case I40E_AQ_RC_EEXIST:
1660 		return (EEXIST);
1661 	case I40E_AQ_RC_EINVAL:
1662 		return (EINVAL);
1663 	case I40E_AQ_RC_ENOTTY:
1664 		return (ENOTTY);
1665 	case I40E_AQ_RC_ENOSPC:
1666 		return (ENOSPC);
1667 	case I40E_AQ_RC_ENOSYS:
1668 		return (ENOSYS);
1669 	case I40E_AQ_RC_ERANGE:
1670 		return (ERANGE);
1671 	case I40E_AQ_RC_EFLUSHED:
1672 		return (EINVAL);	/* No exact equivalent in errno.h */
1673 	case I40E_AQ_RC_BAD_ADDR:
1674 		return (EFAULT);
1675 	case I40E_AQ_RC_EMODE:
1676 		return (EPERM);
1677 	case I40E_AQ_RC_EFBIG:
1678 		return (EFBIG);
1679 	default:
1680 		return (EINVAL);
1681 	}
1682 }
1683 
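/*
 * SR-IOV initialization handler: allocate per-VF state and create the VEB
 * that connects the VF VSIs to the PF VSI.
 */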
1684 int
1685 ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
1686 {
1687 	struct ixl_pf *pf;
1688 	struct i40e_hw *hw;
1689 	struct ixl_vsi *pf_vsi;
1690 	enum i40e_status_code ret;
1691 	int i, error;
1692 
1693 	pf = device_get_softc(dev);
1694 	hw = &pf->hw;
1695 	pf_vsi = &pf->vsi;
1696 
1697 	IXL_PF_LOCK(pf);
1698 	pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
1699 	    M_ZERO);
1700 
1701 	if (pf->vfs == NULL) {
1702 		error = ENOMEM;
1703 		goto fail;
1704 	}
1705 
1706 	for (i = 0; i < num_vfs; i++)
1707 		sysctl_ctx_init(&pf->vfs[i].ctx);
1708 
1709 	ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
1710 	    1, FALSE, &pf->veb_seid, FALSE, NULL);
1711 	if (ret != I40E_SUCCESS) {
1712 		error = ixl_adminq_err_to_errno(hw->aq.asq_last_status);
1713 		device_printf(dev, "add_veb failed; code=%d error=%d", ret,
1714 		    error);
1715 		goto fail;
1716 	}
1717 
1718 	pf->num_vfs = num_vfs;
1719 	IXL_PF_UNLOCK(pf);
1720 	return (0);
1721 
1722 fail:
1723 	free(pf->vfs, M_IXL);
1724 	pf->vfs = NULL;
1725 	IXL_PF_UNLOCK(pf);
1726 	return (error);
1727 }
1728 
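/*
 * SR-IOV teardown handler: delete the VF VSIs and the VEB, release the VF
 * queue allocations, and free per-VF state.
 */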
1729 void
1730 ixl_iov_uninit(device_t dev)
1731 {
1732 	struct ixl_pf *pf;
1733 	struct i40e_hw *hw;
1734 	struct ixl_vsi *vsi;
1735 	struct ifnet *ifp;
1736 	struct ixl_vf *vfs;
1737 	int i, num_vfs;
1738 
1739 	pf = device_get_softc(dev);
1740 	hw = &pf->hw;
1741 	vsi = &pf->vsi;
1742 	ifp = vsi->ifp;
1743 
1744 	IXL_PF_LOCK(pf);
1745 	for (i = 0; i < pf->num_vfs; i++) {
1746 		if (pf->vfs[i].vsi.seid != 0)
1747 			i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
1748 		ixl_pf_qmgr_release(&pf->qmgr, &pf->vfs[i].qtag);
1749 		DDPRINTF(dev, "VF %d: %d released\n",
1750 		    i, pf->vfs[i].qtag.num_allocated);
1751 		DDPRINTF(dev, "Unallocated total: %d\n", ixl_pf_qmgr_get_num_free(&pf->qmgr));
1752 	}
1753 
1754 	if (pf->veb_seid != 0) {
1755 		i40e_aq_delete_element(hw, pf->veb_seid, NULL);
1756 		pf->veb_seid = 0;
1757 	}
1758 
1759 	vfs = pf->vfs;
1760 	num_vfs = pf->num_vfs;
1761 
1762 	pf->vfs = NULL;
1763 	pf->num_vfs = 0;
1764 	IXL_PF_UNLOCK(pf);
1765 
1766 	/* Do this after the unlock as sysctl_ctx_free might sleep. */
1767 	for (i = 0; i < num_vfs; i++)
1768 		sysctl_ctx_free(&vfs[i].ctx);
1769 	free(vfs, M_IXL);
1770 }
1771 
1772 static int
1773 ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues)
1774 {
1775 	device_t dev = pf->dev;
1776 	int error;
1777 
1778 	/* Validate, and clamp value if invalid */
1779 	if (num_queues < 1 || num_queues > 16)
1780 		device_printf(dev, "Invalid num-queues (%d) for VF %d\n",
1781 		    num_queues, vf->vf_num);
1782 	if (num_queues < 1) {
1783 		device_printf(dev, "Setting VF %d num-queues to 1\n", vf->vf_num);
1784 		num_queues = 1;
1785 	} else if (num_queues > 16) {
1786 		device_printf(dev, "Setting VF %d num-queues to 16\n", vf->vf_num);
1787 		num_queues = 16;
1788 	}
1789 	error = ixl_pf_qmgr_alloc_scattered(&pf->qmgr, num_queues, &vf->qtag);
1790 	if (error) {
1791 		device_printf(dev, "Error allocating %d queues for VF %d's VSI\n",
1792 		    num_queues, vf->vf_num);
1793 		return (ENOSPC);
1794 	}
1795 
1796 	DDPRINTF(dev, "VF %d: %d allocated, %d active",
1797 	    vf->vf_num, vf->qtag.num_allocated, vf->qtag.num_active);
1798 	DDPRINTF(dev, "Unallocated total: %d", ixl_pf_qmgr_get_num_free(&pf->qmgr));
1799 
1800 	return (0);
1801 }
1802 
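/*
 * SR-IOV add-VF handler: reserve queues for the VF, create its VSI, and apply
 * the configuration parameters supplied by the host administrator.
 */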
1803 int
1804 ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
1805 {
1806 	char sysctl_name[QUEUE_NAME_LEN];
1807 	struct ixl_pf *pf;
1808 	struct ixl_vf *vf;
1809 	const void *mac;
1810 	size_t size;
1811 	int error;
1812 	int vf_num_queues;
1813 
1814 	pf = device_get_softc(dev);
1815 	vf = &pf->vfs[vfnum];
1816 
1817 	IXL_PF_LOCK(pf);
1818 	vf->vf_num = vfnum;
1819 
1820 	vf->vsi.back = pf;
1821 	vf->vf_flags = VF_FLAG_ENABLED;
1822 	SLIST_INIT(&vf->vsi.ftl);
1823 
1824 	/* Reserve queue allocation from PF */
1825 	vf_num_queues = nvlist_get_number(params, "num-queues");
1826 	error = ixl_vf_reserve_queues(pf, vf, vf_num_queues);
1827 	if (error != 0)
1828 		goto out;
1829 
1830 	error = ixl_vf_setup_vsi(pf, vf);
1831 	if (error != 0)
1832 		goto out;
1833 
1834 	if (nvlist_exists_binary(params, "mac-addr")) {
1835 		mac = nvlist_get_binary(params, "mac-addr", &size);
1836 		bcopy(mac, vf->mac, ETHER_ADDR_LEN);
1837 
1838 		if (nvlist_get_bool(params, "allow-set-mac"))
1839 			vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
1840 	} else
1841 		/*
1842 		 * If the administrator has not specified a MAC address then
1843 		 * we must allow the VF to choose one.
1844 		 */
1845 		vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
1846 
1847 	if (nvlist_get_bool(params, "mac-anti-spoof"))
1848 		vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
1849 
1850 	if (nvlist_get_bool(params, "allow-promisc"))
1851 		vf->vf_flags |= VF_FLAG_PROMISC_CAP;
1852 
1853 	vf->vf_flags |= VF_FLAG_VLAN_CAP;
1854 
1855 	ixl_reset_vf(pf, vf);
1856 out:
1857 	IXL_PF_UNLOCK(pf);
1858 	if (error == 0) {
1859 		snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
1860 		ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);
1861 	}
1862 
1863 	return (error);
1864 }
1865 
1866