1 /******************************************************************************
2 
3   Copyright (c) 2013-2018, Intel Corporation
4   All rights reserved.
5 
6   Redistribution and use in source and binary forms, with or without
7   modification, are permitted provided that the following conditions are met:
8 
9    1. Redistributions of source code must retain the above copyright notice,
10       this list of conditions and the following disclaimer.
11 
12    2. Redistributions in binary form must reproduce the above copyright
13       notice, this list of conditions and the following disclaimer in the
14       documentation and/or other materials provided with the distribution.
15 
16    3. Neither the name of the Intel Corporation nor the names of its
17       contributors may be used to endorse or promote products derived from
18       this software without specific prior written permission.
19 
20   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21   AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22   IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23   ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30   POSSIBILITY OF SUCH DAMAGE.
31 
32 ******************************************************************************/
33 /*$FreeBSD$*/
34 
35 #include "ixl_pf_iov.h"
36 
37 /* Private functions */
38 static void	ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum, uint32_t val);
39 static void	ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg);
40 static void	ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg);
41 
42 static bool	ixl_zero_mac(const uint8_t *addr);
43 static bool	ixl_bcast_mac(const uint8_t *addr);
44 
45 static int	ixl_vc_opcode_level(uint16_t opcode);
46 
47 static int	ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr);
48 
49 static int	ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
50 static int	ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
51 static void	ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf);
52 static void	ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi);
53 static void	ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf);
54 static int	ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf);
55 static void	ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
56 static void	ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
57 static void	ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, void *msg, uint16_t len);
58 static void	ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op);
59 static void	ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, const char *file, int line);
60 static void	ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
61 static void	ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
62 static void	ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
63 static int	ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_txq_info *info);
64 static int	ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct virtchnl_rxq_info *info);
65 static void	ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
66 static void	ixl_vf_set_qctl(struct ixl_pf *pf, const struct virtchnl_vector_map *vector, enum i40e_queue_type cur_type, uint16_t cur_queue,
67     enum i40e_queue_type *last_type, uint16_t *last_queue);
68 static void	ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, const struct virtchnl_vector_map *vector);
69 static void	ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
70 static void	ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
71 static void	ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
72 static void	ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
73 static void	ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
74 static enum i40e_status_code	ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf);
75 static void	ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
76 static void	ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
77 static void	ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
78 static void	ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
79 static int	ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues);
80 
81 static int	ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
82 
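/*
 * Entry point for SR-IOV setup at PF attach time. In this revision the
 * PCI IOV schema registration below is compiled out (#if 0) and the
 * function simply returns; the disabled block is kept for reference.
 */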
83 void
84 ixl_initialize_sriov(struct ixl_pf *pf)
85 {
86 	return;
87 #if 0
88 	device_t dev = pf->dev;
89 	struct i40e_hw *hw = &pf->hw;
90 	nvlist_t	*pf_schema, *vf_schema;
91 	int		iov_error;
92 
93 	pf_schema = pci_iov_schema_alloc_node();
94 	vf_schema = pci_iov_schema_alloc_node();
95 	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
96 	pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
97 	    IOV_SCHEMA_HASDEFAULT, TRUE);
98 	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
99 	    IOV_SCHEMA_HASDEFAULT, FALSE);
100 	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
101 	    IOV_SCHEMA_HASDEFAULT, FALSE);
102 	pci_iov_schema_add_uint16(vf_schema, "num-queues",
103 	    IOV_SCHEMA_HASDEFAULT,
104 	    max(1, hw->func_caps.num_msix_vectors_vf - 1) % IXLV_MAX_QUEUES);
105 
106 	iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
107 	if (iov_error != 0) {
108 		device_printf(dev,
109 		    "Failed to initialize SR-IOV (error=%d)\n",
110 		    iov_error);
111 	} else
112 		device_printf(dev, "SR-IOV ready\n");
113 
114 	pf->vc_debug_lvl = 1;
115 #endif
116 }
117 
118 
119 /*
120  * Allocate the VSI for a VF.
121  */
122 static int
123 ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
124 {
125 	device_t dev;
126 	struct i40e_hw *hw;
127 	struct ixl_vsi *vsi;
128 	struct i40e_vsi_context vsi_ctx;
129 	int i;
130 	enum i40e_status_code code;
131 
132 	hw = &pf->hw;
133 	vsi = &pf->vsi;
134 	dev = pf->dev;
135 
136 	vsi_ctx.pf_num = hw->pf_id;
137 	vsi_ctx.uplink_seid = pf->veb_seid;
138 	vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
139 	vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
140 	vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;
141 
142 	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
143 
144 	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
145 	vsi_ctx.info.switch_id = htole16(0);
146 
147 	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
148 	vsi_ctx.info.sec_flags = 0;
149 	if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
150 		vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
151 
152 	vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
153 	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
154 	    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
155 
156 	vsi_ctx.info.valid_sections |=
157 	    htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
158 	vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
159 
160 	/* ERJ: Only scattered allocation is supported for VFs right now */
161 	for (i = 0; i < vf->qtag.num_active; i++)
162 		vsi_ctx.info.queue_mapping[i] = vf->qtag.qidx[i];
163 	for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
164 		vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);
165 
166 	vsi_ctx.info.tc_mapping[0] = htole16(
167 	    (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
168 	    ((fls(vf->qtag.num_allocated) - 1) << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
169 
170 	code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
171 	if (code != I40E_SUCCESS)
172 		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
173 	vf->vsi.seid = vsi_ctx.seid;
174 	vf->vsi.vsi_num = vsi_ctx.vsi_number;
175 	// TODO: How to deal with num tx queues / num rx queues split?
176 	// I don't think just assigning this variable is going to work
177 	vf->vsi.num_rx_queues = vf->qtag.num_active;
178 	vf->vsi.num_tx_queues = vf->qtag.num_active;
179 
180 	code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
181 	if (code != I40E_SUCCESS)
182 		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
183 
184 	code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
185 	if (code != I40E_SUCCESS) {
186 		device_printf(dev, "Failed to disable BW limit: %d\n",
187 		    ixl_adminq_err_to_errno(hw->aq.asq_last_status));
188 		return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
189 	}
190 
191 	memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
192 	return (0);
193 }
194 
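/*
 * Create the VF's VSI via ixl_vf_alloc_vsi(), clear the software filter
 * counters, and reprogram any existing filters onto the new VSI.
 */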
195 static int
196 ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
197 {
198 	struct i40e_hw *hw;
199 	int error;
200 
201 	hw = &pf->hw;
202 
203 	error = ixl_vf_alloc_vsi(pf, vf);
204 	if (error != 0)
205 		return (error);
206 
207 	vf->vsi.hw_filters_add = 0;
208 	vf->vsi.hw_filters_del = 0;
209 	// ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
210 	ixl_reconfigure_filters(&vf->vsi);
211 
212 	return (0);
213 }
214 
215 static void
216 ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
217     uint32_t val)
218 {
219 	uint32_t qtable;
220 	int index, shift;
221 
222 	/*
223 	 * Two queues are mapped in a single register, so we have to do some
224 	 * gymnastics to convert the queue number into a register index and
225 	 * shift.
226 	 */
227 	index = qnum / 2;
228 	shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;
229 
230 	qtable = i40e_read_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
231 	qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
232 	qtable |= val << shift;
233 	i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
234 }
235 
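/*
 * Program the hardware queue mappings for a VF: enable the VSI queue
 * table, enable LAN traffic for the VF, translate each VF-relative queue
 * index into the PF's absolute queue space, and mark the remaining VSI
 * queue slots as unused.
 */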
236 static void
237 ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
238 {
239 	struct i40e_hw *hw;
240 	uint32_t qtable;
241 	int i;
242 
243 	hw = &pf->hw;
244 
245 	/*
246 	 * Contiguous mappings aren't actually supported by the hardware,
247 	 * so we have to use non-contiguous mappings.
248 	 */
249 	i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
250 	     I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
251 
252 	/* Enable LAN traffic on this VF */
253 	wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
254 	    I40E_VPLAN_MAPENA_TXRX_ENA_MASK);
255 
256 	/* Program index of each VF queue into PF queue space
257 	 * (This is only needed if QTABLE is enabled) */
258 	for (i = 0; i < vf->vsi.num_tx_queues; i++) {
259 		qtable = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i) <<
260 		    I40E_VPLAN_QTABLE_QINDEX_SHIFT;
261 
262 		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
263 	}
264 	for (; i < IXL_MAX_VSI_QUEUES; i++)
265 		wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num),
266 		    I40E_VPLAN_QTABLE_QINDEX_MASK);
267 
268 	/* Map queues allocated to VF to its VSI;
269 	 * This mapping matches the VF-wide mapping since the VF
270 	 * is only given a single VSI */
271 	for (i = 0; i < vf->vsi.num_tx_queues; i++)
272 		ixl_vf_map_vsi_queue(hw, vf, i,
273 		    ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i));
274 
275 	/* Set rest of VSI queues as unused. */
276 	for (; i < IXL_MAX_VSI_QUEUES; i++)
277 		ixl_vf_map_vsi_queue(hw, vf, i,
278 		    I40E_VSILAN_QTABLE_QINDEX_0_MASK);
279 
280 	ixl_flush(hw);
281 }
282 
283 static void
284 ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
285 {
286 	struct i40e_hw *hw;
287 
288 	hw = &pf->hw;
289 
290 	if (vsi->seid == 0)
291 		return;
292 
293 	i40e_aq_delete_element(hw, vsi->seid, NULL);
294 }
295 
296 static void
297 ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
298 {
299 
300 	wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
301 	ixl_flush(hw);
302 }
303 
304 static void
305 ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
306 {
307 
308 	wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
309 	    I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
310 	ixl_flush(hw);
311 }
312 
313 static void
314 ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
315 {
316 	struct i40e_hw *hw;
317 	uint32_t vfint_reg, vpint_reg;
318 	int i;
319 
320 	hw = &pf->hw;
321 
322 	ixl_vf_vsi_release(pf, &vf->vsi);
323 
324 	/* Index 0 has a special register. */
325 	ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));
326 
327 	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
328 		vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i, vf->vf_num);
329 		ixl_vf_disable_queue_intr(hw, vfint_reg);
330 	}
331 
332 	/* Index 0 has a special register. */
333 	ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));
334 
335 	for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
336 		vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
337 		ixl_vf_unregister_intr(hw, vpint_reg);
338 	}
339 
340 	vf->vsi.num_tx_queues = 0;
341 	vf->vsi.num_rx_queues = 0;
342 }
343 
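/*
 * Poll the VF's PCIe status through the PF_PCI_CIAA/CIAD indirect
 * interface until no transactions are pending, or give up after
 * IXL_VF_RESET_TIMEOUT iterations and return ETIMEDOUT.
 */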
344 static int
345 ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
346 {
347 	struct i40e_hw *hw;
348 	int i;
349 	uint16_t global_vf_num;
350 	uint32_t ciad;
351 
352 	hw = &pf->hw;
353 	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
354 
355 	wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
356 	     (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
357 	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
358 		ciad = rd32(hw, I40E_PF_PCI_CIAD);
359 		if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
360 			return (0);
361 		DELAY(1);
362 	}
363 
364 	return (ETIMEDOUT);
365 }
366 
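/*
 * Trigger a software reset of the VF by asserting VFSWR in
 * VPGEN_VFRTRIG, then rebuild the VF's state via ixl_reinit_vf().
 */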
367 static void
368 ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
369 {
370 	struct i40e_hw *hw;
371 	uint32_t vfrtrig;
372 
373 	hw = &pf->hw;
374 
375 	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
376 	vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
377 	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
378 	ixl_flush(hw);
379 
380 	ixl_reinit_vf(pf, vf);
381 }
382 
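/*
 * Complete a VF reset: wait for pending PCIe activity to stop and for
 * the hardware to report reset completion (VFRD), deassert VFSWR, tear
 * down and re-create the VF's VSI and queue mappings, and finally mark
 * the VF as active in VFGEN_RSTAT1.
 */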
383 static void
384 ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
385 {
386 	struct i40e_hw *hw;
387 	uint32_t vfrstat, vfrtrig;
388 	int i, error;
389 
390 	hw = &pf->hw;
391 
392 	error = ixl_flush_pcie(pf, vf);
393 	if (error != 0)
394 		device_printf(pf->dev,
395 		    "Timed out waiting for PCIe activity to stop on VF-%d\n",
396 		    vf->vf_num);
397 
398 	for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
399 		DELAY(10);
400 
401 		vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
402 		if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
403 			break;
404 	}
405 
406 	if (i == IXL_VF_RESET_TIMEOUT)
407 		device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);
408 
409 	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_COMPLETED);
410 
411 	vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
412 	vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
413 	wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
414 
415 	if (vf->vsi.seid != 0)
416 		ixl_disable_rings(&vf->vsi);
417 
418 	ixl_vf_release_resources(pf, vf);
419 	ixl_vf_setup_vsi(pf, vf);
420 	ixl_vf_map_queues(pf, vf);
421 
422 	wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), VIRTCHNL_VFR_VFACTIVE);
423 	ixl_flush(hw);
424 }
425 
426 static int
427 ixl_vc_opcode_level(uint16_t opcode)
428 {
429 	switch (opcode) {
430 	case VIRTCHNL_OP_GET_STATS:
431 		return (10);
432 	default:
433 		return (5);
434 	}
435 }
436 
437 static void
438 ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
439     enum i40e_status_code status, void *msg, uint16_t len)
440 {
441 	struct i40e_hw *hw;
442 	int global_vf_id;
443 
444 	hw = &pf->hw;
445 	global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;
446 
447 	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
448 	    "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
449 	    ixl_vc_opcode_str(op), op, status, vf->vf_num);
450 
451 	i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
452 }
453 
454 static void
455 ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
456 {
457 
458 	ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
459 }
460 
461 static void
462 ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
463     enum i40e_status_code status, const char *file, int line)
464 {
465 
466 	I40E_VC_DEBUG(pf, 1,
467 	    "Sending NACK (op=%s[%d], err=%s[%d]) to VF-%d from %s:%d\n",
468 	    ixl_vc_opcode_str(op), op, i40e_stat_str(&pf->hw, status),
469 	    status, vf->vf_num, file, line);
470 	ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
471 }
472 
473 static void
474 ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
475     uint16_t msg_size)
476 {
477 	struct virtchnl_version_info reply;
478 
479 	if (msg_size != sizeof(struct virtchnl_version_info)) {
480 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_VERSION,
481 		    I40E_ERR_PARAM);
482 		return;
483 	}
484 
485 	vf->version = ((struct virtchnl_version_info *)msg)->minor;
486 
487 	reply.major = VIRTCHNL_VERSION_MAJOR;
488 	reply.minor = VIRTCHNL_VERSION_MINOR;
489 	ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
490 	    sizeof(reply));
491 }
492 
493 static void
494 ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
495     uint16_t msg_size)
496 {
497 
498 	if (msg_size != 0) {
499 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_RESET_VF,
500 		    I40E_ERR_PARAM);
501 		return;
502 	}
503 
504 	ixl_reset_vf(pf, vf);
505 
506 	/* No response to a reset message. */
507 }
508 
509 static void
510 ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
511     uint16_t msg_size)
512 {
513 	struct virtchnl_vf_resource reply;
514 
515 	if ((vf->version == 0 && msg_size != 0) ||
516 	    (vf->version == 1 && msg_size != 4)) {
517 		device_printf(pf->dev, "Invalid GET_VF_RESOURCES message size,"
518 		    " for VF version %d.%d\n", VIRTCHNL_VERSION_MAJOR,
519 		    vf->version);
520 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_VF_RESOURCES,
521 		    I40E_ERR_PARAM);
522 		return;
523 	}
524 
525 	bzero(&reply, sizeof(reply));
526 
527 	if (vf->version == VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
528 		reply.vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2 |
529 					 VIRTCHNL_VF_OFFLOAD_RSS_REG |
530 					 VIRTCHNL_VF_OFFLOAD_VLAN;
531 	else
532 		/* Force VF RSS setup by PF in 1.1+ VFs */
533 		reply.vf_cap_flags = *(u32 *)msg & (
534 					 VIRTCHNL_VF_OFFLOAD_L2 |
535 					 VIRTCHNL_VF_OFFLOAD_RSS_PF |
536 					 VIRTCHNL_VF_OFFLOAD_VLAN);
537 
538 	reply.num_vsis = 1;
539 	reply.num_queue_pairs = vf->vsi.num_tx_queues;
540 	reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
541 	reply.rss_key_size = 52;
542 	reply.rss_lut_size = 64;
543 	reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
544 	reply.vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
545 	reply.vsi_res[0].num_queue_pairs = vf->vsi.num_tx_queues;
546 	memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
547 
548 	ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_VF_RESOURCES,
549 	    I40E_SUCCESS, &reply, sizeof(reply));
550 }
551 
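/*
 * Program the HMC TX queue context for one VF queue from the
 * VF-supplied parameters and associate the PF-absolute queue with the
 * VF in QTX_CTL. Returns EINVAL if the HMC context update fails.
 */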
552 static int
553 ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
554     struct virtchnl_txq_info *info)
555 {
556 	struct i40e_hw *hw;
557 	struct i40e_hmc_obj_txq txq;
558 	uint16_t global_queue_num, global_vf_num;
559 	enum i40e_status_code status;
560 	uint32_t qtx_ctl;
561 
562 	hw = &pf->hw;
563 	global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
564 	global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
565 	bzero(&txq, sizeof(txq));
566 
567 	DDPRINTF(pf->dev, "VF %d: PF TX queue %d / VF TX queue %d (Global VF %d)\n",
568 	    vf->vf_num, global_queue_num, info->queue_id, global_vf_num);
569 
570 	status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
571 	if (status != I40E_SUCCESS)
572 		return (EINVAL);
573 
574 	txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;
575 
576 	txq.head_wb_ena = info->headwb_enabled;
577 	txq.head_wb_addr = info->dma_headwb_addr;
578 	txq.qlen = info->ring_len;
579 	txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
580 	txq.rdylist_act = 0;
581 
582 	status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
583 	if (status != I40E_SUCCESS)
584 		return (EINVAL);
585 
586 	qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
587 	    (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
588 	    (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
589 	wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
590 	ixl_flush(hw);
591 
592 	ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, true);
593 
594 	return (0);
595 }
596 
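/*
 * Validate the VF-supplied RX queue parameters (buffer sizes, maximum
 * frame size, optional header split) and program the HMC RX queue
 * context for the corresponding PF-absolute queue.
 */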
597 static int
598 ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
599     struct virtchnl_rxq_info *info)
600 {
601 	struct i40e_hw *hw;
602 	struct i40e_hmc_obj_rxq rxq;
603 	uint16_t global_queue_num;
604 	enum i40e_status_code status;
605 
606 	hw = &pf->hw;
607 	global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
608 	bzero(&rxq, sizeof(rxq));
609 
610 	DDPRINTF(pf->dev, "VF %d: PF RX queue %d / VF RX queue %d\n",
611 	    vf->vf_num, global_queue_num, info->queue_id);
612 
613 	if (info->databuffer_size > IXL_VF_MAX_BUFFER)
614 		return (EINVAL);
615 
616 	if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
617 	    info->max_pkt_size < ETHER_MIN_LEN)
618 		return (EINVAL);
619 
620 	if (info->splithdr_enabled) {
621 		if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
622 			return (EINVAL);
623 
624 		rxq.hsplit_0 = info->rx_split_pos &
625 		    (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
626 		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
627 		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
628 		     I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
629 		rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
630 
631 		rxq.dtype = 2;
632 	}
633 
634 	status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
635 	if (status != I40E_SUCCESS)
636 		return (EINVAL);
637 
638 	rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
639 	rxq.qlen = info->ring_len;
640 
641 	rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
642 
643 	rxq.dsize = 1;
644 	rxq.crcstrip = 1;
645 	rxq.l2tsel = 1;
646 
647 	rxq.rxmax = info->max_pkt_size;
648 	rxq.tphrdesc_ena = 1;
649 	rxq.tphwdesc_ena = 1;
650 	rxq.tphdata_ena = 1;
651 	rxq.tphhead_ena = 1;
652 	rxq.lrxqthresh = 2;
653 	rxq.prefena = 1;
654 
655 	status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
656 	if (status != I40E_SUCCESS)
657 		return (EINVAL);
658 
659 	ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, false);
660 
661 	return (0);
662 }
663 
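/*
 * Handle VIRTCHNL_OP_CONFIG_VSI_QUEUES: validate the message size, VSI
 * id and queue-pair count, then program the TX and RX queue contexts
 * for each queue pair the VF describes.
 */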
664 static void
665 ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
666     uint16_t msg_size)
667 {
668 	struct virtchnl_vsi_queue_config_info *info;
669 	struct virtchnl_queue_pair_info *pair;
670 	uint16_t expected_msg_size;
671 	int i;
672 
673 	if (msg_size < sizeof(*info)) {
674 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
675 		    I40E_ERR_PARAM);
676 		return;
677 	}
678 
679 	info = msg;
680 	if (info->num_queue_pairs == 0 || info->num_queue_pairs > vf->vsi.num_tx_queues) {
681 		device_printf(pf->dev, "VF %d: invalid # of qpairs (msg has %d, VSI has %d)\n",
682 		    vf->vf_num, info->num_queue_pairs, vf->vsi.num_tx_queues);
683 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
684 		    I40E_ERR_PARAM);
685 		return;
686 	}
687 
688 	expected_msg_size = sizeof(*info) + info->num_queue_pairs * sizeof(*pair);
689 	if (msg_size != expected_msg_size) {
690 		device_printf(pf->dev, "VF %d: size of recvd message (%d) does not match expected size (%d)\n",
691 		    vf->vf_num, msg_size, expected_msg_size);
692 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
693 		    I40E_ERR_PARAM);
694 		return;
695 	}
696 
697 	if (info->vsi_id != vf->vsi.vsi_num) {
698 		device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
699 		    vf->vf_num, info->vsi_id, vf->vsi.vsi_num);
700 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
701 		    I40E_ERR_PARAM);
702 		return;
703 	}
704 
705 	for (i = 0; i < info->num_queue_pairs; i++) {
706 		pair = &info->qpair[i];
707 
708 		if (pair->txq.vsi_id != vf->vsi.vsi_num ||
709 		    pair->rxq.vsi_id != vf->vsi.vsi_num ||
710 		    pair->txq.queue_id != pair->rxq.queue_id ||
711 		    pair->txq.queue_id >= vf->vsi.num_tx_queues) {
712 
713 			i40e_send_vf_nack(pf, vf,
714 			    VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
715 			return;
716 		}
717 
718 		if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
719 			i40e_send_vf_nack(pf, vf,
720 			    VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
721 			return;
722 		}
723 
724 		if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
725 			i40e_send_vf_nack(pf, vf,
726 			    VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
727 			return;
728 		}
729 	}
730 
731 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES);
732 }
733 
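/*
 * Write the interrupt cause control register for one queue, linking it
 * to the previously processed queue in the vector's interrupt linked
 * list, and record it as the most recent entry.
 */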
734 static void
735 ixl_vf_set_qctl(struct ixl_pf *pf,
736     const struct virtchnl_vector_map *vector,
737     enum i40e_queue_type cur_type, uint16_t cur_queue,
738     enum i40e_queue_type *last_type, uint16_t *last_queue)
739 {
740 	uint32_t offset, qctl;
741 	uint16_t itr_indx;
742 
743 	if (cur_type == I40E_QUEUE_TYPE_RX) {
744 		offset = I40E_QINT_RQCTL(cur_queue);
745 		itr_indx = vector->rxitr_idx;
746 	} else {
747 		offset = I40E_QINT_TQCTL(cur_queue);
748 		itr_indx = vector->txitr_idx;
749 	}
750 
751 	qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
752 	    (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
753 	    (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
754 	    I40E_QINT_RQCTL_CAUSE_ENA_MASK |
755 	    (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));
756 
757 	wr32(&pf->hw, offset, qctl);
758 
759 	*last_type = cur_type;
760 	*last_queue = cur_queue;
761 }
762 
763 static void
764 ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
765     const struct virtchnl_vector_map *vector)
766 {
767 	struct i40e_hw *hw;
768 	u_int qindex;
769 	enum i40e_queue_type type, last_type;
770 	uint32_t lnklst_reg;
771 	uint16_t rxq_map, txq_map, cur_queue, last_queue;
772 
773 	hw = &pf->hw;
774 
775 	rxq_map = vector->rxq_map;
776 	txq_map = vector->txq_map;
777 
778 	last_queue = IXL_END_OF_INTR_LNKLST;
779 	last_type = I40E_QUEUE_TYPE_RX;
780 
781 	/*
782 	 * The datasheet says to optimize performance, RX queues and TX queues
783 	 * should be interleaved in the interrupt linked list, so we process
784 	 * both at once here.
785 	 */
786 	while ((rxq_map != 0) || (txq_map != 0)) {
787 		if (txq_map != 0) {
788 			qindex = ffs(txq_map) - 1;
789 			type = I40E_QUEUE_TYPE_TX;
790 			cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
791 			ixl_vf_set_qctl(pf, vector, type, cur_queue,
792 			    &last_type, &last_queue);
793 			txq_map &= ~(1 << qindex);
794 		}
795 
796 		if (rxq_map != 0) {
797 			qindex = ffs(rxq_map) - 1;
798 			type = I40E_QUEUE_TYPE_RX;
799 			cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
800 			ixl_vf_set_qctl(pf, vector, type, cur_queue,
801 			    &last_type, &last_queue);
802 			rxq_map &= ~(1 << qindex);
803 		}
804 	}
805 
806 	if (vector->vector_id == 0)
807 		lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
808 	else
809 		lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
810 		    vf->vf_num);
811 	wr32(hw, lnklst_reg,
812 	    (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
813 	    (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
814 
815 	ixl_flush(hw);
816 }
817 
818 static void
819 ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
820     uint16_t msg_size)
821 {
822 	struct virtchnl_irq_map_info *map;
823 	struct virtchnl_vector_map *vector;
824 	struct i40e_hw *hw;
825 	int i, largest_txq, largest_rxq;
826 
827 	hw = &pf->hw;
828 
829 	if (msg_size < sizeof(*map)) {
830 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
831 		    I40E_ERR_PARAM);
832 		return;
833 	}
834 
835 	map = msg;
836 	if (map->num_vectors == 0) {
837 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
838 		    I40E_ERR_PARAM);
839 		return;
840 	}
841 
842 	if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
843 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
844 		    I40E_ERR_PARAM);
845 		return;
846 	}
847 
848 	for (i = 0; i < map->num_vectors; i++) {
849 		vector = &map->vecmap[i];
850 
851 		if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
852 		    vector->vsi_id != vf->vsi.vsi_num) {
853 			i40e_send_vf_nack(pf, vf,
854 			    VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
855 			return;
856 		}
857 
858 		if (vector->rxq_map != 0) {
859 			largest_rxq = fls(vector->rxq_map) - 1;
860 			if (largest_rxq >= vf->vsi.num_rx_queues) {
861 				i40e_send_vf_nack(pf, vf,
862 				    VIRTCHNL_OP_CONFIG_IRQ_MAP,
863 				    I40E_ERR_PARAM);
864 				return;
865 			}
866 		}
867 
868 		if (vector->txq_map != 0) {
869 			largest_txq = fls(vector->txq_map) - 1;
870 			if (largest_txq >= vf->vsi.num_tx_queues) {
871 				i40e_send_vf_nack(pf, vf,
872 				    VIRTCHNL_OP_CONFIG_IRQ_MAP,
873 				    I40E_ERR_PARAM);
874 				return;
875 			}
876 		}
877 
878 		if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
879 		    vector->txitr_idx > IXL_MAX_ITR_IDX) {
880 			i40e_send_vf_nack(pf, vf,
881 			    VIRTCHNL_OP_CONFIG_IRQ_MAP,
882 			    I40E_ERR_PARAM);
883 			return;
884 		}
885 
886 		ixl_vf_config_vector(pf, vf, vector);
887 	}
888 
889 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP);
890 }
891 
892 static void
893 ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
894     uint16_t msg_size)
895 {
896 	struct virtchnl_queue_select *select;
897 	int error = 0;
898 
899 	if (msg_size != sizeof(*select)) {
900 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
901 		    I40E_ERR_PARAM);
902 		return;
903 	}
904 
905 	select = msg;
906 	if (select->vsi_id != vf->vsi.vsi_num ||
907 	    select->rx_queues == 0 || select->tx_queues == 0) {
908 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
909 		    I40E_ERR_PARAM);
910 		return;
911 	}
912 
913 	/* Enable TX rings selected by the VF */
914 	for (int i = 0; i < 32; i++) {
915 		if ((1 << i) & select->tx_queues) {
916 			/* Warn if queue is out of VF allocation range */
917 			if (i >= vf->vsi.num_tx_queues) {
918 				device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
919 				    vf->vf_num, i);
920 				break;
921 			}
922 			/* Skip this queue if it hasn't been configured */
923 			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
924 				continue;
925 			/* Warn if this queue is already marked as enabled */
926 			if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true))
927 				device_printf(pf->dev, "VF %d: TX ring %d is already enabled!\n",
928 				    vf->vf_num, i);
929 
930 			error = ixl_enable_tx_ring(pf, &vf->qtag, i);
931 			if (error)
932 				break;
933 			else
934 				ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, true);
935 		}
936 	}
937 
938 	/* Enable RX rings selected by the VF */
939 	for (int i = 0; i < 32; i++) {
940 		if ((1 << i) & select->rx_queues) {
941 			/* Warn if queue is out of VF allocation range */
942 			if (i >= vf->vsi.num_rx_queues) {
943 				device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
944 				    vf->vf_num, i);
945 				break;
946 			}
947 			/* Skip this queue if it hasn't been configured */
948 			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
949 				continue;
950 			/* Warn if this queue is already marked as enabled */
951 			if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false))
952 				device_printf(pf->dev, "VF %d: RX ring %d is already enabled!\n",
953 				    vf->vf_num, i);
954 			error = ixl_enable_rx_ring(pf, &vf->qtag, i);
955 			if (error)
956 				break;
957 			else
958 				ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, false);
959 		}
960 	}
961 
962 	if (error) {
963 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
964 		    I40E_ERR_TIMEOUT);
965 		return;
966 	}
967 
968 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES);
969 }
970 
971 static void
972 ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
973     void *msg, uint16_t msg_size)
974 {
975 	struct virtchnl_queue_select *select;
976 	int error = 0;
977 
978 	if (msg_size != sizeof(*select)) {
979 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
980 		    I40E_ERR_PARAM);
981 		return;
982 	}
983 
984 	select = msg;
985 	if (select->vsi_id != vf->vsi.vsi_num ||
986 	    select->rx_queues == 0 || select->tx_queues == 0) {
987 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
988 		    I40E_ERR_PARAM);
989 		return;
990 	}
991 
992 	/* Disable TX rings selected by the VF */
993 	for (int i = 0; i < 32; i++) {
994 		if ((1 << i) & select->tx_queues) {
995 			/* Warn if queue is out of VF allocation range */
996 			if (i >= vf->vsi.num_tx_queues) {
997 				device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
998 				    vf->vf_num, i);
999 				break;
1000 			}
1001 			/* Skip this queue if it hasn't been configured */
1002 			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
1003 				continue;
1004 			/* Warn if this queue is already marked as disabled */
1005 			if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true)) {
1006 				device_printf(pf->dev, "VF %d: TX ring %d is already disabled!\n",
1007 				    vf->vf_num, i);
1008 				continue;
1009 			}
1010 			error = ixl_disable_tx_ring(pf, &vf->qtag, i);
1011 			if (error)
1012 				break;
1013 			else
1014 				ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, true);
1015 		}
1016 	}
1017 
1018 	/* Disable RX rings selected by the VF */
1019 	for (int i = 0; i < 32; i++) {
1020 		if ((1 << i) & select->rx_queues) {
1021 			/* Warn if queue is out of VF allocation range */
1022 			if (i >= vf->vsi.num_rx_queues) {
1023 				device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
1024 				    vf->vf_num, i);
1025 				break;
1026 			}
1027 			/* Skip this queue if it hasn't been configured */
1028 			if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
1029 				continue;
1030 			/* Warn if this queue is already marked as disabled */
1031 			if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false)) {
1032 				device_printf(pf->dev, "VF %d: RX ring %d is already disabled!\n",
1033 				    vf->vf_num, i);
1034 				continue;
1035 			}
1036 			error = ixl_disable_rx_ring(pf, &vf->qtag, i);
1037 			if (error)
1038 				break;
1039 			else
1040 				ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, false);
1041 		}
1042 	}
1043 
1044 	if (error) {
1045 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
1046 		    I40E_ERR_TIMEOUT);
1047 		return;
1048 	}
1049 
1050 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES);
1051 }
1052 
1053 static bool
1054 ixl_zero_mac(const uint8_t *addr)
1055 {
1056 	uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
1057 
1058 	return (cmp_etheraddr(addr, zero));
1059 }
1060 
1061 static bool
1062 ixl_bcast_mac(const uint8_t *addr)
1063 {
1064 	static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
1065 	    {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
1066 
1067 	return (cmp_etheraddr(addr, ixl_bcast_addr));
1068 }
1069 
1070 static int
1071 ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
1072 {
1073 
1074 	if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
1075 		return (EINVAL);
1076 
1077 	/*
1078 	 * If the VF is not allowed to change its MAC address, don't let it
1079 	 * set a MAC filter for an address that is not a multicast address and
1080 	 * is not its assigned MAC.
1081 	 */
1082 	if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
1083 	    !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac)))
1084 		return (EPERM);
1085 
1086 	return (0);
1087 }
1088 
1089 static void
1090 ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1091     uint16_t msg_size)
1092 {
1093 	struct virtchnl_ether_addr_list *addr_list;
1094 	struct virtchnl_ether_addr *addr;
1095 	struct ixl_vsi *vsi;
1096 	int i;
1097 	size_t expected_size;
1098 
1099 	vsi = &vf->vsi;
1100 
1101 	if (msg_size < sizeof(*addr_list)) {
1102 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR,
1103 		    I40E_ERR_PARAM);
1104 		return;
1105 	}
1106 
1107 	addr_list = msg;
1108 	expected_size = sizeof(*addr_list) +
1109 	    addr_list->num_elements * sizeof(*addr);
1110 
1111 	if (addr_list->num_elements == 0 ||
1112 	    addr_list->vsi_id != vsi->vsi_num ||
1113 	    msg_size != expected_size) {
1114 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR,
1115 		    I40E_ERR_PARAM);
1116 		return;
1117 	}
1118 
1119 	for (i = 0; i < addr_list->num_elements; i++) {
1120 		if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
1121 			i40e_send_vf_nack(pf, vf,
1122 			    VIRTCHNL_OP_ADD_ETH_ADDR, I40E_ERR_PARAM);
1123 			return;
1124 		}
1125 	}
1126 
1127 	for (i = 0; i < addr_list->num_elements; i++) {
1128 		addr = &addr_list->list[i];
1129 		ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
1130 	}
1131 
1132 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR);
1133 }
1134 
1135 static void
1136 ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1137     uint16_t msg_size)
1138 {
1139 	struct virtchnl_ether_addr_list *addr_list;
1140 	struct virtchnl_ether_addr *addr;
1141 	size_t expected_size;
1142 	int i;
1143 
1144 	if (msg_size < sizeof(*addr_list)) {
1145 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR,
1146 		    I40E_ERR_PARAM);
1147 		return;
1148 	}
1149 
1150 	addr_list = msg;
1151 	expected_size = sizeof(*addr_list) +
1152 	    addr_list->num_elements * sizeof(*addr);
1153 
1154 	if (addr_list->num_elements == 0 ||
1155 	    addr_list->vsi_id != vf->vsi.vsi_num ||
1156 	    msg_size != expected_size) {
1157 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR,
1158 		    I40E_ERR_PARAM);
1159 		return;
1160 	}
1161 
1162 	for (i = 0; i < addr_list->num_elements; i++) {
1163 		addr = &addr_list->list[i];
1164 		if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
1165 			i40e_send_vf_nack(pf, vf,
1166 			    VIRTCHNL_OP_DEL_ETH_ADDR, I40E_ERR_PARAM);
1167 			return;
1168 		}
1169 	}
1170 
1171 	for (i = 0; i < addr_list->num_elements; i++) {
1172 		addr = &addr_list->list[i];
1173 		ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
1174 	}
1175 
1176 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR);
1177 }
1178 
1179 static enum i40e_status_code
1180 ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
1181 {
1182 	struct i40e_vsi_context vsi_ctx;
1183 
1184 	vsi_ctx.seid = vf->vsi.seid;
1185 
1186 	bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
1187 	vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
1188 	vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
1189 	    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
1190 	return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
1191 }
1192 
1193 static void
1194 ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1195     uint16_t msg_size)
1196 {
1197 	struct virtchnl_vlan_filter_list *filter_list;
1198 	enum i40e_status_code code;
1199 	size_t expected_size;
1200 	int i;
1201 
1202 	if (msg_size < sizeof(*filter_list)) {
1203 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1204 		    I40E_ERR_PARAM);
1205 		return;
1206 	}
1207 
1208 	filter_list = msg;
1209 	expected_size = sizeof(*filter_list) +
1210 	    filter_list->num_elements * sizeof(uint16_t);
1211 	if (filter_list->num_elements == 0 ||
1212 	    filter_list->vsi_id != vf->vsi.vsi_num ||
1213 	    msg_size != expected_size) {
1214 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1215 		    I40E_ERR_PARAM);
1216 		return;
1217 	}
1218 
1219 	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
1220 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1221 		    I40E_ERR_PARAM);
1222 		return;
1223 	}
1224 
1225 	for (i = 0; i < filter_list->num_elements; i++) {
1226 		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
1227 			i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1228 			    I40E_ERR_PARAM);
1229 			return;
1230 		}
1231 	}
1232 
1233 	code = ixl_vf_enable_vlan_strip(pf, vf);
1234 	if (code != I40E_SUCCESS) {
1235 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
1236 		    I40E_ERR_PARAM);
		return;
1237 	}
1238 
1239 	for (i = 0; i < filter_list->num_elements; i++)
1240 		ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
1241 
1242 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_ADD_VLAN);
1243 }
1244 
1245 static void
1246 ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1247     uint16_t msg_size)
1248 {
1249 	struct virtchnl_vlan_filter_list *filter_list;
1250 	int i;
1251 	size_t expected_size;
1252 
1253 	if (msg_size < sizeof(*filter_list)) {
1254 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
1255 		    I40E_ERR_PARAM);
1256 		return;
1257 	}
1258 
1259 	filter_list = msg;
1260 	expected_size = sizeof(*filter_list) +
1261 	    filter_list->num_elements * sizeof(uint16_t);
1262 	if (filter_list->num_elements == 0 ||
1263 	    filter_list->vsi_id != vf->vsi.vsi_num ||
1264 	    msg_size != expected_size) {
1265 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
1266 		    I40E_ERR_PARAM);
1267 		return;
1268 	}
1269 
1270 	for (i = 0; i < filter_list->num_elements; i++) {
1271 		if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
1272 			i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
1273 			    I40E_ERR_PARAM);
1274 			return;
1275 		}
1276 	}
1277 
1278 	if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
1279 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
1280 		    I40E_ERR_PARAM);
1281 		return;
1282 	}
1283 
1284 	for (i = 0; i < filter_list->num_elements; i++)
1285 		ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
1286 
1287 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DEL_VLAN);
1288 }
1289 
1290 static void
1291 ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
1292     void *msg, uint16_t msg_size)
1293 {
1294 	struct virtchnl_promisc_info *info;
1295 	enum i40e_status_code code;
1296 
1297 	if (msg_size != sizeof(*info)) {
1298 		i40e_send_vf_nack(pf, vf,
1299 		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
1300 		return;
1301 	}
1302 
1303 	if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
1304 		i40e_send_vf_nack(pf, vf,
1305 		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
1306 		return;
1307 	}
1308 
1309 	info = msg;
1310 	if (info->vsi_id != vf->vsi.vsi_num) {
1311 		i40e_send_vf_nack(pf, vf,
1312 		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
1313 		return;
1314 	}
1315 
1316 	code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
1317 	    info->flags & FLAG_VF_UNICAST_PROMISC, NULL, TRUE);
1318 	if (code != I40E_SUCCESS) {
1319 		i40e_send_vf_nack(pf, vf,
1320 		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
1321 		return;
1322 	}
1323 
1324 	code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
1325 	    info->flags & FLAG_VF_MULTICAST_PROMISC, NULL);
1326 	if (code != I40E_SUCCESS) {
1327 		i40e_send_vf_nack(pf, vf,
1328 		    VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
1329 		return;
1330 	}
1331 
1332 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
1333 }
1334 
1335 static void
1336 ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1337     uint16_t msg_size)
1338 {
1339 	struct virtchnl_queue_select *queue;
1340 
1341 	if (msg_size != sizeof(*queue)) {
1342 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_STATS,
1343 		    I40E_ERR_PARAM);
1344 		return;
1345 	}
1346 
1347 	queue = msg;
1348 	if (queue->vsi_id != vf->vsi.vsi_num) {
1349 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_STATS,
1350 		    I40E_ERR_PARAM);
1351 		return;
1352 	}
1353 
1354 	ixl_update_eth_stats(&vf->vsi);
1355 
1356 	ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_GET_STATS,
1357 	    I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
1358 }
1359 
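/*
 * Handle VIRTCHNL_OP_CONFIG_RSS_KEY: on X722 MACs the key is programmed
 * through the admin queue, on other MACs it is written directly to the
 * per-VF VFQF_HKEY registers.
 */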
1360 static void
1361 ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1362     uint16_t msg_size)
1363 {
1364 	struct i40e_hw *hw;
1365 	struct virtchnl_rss_key *key;
1366 	struct i40e_aqc_get_set_rss_key_data key_data;
1367 	enum i40e_status_code status;
1368 
1369 	hw = &pf->hw;
1370 
1371 	if (msg_size < sizeof(*key)) {
1372 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
1373 		    I40E_ERR_PARAM);
1374 		return;
1375 	}
1376 
1377 	key = msg;
1378 
1379 	if (key->key_len > 52) {
1380 		device_printf(pf->dev, "VF %d: Key size in msg (%d) is greater than max key size (%d)\n",
1381 		    vf->vf_num, key->key_len, 52);
1382 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
1383 		    I40E_ERR_PARAM);
1384 		return;
1385 	}
1386 
1387 	if (key->vsi_id != vf->vsi.vsi_num) {
1388 		device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
1389 		    vf->vf_num, key->vsi_id, vf->vsi.vsi_num);
1390 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
1391 		    I40E_ERR_PARAM);
1392 		return;
1393 	}
1394 
1395 	/* Fill out hash using MAC-dependent method */
1396 	if (hw->mac.type == I40E_MAC_X722) {
1397 		bzero(&key_data, sizeof(key_data));
1398 		if (key->key_len <= 40)
1399 			bcopy(key->key, key_data.standard_rss_key, key->key_len);
1400 		else {
1401 			bcopy(key->key, key_data.standard_rss_key, 40);
1402 			bcopy(&key->key[40], key_data.extended_hash_key, key->key_len - 40);
1403 		}
1404 		status = i40e_aq_set_rss_key(hw, vf->vsi.vsi_num, &key_data);
1405 		if (status) {
1406 			device_printf(pf->dev, "i40e_aq_set_rss_key status %s, error %s\n",
1407 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
1408 			i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
1409 			    I40E_ERR_ADMIN_QUEUE_ERROR);
1410 			return;
1411 		}
1412 	} else {
1413 		for (int i = 0; i < (key->key_len / 4); i++)
1414 			i40e_write_rx_ctl(hw, I40E_VFQF_HKEY1(i, vf->vf_num), ((u32 *)key->key)[i]);
1415 	}
1416 
1417 	DDPRINTF(pf->dev, "VF %d: Programmed key starting with 0x%x ok!",
1418 	    vf->vf_num, key->key[0]);
1419 
1420 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY);
1421 }
1422 
1423 static void
1424 ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1425     uint16_t msg_size)
1426 {
1427 	struct i40e_hw *hw;
1428 	struct virtchnl_rss_lut *lut;
1429 	enum i40e_status_code status;
1430 
1431 	hw = &pf->hw;
1432 
1433 	if (msg_size < sizeof(*lut)) {
1434 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
1435 		    I40E_ERR_PARAM);
1436 		return;
1437 	}
1438 
1439 	lut = msg;
1440 
1441 	if (lut->lut_entries > 64) {
1442 		device_printf(pf->dev, "VF %d: # of LUT entries in msg (%d) is greater than max (%d)\n",
1443 		    vf->vf_num, lut->lut_entries, 64);
1444 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
1445 		    I40E_ERR_PARAM);
1446 		return;
1447 	}
1448 
1449 	if (lut->vsi_id != vf->vsi.vsi_num) {
1450 		device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
1451 		    vf->vf_num, lut->vsi_id, vf->vsi.vsi_num);
1452 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
1453 		    I40E_ERR_PARAM);
1454 		return;
1455 	}
1456 
1457 	/* Fill out LUT using MAC-dependent method */
1458 	if (hw->mac.type == I40E_MAC_X722) {
1459 		status = i40e_aq_set_rss_lut(hw, vf->vsi.vsi_num, false, lut->lut, lut->lut_entries);
1460 		if (status) {
1461 			device_printf(pf->dev, "i40e_aq_set_rss_lut status %s, error %s\n",
1462 			    i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
1463 			i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
1464 			    I40E_ERR_ADMIN_QUEUE_ERROR);
1465 			return;
1466 		}
1467 	} else {
1468 		for (int i = 0; i < (lut->lut_entries / 4); i++)
1469 			i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf->vf_num), ((u32 *)lut->lut)[i]);
1470 	}
1471 
1472 	DDPRINTF(pf->dev, "VF %d: Programmed LUT starting with 0x%x and length %d ok!",
1473 	    vf->vf_num, lut->lut[0], lut->lut_entries);
1474 
1475 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT);
1476 }
1477 
1478 static void
1479 ixl_vf_set_rss_hena_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
1480     uint16_t msg_size)
1481 {
1482 	struct i40e_hw *hw;
1483 	struct virtchnl_rss_hena *hena;
1484 
1485 	hw = &pf->hw;
1486 
1487 	if (msg_size < sizeof(*hena)) {
1488 		i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_SET_RSS_HENA,
1489 		    I40E_ERR_PARAM);
1490 		return;
1491 	}
1492 
1493 	hena = msg;
1494 
1495 	/* Set HENA */
1496 	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_num), (u32)hena->hena);
1497 	i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_num), (u32)(hena->hena >> 32));
1498 
1499 	DDPRINTF(pf->dev, "VF %d: Programmed HENA with 0x%016lx",
1500 	    vf->vf_num, hena->hena);
1501 
1502 	ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_SET_RSS_HENA);
1503 }
1504 
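/*
 * Send a link state change event to a single VF, reporting the PF's
 * current link status and speed.
 */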
1505 static void
1506 ixl_notify_vf_link_state(struct ixl_pf *pf, struct ixl_vf *vf)
1507 {
1508 	struct virtchnl_pf_event event;
1509 	struct i40e_hw *hw;
1510 
1511 	hw = &pf->hw;
1512 	event.event = VIRTCHNL_EVENT_LINK_CHANGE;
1513 	event.severity = PF_EVENT_SEVERITY_INFO;
1514 	event.event_data.link_event.link_status = pf->vsi.link_active;
1515 	event.event_data.link_event.link_speed =
1516 		(enum virtchnl_link_speed)hw->phy.link_info.link_speed;
1517 
1518 	ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_EVENT, I40E_SUCCESS, &event,
1519 			sizeof(event));
1520 }
1521 
1522 void
1523 ixl_broadcast_link_state(struct ixl_pf *pf)
1524 {
1525 	int i;
1526 
1527 	for (i = 0; i < pf->num_vfs; i++)
1528 		ixl_notify_vf_link_state(pf, &pf->vfs[i]);
1529 }
1530 
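/*
 * Dispatch a virtchnl message received from a VF over the admin queue
 * to the matching handler. Messages from VF numbers outside the
 * configured range or from disabled VFs are dropped.
 */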
1531 void
1532 ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
1533 {
1534 	struct ixl_vf *vf;
1535 	void *msg;
1536 	uint16_t vf_num, msg_size;
1537 	uint32_t opcode;
1538 
1539 	vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
1540 	opcode = le32toh(event->desc.cookie_high);
1541 
1542 	if (vf_num >= pf->num_vfs) {
1543 		device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
1544 		return;
1545 	}
1546 
1547 	vf = &pf->vfs[vf_num];
1548 	msg = event->msg_buf;
1549 	msg_size = event->msg_len;
1550 
1551 	I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
1552 	    "Got msg %s(%d) from%sVF-%d of size %d\n",
1553 	    ixl_vc_opcode_str(opcode), opcode,
1554 	    (vf->vf_flags & VF_FLAG_ENABLED) ? " " : " disabled ",
1555 	    vf_num, msg_size);
1556 
1557 	/* This must be a stray msg from a previously destroyed VF. */
1558 	if (!(vf->vf_flags & VF_FLAG_ENABLED))
1559 		return;
1560 
1561 	switch (opcode) {
1562 	case VIRTCHNL_OP_VERSION:
1563 		ixl_vf_version_msg(pf, vf, msg, msg_size);
1564 		break;
1565 	case VIRTCHNL_OP_RESET_VF:
1566 		ixl_vf_reset_msg(pf, vf, msg, msg_size);
1567 		break;
1568 	case VIRTCHNL_OP_GET_VF_RESOURCES:
1569 		ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
1570 		/* Notify VF of link state after it obtains queues, as this is
1571 		 * the last thing it will do as part of initialization
1572 		 */
1573 		ixl_notify_vf_link_state(pf, vf);
1574 		break;
1575 	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
1576 		ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
1577 		break;
1578 	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
1579 		ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
1580 		break;
1581 	case VIRTCHNL_OP_ENABLE_QUEUES:
1582 		ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
1583 		/* Notify VF of link state after it obtains queues, as this is
1584 		 * the last thing it will do as part of initialization
1585 		 */
1586 		ixl_notify_vf_link_state(pf, vf);
1587 		break;
1588 	case VIRTCHNL_OP_DISABLE_QUEUES:
1589 		ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
1590 		break;
1591 	case VIRTCHNL_OP_ADD_ETH_ADDR:
1592 		ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
1593 		break;
1594 	case VIRTCHNL_OP_DEL_ETH_ADDR:
1595 		ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
1596 		break;
1597 	case VIRTCHNL_OP_ADD_VLAN:
1598 		ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
1599 		break;
1600 	case VIRTCHNL_OP_DEL_VLAN:
1601 		ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
1602 		break;
1603 	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
1604 		ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
1605 		break;
1606 	case VIRTCHNL_OP_GET_STATS:
1607 		ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
1608 		break;
1609 	case VIRTCHNL_OP_CONFIG_RSS_KEY:
1610 		ixl_vf_config_rss_key_msg(pf, vf, msg, msg_size);
1611 		break;
1612 	case VIRTCHNL_OP_CONFIG_RSS_LUT:
1613 		ixl_vf_config_rss_lut_msg(pf, vf, msg, msg_size);
1614 		break;
1615 	case VIRTCHNL_OP_SET_RSS_HENA:
1616 		ixl_vf_set_rss_hena_msg(pf, vf, msg, msg_size);
1617 		break;
1618 
1619 	/* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
1620 	case VIRTCHNL_OP_CONFIG_TX_QUEUE:
1621 	case VIRTCHNL_OP_CONFIG_RX_QUEUE:
1622 	default:
1623 		i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
1624 		break;
1625 	}
1626 }
1627 
1628 /* Handle any VFs that have reset themselves via a Function Level Reset (FLR). */
1629 void
1630 ixl_handle_vflr(void *arg, int pending)
1631 {
1632 	struct ixl_pf *pf;
1633 	struct ixl_vf *vf;
1634 	struct i40e_hw *hw;
1635 	uint16_t global_vf_num;
1636 	uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
1637 	int i;
1638 
1639 	pf = arg;
1640 	hw = &pf->hw;
1641 
1642 	/* TODO: May need to lock this */
1643 	for (i = 0; i < pf->num_vfs; i++) {
1644 		global_vf_num = hw->func_caps.vf_base_id + i;
1645 
1646 		vf = &pf->vfs[i];
1647 		if (!(vf->vf_flags & VF_FLAG_ENABLED))
1648 			continue;
1649 
1650 		vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
1651 		vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
1652 		vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
1653 		if (vflrstat & vflrstat_mask) {
1654 			wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
1655 			    vflrstat_mask);
1656 
1657 			ixl_reinit_vf(pf, vf);
1658 		}
1659 	}
1660 
1661 	atomic_clear_32(&pf->state, IXL_PF_STATE_VF_RESET_REQ);
1662 	icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
1663 	icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
1664 	wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
1665 	ixl_flush(hw);
1666 
1667 	// IXL_PF_UNLOCK()
1668 }
1669 
1670 static int
1671 ixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
1672 {
1673 
1674 	switch (err) {
1675 	case I40E_AQ_RC_EPERM:
1676 		return (EPERM);
1677 	case I40E_AQ_RC_ENOENT:
1678 		return (ENOENT);
1679 	case I40E_AQ_RC_ESRCH:
1680 		return (ESRCH);
1681 	case I40E_AQ_RC_EINTR:
1682 		return (EINTR);
1683 	case I40E_AQ_RC_EIO:
1684 		return (EIO);
1685 	case I40E_AQ_RC_ENXIO:
1686 		return (ENXIO);
1687 	case I40E_AQ_RC_E2BIG:
1688 		return (E2BIG);
1689 	case I40E_AQ_RC_EAGAIN:
1690 		return (EAGAIN);
1691 	case I40E_AQ_RC_ENOMEM:
1692 		return (ENOMEM);
1693 	case I40E_AQ_RC_EACCES:
1694 		return (EACCES);
1695 	case I40E_AQ_RC_EFAULT:
1696 		return (EFAULT);
1697 	case I40E_AQ_RC_EBUSY:
1698 		return (EBUSY);
1699 	case I40E_AQ_RC_EEXIST:
1700 		return (EEXIST);
1701 	case I40E_AQ_RC_EINVAL:
1702 		return (EINVAL);
1703 	case I40E_AQ_RC_ENOTTY:
1704 		return (ENOTTY);
1705 	case I40E_AQ_RC_ENOSPC:
1706 		return (ENOSPC);
1707 	case I40E_AQ_RC_ENOSYS:
1708 		return (ENOSYS);
1709 	case I40E_AQ_RC_ERANGE:
1710 		return (ERANGE);
1711 	case I40E_AQ_RC_EFLUSHED:
1712 		return (EINVAL);	/* No exact equivalent in errno.h */
1713 	case I40E_AQ_RC_BAD_ADDR:
1714 		return (EFAULT);
1715 	case I40E_AQ_RC_EMODE:
1716 		return (EPERM);
1717 	case I40E_AQ_RC_EFBIG:
1718 		return (EFBIG);
1719 	default:
1720 		return (EINVAL);
1721 	}
1722 }
1723 
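/*
 * Called by the PCI SR-IOV framework when VFs are created on this PF
 * (e.g., via iovctl(8)): allocate the per-VF state array and add the
 * VEB that switches traffic between the PF and its VFs.
 */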
1724 int
1725 ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
1726 {
1727 	struct ixl_pf *pf;
1728 	struct i40e_hw *hw;
1729 	struct ixl_vsi *pf_vsi;
1730 	enum i40e_status_code ret;
1731 	int i, error;
1732 
1733 	pf = device_get_softc(dev);
1734 	hw = &pf->hw;
1735 	pf_vsi = &pf->vsi;
1736 
1737 	//IXL_PF_LOCK(pf);
1738 	pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
1739 	    M_ZERO);
1740 
1741 	if (pf->vfs == NULL) {
1742 		error = ENOMEM;
1743 		goto fail;
1744 	}
1745 
1746 	for (i = 0; i < num_vfs; i++)
1747 		sysctl_ctx_init(&pf->vfs[i].ctx);
1748 
1749 	ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
1750 	    1, FALSE, &pf->veb_seid, FALSE, NULL);
1751 	if (ret != I40E_SUCCESS) {
1752 		error = ixl_adminq_err_to_errno(hw->aq.asq_last_status);
1753 		device_printf(dev, "add_veb failed; code=%d error=%d\n", ret,
1754 		    error);
1755 		goto fail;
1756 	}
1757 
1758 	pf->num_vfs = num_vfs;
1759 	//IXL_PF_UNLOCK(pf);
1760 	return (0);
1761 
1762 fail:
1763 	free(pf->vfs, M_IXL);
1764 	pf->vfs = NULL;
1765 	//IXL_PF_UNLOCK(pf);
1766 	return (error);
1767 }
1768 
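/*
 * Tear down all VF state when SR-IOV is disabled: delete each VF's VSI,
 * release its queue allocation and MAC filters, delete the VEB, and
 * free the per-VF state array.
 */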
1769 void
1770 ixl_iov_uninit(device_t dev)
1771 {
1772 	struct ixl_pf *pf;
1773 	struct i40e_hw *hw;
1774 	struct ixl_vsi *vsi;
1775 	struct ifnet *ifp;
1776 	struct ixl_vf *vfs;
1777 	int i, num_vfs;
1778 
1779 	pf = device_get_softc(dev);
1780 	hw = &pf->hw;
1781 	vsi = &pf->vsi;
1782 	ifp = vsi->ifp;
1783 
1784 	//IXL_PF_LOCK(pf);
1785 	for (i = 0; i < pf->num_vfs; i++) {
1786 		if (pf->vfs[i].vsi.seid != 0)
1787 			i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
1788 		ixl_pf_qmgr_release(&pf->qmgr, &pf->vfs[i].qtag);
1789 		ixl_free_mac_filters(&pf->vfs[i].vsi);
1790 		DDPRINTF(dev, "VF %d: %d released\n",
1791 		    i, pf->vfs[i].qtag.num_allocated);
1792 		DDPRINTF(dev, "Unallocated total: %d\n", ixl_pf_qmgr_get_num_free(&pf->qmgr));
1793 	}
1794 
1795 	if (pf->veb_seid != 0) {
1796 		i40e_aq_delete_element(hw, pf->veb_seid, NULL);
1797 		pf->veb_seid = 0;
1798 	}
1799 
1800 	vfs = pf->vfs;
1801 	num_vfs = pf->num_vfs;
1802 
1803 	pf->vfs = NULL;
1804 	pf->num_vfs = 0;
1805 	//IXL_PF_UNLOCK(pf);
1806 
1807 	/* Do this after the unlock as sysctl_ctx_free might sleep. */
1808 	for (i = 0; i < num_vfs; i++)
1809 		sysctl_ctx_free(&vfs[i].ctx);
1810 	free(vfs, M_IXL);
1811 }
1812 
1813 static int
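/*
 * Clamp the requested per-VF queue count to the supported range (1-16)
 * and reserve that many PF queues for the VF using the scattered
 * allocation method.
 */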
1814 ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues)
1815 {
1816 	device_t dev = pf->dev;
1817 	int error;
1818 
1819 	/* Validate, and clamp value if invalid */
1820 	if (num_queues < 1 || num_queues > 16)
1821 		device_printf(dev, "Invalid num-queues (%d) for VF %d\n",
1822 		    num_queues, vf->vf_num);
1823 	if (num_queues < 1) {
1824 		device_printf(dev, "Setting VF %d num-queues to 1\n", vf->vf_num);
1825 		num_queues = 1;
1826 	} else if (num_queues > 16) {
1827 		device_printf(dev, "Setting VF %d num-queues to 16\n", vf->vf_num);
1828 		num_queues = 16;
1829 	}
1830 	error = ixl_pf_qmgr_alloc_scattered(&pf->qmgr, num_queues, &vf->qtag);
1831 	if (error) {
1832 		device_printf(dev, "Error allocating %d queues for VF %d's VSI\n",
1833 		    num_queues, vf->vf_num);
1834 		return (ENOSPC);
1835 	}
1836 
1837 	DDPRINTF(dev, "VF %d: %d allocated, %d active",
1838 	    vf->vf_num, vf->qtag.num_allocated, vf->qtag.num_active);
1839 	DDPRINTF(dev, "Unallocated total: %d", ixl_pf_qmgr_get_num_free(&pf->qmgr));
1840 
1841 	return (0);
1842 }
1843 
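/*
 * Per-VF creation entry point for the PCI SR-IOV framework. The nvlist
 * parameters consumed here ("mac-addr", "allow-set-mac",
 * "mac-anti-spoof", "allow-promisc", "num-queues") are the schema names
 * an administrator sets through iovctl(8). An illustrative
 * iovctl.conf(5) sketch only -- device name and values are examples,
 * not defaults; see the manual page for exact syntax:
 *
 *	PF {
 *		device : "ixl0";
 *		num_vfs : 2;
 *	}
 *	VF-0 {
 *		num-queues : 4;
 *		allow-set-mac : true;
 *		allow-promisc : false;
 *	}
 */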
1844 int
1845 ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
1846 {
1847 	char sysctl_name[QUEUE_NAME_LEN];
1848 	struct ixl_pf *pf;
1849 	struct ixl_vf *vf;
1850 	const void *mac;
1851 	size_t size;
1852 	int error;
1853 	int vf_num_queues;
1854 
1855 	pf = device_get_softc(dev);
1856 	vf = &pf->vfs[vfnum];
1857 
1858 	//IXL_PF_LOCK(pf);
1859 	vf->vf_num = vfnum;
1860 
1861 	vf->vsi.back = pf;
1862 	vf->vf_flags = VF_FLAG_ENABLED;
1863 	SLIST_INIT(&vf->vsi.ftl);
1864 
1865 	/* Reserve queue allocation from PF */
1866 	vf_num_queues = nvlist_get_number(params, "num-queues");
1867 	error = ixl_vf_reserve_queues(pf, vf, vf_num_queues);
1868 	if (error != 0)
1869 		goto out;
1870 
1871 	error = ixl_vf_setup_vsi(pf, vf);
1872 	if (error != 0)
1873 		goto out;
1874 
1875 	if (nvlist_exists_binary(params, "mac-addr")) {
1876 		mac = nvlist_get_binary(params, "mac-addr", &size);
1877 		bcopy(mac, vf->mac, ETHER_ADDR_LEN);
1878 
1879 		if (nvlist_get_bool(params, "allow-set-mac"))
1880 			vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
1881 	} else
1882 		/*
1883 		 * If the administrator has not specified a MAC address then
1884 		 * we must allow the VF to choose one.
1885 		 */
1886 		vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
1887 
1888 	if (nvlist_get_bool(params, "mac-anti-spoof"))
1889 		vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
1890 
1891 	if (nvlist_get_bool(params, "allow-promisc"))
1892 		vf->vf_flags |= VF_FLAG_PROMISC_CAP;
1893 
1894 	vf->vf_flags |= VF_FLAG_VLAN_CAP;
1895 
1896 	ixl_reset_vf(pf, vf);
1897 out:
1898 	//IXL_PF_UNLOCK(pf);
1899 	if (error == 0) {
1900 		snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
1901 		ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);
1902 	}
1903 
1904 	return (error);
1905 }
1906 
1907