1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /*  Copyright (c) 2021, Intel Corporation
3  *  All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions are met:
7  *
8  *   1. Redistributions of source code must retain the above copyright notice,
9  *      this list of conditions and the following disclaimer.
10  *
11  *   2. Redistributions in binary form must reproduce the above copyright
12  *      notice, this list of conditions and the following disclaimer in the
13  *      documentation and/or other materials provided with the distribution.
14  *
15  *   3. Neither the name of the Intel Corporation nor the names of its
16  *      contributors may be used to endorse or promote products derived from
17  *      this software without specific prior written permission.
18  *
19  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  *  POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /**
33  * @file iavf_vc_common.c
34  * @brief Common virtchnl interface functions
35  *
36  * Contains functions implementing the virtchnl interface for connecting to
37  * the PF driver. This file contains the functions which are common between
38  * the legacy and iflib driver implementations.
39  */
40 #include "iavf_vc_common.h"
41 
42 /* Static function decls */
43 static void iavf_handle_link_event(struct iavf_sc *sc,
44     struct virtchnl_pf_event *vpe);
45 
46 /**
47  * iavf_send_pf_msg - Send virtchnl message to PF device
48  * @sc: device softc
49  * @op: the op to send
50  * @msg: message contents
51  * @len: length of the message
52  *
53  * Send a message to the PF device over the virtchnl connection. Print
54  * a status code if the message reports an error.
55  *
56  * @returns zero on success, or an error code on failure.
57  */
58 int
59 iavf_send_pf_msg(struct iavf_sc *sc,
60 	enum virtchnl_ops op, u8 *msg, u16 len)
61 {
62 	struct iavf_hw *hw = &sc->hw;
63 	device_t dev = sc->dev;
64 	enum iavf_status status;
65 	int val_err;
66 
67 	/* Validating message before sending it to the PF */
68 	val_err = virtchnl_vc_validate_vf_msg(&sc->version, op, msg, len);
69 	if (val_err)
70 		device_printf(dev, "Error validating msg to PF for op %d,"
71 		    " msglen %d: error %d\n", op, len, val_err);
72 
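	/*
	 * If the send queue is not alive the message cannot be posted. Log
	 * this (except for the periodic stats request, to avoid log spam)
	 * and report success so the caller does not treat it as a
	 * per-message failure.
	 */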
73 	if (!iavf_check_asq_alive(hw)) {
74 		if (op != VIRTCHNL_OP_GET_STATS)
75 			device_printf(dev, "Unable to send opcode %s to PF, "
76 			    "ASQ is not alive\n", iavf_vc_opcode_str(op));
77 		return (0);
78 	}
79 
80 	if (op != VIRTCHNL_OP_GET_STATS)
81 		iavf_dbg_vc(sc,
82 		    "Sending msg (op=%s[%d]) to PF\n",
83 		    iavf_vc_opcode_str(op), op);
84 
85 	status = iavf_aq_send_msg_to_pf(hw, op, IAVF_SUCCESS, msg, len, NULL);
86 	if (status && op != VIRTCHNL_OP_GET_STATS)
87 		device_printf(dev, "Unable to send opcode %s to PF, "
88 		    "status %s, aq error %s\n",
89 		    iavf_vc_opcode_str(op),
90 		    iavf_stat_str(hw, status),
91 		    iavf_aq_str(hw, hw->aq.asq_last_status));
92 
93 	return (status);
94 }
95 
96 /**
97  * iavf_send_api_ver - Send the API version we support to the PF
98  * @sc: device softc
99  *
100  * Send API version admin queue message to the PF. The reply is not checked
101  * in this function.
102  *
103  * @returns 0 if the message was successfully sent, or one of the
104  * IAVF_ADMIN_QUEUE_ERROR_ statuses if not.
105  */
106 int
107 iavf_send_api_ver(struct iavf_sc *sc)
108 {
109 	struct virtchnl_version_info vvi;
110 
111 	vvi.major = VIRTCHNL_VERSION_MAJOR;
112 	vvi.minor = VIRTCHNL_VERSION_MINOR;
113 
114 	return iavf_send_pf_msg(sc, VIRTCHNL_OP_VERSION,
115 	    (u8 *)&vvi, sizeof(vvi));
116 }
117 
118 /**
119  * iavf_verify_api_ver - Verify the PF supports our API version
120  * @sc: device softc
121  *
122  * Compare API versions with the PF. Must be called after admin queue is
123  * initialized.
124  *
125  * @returns 0 if the API versions are compatible (or if no reply is received
126  * before the retry limit is reached), or EIO otherwise.
127  */
128 int
129 iavf_verify_api_ver(struct iavf_sc *sc)
130 {
131 	struct virtchnl_version_info *pf_vvi;
132 	struct iavf_hw *hw = &sc->hw;
133 	struct iavf_arq_event_info event;
134 	enum iavf_status status;
135 	device_t dev = sc->dev;
136 	int error = 0;
137 	int retries = 0;
138 
139 	event.buf_len = IAVF_AQ_BUF_SZ;
140 	event.msg_buf = (u8 *)malloc(event.buf_len, M_IAVF, M_WAITOK);
141 
142 	for (;;) {
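		/*
		 * Poll the ARQ for the VERSION reply: each pass below waits
		 * 100 ms, so the loop gives up after roughly
		 * IAVF_AQ_MAX_ERR * 100 ms without a usable response.
		 */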
143 		if (++retries > IAVF_AQ_MAX_ERR)
144 			goto out_alloc;
145 
146 		/* Initial delay here is necessary */
147 		iavf_msec_pause(100);
148 		status = iavf_clean_arq_element(hw, &event, NULL);
149 		if (status == IAVF_ERR_ADMIN_QUEUE_NO_WORK)
150 			continue;
151 		else if (status) {
152 			error = EIO;
153 			goto out_alloc;
154 		}
155 
156 		if ((enum virtchnl_ops)le32toh(event.desc.cookie_high) !=
157 		    VIRTCHNL_OP_VERSION) {
158 			iavf_dbg_vc(sc, "%s: Received unexpected op response: %d\n",
159 			    __func__, le32toh(event.desc.cookie_high));
160 			/* Don't stop looking for expected response */
161 			continue;
162 		}
163 
164 		status = (enum iavf_status)le32toh(event.desc.cookie_low);
165 		if (status) {
166 			error = EIO;
167 			goto out_alloc;
168 		} else
169 			break;
170 	}
171 
172 	pf_vvi = (struct virtchnl_version_info *)event.msg_buf;
173 	if ((pf_vvi->major > VIRTCHNL_VERSION_MAJOR) ||
174 	    ((pf_vvi->major == VIRTCHNL_VERSION_MAJOR) &&
175 	    (pf_vvi->minor > VIRTCHNL_VERSION_MINOR))) {
176 		device_printf(dev, "Critical PF/VF API version mismatch!\n");
177 		error = EIO;
178 	} else {
179 		sc->version.major = pf_vvi->major;
180 		sc->version.minor = pf_vvi->minor;
181 	}
182 
183 	/* Log PF/VF api versions */
184 	device_printf(dev, "PF API %d.%d / VF API %d.%d\n",
185 	    pf_vvi->major, pf_vvi->minor,
186 	    VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR);
187 
188 out_alloc:
189 	free(event.msg_buf, M_IAVF);
190 	return (error);
191 }
192 
193 /**
194  * iavf_send_vf_config_msg - Send VF configuration request
195  * @sc: device softc
196  *
197  * Send VF configuration request admin queue message to the PF. The reply
198  * is not checked in this function.
199  *
200  * @returns 0 if the message was successfully sent, or one of the
201  * IAVF_ADMIN_QUEUE_ERROR_ statuses if not.
202  */
203 int
204 iavf_send_vf_config_msg(struct iavf_sc *sc)
205 {
206 	u32 caps;
207 
208 	/* Support the base mode functionality, as well as advanced
209 	 * speed reporting capability.
210 	 */
211 	caps = VF_BASE_MODE_OFFLOADS |
212 	    VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
213 
214 	iavf_dbg_info(sc, "Sending offload flags: 0x%b\n",
215 	    caps, IAVF_PRINTF_VF_OFFLOAD_FLAGS);
216 
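	/*
	 * PFs that negotiated the *_NO_VF_CAPS minor version do not expect
	 * a capability bitmap with this request, so send it without a
	 * buffer in that case.
	 */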
217 	if (sc->version.minor == VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
218 		return iavf_send_pf_msg(sc, VIRTCHNL_OP_GET_VF_RESOURCES,
219 				  NULL, 0);
220 	else
221 		return iavf_send_pf_msg(sc, VIRTCHNL_OP_GET_VF_RESOURCES,
222 				  (u8 *)&caps, sizeof(caps));
223 }
224 
225 /**
226  * iavf_get_vf_config - Get the VF configuration from the PF
227  * @sc: device softc
228  *
229  * Get VF configuration from PF and populate hw structure. Must be called after
230  * admin queue is initialized. Busy waits until response is received from PF,
231  * with maximum timeout. Response from PF is returned in the buffer for further
232  * processing by the caller.
233  *
234  * @returns zero on success, or an error code on failure
235  */
236 int
237 iavf_get_vf_config(struct iavf_sc *sc)
238 {
239 	struct iavf_hw	*hw = &sc->hw;
240 	device_t	dev = sc->dev;
241 	enum iavf_status status = IAVF_SUCCESS;
242 	struct iavf_arq_event_info event;
243 	u16 len;
244 	u32 retries = 0;
245 	int error = 0;
246 
247 	/* Note this assumes a single VSI */
248 	len = sizeof(struct virtchnl_vf_resource) +
249 	    sizeof(struct virtchnl_vsi_resource);
250 	event.buf_len = len;
251 	event.msg_buf = (u8 *)malloc(event.buf_len, M_IAVF, M_WAITOK);
252 
253 	for (;;) {
254 		status = iavf_clean_arq_element(hw, &event, NULL);
255 		if (status == IAVF_ERR_ADMIN_QUEUE_NO_WORK) {
256 			if (++retries <= IAVF_AQ_MAX_ERR)
257 				iavf_msec_pause(10);
258 		} else if ((enum virtchnl_ops)le32toh(event.desc.cookie_high) !=
259 		    VIRTCHNL_OP_GET_VF_RESOURCES) {
260 			iavf_dbg_vc(sc, "%s: Received a response from PF,"
261 			    " opcode %d, error %d\n",
262 			    __func__,
263 			    le32toh(event.desc.cookie_high),
264 			    le32toh(event.desc.cookie_low));
265 			retries++;
266 			continue;
267 		} else {
268 			status = (enum iavf_status)le32toh(event.desc.cookie_low);
269 			if (status) {
270 				device_printf(dev, "%s: Error returned from PF,"
271 				    " opcode %d, error %d\n", __func__,
272 				    le32toh(event.desc.cookie_high),
273 				    le32toh(event.desc.cookie_low));
274 				error = EIO;
275 				goto out_alloc;
276 			}
277 			/* We retrieved the config message, with no errors */
278 			break;
279 		}
280 
281 		if (retries > IAVF_AQ_MAX_ERR) {
282 			iavf_dbg_vc(sc,
283 			    "%s: Did not receive response after %d tries.\n",
284 			    __func__, retries);
285 			error = ETIMEDOUT;
286 			goto out_alloc;
287 		}
288 	}
289 
290 	memcpy(sc->vf_res, event.msg_buf, min(event.msg_len, len));
291 	iavf_vf_parse_hw_config(hw, sc->vf_res);
292 
293 out_alloc:
294 	free(event.msg_buf, M_IAVF);
295 	return (error);
296 }
297 
298 /**
299  * iavf_enable_queues - Enable queues
300  * @sc: device softc
301  *
302  * Request that the PF enable all of our queues.
303  *
304  * @remark the reply from the PF is not checked by this function.
305  *
306  * @returns zero
307  */
308 int
309 iavf_enable_queues(struct iavf_sc *sc)
310 {
311 	struct virtchnl_queue_select vqs;
312 	struct iavf_vsi *vsi = &sc->vsi;
313 
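	/*
	 * tx_queues/rx_queues are bitmasks with one bit per queue, so
	 * (1 << number of queues) - 1 selects every queue owned by this VF.
	 */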
314 	vqs.vsi_id = sc->vsi_res->vsi_id;
315 	vqs.tx_queues = (1 << IAVF_NTXQS(vsi)) - 1;
316 	vqs.rx_queues = vqs.tx_queues;
317 	iavf_send_pf_msg(sc, VIRTCHNL_OP_ENABLE_QUEUES,
318 			   (u8 *)&vqs, sizeof(vqs));
319 	return (0);
320 }
321 
322 /**
323  * iavf_disable_queues - Disable queues
324  * @sc: device softc
325  *
326  * Request that the PF disable all of our queues.
327  *
328  * @remark the reply from the PF is not checked by this function.
329  *
330  * @returns zero
331  */
332 int
333 iavf_disable_queues(struct iavf_sc *sc)
334 {
335 	struct virtchnl_queue_select vqs;
336 	struct iavf_vsi *vsi = &sc->vsi;
337 
338 	vqs.vsi_id = sc->vsi_res->vsi_id;
339 	vqs.tx_queues = (1 << IAVF_NTXQS(vsi)) - 1;
340 	vqs.rx_queues = vqs.tx_queues;
341 	iavf_send_pf_msg(sc, VIRTCHNL_OP_DISABLE_QUEUES,
342 			   (u8 *)&vqs, sizeof(vqs));
343 	return (0);
344 }
345 
346 /**
347  * iavf_add_vlans - Add VLAN filters
348  * @sc: device softc
349  *
350  * Scan the Filter List looking for vlans that need
351  * to be added, then create the data to hand to the AQ
352  * for handling.
353  *
354  * @returns zero on success, or an error code on failure.
355  */
356 int
357 iavf_add_vlans(struct iavf_sc *sc)
358 {
359 	struct virtchnl_vlan_filter_list *v;
360 	struct iavf_vlan_filter *f, *ftmp;
361 	device_t dev = sc->dev;
362 	int i = 0, cnt = 0;
363 	u32 len;
364 
365 	/* Get count of VLAN filters to add */
366 	SLIST_FOREACH(f, sc->vlan_filters, next) {
367 		if (f->flags & IAVF_FILTER_ADD)
368 			cnt++;
369 	}
370 
371 	if (!cnt) /* no work... */
372 		return (ENOENT);
373 
374 	len = sizeof(struct virtchnl_vlan_filter_list) +
375 	      (cnt * sizeof(u16));
376 
377 	if (len > IAVF_AQ_BUF_SZ) {
378 		device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
379 			__func__);
380 		return (EFBIG);
381 	}
382 
383 	v = (struct virtchnl_vlan_filter_list *)malloc(len, M_IAVF, M_NOWAIT | M_ZERO);
384 	if (!v) {
385 		device_printf(dev, "%s: unable to allocate memory\n",
386 			__func__);
387 		return (ENOMEM);
388 	}
389 
390 	v->vsi_id = sc->vsi_res->vsi_id;
391 	v->num_elements = cnt;
392 
393 	/* Scan the filter array */
394 	SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) {
395 		if (f->flags & IAVF_FILTER_ADD) {
396 			bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16));
397 			f->flags = IAVF_FILTER_USED;
398 			i++;
399 		}
400 		if (i == cnt)
401 			break;
402 	}
403 
404 	iavf_send_pf_msg(sc, VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len);
405 	free(v, M_IAVF);
406 	/* add stats? */
407 	return (0);
408 }
409 
410 /**
411  * iavf_del_vlans - Delete VLAN filters
412  * @sc: device softc
413  *
414  * Scan the Filter Table looking for vlans that need
415  * to be removed, then create the data to hand to the AQ
416  * for handling.
417  *
418  * @returns zero on success, or an error code on failure.
419  */
420 int
421 iavf_del_vlans(struct iavf_sc *sc)
422 {
423 	struct virtchnl_vlan_filter_list *v;
424 	struct iavf_vlan_filter *f, *ftmp;
425 	device_t dev = sc->dev;
426 	int i = 0, cnt = 0;
427 	u32 len;
428 
429 	/* Get count of VLAN filters to delete */
430 	SLIST_FOREACH(f, sc->vlan_filters, next) {
431 		if (f->flags & IAVF_FILTER_DEL)
432 			cnt++;
433 	}
434 
435 	if (!cnt) /* no work... */
436 		return (ENOENT);
437 
438 	len = sizeof(struct virtchnl_vlan_filter_list) +
439 	      (cnt * sizeof(u16));
440 
441 	if (len > IAVF_AQ_BUF_SZ) {
442 		device_printf(dev, "%s: Exceeded Max AQ Buf size\n",
443 			__func__);
444 		return (EFBIG);
445 	}
446 
447 	v = (struct virtchnl_vlan_filter_list *)
448 	    malloc(len, M_IAVF, M_NOWAIT | M_ZERO);
449 	if (!v) {
450 		device_printf(dev, "%s: unable to allocate memory\n",
451 			__func__);
452 		return (ENOMEM);
453 	}
454 
455 	v->vsi_id = sc->vsi_res->vsi_id;
456 	v->num_elements = cnt;
457 
458 	/* Scan the filter array */
459 	SLIST_FOREACH_SAFE(f, sc->vlan_filters, next, ftmp) {
460 		if (f->flags & IAVF_FILTER_DEL) {
461 			bcopy(&f->vlan, &v->vlan_id[i], sizeof(u16));
462 			i++;
463 			SLIST_REMOVE(sc->vlan_filters, f, iavf_vlan_filter, next);
464 			free(f, M_IAVF);
465 		}
466 		if (i == cnt)
467 			break;
468 	}
469 
470 	iavf_send_pf_msg(sc, VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len);
471 	free(v, M_IAVF);
472 	/* add stats? */
473 	return (0);
474 }
475 
476 /**
477  * iavf_add_ether_filters - Add MAC filters
478  * @sc: device softc
479  *
480  * This routine takes additions to the vsi filter
481  * table and creates an Admin Queue call to create
482  * the filters in the hardware.
483  *
484  * @returns zero on success, or an error code on failure.
485  */
486 int
487 iavf_add_ether_filters(struct iavf_sc *sc)
488 {
489 	struct virtchnl_ether_addr_list *a;
490 	struct iavf_mac_filter *f;
491 	device_t dev = sc->dev;
492 	int len, j = 0, cnt = 0;
493 	int error;
494 
495 	/* Get count of MAC addresses to add */
496 	SLIST_FOREACH(f, sc->mac_filters, next) {
497 		if (f->flags & IAVF_FILTER_ADD)
498 			cnt++;
499 	}
500 	if (cnt == 0) { /* Should not happen... */
501 		iavf_dbg_vc(sc, "%s: cnt == 0, exiting...\n", __func__);
502 		return (ENOENT);
503 	}
504 
505 	len = sizeof(struct virtchnl_ether_addr_list) +
506 	    (cnt * sizeof(struct virtchnl_ether_addr));
507 
508 	a = (struct virtchnl_ether_addr_list *)
509 	    malloc(len, M_IAVF, M_NOWAIT | M_ZERO);
510 	if (a == NULL) {
511 		device_printf(dev, "%s: Failed to get memory for "
512 		    "virtchnl_ether_addr_list\n", __func__);
513 		return (ENOMEM);
514 	}
515 	a->vsi_id = sc->vsi.id;
516 	a->num_elements = cnt;
517 
518 	/* Scan the filter array */
519 	SLIST_FOREACH(f, sc->mac_filters, next) {
520 		if (f->flags & IAVF_FILTER_ADD) {
521 			bcopy(f->macaddr, a->list[j].addr, ETHER_ADDR_LEN);
522 			f->flags &= ~IAVF_FILTER_ADD;
523 			j++;
524 
525 			iavf_dbg_vc(sc, "%s: ADD: " MAC_FORMAT "\n",
526 			    __func__, MAC_FORMAT_ARGS(f->macaddr));
527 		}
528 		if (j == cnt)
529 			break;
530 	}
531 	iavf_dbg_vc(sc, "%s: len %d, j %d, cnt %d\n", __func__,
532 	    len, j, cnt);
533 
534 	error = iavf_send_pf_msg(sc,
535 	    VIRTCHNL_OP_ADD_ETH_ADDR, (u8 *)a, len);
536 	/* add stats? */
537 	free(a, M_IAVF);
538 	return (error);
539 }
540 
541 /**
542  * iavf_del_ether_filters - Delete MAC filters
543  * @sc: device softc
544  *
545  * This routine takes filters flagged for deletion in the
546  * sc MAC filter list and creates an Admin Queue call
547  * to delete those filters in the hardware.
548  *
549  * @returns zero on success, or an error code on failure.
550  */
551 int
552 iavf_del_ether_filters(struct iavf_sc *sc)
553 {
554 	struct virtchnl_ether_addr_list *d;
555 	struct iavf_mac_filter *f, *f_temp;
556 	device_t dev = sc->dev;
557 	int len, j = 0, cnt = 0;
558 
559 	/* Get count of MAC addresses to delete */
560 	SLIST_FOREACH(f, sc->mac_filters, next) {
561 		if (f->flags & IAVF_FILTER_DEL)
562 			cnt++;
563 	}
564 	if (cnt == 0) {
565 		iavf_dbg_vc(sc, "%s: cnt == 0, exiting...\n", __func__);
566 		return (ENOENT);
567 	}
568 
569 	len = sizeof(struct virtchnl_ether_addr_list) +
570 	    (cnt * sizeof(struct virtchnl_ether_addr));
571 
572 	d = (struct virtchnl_ether_addr_list *)
573 	    malloc(len, M_IAVF, M_NOWAIT | M_ZERO);
574 	if (d == NULL) {
575 		device_printf(dev, "%s: Failed to get memory for "
576 		    "virtchnl_ether_addr_list\n", __func__);
577 		return (ENOMEM);
578 	}
579 	d->vsi_id = sc->vsi.id;
580 	d->num_elements = cnt;
581 
582 	/* Scan the filter array */
583 	SLIST_FOREACH_SAFE(f, sc->mac_filters, next, f_temp) {
584 		if (f->flags & IAVF_FILTER_DEL) {
585 			bcopy(f->macaddr, d->list[j].addr, ETHER_ADDR_LEN);
586 			iavf_dbg_vc(sc, "DEL: " MAC_FORMAT "\n",
587 			    MAC_FORMAT_ARGS(f->macaddr));
588 			j++;
589 			SLIST_REMOVE(sc->mac_filters, f, iavf_mac_filter, next);
590 			free(f, M_IAVF);
591 		}
592 		if (j == cnt)
593 			break;
594 	}
595 	iavf_send_pf_msg(sc,
596 	    VIRTCHNL_OP_DEL_ETH_ADDR, (u8 *)d, len);
597 	/* add stats? */
598 	free(d, M_IAVF);
599 	return (0);
600 }
601 
602 /**
603  * iavf_request_reset - Request a device reset
604  * @sc: device softc
605  *
606  * Request that the PF reset this VF. No response is expected.
607  *
608  * @returns zero
609  */
610 int
611 iavf_request_reset(struct iavf_sc *sc)
612 {
613 	/*
614 	** Set the reset status to "in progress" before
615 	** the request, this avoids any possibility of
616 	** a mistaken early detection of completion.
617 	*/
618 	wr32(&sc->hw, IAVF_VFGEN_RSTAT, VIRTCHNL_VFR_INPROGRESS);
619 	iavf_send_pf_msg(sc, VIRTCHNL_OP_RESET_VF, NULL, 0);
620 	return (0);
621 }
622 
623 /**
624  * iavf_request_stats - Request VF stats
625  * @sc: device softc
626  *
627  * Request the statistics for this VF's VSI from PF.
628  *
629  * @remark prints an error message if sending the stats request fails, but
630  * does not return an error code.
631  *
632  * @returns zero
633  */
634 int
635 iavf_request_stats(struct iavf_sc *sc)
636 {
637 	struct virtchnl_queue_select vqs;
638 	int error = 0;
639 
640 	vqs.vsi_id = sc->vsi_res->vsi_id;
641 	/* Low priority; log any send error but do not propagate it */
642 	error = iavf_send_pf_msg(sc, VIRTCHNL_OP_GET_STATS,
643 	    (u8 *)&vqs, sizeof(vqs));
644 	if (error)
645 		device_printf(sc->dev, "Error sending stats request to PF: %d\n", error);
646 
647 	return (0);
648 }
649 
650 /**
651  * iavf_update_stats_counters - Update driver statistics
652  * @sc: device softc
653  * @es: ethernet stats storage
654  *
655  * Updates driver's stats counters with VSI stats returned from PF.
656  */
657 void
658 iavf_update_stats_counters(struct iavf_sc *sc, struct iavf_eth_stats *es)
659 {
660 	struct iavf_vsi *vsi = &sc->vsi;
661 
662 	/* Update ifnet stats */
663 	vsi->ipackets = es->rx_unicast + es->rx_multicast + es->rx_broadcast;
664 	vsi->opackets = es->tx_unicast + es->tx_multicast + es->tx_broadcast;
665 	vsi->ibytes = es->rx_bytes;
666 	vsi->obytes = es->tx_bytes;
667 	vsi->imcasts = es->rx_multicast;
668 	vsi->omcasts = es->tx_multicast;
669 
670 	vsi->oerrors = es->tx_errors;
671 	vsi->iqdrops = es->rx_discards;
672 	vsi->oqdrops = es->tx_discards;
673 	vsi->noproto = es->rx_unknown_protocol;
674 
675 	vsi->eth_stats = *es;
676 }
677 
678 /**
679  * iavf_config_rss_key - Configure RSS key over virtchnl
680  * @sc: device softc
681  *
682  * Send a message to the PF to configure the RSS key using the virtchnl
683  * interface.
684  *
685  * @remark this does not check the reply from the PF.
686  *
687  * @returns zero on success, or an error code on failure.
688  */
689 int
690 iavf_config_rss_key(struct iavf_sc *sc)
691 {
692 	struct virtchnl_rss_key *rss_key_msg;
693 	int msg_len, key_length;
694 	u8		rss_seed[IAVF_RSS_KEY_SIZE];
695 
696 #ifdef RSS
697 	/* Fetch the configured RSS key */
698 	rss_getkey((uint8_t *) &rss_seed);
699 #else
700 	iavf_get_default_rss_key((u32 *)rss_seed);
701 #endif
702 
703 	/* Send the fetched key */
704 	key_length = IAVF_RSS_KEY_SIZE;
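	/*
	 * struct virtchnl_rss_key already reserves one byte of key storage,
	 * hence the "- 1" when sizing the message.
	 */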
705 	msg_len = sizeof(struct virtchnl_rss_key) + (sizeof(u8) * key_length) - 1;
706 	rss_key_msg = (struct virtchnl_rss_key *)
707 	    malloc(msg_len, M_IAVF, M_NOWAIT | M_ZERO);
708 	if (rss_key_msg == NULL) {
709 		device_printf(sc->dev, "Unable to allocate msg memory for RSS key msg.\n");
710 		return (ENOMEM);
711 	}
712 
713 	rss_key_msg->vsi_id = sc->vsi_res->vsi_id;
714 	rss_key_msg->key_len = key_length;
715 	bcopy(rss_seed, &rss_key_msg->key[0], key_length);
716 
717 	iavf_dbg_vc(sc, "%s: vsi_id %d, key_len %d\n", __func__,
718 	    rss_key_msg->vsi_id, rss_key_msg->key_len);
719 
720 	iavf_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_RSS_KEY,
721 			  (u8 *)rss_key_msg, msg_len);
722 
723 	free(rss_key_msg, M_IAVF);
724 	return (0);
725 }
726 
727 /**
728  * iavf_set_rss_hena - Configure the RSS HENA
729  * @sc: device softc
730  *
731  * Configure the RSS HENA values by sending a virtchnl message to the PF
732  *
733  * @remark the reply from the PF is not checked by this function.
734  *
735  * @returns zero
736  */
737 int
738 iavf_set_rss_hena(struct iavf_sc *sc)
739 {
740 	struct virtchnl_rss_hena hena;
741 	struct iavf_hw *hw = &sc->hw;
742 
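	/*
	 * The HENA (hash enable) bitmask selects which packet types the
	 * hardware hashes for RSS; use the default set for the MAC type
	 * in use.
	 */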
743 	if (hw->mac.type == IAVF_MAC_VF)
744 		hena.hena = IAVF_DEFAULT_RSS_HENA_AVF;
745 	else if (hw->mac.type == IAVF_MAC_X722_VF)
746 		hena.hena = IAVF_DEFAULT_RSS_HENA_X722;
747 	else
748 		hena.hena = IAVF_DEFAULT_RSS_HENA_BASE;
749 
750 	iavf_send_pf_msg(sc, VIRTCHNL_OP_SET_RSS_HENA,
751 	    (u8 *)&hena, sizeof(hena));
752 	return (0);
753 }
754 
755 /**
756  * iavf_config_rss_lut - Configure RSS lookup table
757  * @sc: device softc
758  *
759  * Configure the RSS lookup table by sending a virtchnl message to the PF.
760  *
761  * @remark the reply from the PF is not checked in this function.
762  *
763  * @returns zero on success, or an error code on failure.
764  */
765 int
766 iavf_config_rss_lut(struct iavf_sc *sc)
767 {
768 	struct virtchnl_rss_lut *rss_lut_msg;
769 	int msg_len;
770 	u16 lut_length;
771 	u32 lut;
772 	int i, que_id;
773 
774 	lut_length = IAVF_RSS_VSI_LUT_SIZE;
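	/*
	 * As with the RSS key message, struct virtchnl_rss_lut reserves one
	 * byte of LUT storage, hence the "- 1" in the message length.
	 */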
775 	msg_len = sizeof(struct virtchnl_rss_lut) + (lut_length * sizeof(u8)) - 1;
776 	rss_lut_msg = (struct virtchnl_rss_lut *)
777 	    malloc(msg_len, M_IAVF, M_NOWAIT | M_ZERO);
778 	if (rss_lut_msg == NULL) {
779 		device_printf(sc->dev, "Unable to allocate msg memory for RSS lut msg.\n");
780 		return (ENOMEM);
781 	}
782 
783 	rss_lut_msg->vsi_id = sc->vsi_res->vsi_id;
784 	/* Each LUT entry is a max of 1 byte, so this is easy */
785 	rss_lut_msg->lut_entries = lut_length;
786 
787 	/* Populate the LUT with max no. of queues in round robin fashion */
788 	for (i = 0; i < lut_length; i++) {
789 #ifdef RSS
790 		/*
791 		 * Fetch the RSS bucket id for the given indirection entry.
792 		 * Cap it at the number of configured buckets (which is
793 		 * num_queues.)
794 		 */
795 		que_id = rss_get_indirection_to_bucket(i);
796 		que_id = que_id % sc->vsi.num_rx_queues;
797 #else
798 		que_id = i % sc->vsi.num_rx_queues;
799 #endif
800 		lut = que_id & IAVF_RSS_VSI_LUT_ENTRY_MASK;
801 		rss_lut_msg->lut[i] = lut;
802 	}
803 
804 	iavf_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_RSS_LUT,
805 			  (u8 *)rss_lut_msg, msg_len);
806 
807 	free(rss_lut_msg, M_IAVF);
808 	return (0);
809 }
810 
811 /**
812  * iavf_config_promisc_mode - Configure promiscuous mode
813  * @sc: device softc
814  *
815  * Configure the device into promiscuous mode by sending a virtchnl message to
816  * the PF.
817  *
818  * @remark the reply from the PF is not checked in this function.
819  *
820  * @returns zero
821  */
822 int
823 iavf_config_promisc_mode(struct iavf_sc *sc)
824 {
825 	struct virtchnl_promisc_info pinfo;
826 
827 	pinfo.vsi_id = sc->vsi_res->vsi_id;
828 	pinfo.flags = sc->promisc_flags;
829 
830 	iavf_send_pf_msg(sc, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
831 	    (u8 *)&pinfo, sizeof(pinfo));
832 	return (0);
833 }
834 
835 /**
836  * iavf_vc_send_cmd - Convert request into virtchnl calls
837  * @sc: device softc
838  * @request: the requested command to run
839  *
840  * Send the proper virtchnl call based on the request value.
841  *
842  * @returns zero on success, or an error code on failure. Note that unknown
843  * requests will return zero.
844  */
845 int
846 iavf_vc_send_cmd(struct iavf_sc *sc, uint32_t request)
847 {
848 	switch (request) {
849 	case IAVF_FLAG_AQ_MAP_VECTORS:
850 		return iavf_map_queues(sc);
851 
852 	case IAVF_FLAG_AQ_ADD_MAC_FILTER:
853 		return iavf_add_ether_filters(sc);
854 
855 	case IAVF_FLAG_AQ_ADD_VLAN_FILTER:
856 		return iavf_add_vlans(sc);
857 
858 	case IAVF_FLAG_AQ_DEL_MAC_FILTER:
859 		return iavf_del_ether_filters(sc);
860 
861 	case IAVF_FLAG_AQ_DEL_VLAN_FILTER:
862 		return iavf_del_vlans(sc);
863 
864 	case IAVF_FLAG_AQ_CONFIGURE_QUEUES:
865 		return iavf_configure_queues(sc);
866 
867 	case IAVF_FLAG_AQ_DISABLE_QUEUES:
868 		return iavf_disable_queues(sc);
869 
870 	case IAVF_FLAG_AQ_ENABLE_QUEUES:
871 		return iavf_enable_queues(sc);
872 
873 	case IAVF_FLAG_AQ_CONFIG_RSS_KEY:
874 		return iavf_config_rss_key(sc);
875 
876 	case IAVF_FLAG_AQ_SET_RSS_HENA:
877 		return iavf_set_rss_hena(sc);
878 
879 	case IAVF_FLAG_AQ_CONFIG_RSS_LUT:
880 		return iavf_config_rss_lut(sc);
881 
882 	case IAVF_FLAG_AQ_CONFIGURE_PROMISC:
883 		return iavf_config_promisc_mode(sc);
884 	}
885 
886 	return (0);
887 }
888 
889 /**
890  * iavf_vc_get_op_chan - Get op channel for a request
891  * @sc: device softc
892  * @request: the request type
893  *
894  * @returns the op channel for the given request, or NULL if no channel is
895  * used.
896  */
897 void *
898 iavf_vc_get_op_chan(struct iavf_sc *sc, uint32_t request)
899 {
900 	switch (request) {
901 	case IAVF_FLAG_AQ_ENABLE_QUEUES:
902 		return (&sc->enable_queues_chan);
903 	case IAVF_FLAG_AQ_DISABLE_QUEUES:
904 		return (&sc->disable_queues_chan);
905 	default:
906 		return (NULL);
907 	}
908 }
909 
910 /**
911  * iavf_vc_stat_str - convert virtchnl status err code to a string
912  * @hw: pointer to the HW structure
913  * @stat_err: the status error code to convert
914  *
915  * @returns the human readable string representing the specified error code.
916  **/
917 const char *
918 iavf_vc_stat_str(struct iavf_hw *hw, enum virtchnl_status_code stat_err)
919 {
920 	switch (stat_err) {
921 	case VIRTCHNL_STATUS_SUCCESS:
922 		return "OK";
923 	case VIRTCHNL_ERR_PARAM:
924 		return "VIRTCHNL_ERR_PARAM";
925 	case VIRTCHNL_STATUS_ERR_NO_MEMORY:
926 		return "VIRTCHNL_STATUS_ERR_NO_MEMORY";
927 	case VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH:
928 		return "VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH";
929 	case VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR:
930 		return "VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR";
931 	case VIRTCHNL_STATUS_ERR_INVALID_VF_ID:
932 		return "VIRTCHNL_STATUS_ERR_INVALID_VF_ID";
933 	case VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR:
934 		return "VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR";
935 	case VIRTCHNL_STATUS_NOT_SUPPORTED:
936 		return "VIRTCHNL_STATUS_NOT_SUPPORTED";
937 	}
938 
939 	snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
940 	return hw->err_str;
941 }
942 
943 /**
944  * iavf_adv_speed_to_ext_speed - Convert numeric speed to iavf speed enum
945  * @adv_link_speed: link speed in Mb/s
946  *
947  * Converts the link speed from the "advanced" link speed virtchnl op into the
948  * closest approximation of the internal iavf link speed, rounded down.
949  *
950  * @returns the link speed as an iavf_ext_link_speed enum value
951  */
952 enum iavf_ext_link_speed
953 iavf_adv_speed_to_ext_speed(u32 adv_link_speed)
954 {
955 	if (adv_link_speed >= 100000)
956 		return IAVF_EXT_LINK_SPEED_100GB;
957 	if (adv_link_speed >= 50000)
958 		return IAVF_EXT_LINK_SPEED_50GB;
959 	if (adv_link_speed >= 40000)
960 		return IAVF_EXT_LINK_SPEED_40GB;
961 	if (adv_link_speed >= 25000)
962 		return IAVF_EXT_LINK_SPEED_25GB;
963 	if (adv_link_speed >= 20000)
964 		return IAVF_EXT_LINK_SPEED_20GB;
965 	if (adv_link_speed >= 10000)
966 		return IAVF_EXT_LINK_SPEED_10GB;
967 	if (adv_link_speed >= 5000)
968 		return IAVF_EXT_LINK_SPEED_5GB;
969 	if (adv_link_speed >= 2500)
970 		return IAVF_EXT_LINK_SPEED_2500MB;
971 	if (adv_link_speed >= 1000)
972 		return IAVF_EXT_LINK_SPEED_1000MB;
973 	if (adv_link_speed >= 100)
974 		return IAVF_EXT_LINK_SPEED_100MB;
975 	if (adv_link_speed >= 10)
976 		return IAVF_EXT_LINK_SPEED_10MB;
977 
978 	return IAVF_EXT_LINK_SPEED_UNKNOWN;
979 }
980 
981 /**
982  * iavf_ext_speed_to_ifmedia - Convert internal iavf speed to ifmedia value
983  * @link_speed: the link speed
984  *
985  * @remark this is sort of a hack, because we don't actually know what media
986  * type the VF is running on. In an ideal world we might just report the media
987  * type as "virtual" and have another mechanism for reporting the link
988  * speed.
989  *
990  * @returns a suitable ifmedia type for the given link speed.
991  */
992 u32
993 iavf_ext_speed_to_ifmedia(enum iavf_ext_link_speed link_speed)
994 {
995 	switch (link_speed) {
996 	case IAVF_EXT_LINK_SPEED_100GB:
997 		return IFM_100G_SR4;
998 	case IAVF_EXT_LINK_SPEED_50GB:
999 		return IFM_50G_SR2;
1000 	case IAVF_EXT_LINK_SPEED_40GB:
1001 		return IFM_40G_SR4;
1002 	case IAVF_EXT_LINK_SPEED_25GB:
1003 		return IFM_25G_SR;
1004 	case IAVF_EXT_LINK_SPEED_20GB:
1005 		return IFM_20G_KR2;
1006 	case IAVF_EXT_LINK_SPEED_10GB:
1007 		return IFM_10G_SR;
1008 	case IAVF_EXT_LINK_SPEED_5GB:
1009 		return IFM_5000_T;
1010 	case IAVF_EXT_LINK_SPEED_2500MB:
1011 		return IFM_2500_T;
1012 	case IAVF_EXT_LINK_SPEED_1000MB:
1013 		return IFM_1000_T;
1014 	case IAVF_EXT_LINK_SPEED_100MB:
1015 		return IFM_100_TX;
1016 	case IAVF_EXT_LINK_SPEED_10MB:
1017 		return IFM_10_T;
1018 	case IAVF_EXT_LINK_SPEED_UNKNOWN:
1019 	default:
1020 		return IFM_UNKNOWN;
1021 	}
1022 }
1023 
1024 /**
1025  * iavf_vc_speed_to_ext_speed - Convert virtchnl speed enum to native iavf
1026  * driver speed representation.
1027  * @link_speed: link speed enum value
1028  *
1029  * @returns the link speed in the native iavf format.
1030  */
1031 enum iavf_ext_link_speed
1032 iavf_vc_speed_to_ext_speed(enum virtchnl_link_speed link_speed)
1033 {
1034 	switch (link_speed) {
1035 	case VIRTCHNL_LINK_SPEED_40GB:
1036 		return IAVF_EXT_LINK_SPEED_40GB;
1037 	case VIRTCHNL_LINK_SPEED_25GB:
1038 		return IAVF_EXT_LINK_SPEED_25GB;
1039 	case VIRTCHNL_LINK_SPEED_20GB:
1040 		return IAVF_EXT_LINK_SPEED_20GB;
1041 	case VIRTCHNL_LINK_SPEED_10GB:
1042 		return IAVF_EXT_LINK_SPEED_10GB;
1043 	case VIRTCHNL_LINK_SPEED_1GB:
1044 		return IAVF_EXT_LINK_SPEED_1000MB;
1045 	case VIRTCHNL_LINK_SPEED_100MB:
1046 		return IAVF_EXT_LINK_SPEED_100MB;
1047 	case VIRTCHNL_LINK_SPEED_UNKNOWN:
1048 	default:
1049 		return IAVF_EXT_LINK_SPEED_UNKNOWN;
1050 	}
1051 }
1052 
1053 /**
1054  * iavf_vc_speed_to_string - Convert virtchnl speed to a string
1055  * @link_speed: the speed to convert
1056  *
1057  * @returns string representing the link speed as reported by the virtchnl
1058  * interface.
1059  */
1060 const char *
1061 iavf_vc_speed_to_string(enum virtchnl_link_speed link_speed)
1062 {
1063 	return iavf_ext_speed_to_str(iavf_vc_speed_to_ext_speed(link_speed));
1064 }
1065 
1066 /**
1067  * iavf_ext_speed_to_str - Convert iavf speed enum to string representation
1068  * @link_speed: link speed enum value
1069  *
1070  * XXX: This is an iavf-modified copy of ice_aq_speed_to_str()
1071  *
1072  * @returns the string representation of the given link speed.
1073  */
1074 const char *
1075 iavf_ext_speed_to_str(enum iavf_ext_link_speed link_speed)
1076 {
1077 	switch (link_speed) {
1078 	case IAVF_EXT_LINK_SPEED_100GB:
1079 		return "100 Gbps";
1080 	case IAVF_EXT_LINK_SPEED_50GB:
1081 		return "50 Gbps";
1082 	case IAVF_EXT_LINK_SPEED_40GB:
1083 		return "40 Gbps";
1084 	case IAVF_EXT_LINK_SPEED_25GB:
1085 		return "25 Gbps";
1086 	case IAVF_EXT_LINK_SPEED_20GB:
1087 		return "20 Gbps";
1088 	case IAVF_EXT_LINK_SPEED_10GB:
1089 		return "10 Gbps";
1090 	case IAVF_EXT_LINK_SPEED_5GB:
1091 		return "5 Gbps";
1092 	case IAVF_EXT_LINK_SPEED_2500MB:
1093 		return "2.5 Gbps";
1094 	case IAVF_EXT_LINK_SPEED_1000MB:
1095 		return "1 Gbps";
1096 	case IAVF_EXT_LINK_SPEED_100MB:
1097 		return "100 Mbps";
1098 	case IAVF_EXT_LINK_SPEED_10MB:
1099 		return "10 Mbps";
1100 	case IAVF_EXT_LINK_SPEED_UNKNOWN:
1101 	default:
1102 		return "Unknown";
1103 	}
1104 }
1105 
1106 /**
1107  * iavf_vc_opcode_str - Convert virtchnl opcode to string
1108  * @op: the virtchnl op code
1109  *
1110  * @returns the string representation of the given virtchnl op code
1111  */
1112 const char *
1113 iavf_vc_opcode_str(uint16_t op)
1114 {
1115 	switch (op) {
1116 	case VIRTCHNL_OP_VERSION:
1117 		return ("VERSION");
1118 	case VIRTCHNL_OP_RESET_VF:
1119 		return ("RESET_VF");
1120 	case VIRTCHNL_OP_GET_VF_RESOURCES:
1121 		return ("GET_VF_RESOURCES");
1122 	case VIRTCHNL_OP_CONFIG_TX_QUEUE:
1123 		return ("CONFIG_TX_QUEUE");
1124 	case VIRTCHNL_OP_CONFIG_RX_QUEUE:
1125 		return ("CONFIG_RX_QUEUE");
1126 	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
1127 		return ("CONFIG_VSI_QUEUES");
1128 	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
1129 		return ("CONFIG_IRQ_MAP");
1130 	case VIRTCHNL_OP_ENABLE_QUEUES:
1131 		return ("ENABLE_QUEUES");
1132 	case VIRTCHNL_OP_DISABLE_QUEUES:
1133 		return ("DISABLE_QUEUES");
1134 	case VIRTCHNL_OP_ADD_ETH_ADDR:
1135 		return ("ADD_ETH_ADDR");
1136 	case VIRTCHNL_OP_DEL_ETH_ADDR:
1137 		return ("DEL_ETH_ADDR");
1138 	case VIRTCHNL_OP_ADD_VLAN:
1139 		return ("ADD_VLAN");
1140 	case VIRTCHNL_OP_DEL_VLAN:
1141 		return ("DEL_VLAN");
1142 	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
1143 		return ("CONFIG_PROMISCUOUS_MODE");
1144 	case VIRTCHNL_OP_GET_STATS:
1145 		return ("GET_STATS");
1146 	case VIRTCHNL_OP_RSVD:
1147 		return ("RSVD");
1148 	case VIRTCHNL_OP_EVENT:
1149 		return ("EVENT");
1150 	case VIRTCHNL_OP_CONFIG_RSS_KEY:
1151 		return ("CONFIG_RSS_KEY");
1152 	case VIRTCHNL_OP_CONFIG_RSS_LUT:
1153 		return ("CONFIG_RSS_LUT");
1154 	case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
1155 		return ("GET_RSS_HENA_CAPS");
1156 	case VIRTCHNL_OP_SET_RSS_HENA:
1157 		return ("SET_RSS_HENA");
1158 	default:
1159 		return ("UNKNOWN");
1160 	}
1161 }
1162 
1163 /**
1164  * iavf_vc_completion - Handle PF reply messages
1165  * @sc: device softc
1166  * @v_opcode: virtchnl op code
1167  * @v_retval: virtchnl return value
1168  * @msg: the message to send
1169  * @msglen: length of the msg buffer
1170  *
1171  * Asynchronous completion function for admin queue messages. Rather than busy
1172  * wait, we fire off our requests and assume that no errors will be returned.
1173  * This function handles the reply messages.
1174  */
1175 void
1176 iavf_vc_completion(struct iavf_sc *sc,
1177     enum virtchnl_ops v_opcode,
1178     enum virtchnl_status_code v_retval, u8 *msg, u16 msglen __unused)
1179 {
1180 	device_t	dev = sc->dev;
1181 
1182 	if (v_opcode != VIRTCHNL_OP_GET_STATS)
1183 		iavf_dbg_vc(sc, "%s: opcode %s\n", __func__,
1184 		    iavf_vc_opcode_str(v_opcode));
1185 
1186 	if (v_opcode == VIRTCHNL_OP_EVENT) {
1187 		struct virtchnl_pf_event *vpe =
1188 			(struct virtchnl_pf_event *)msg;
1189 
1190 		switch (vpe->event) {
1191 		case VIRTCHNL_EVENT_LINK_CHANGE:
1192 			iavf_handle_link_event(sc, vpe);
1193 			break;
1194 		case VIRTCHNL_EVENT_RESET_IMPENDING:
1195 			device_printf(dev, "PF initiated reset!\n");
1196 			iavf_set_state(&sc->state, IAVF_STATE_RESET_PENDING);
1197 			break;
1198 		default:
1199 			iavf_dbg_vc(sc, "Unknown event %d from AQ\n",
1200 				vpe->event);
1201 			break;
1202 		}
1203 
1204 		return;
1205 	}
1206 
1207 	/* Catch-all error response */
1208 	if (v_retval) {
1209 		bool print_error = true;
1210 
1211 		switch (v_opcode) {
1212 		case VIRTCHNL_OP_ADD_ETH_ADDR:
1213 			device_printf(dev, "WARNING: Error adding VF mac filter!\n");
1214 			device_printf(dev, "WARNING: Device may not receive traffic!\n");
1215 			break;
1216 		case VIRTCHNL_OP_ENABLE_QUEUES:
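			/*
			 * The request failed, but still wake any thread
			 * sleeping on the enable-queues channel so it does
			 * not keep waiting for a reply.
			 */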
1217 			sc->enable_queues_chan = 1;
1218 			wakeup_one(&sc->enable_queues_chan);
1219 			break;
1220 		case VIRTCHNL_OP_DISABLE_QUEUES:
1221 			sc->disable_queues_chan = 1;
1222 			wakeup_one(&sc->disable_queues_chan);
1223 			/* This may fail, but it does not necessarily mean that
1224 			 * something is critically wrong.
1225 			 */
1226 			if (!(sc->dbg_mask & IAVF_DBG_VC))
1227 				print_error = false;
1228 			break;
1229 		default:
1230 			break;
1231 		}
1232 
1233 		if (print_error)
1234 			device_printf(dev,
1235 			    "%s: AQ returned error %s to our request %s!\n",
1236 			    __func__, iavf_vc_stat_str(&sc->hw, v_retval),
1237 			    iavf_vc_opcode_str(v_opcode));
1238 		return;
1239 	}
1240 
1241 	switch (v_opcode) {
1242 	case VIRTCHNL_OP_GET_STATS:
1243 		iavf_update_stats_counters(sc, (struct iavf_eth_stats *)msg);
1244 		break;
1245 	case VIRTCHNL_OP_ADD_ETH_ADDR:
1246 		break;
1247 	case VIRTCHNL_OP_DEL_ETH_ADDR:
1248 		break;
1249 	case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
1250 		break;
1251 	case VIRTCHNL_OP_ADD_VLAN:
1252 		break;
1253 	case VIRTCHNL_OP_DEL_VLAN:
1254 		break;
1255 	case VIRTCHNL_OP_ENABLE_QUEUES:
1256 		atomic_store_rel_32(&sc->queues_enabled, 1);
1257 		sc->enable_queues_chan = 1;
1258 		wakeup_one(&sc->enable_queues_chan);
1259 		break;
1260 	case VIRTCHNL_OP_DISABLE_QUEUES:
1261 		atomic_store_rel_32(&sc->queues_enabled, 0);
1262 		sc->disable_queues_chan = 1;
1263 		wakeup_one(&sc->disable_queues_chan);
1264 		break;
1265 	case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
1266 		break;
1267 	case VIRTCHNL_OP_CONFIG_IRQ_MAP:
1268 		break;
1269 	case VIRTCHNL_OP_CONFIG_RSS_KEY:
1270 		break;
1271 	case VIRTCHNL_OP_SET_RSS_HENA:
1272 		break;
1273 	case VIRTCHNL_OP_CONFIG_RSS_LUT:
1274 		break;
1275 	default:
1276 		iavf_dbg_vc(sc,
1277 		    "Received unexpected message %s from PF.\n",
1278 		    iavf_vc_opcode_str(v_opcode));
1279 		break;
1280 	}
1281 }
1282 
1283 /**
1284  * iavf_handle_link_event - Handle a virtchnl link event message
1285  * @sc: device softc
1286  * @vpe: virtchnl PF link event structure
1287  *
1288  * Process a virtchnl PF link event and update the driver and stack status of
1289  * the link event.
1290  */
1291 static void
1292 iavf_handle_link_event(struct iavf_sc *sc, struct virtchnl_pf_event *vpe)
1293 {
1294 	MPASS(vpe->event == VIRTCHNL_EVENT_LINK_CHANGE);
1295 
1296 	if (sc->vf_res->vf_cap_flags &
1297 	    VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
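		/*
		 * With the advanced link speed capability the PF reports the
		 * speed directly in Mb/s instead of as a virtchnl_link_speed
		 * enum value.
		 */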
1298 		iavf_dbg_vc(sc, "Link change (adv): status %d, speed %u\n",
1299 		    vpe->event_data.link_event_adv.link_status,
1300 		    vpe->event_data.link_event_adv.link_speed);
1301 		sc->link_up =
1302 			vpe->event_data.link_event_adv.link_status;
1303 		sc->link_speed_adv =
1304 			vpe->event_data.link_event_adv.link_speed;
1305 
1306 	} else {
1307 		iavf_dbg_vc(sc, "Link change: status %d, speed %x\n",
1308 		    vpe->event_data.link_event.link_status,
1309 		    vpe->event_data.link_event.link_speed);
1310 		sc->link_up =
1311 			vpe->event_data.link_event.link_status;
1312 		sc->link_speed =
1313 			vpe->event_data.link_event.link_speed;
1314 	}
1315 
1316 	iavf_update_link_status(sc);
1317 }
1318