xref: /freebsd/sys/dev/ixgbe/if_sriov.c (revision 2f513db7)
/******************************************************************************

  Copyright (c) 2001-2017, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef PCI_IOV

#include <sys/ktr.h>

MALLOC_DEFINE(M_IXGBE_SRIOV, "ix_sriov", "ix SR-IOV allocations");

/************************************************************************
 * ixgbe_pci_iov_detach
 ************************************************************************/
int
ixgbe_pci_iov_detach(device_t dev)
{
	return pci_iov_detach(dev);
}

/************************************************************************
 * ixgbe_define_iov_schemas
 ************************************************************************/
void
ixgbe_define_iov_schemas(device_t dev, int *error)
{
	nvlist_t *pf_schema, *vf_schema;

	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();
	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
	    IOV_SCHEMA_HASDEFAULT, TRUE);
	pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	pci_iov_schema_add_bool(vf_schema, "allow-promisc",
	    IOV_SCHEMA_HASDEFAULT, FALSE);
	*error = pci_iov_attach(dev, pf_schema, vf_schema);
	if (*error != 0) {
		device_printf(dev,
		    "Error %d setting up SR-IOV\n", *error);
	}
} /* ixgbe_define_iov_schemas */

/************************************************************************
 * ixgbe_align_all_queue_indices
 ************************************************************************/
inline void
ixgbe_align_all_queue_indices(struct adapter *adapter)
{
	int i;
	int index;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		index = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i);
		adapter->rx_queues[i].rxr.me = index;
	}

	for (i = 0; i < adapter->num_tx_queues; i++) {
		index = ixgbe_vf_que_index(adapter->iov_mode, adapter->pool, i);
		adapter->tx_queues[i].txr.me = index;
	}
}

/* Support functions for SR-IOV/VF management */
static inline void
ixgbe_send_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	if (vf->flags & IXGBE_VF_CTS)
		msg |= IXGBE_VT_MSGTYPE_CTS;

	adapter->hw.mbx.ops.write(&adapter->hw, &msg, 1, vf->pool);
}

static inline void
ixgbe_send_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_ACK);
}

static inline void
ixgbe_send_vf_nack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg)
{
	msg &= IXGBE_VT_MSG_MASK;
	ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_NACK);
}

static inline void
ixgbe_process_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf)
{
	if (!(vf->flags & IXGBE_VF_CTS))
		ixgbe_send_vf_nack(adapter, vf, 0);
}

static inline boolean_t
ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac)
{
	return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0);
}

static inline int
ixgbe_vf_queues(int mode)
{
	switch (mode) {
	case IXGBE_64_VM:
		return (2);
	case IXGBE_32_VM:
		return (4);
	case IXGBE_NO_VM:
	default:
		return (0);
	}
}

inline int
ixgbe_vf_que_index(int mode, int vfnum, int num)
{
	return ((vfnum * ixgbe_vf_queues(mode)) + num);
}

static inline void
ixgbe_update_max_frame(struct adapter *adapter, int max_frame)
{
	if (adapter->max_frame_size < max_frame)
		adapter->max_frame_size = max_frame;
}

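/************************************************************************
 * ixgbe_get_mrqc
 *
 *   Return the MRQC (Multiple Receive Queues Command) register value
 *   for the given SR-IOV mode: VMDq with RSS for 64 or 32 pools, or 0
 *   when no VMs are configured.
 ************************************************************************/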
inline u32
ixgbe_get_mrqc(int iov_mode)
{
	u32 mrqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mrqc = IXGBE_MRQC_VMDQRSS64EN;
		break;
	case IXGBE_32_VM:
		mrqc = IXGBE_MRQC_VMDQRSS32EN;
		break;
	case IXGBE_NO_VM:
		mrqc = 0;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mrqc;
}


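/************************************************************************
 * ixgbe_get_mtqc
 *
 *   Return the MTQC (Multiple Transmit Queues Command) register value
 *   for the given SR-IOV mode: 64 or 32 VF transmit pools with VT
 *   enabled, or a single 64-queue packet buffer when SR-IOV is off.
 ************************************************************************/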
inline u32
ixgbe_get_mtqc(int iov_mode)
{
	uint32_t mtqc;

	switch (iov_mode) {
	case IXGBE_64_VM:
		mtqc = IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_32_VM:
		mtqc = IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA;
		break;
	case IXGBE_NO_VM:
		mtqc = IXGBE_MTQC_64Q_1PB;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", iov_mode);
	}

	return mtqc;
}

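/************************************************************************
 * ixgbe_ping_all_vfs
 *
 *   Send IXGBE_PF_CONTROL_MSG to every VF that is marked active.
 ************************************************************************/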
void
ixgbe_ping_all_vfs(struct adapter *adapter)
{
	struct ixgbe_vf *vf;

	for (int i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
	}
} /* ixgbe_ping_all_vfs */


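/************************************************************************
 * ixgbe_vf_set_default_vlan
 *
 *   Program the pool's VMOLR/VMVIR registers for the given default
 *   VLAN tag.  With tag 0 the VF may tag its own traffic and untagged
 *   receive is accepted; otherwise all transmit traffic is tagged with
 *   'tag' and untagged receive is disabled.
 ************************************************************************/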
static void
ixgbe_vf_set_default_vlan(struct adapter *adapter, struct ixgbe_vf *vf,
                          uint16_t tag)
{
	struct ixgbe_hw *hw;
	uint32_t vmolr, vmvir;

	hw = &adapter->hw;

	vf->vlan_tag = tag;

	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf->pool));

	/* Do not receive packets that pass inexact filters. */
	vmolr &= ~(IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_ROPE);

	/* Disable Multicast Promiscuous Mode. */
	vmolr &= ~IXGBE_VMOLR_MPE;

	/* Accept broadcasts. */
	vmolr |= IXGBE_VMOLR_BAM;

	if (tag == 0) {
		/* Accept non-vlan tagged traffic. */
		vmolr |= IXGBE_VMOLR_AUPE;

		/* Allow VM to tag outgoing traffic; no default tag. */
		vmvir = 0;
	} else {
		/* Require vlan-tagged traffic. */
		vmolr &= ~IXGBE_VMOLR_AUPE;

		/* Tag all traffic with provided vlan tag. */
		vmvir = (tag | IXGBE_VMVIR_VLANA_DEFAULT);
	}
	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf->pool), vmolr);
	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
} /* ixgbe_vf_set_default_vlan */


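/************************************************************************
 * ixgbe_vf_frame_size_compatible
 *
 *   Decide whether the PF and VF maximum frame sizes can coexist.
 *   Only 82599 has this restriction; the per-API rules are described
 *   in the comments below.
 ************************************************************************/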
static boolean_t
ixgbe_vf_frame_size_compatible(struct adapter *adapter, struct ixgbe_vf *vf)
{

	/*
	 * Frame size compatibility between PF and VF is only a problem on
	 * 82599-based cards.  X540 and later support any combination of jumbo
	 * frames on PFs and VFs.
	 */
	if (adapter->hw.mac.type != ixgbe_mac_82599EB)
		return (TRUE);

	switch (vf->api_ver) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		/*
		 * On legacy (1.0 and older) VF versions, we don't support jumbo
		 * frames on either the PF or the VF.
		 */
		if (adapter->max_frame_size > ETHER_MAX_LEN ||
		    vf->maximum_frame_size > ETHER_MAX_LEN)
			return (FALSE);

		return (TRUE);
	case IXGBE_API_VER_1_1:
	default:
		/*
		 * 1.1 or later VF versions always work if they aren't using
		 * jumbo frames.
		 */
		if (vf->maximum_frame_size <= ETHER_MAX_LEN)
			return (TRUE);

		/*
		 * Jumbo frames only work with VFs if the PF is also using jumbo
		 * frames.
		 */
		if (adapter->max_frame_size <= ETHER_MAX_LEN)
			return (TRUE);

		return (FALSE);
	}
} /* ixgbe_vf_frame_size_compatible */


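/************************************************************************
 * ixgbe_process_vf_reset
 *
 *   Common VF reset handling: restore the default VLAN, clear the
 *   VF's RAR entry, and forget the negotiated mailbox API version.
 ************************************************************************/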
static void
ixgbe_process_vf_reset(struct adapter *adapter, struct ixgbe_vf *vf)
{
	ixgbe_vf_set_default_vlan(adapter, vf, vf->default_vlan);

	// XXX clear multicast addresses

	ixgbe_clear_rar(&adapter->hw, vf->rar_index);

	vf->api_ver = IXGBE_API_VER_UNKNOWN;
} /* ixgbe_process_vf_reset */


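/************************************************************************
 * ixgbe_vf_enable_transmit
 *
 *   Set the VF's bit in the VFTE (VF Transmit Enable) register for
 *   its pool.
 ************************************************************************/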
static void
ixgbe_vf_enable_transmit(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfte;

	hw = &adapter->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfte = IXGBE_READ_REG(hw, IXGBE_VFTE(vf_index));
	vfte |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_index), vfte);
} /* ixgbe_vf_enable_transmit */


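/************************************************************************
 * ixgbe_vf_enable_receive
 *
 *   Set or clear the VF's bit in the VFRE (VF Receive Enable)
 *   register, depending on whether its frame size is compatible
 *   with the PF's.
 ************************************************************************/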
static void
ixgbe_vf_enable_receive(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, vfre;

	hw = &adapter->hw;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(vf_index));
	if (ixgbe_vf_frame_size_compatible(adapter, vf))
		vfre |= IXGBE_VF_BIT(vf->pool);
	else
		vfre &= ~IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_index), vfre);
} /* ixgbe_vf_enable_receive */


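/************************************************************************
 * ixgbe_vf_reset_msg
 *
 *   Handle a VF_RESET mailbox message: reset the VF state, program
 *   the RAR if the VF has a valid MAC address, re-enable rx/tx, and
 *   reply with the permanent address and multicast filter type.
 ************************************************************************/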
static void
ixgbe_vf_reset_msg(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t ack;
	uint32_t resp[IXGBE_VF_PERMADDR_MSG_LEN];

	hw = &adapter->hw;

	ixgbe_process_vf_reset(adapter, vf);

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr,
		    vf->pool, TRUE);
		ack = IXGBE_VT_MSGTYPE_ACK;
	} else
		ack = IXGBE_VT_MSGTYPE_NACK;

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	vf->flags |= IXGBE_VF_CTS;

	resp[0] = IXGBE_VF_RESET | ack | IXGBE_VT_MSGTYPE_CTS;
	bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
	resp[3] = hw->mac.mc_filter_type;
	hw->mbx.ops.write(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
} /* ixgbe_vf_reset_msg */


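/************************************************************************
 * ixgbe_vf_set_mac
 *
 *   Handle a SET_MAC_ADDR mailbox message: validate the requested
 *   address and the VF's permission to change it, then program the
 *   RAR and ACK.
 ************************************************************************/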
static void
ixgbe_vf_set_mac(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	uint8_t *mac;

	mac = (uint8_t *)&msg[1];

	/* Check that the VF has permission to change the MAC address. */
	if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	if (ixgbe_validate_mac_addr(mac) != 0) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);

	ixgbe_set_rar(&adapter->hw, vf->rar_index, vf->ether_addr, vf->pool,
	    TRUE);

	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_mac */


/*
 * VF multicast addresses are set by setting the appropriate bit in
 * one of the 128 32-bit MTA registers (4096 hash bits in total).
 */
static void
ixgbe_vf_set_mc_addr(struct adapter *adapter, struct ixgbe_vf *vf, u32 *msg)
{
	u16	*list = (u16 *)&msg[1];
	int	entries;
	u32	vmolr, vec_bit, vec_reg, mta_reg;

	entries = (msg[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
	entries = min(entries, IXGBE_MAX_VF_MC);

	vmolr = IXGBE_READ_REG(&adapter->hw, IXGBE_VMOLR(vf->pool));

	vf->num_mc_hashes = entries;

	/* Set the appropriate MTA bit */
	for (int i = 0; i < entries; i++) {
		vf->mc_hash[i] = list[i];
		vec_reg = (vf->mc_hash[i] >> 5) & 0x7F;
		vec_bit = vf->mc_hash[i] & 0x1F;
		mta_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_MTA(vec_reg));
		mta_reg |= (1 << vec_bit);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_MTA(vec_reg), mta_reg);
	}

	vmolr |= IXGBE_VMOLR_ROMPE;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(vf->pool), vmolr);
	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_mc_addr */


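/************************************************************************
 * ixgbe_vf_set_vlan
 *
 *   Handle a SET_VLAN mailbox message: add or remove a VLAN filter
 *   (VFTA entry) for the VF's pool.
 ************************************************************************/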
static void
ixgbe_vf_set_vlan(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	int enable;
	uint16_t tag;

	hw = &adapter->hw;
	enable = IXGBE_VT_MSGINFO(msg[0]);
	tag = msg[1] & IXGBE_VLVF_VLANID_MASK;

	if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	/* It is illegal to enable vlan tag 0. */
	if (tag == 0 && enable != 0) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	ixgbe_set_vfta(hw, tag, vf->pool, enable, false);
	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_vlan */


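/************************************************************************
 * ixgbe_vf_set_lpe
 *
 *   Handle a SET_LPE (large packet enable) mailbox message: record
 *   the VF's requested maximum frame size and grow the PF's MHADD
 *   max-frame setting if needed.
 ************************************************************************/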
static void
ixgbe_vf_set_lpe(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t vf_max_size, pf_max_size, mhadd;

	hw = &adapter->hw;
	vf_max_size = msg[1];

	if (vf_max_size < ETHER_CRC_LEN) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		return;
	}

	vf_max_size -= ETHER_CRC_LEN;

	if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
		/* We intentionally ACK invalid LPE requests. */
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		return;
	}

	vf->maximum_frame_size = vf_max_size;
	ixgbe_update_max_frame(adapter, vf->maximum_frame_size);

	/*
	 * We might have to disable reception to this VF if the frame size is
	 * not compatible with the config on the PF.
	 */
	ixgbe_vf_enable_receive(adapter, vf);

	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
	pf_max_size = (mhadd & IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;

	if (pf_max_size < adapter->max_frame_size) {
		mhadd &= ~IXGBE_MHADD_MFS_MASK;
		mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
		IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
	}

	ixgbe_send_vf_ack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_lpe */


static void
ixgbe_vf_set_macvlan(struct adapter *adapter, struct ixgbe_vf *vf,
                     uint32_t *msg)
{
	// XXX implement this
	ixgbe_send_vf_nack(adapter, vf, msg[0]);
} /* ixgbe_vf_set_macvlan */


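/************************************************************************
 * ixgbe_vf_api_negotiate
 *
 *   Handle an API_NEGOTIATE mailbox message: accept mailbox API 1.0
 *   or 1.1, NACK anything else.
 ************************************************************************/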
static void
ixgbe_vf_api_negotiate(struct adapter *adapter, struct ixgbe_vf *vf,
    uint32_t *msg)
{

	switch (msg[1]) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_1_1:
		vf->api_ver = msg[1];
		ixgbe_send_vf_ack(adapter, vf, msg[0]);
		break;
	default:
		vf->api_ver = IXGBE_API_VER_UNKNOWN;
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		break;
	}
} /* ixgbe_vf_api_negotiate */


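/************************************************************************
 * ixgbe_vf_get_queues
 *
 *   Handle a GET_QUEUES mailbox message (API 1.1 and later): report
 *   the VF's queue counts and whether a default VLAN is being
 *   inserted.
 ************************************************************************/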
static void
ixgbe_vf_get_queues(struct adapter *adapter, struct ixgbe_vf *vf, uint32_t *msg)
{
	struct ixgbe_hw *hw;
	uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
	int num_queues;

	hw = &adapter->hw;

	/* GET_QUEUES is not supported on pre-1.1 APIs. */
	switch (vf->api_ver) {
	case IXGBE_API_VER_1_0:
	case IXGBE_API_VER_UNKNOWN:
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
	    IXGBE_VT_MSGTYPE_CTS;

	num_queues = ixgbe_vf_queues(adapter->iov_mode);
	resp[IXGBE_VF_TX_QUEUES] = num_queues;
	resp[IXGBE_VF_RX_QUEUES] = num_queues;
	resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
	resp[IXGBE_VF_DEF_QUEUE] = 0;

	hw->mbx.ops.write(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
} /* ixgbe_vf_get_queues */


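/************************************************************************
 * ixgbe_process_vf_msg
 *
 *   Read one message from the VF's mailbox and dispatch it to the
 *   handler for its message type.
 ************************************************************************/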
static void
ixgbe_process_vf_msg(if_ctx_t ctx, struct ixgbe_vf *vf)
{
	struct adapter  *adapter = iflib_get_softc(ctx);
#ifdef KTR
	struct ifnet	*ifp = iflib_get_ifp(ctx);
#endif
	struct ixgbe_hw *hw;
	uint32_t msg[IXGBE_VFMAILBOX_SIZE];
	int error;

	hw = &adapter->hw;

	error = hw->mbx.ops.read(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);

	if (error != 0)
		return;

	CTR3(KTR_MALLOC, "%s: received msg %x from %d", ifp->if_xname,
	    msg[0], vf->pool);
	if (msg[0] == IXGBE_VF_RESET) {
		ixgbe_vf_reset_msg(adapter, vf, msg);
		return;
	}

	if (!(vf->flags & IXGBE_VF_CTS)) {
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
		return;
	}

	switch (msg[0] & IXGBE_VT_MSG_MASK) {
	case IXGBE_VF_SET_MAC_ADDR:
		ixgbe_vf_set_mac(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MULTICAST:
		ixgbe_vf_set_mc_addr(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_VLAN:
		ixgbe_vf_set_vlan(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_LPE:
		ixgbe_vf_set_lpe(adapter, vf, msg);
		break;
	case IXGBE_VF_SET_MACVLAN:
		ixgbe_vf_set_macvlan(adapter, vf, msg);
		break;
	case IXGBE_VF_API_NEGOTIATE:
		ixgbe_vf_api_negotiate(adapter, vf, msg);
		break;
	case IXGBE_VF_GET_QUEUES:
		ixgbe_vf_get_queues(adapter, vf, msg);
		break;
	default:
		ixgbe_send_vf_nack(adapter, vf, msg[0]);
	}
} /* ixgbe_process_vf_msg */


/* Tasklet for handling VF -> PF mailbox messages */
void
ixgbe_handle_mbx(void *context)
{
	if_ctx_t        ctx = context;
	struct adapter  *adapter = iflib_get_softc(ctx);
	struct ixgbe_hw *hw;
	struct ixgbe_vf *vf;
	int i;

	hw = &adapter->hw;

	for (i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];

		if (vf->flags & IXGBE_VF_ACTIVE) {
			if (hw->mbx.ops.check_for_rst(hw, vf->pool) == 0)
				ixgbe_process_vf_reset(adapter, vf);

			if (hw->mbx.ops.check_for_msg(hw, vf->pool) == 0)
				ixgbe_process_vf_msg(ctx, vf);

			if (hw->mbx.ops.check_for_ack(hw, vf->pool) == 0)
				ixgbe_process_vf_ack(adapter, vf);
		}
	}
} /* ixgbe_handle_mbx */

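/************************************************************************
 * ixgbe_if_iov_init
 *
 *   iflib entry point for SR-IOV initialization: choose the 32- or
 *   64-pool VMDq mode, reserve the last pool for the PF, and
 *   allocate the VF state array.
 ************************************************************************/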
int
ixgbe_if_iov_init(if_ctx_t ctx, u16 num_vfs, const nvlist_t *config)
{
	struct adapter *adapter;
	int retval = 0;

	adapter = iflib_get_softc(ctx);
	adapter->iov_mode = IXGBE_NO_VM;

	if (num_vfs == 0) {
		/* Would we ever get num_vfs = 0? */
		retval = EINVAL;
		goto err_init_iov;
	}

	/*
	 * We've got to reserve a VM's worth of queues for the PF,
	 * thus we go into "64 VF mode" if 32+ VFs are requested.
	 * With 64 VFs, you can only have two queues per VF.
	 * With 32 VFs, you can have up to four queues per VF.
	 */
	if (num_vfs >= IXGBE_32_VM)
		adapter->iov_mode = IXGBE_64_VM;
	else
		adapter->iov_mode = IXGBE_32_VM;

	/* Again, reserving 1 VM's worth of queues for the PF */
	adapter->pool = adapter->iov_mode - 1;

	if ((num_vfs > adapter->pool) || (num_vfs >= IXGBE_64_VM)) {
		retval = ENOSPC;
		goto err_init_iov;
	}

	adapter->vfs = malloc(sizeof(*adapter->vfs) * num_vfs, M_IXGBE_SRIOV,
	    M_NOWAIT | M_ZERO);

	if (adapter->vfs == NULL) {
		retval = ENOMEM;
		goto err_init_iov;
	}

	adapter->num_vfs = num_vfs;
	ixgbe_if_init(adapter->ctx);
	adapter->feat_en |= IXGBE_FEATURE_SRIOV;

	return (retval);

err_init_iov:
	adapter->num_vfs = 0;
	adapter->pool = 0;
	adapter->iov_mode = IXGBE_NO_VM;

	return (retval);
} /* ixgbe_if_iov_init */

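/************************************************************************
 * ixgbe_if_iov_uninit
 *
 *   iflib entry point for SR-IOV teardown: re-enable rx/tx for the
 *   PF only, disable VT mode, and free the VF state array.
 ************************************************************************/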
void
ixgbe_if_iov_uninit(if_ctx_t ctx)
{
	struct ixgbe_hw *hw;
	struct adapter *adapter;
	uint32_t pf_reg, vf_reg;

	adapter = iflib_get_softc(ctx);
	hw = &adapter->hw;

	/* Enable rx/tx for the PF and disable it for all VFs. */
	pf_reg = IXGBE_VF_INDEX(adapter->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(pf_reg), IXGBE_VF_BIT(adapter->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(pf_reg), IXGBE_VF_BIT(adapter->pool));

	if (pf_reg == 0)
		vf_reg = 1;
	else
		vf_reg = 0;
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), 0);
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), 0);

	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, 0);

	free(adapter->vfs, M_IXGBE_SRIOV);
	adapter->vfs = NULL;
	adapter->num_vfs = 0;
	adapter->feat_en &= ~IXGBE_FEATURE_SRIOV;
} /* ixgbe_if_iov_uninit */

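/************************************************************************
 * ixgbe_init_vf
 *
 *   (Re)initialize the hardware state for one active VF: unmask its
 *   mailbox interrupt (PFMBIMR), restore its VLAN and MAC filters,
 *   re-enable rx/tx, and ping it so it notices the reinitialization.
 ************************************************************************/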
static void
ixgbe_init_vf(struct adapter *adapter, struct ixgbe_vf *vf)
{
	struct ixgbe_hw *hw;
	uint32_t vf_index, pfmbimr;

	hw = &adapter->hw;

	if (!(vf->flags & IXGBE_VF_ACTIVE))
		return;

	vf_index = IXGBE_VF_INDEX(vf->pool);
	pfmbimr = IXGBE_READ_REG(hw, IXGBE_PFMBIMR(vf_index));
	pfmbimr |= IXGBE_VF_BIT(vf->pool);
	IXGBE_WRITE_REG(hw, IXGBE_PFMBIMR(vf_index), pfmbimr);

	ixgbe_vf_set_default_vlan(adapter, vf, vf->vlan_tag);

	// XXX multicast addresses

	if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
		ixgbe_set_rar(&adapter->hw, vf->rar_index,
		    vf->ether_addr, vf->pool, TRUE);
	}

	ixgbe_vf_enable_transmit(adapter, vf);
	ixgbe_vf_enable_receive(adapter, vf);

	ixgbe_send_vf_msg(adapter, vf, IXGBE_PF_CONTROL_MSG);
} /* ixgbe_init_vf */

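/************************************************************************
 * ixgbe_initialize_iov
 *
 *   Program the global SR-IOV registers (MRQC/MTQC/GCR_EXT/GPIE/
 *   VT_CTL) for the configured mode, enable rx/tx for the PF and
 *   VM-to-VM loopback, then initialize each VF.
 ************************************************************************/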
void
ixgbe_initialize_iov(struct adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	uint32_t mrqc, mtqc, vt_ctl, vf_reg, gcr_ext, gpie;
	int i;

	if (adapter->iov_mode == IXGBE_NO_VM)
		return;

	/* RMW appropriate registers based on IOV mode */
	/* Read... */
	mrqc    = IXGBE_READ_REG(hw, IXGBE_MRQC);
	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
	gpie    = IXGBE_READ_REG(hw, IXGBE_GPIE);
	/* Modify... */
	mrqc    &= ~IXGBE_MRQC_MRQE_MASK;
	mtqc     =  IXGBE_MTQC_VT_ENA;      /* No initial MTQC read needed */
	gcr_ext |=  IXGBE_GCR_EXT_MSIX_EN;
	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
	gpie    &= ~IXGBE_GPIE_VTMODE_MASK;
	switch (adapter->iov_mode) {
	case IXGBE_64_VM:
		mrqc    |= IXGBE_MRQC_VMDQRSS64EN;
		mtqc    |= IXGBE_MTQC_64VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
		gpie    |= IXGBE_GPIE_VTMODE_64;
		break;
	case IXGBE_32_VM:
		mrqc    |= IXGBE_MRQC_VMDQRSS32EN;
		mtqc    |= IXGBE_MTQC_32VF;
		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
		gpie    |= IXGBE_GPIE_VTMODE_32;
		break;
	default:
		panic("Unexpected SR-IOV mode %d", adapter->iov_mode);
	}
	/* Write... */
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	IXGBE_WRITE_REG(hw, IXGBE_MTQC, mtqc);
	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

	/* Enable rx/tx for the PF. */
	vf_reg = IXGBE_VF_INDEX(adapter->pool);
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vf_reg), IXGBE_VF_BIT(adapter->pool));
	IXGBE_WRITE_REG(hw, IXGBE_VFTE(vf_reg), IXGBE_VF_BIT(adapter->pool));

	/* Allow VM-to-VM communication. */
	IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
	vt_ctl |= (adapter->pool << IXGBE_VT_CTL_POOL_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);

	for (i = 0; i < adapter->num_vfs; i++)
		ixgbe_init_vf(adapter, &adapter->vfs[i]);
} /* ixgbe_initialize_iov */


/* Check the max frame setting of all active VFs */
void
ixgbe_recalculate_max_frame(struct adapter *adapter)
{
	struct ixgbe_vf *vf;

	for (int i = 0; i < adapter->num_vfs; i++) {
		vf = &adapter->vfs[i];
		if (vf->flags & IXGBE_VF_ACTIVE)
			ixgbe_update_max_frame(adapter, vf->maximum_frame_size);
	}
} /* ixgbe_recalculate_max_frame */

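/************************************************************************
 * ixgbe_if_iov_vf_add
 *
 *   iflib entry point for adding one VF: assign its pool and RAR
 *   index, apply the nvlist configuration (mac-addr, allow-set-mac),
 *   and mark it active.
 ************************************************************************/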
int
ixgbe_if_iov_vf_add(if_ctx_t ctx, u16 vfnum, const nvlist_t *config)
{
	struct adapter *adapter;
	struct ixgbe_vf *vf;
	const void *mac;

	adapter = iflib_get_softc(ctx);

	KASSERT(vfnum < adapter->num_vfs, ("VF index %d is out of range %d",
	    vfnum, adapter->num_vfs));

	vf = &adapter->vfs[vfnum];
	vf->pool = vfnum;

	/* RAR[0] is used by the PF so use vfnum + 1 for VF RAR. */
	vf->rar_index = vfnum + 1;
	vf->default_vlan = 0;
	vf->maximum_frame_size = ETHER_MAX_LEN;
	ixgbe_update_max_frame(adapter, vf->maximum_frame_size);

	if (nvlist_exists_binary(config, "mac-addr")) {
		mac = nvlist_get_binary(config, "mac-addr", NULL);
		bcopy(mac, vf->ether_addr, ETHER_ADDR_LEN);
		if (nvlist_get_bool(config, "allow-set-mac"))
			vf->flags |= IXGBE_VF_CAP_MAC;
	} else {
		/*
		 * If the administrator has not specified a MAC address then
		 * we must allow the VF to choose one.
		 */
		vf->flags |= IXGBE_VF_CAP_MAC;
	}

	vf->flags |= IXGBE_VF_ACTIVE;

	ixgbe_init_vf(adapter, vf);

	return (0);
} /* ixgbe_if_iov_vf_add */

#else

void
ixgbe_handle_mbx(void *context)
{
	UNREFERENCED_PARAMETER(context);
} /* ixgbe_handle_mbx */

#endif