// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_flow.h"
#include "ice_virtchnl_allowlist.h"

#define FIELD_SELECTOR(proto_hdr_field) \
	BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)
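
/* Illustration (not part of the original driver source): assuming the low
 * bits of each VIRTCHNL_PROTO_HDR_*_* enumerator encode the field's position
 * within its protocol header, FIELD_SELECTOR() turns that position into a
 * single selector bit, e.g. a field whose masked low bits equal 2 expands to
 * BIT(2) == 0x4. Selector bits for one header can then be OR'ed together, as
 * the hash field tables below do.
 */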

struct ice_vc_hdr_match_type {
	u32 vc_hdr;	/* virtchnl headers (VIRTCHNL_PROTO_HDR_XXX) */
	u32 ice_hdr;	/* ice headers (ICE_FLOW_SEG_HDR_XXX) */
};

static const struct ice_vc_hdr_match_type ice_vc_hdr_list_os[] = {
	{VIRTCHNL_PROTO_HDR_NONE,	ICE_FLOW_SEG_HDR_NONE},
	{VIRTCHNL_PROTO_HDR_IPV4,	ICE_FLOW_SEG_HDR_IPV4 |
					ICE_FLOW_SEG_HDR_IPV_OTHER},
	{VIRTCHNL_PROTO_HDR_IPV6,	ICE_FLOW_SEG_HDR_IPV6 |
					ICE_FLOW_SEG_HDR_IPV_OTHER},
	{VIRTCHNL_PROTO_HDR_TCP,	ICE_FLOW_SEG_HDR_TCP},
	{VIRTCHNL_PROTO_HDR_UDP,	ICE_FLOW_SEG_HDR_UDP},
	{VIRTCHNL_PROTO_HDR_SCTP,	ICE_FLOW_SEG_HDR_SCTP},
};

static const struct ice_vc_hdr_match_type ice_vc_hdr_list_comms[] = {
	{VIRTCHNL_PROTO_HDR_NONE,	ICE_FLOW_SEG_HDR_NONE},
	{VIRTCHNL_PROTO_HDR_ETH,	ICE_FLOW_SEG_HDR_ETH},
	{VIRTCHNL_PROTO_HDR_S_VLAN,	ICE_FLOW_SEG_HDR_VLAN},
	{VIRTCHNL_PROTO_HDR_C_VLAN,	ICE_FLOW_SEG_HDR_VLAN},
	{VIRTCHNL_PROTO_HDR_IPV4,	ICE_FLOW_SEG_HDR_IPV4 |
					ICE_FLOW_SEG_HDR_IPV_OTHER},
	{VIRTCHNL_PROTO_HDR_IPV6,	ICE_FLOW_SEG_HDR_IPV6 |
					ICE_FLOW_SEG_HDR_IPV_OTHER},
	{VIRTCHNL_PROTO_HDR_TCP,	ICE_FLOW_SEG_HDR_TCP},
	{VIRTCHNL_PROTO_HDR_UDP,	ICE_FLOW_SEG_HDR_UDP},
	{VIRTCHNL_PROTO_HDR_SCTP,	ICE_FLOW_SEG_HDR_SCTP},
	{VIRTCHNL_PROTO_HDR_PPPOE,	ICE_FLOW_SEG_HDR_PPPOE},
	{VIRTCHNL_PROTO_HDR_GTPU_IP,	ICE_FLOW_SEG_HDR_GTPU_IP},
	{VIRTCHNL_PROTO_HDR_GTPU_EH,	ICE_FLOW_SEG_HDR_GTPU_EH},
	{VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
					ICE_FLOW_SEG_HDR_GTPU_DWN},
	{VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
					ICE_FLOW_SEG_HDR_GTPU_UP},
	{VIRTCHNL_PROTO_HDR_L2TPV3,	ICE_FLOW_SEG_HDR_L2TPV3},
	{VIRTCHNL_PROTO_HDR_ESP,	ICE_FLOW_SEG_HDR_ESP},
	{VIRTCHNL_PROTO_HDR_AH,		ICE_FLOW_SEG_HDR_AH},
	{VIRTCHNL_PROTO_HDR_PFCP,	ICE_FLOW_SEG_HDR_PFCP_SESSION},
};

struct ice_vc_hash_field_match_type {
	u32 vc_hdr;		/* virtchnl headers
				 * (VIRTCHNL_PROTO_HDR_XXX)
				 */
	u32 vc_hash_field;	/* virtchnl hash fields selector
				 * FIELD_SELECTOR((VIRTCHNL_PROTO_HDR_ETH_XXX))
				 */
	u64 ice_hash_field;	/* ice hash fields
				 * (BIT_ULL(ICE_FLOW_FIELD_IDX_XXX))
				 */
};

static const struct
ice_vc_hash_field_match_type ice_vc_hash_field_list_os[] = {
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
		ICE_FLOW_HASH_IPV4},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
		ICE_FLOW_HASH_IPV6},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_TCP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
	{VIRTCHNL_PROTO_HDR_TCP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
	{VIRTCHNL_PROTO_HDR_TCP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
		ICE_FLOW_HASH_TCP_PORT},
	{VIRTCHNL_PROTO_HDR_UDP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
	{VIRTCHNL_PROTO_HDR_UDP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
	{VIRTCHNL_PROTO_HDR_UDP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
		ICE_FLOW_HASH_UDP_PORT},
	{VIRTCHNL_PROTO_HDR_SCTP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
	{VIRTCHNL_PROTO_HDR_SCTP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
	{VIRTCHNL_PROTO_HDR_SCTP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
		ICE_FLOW_HASH_SCTP_PORT},
};
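
/* Usage illustration (not from the original source): a VF RSS request on the
 * IPv4 header carrying FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
 * FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) would, presumably via lookup
 * code elsewhere in the driver, match the third IPv4 entry above and be
 * programmed as ICE_FLOW_HASH_IPV4, i.e. hashing on both source and
 * destination addresses.
 */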

static const struct
ice_vc_hash_field_match_type ice_vc_hash_field_list_comms[] = {
	{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC),
		BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)},
	{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
		BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA)},
	{VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
		ICE_FLOW_HASH_ETH},
	{VIRTCHNL_PROTO_HDR_ETH,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE),
		BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE)},
	{VIRTCHNL_PROTO_HDR_S_VLAN,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_S_VLAN_ID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN)},
	{VIRTCHNL_PROTO_HDR_C_VLAN,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_C_VLAN_ID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
		ICE_FLOW_HASH_IPV4},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
		ICE_FLOW_HASH_IPV6},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
	{VIRTCHNL_PROTO_HDR_TCP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
	{VIRTCHNL_PROTO_HDR_TCP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
	{VIRTCHNL_PROTO_HDR_TCP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
		ICE_FLOW_HASH_TCP_PORT},
	{VIRTCHNL_PROTO_HDR_UDP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
	{VIRTCHNL_PROTO_HDR_UDP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
	{VIRTCHNL_PROTO_HDR_UDP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
		ICE_FLOW_HASH_UDP_PORT},
	{VIRTCHNL_PROTO_HDR_SCTP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
	{VIRTCHNL_PROTO_HDR_SCTP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
		BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
	{VIRTCHNL_PROTO_HDR_SCTP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
		ICE_FLOW_HASH_SCTP_PORT},
	{VIRTCHNL_PROTO_HDR_PPPOE,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)},
	{VIRTCHNL_PROTO_HDR_GTPU_IP,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)},
	{VIRTCHNL_PROTO_HDR_L2TPV3,
		FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)},
	{VIRTCHNL_PROTO_HDR_ESP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ESP_SPI),
		BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)},
	{VIRTCHNL_PROTO_HDR_AH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_AH_SPI),
		BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)},
	{VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID),
		BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)},
};

/**
 * ice_get_vf_vsi - get VF's VSI based on the stored index
 * @vf: VF used to get VSI
 */
static struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
{
	return vf->pf->vsi[vf->lan_vsi_idx];
}

/**
 * ice_validate_vf_id - helper to check if VF ID is valid
 * @pf: pointer to the PF structure
 * @vf_id: the ID of the VF to check
 */
static int ice_validate_vf_id(struct ice_pf *pf, u16 vf_id)
{
	/* vf_id range is only valid for 0-255, and should always be unsigned */
	if (vf_id >= pf->num_alloc_vfs) {
		dev_err(ice_pf_to_dev(pf), "Invalid VF ID: %u\n", vf_id);
		return -EINVAL;
	}
	return 0;
}

/**
 * ice_check_vf_init - helper to check if VF init complete
 * @pf: pointer to the PF structure
 * @vf: the pointer to the VF to check
 */
static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
{
	if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
		dev_err(ice_pf_to_dev(pf), "VF ID: %u in reset. Try again.\n",
			vf->vf_id);
		return -EBUSY;
	}
	return 0;
}

/**
 * ice_err_to_virt_err - translate errors for VF return code
 * @ice_err: error return code
 */
static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
{
	switch (ice_err) {
	case ICE_SUCCESS:
		return VIRTCHNL_STATUS_SUCCESS;
	case ICE_ERR_BAD_PTR:
	case ICE_ERR_INVAL_SIZE:
	case ICE_ERR_DEVICE_NOT_SUPPORTED:
	case ICE_ERR_PARAM:
	case ICE_ERR_CFG:
		return VIRTCHNL_STATUS_ERR_PARAM;
	case ICE_ERR_NO_MEMORY:
		return VIRTCHNL_STATUS_ERR_NO_MEMORY;
	case ICE_ERR_NOT_READY:
	case ICE_ERR_RESET_FAILED:
	case ICE_ERR_FW_API_VER:
	case ICE_ERR_AQ_ERROR:
	case ICE_ERR_AQ_TIMEOUT:
	case ICE_ERR_AQ_FULL:
	case ICE_ERR_AQ_NO_WORK:
	case ICE_ERR_AQ_EMPTY:
		return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
	default:
		return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
	}
}

/**
 * ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
 * @pf: pointer to the PF structure
 * @v_opcode: operation code
 * @v_retval: return value
 * @msg: pointer to the msg buffer
 * @msglen: msg length
 */
static void
ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
		    enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
	struct ice_hw *hw = &pf->hw;
	unsigned int i;

	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];

		/* Not all VFs are enabled so skip the ones that are not */
		if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
		    !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
			continue;

		/* Ignore return value on purpose - a given VF may fail, but
		 * we need to keep going and send to all of them
		 */
		ice_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval, msg,
				      msglen, NULL);
	}
}

/**
 * ice_set_pfe_link - Set the link speed/status of the virtchnl_pf_event
 * @vf: pointer to the VF structure
 * @pfe: pointer to the virtchnl_pf_event to set link speed/status for
 * @ice_link_speed: link speed specified by ICE_AQ_LINK_SPEED_*
 * @link_up: whether or not to set the link up/down
 */
static void
ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
		 int ice_link_speed, bool link_up)
{
	if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED) {
		pfe->event_data.link_event_adv.link_status = link_up;
		/* Speed in Mbps */
		pfe->event_data.link_event_adv.link_speed =
			ice_conv_link_speed_to_virtchnl(true, ice_link_speed);
	} else {
		pfe->event_data.link_event.link_status = link_up;
		/* Legacy method for virtchnl link speeds */
		pfe->event_data.link_event.link_speed =
			(enum virtchnl_link_speed)
			ice_conv_link_speed_to_virtchnl(false, ice_link_speed);
	}
}

/**
 * ice_vf_has_no_qs_ena - check if the VF has any Rx or Tx queues enabled
 * @vf: the VF to check
 *
 * Returns true if the VF has no Rx and no Tx queues enabled and returns false
 * otherwise
 */
static bool ice_vf_has_no_qs_ena(struct ice_vf *vf)
{
	return (!bitmap_weight(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF) &&
		!bitmap_weight(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF));
}

/**
 * ice_is_vf_link_up - check if the VF's link is up
 * @vf: VF to check if link is up
 */
static bool ice_is_vf_link_up(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	if (ice_check_vf_init(pf, vf))
		return false;

	if (ice_vf_has_no_qs_ena(vf))
		return false;
	else if (vf->link_forced)
		return vf->link_up;
	else
		return pf->hw.port_info->phy.link_info.link_info &
			ICE_AQ_LINK_UP;
}

/**
 * ice_vc_notify_vf_link_state - Inform a VF of link status
 * @vf: pointer to the VF structure
 *
 * Send a link status message to a single VF
 */
static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
{
	struct virtchnl_pf_event pfe = { 0 };
	struct ice_hw *hw = &vf->pf->hw;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

	if (ice_is_vf_link_up(vf))
		ice_set_pfe_link(vf, &pfe,
				 hw->port_info->phy.link_info.link_speed, true);
	else
		ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);

	ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
			      VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
			      sizeof(pfe), NULL);
}

/**
 * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
 * @vf: VF to remove access to VSI for
 */
static void ice_vf_invalidate_vsi(struct ice_vf *vf)
{
	vf->lan_vsi_idx = ICE_NO_VSI;
	vf->lan_vsi_num = ICE_NO_VSI;
}

/**
 * ice_vf_vsi_release - invalidate the VF's VSI after freeing it
 * @vf: invalidate this VF's VSI after freeing it
 */
static void ice_vf_vsi_release(struct ice_vf *vf)
{
	ice_vsi_release(ice_get_vf_vsi(vf));
	ice_vf_invalidate_vsi(vf);
}

/**
 * ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
 * @vf: VF that control VSI is being invalidated on
 */
static void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf)
{
	vf->ctrl_vsi_idx = ICE_NO_VSI;
}

/**
 * ice_vf_ctrl_vsi_release - invalidate the VF's control VSI after freeing it
 * @vf: VF that control VSI is being released on
 */
static void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
{
	ice_vsi_release(vf->pf->vsi[vf->ctrl_vsi_idx]);
	ice_vf_ctrl_invalidate_vsi(vf);
}

/**
 * ice_free_vf_res - Free a VF's resources
 * @vf: pointer to the VF info
 */
static void ice_free_vf_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	int i, last_vector_idx;

	/* First, disable VF's configuration API to prevent OS from
	 * accessing the VF's VSI after it's freed or invalidated.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
	ice_vf_fdir_exit(vf);
	/* free VF control VSI */
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		ice_vf_ctrl_vsi_release(vf);

	/* free VSI and disconnect it from the parent uplink */
	if (vf->lan_vsi_idx != ICE_NO_VSI) {
		ice_vf_vsi_release(vf);
		vf->num_mac = 0;
	}

	last_vector_idx = vf->first_vector_idx + pf->num_msix_per_vf - 1;

	/* clear VF MDD event information */
	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));

	/* Disable interrupts so that VF starts in a known state */
	for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
		wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
		ice_flush(&pf->hw);
	}
	/* reset some of the state variables keeping track of the resources */
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
}

/**
 * ice_dis_vf_mappings
 * @vf: pointer to the VF structure
 */
static void ice_dis_vf_mappings(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	int first, last, v;
	struct ice_hw *hw;

	hw = &pf->hw;
	vsi = ice_get_vf_vsi(vf);

	dev = ice_pf_to_dev(pf);
	wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);

	first = vf->first_vector_idx;
	last = first + pf->num_msix_per_vf - 1;
	for (v = first; v <= last; v++) {
		u32 reg;

		reg = (((1 << GLINT_VECT2FUNC_IS_PF_S) &
			GLINT_VECT2FUNC_IS_PF_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");

	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
}

/**
 * ice_sriov_free_msix_res - Reset/free any used MSIX resources
 * @pf: pointer to the PF structure
 *
 * Since no MSIX entries are taken from the pf->irq_tracker, just clear
 * the pf->sriov_base_vector.
 *
 * Returns 0 on success, and -EINVAL on error.
 */
static int ice_sriov_free_msix_res(struct ice_pf *pf)
{
	struct ice_res_tracker *res;

	if (!pf)
		return -EINVAL;

	res = pf->irq_tracker;
	if (!res)
		return -EINVAL;

	/* give back irq_tracker resources used */
	WARN_ON(pf->sriov_base_vector < res->num_entries);

	pf->sriov_base_vector = 0;

	return 0;
}

/**
 * ice_set_vf_state_qs_dis - Set VF queues state to disabled
 * @vf: pointer to the VF structure
 */
void ice_set_vf_state_qs_dis(struct ice_vf *vf)
{
	/* Clear Rx/Tx enabled queues flag */
	bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
	bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
	clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
}

/**
 * ice_dis_vf_qs - Disable the VF queues
 * @vf: pointer to the VF structure
 */
static void ice_dis_vf_qs(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
	ice_vsi_stop_all_rx_rings(vsi);
	ice_set_vf_state_qs_dis(vf);
}

/**
 * ice_free_vfs - Free all VFs
 * @pf: pointer to the PF structure
 */
void ice_free_vfs(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	unsigned int tmp, i;

	if (!pf->vf)
		return;

	while (test_and_set_bit(ICE_VF_DIS, pf->state))
		usleep_range(1000, 2000);

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");

	/* Avoid wait time by stopping all VFs at the same time */
	ice_for_each_vf(pf, i)
		if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
			ice_dis_vf_qs(&pf->vf[i]);

	tmp = pf->num_alloc_vfs;
	pf->num_qps_per_vf = 0;
	pf->num_alloc_vfs = 0;
	for (i = 0; i < tmp; i++) {
		if (test_bit(ICE_VF_STATE_INIT, pf->vf[i].vf_states)) {
			/* disable VF qp mappings and set VF disable state */
			ice_dis_vf_mappings(&pf->vf[i]);
			set_bit(ICE_VF_STATE_DIS, pf->vf[i].vf_states);
			ice_free_vf_res(&pf->vf[i]);
		}
	}

	if (ice_sriov_free_msix_res(pf))
		dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");

	devm_kfree(dev, pf->vf);
	pf->vf = NULL;

	/* This check is for when the driver is unloaded while VFs are
	 * assigned. Setting the number of VFs to 0 through sysfs is caught
	 * before this function ever gets called.
	 */
	if (!pci_vfs_assigned(pf->pdev)) {
		unsigned int vf_id;

		/* Acknowledge VFLR for all VFs. Without this, VFs will fail to
		 * work correctly when SR-IOV gets re-enabled.
		 */
		for (vf_id = 0; vf_id < tmp; vf_id++) {
			u32 reg_idx, bit_idx;

			reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
			wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}
	}

	/* clear malicious info if the VFs are getting released */
	for (i = 0; i < tmp; i++)
		if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs,
					ICE_MAX_VF_COUNT, i))
			dev_dbg(dev, "failed to clear malicious VF state for VF %u\n",
				i);

	clear_bit(ICE_VF_DIS, pf->state);
	clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
}

/**
 * ice_trigger_vf_reset - Reset a VF on HW
 * @vf: pointer to the VF structure
 * @is_vflr: true if VFLR was issued, false if not
 * @is_pfr: true if the reset was triggered due to a previous PFR
 *
 * Trigger hardware to start a reset for a particular VF. Expects the caller
 * to wait the proper amount of time to allow hardware to reset the VF before
 * it cleans up and restores VF functionality.
 */
static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
{
	struct ice_pf *pf = vf->pf;
	u32 reg, reg_idx, bit_idx;
	unsigned int vf_abs_id, i;
	struct device *dev;
	struct ice_hw *hw;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* Inform VF that it is no longer active, as a warning */
	clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);

	/* Disable VF's configuration API during reset. The flag is re-enabled
	 * when it's safe again to access VF's VSI.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);

	/* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it
	 * in the case of VFR. If this is done for PFR, it can mess up VF
	 * resets because the VF driver may already have started cleanup
	 * by the time we get here.
	 */
	if (!is_pfr)
		wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);

	/* In the case of a VFLR, the HW has already reset the VF and we
	 * just need to clean up, so don't hit the VFRTRIG register.
	 */
	if (!is_vflr) {
		/* reset VF using VPGEN_VFRTRIG reg */
		reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
		reg |= VPGEN_VFRTRIG_VFSWR_M;
		wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	}
	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (vf_abs_id) / 32;
	bit_idx = (vf_abs_id) % 32;
	wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
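	/* Illustration (hypothetical values, not from the original source):
	 * vf_abs_id 40 clears bit 40 % 32 == 8 in GLGEN_VFLRSTAT register
	 * 40 / 32 == 1.
	 */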
	ice_flush(hw);

	wr32(hw, PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
	for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
		reg = rd32(hw, PF_PCI_CIAD);
		/* no transactions pending so stop polling */
		if ((reg & VF_TRANS_PENDING_M) == 0)
			break;

		dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
		udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
	}
}

/**
 * ice_vsi_manage_pvid - Enable or disable port VLAN for VSI
 * @vsi: the VSI to update
 * @pvid_info: VLAN ID and QoS used to set the PVID VSI context field
 * @enable: true to enable the PVID, false to disable it
 */
static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 pvid_info, bool enable)
{
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_aqc_vsi_props *info;
	struct ice_vsi_ctx *ctxt;
	enum ice_status status;
	int ret = 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	ctxt->info = vsi->info;
	info = &ctxt->info;
	if (enable) {
		info->vlan_flags = ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
			ICE_AQ_VSI_PVLAN_INSERT_PVID |
			ICE_AQ_VSI_VLAN_EMOD_STR;
		info->sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
	} else {
		info->vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING |
			ICE_AQ_VSI_VLAN_MODE_ALL;
		info->sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
	}

	info->pvid = cpu_to_le16(pvid_info);
	info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
					   ICE_AQ_VSI_PROP_SW_VALID);

	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
	if (status) {
		dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %s aq_err %s\n",
			 ice_stat_str(status),
			 ice_aq_str(hw->adminq.sq_last_status));
		ret = -EIO;
		goto out;
	}

	vsi->info.vlan_flags = info->vlan_flags;
	vsi->info.sw_flags2 = info->sw_flags2;
	vsi->info.pvid = info->pvid;
out:
	kfree(ctxt);
	return ret;
}

/**
 * ice_vf_get_port_info - Get the VF's port info structure
 * @vf: VF used to get the port info structure for
 */
static struct ice_port_info *ice_vf_get_port_info(struct ice_vf *vf)
{
	return vf->pf->hw.port_info;
}

/**
 * ice_vf_vsi_setup - Set up a VF VSI
 * @vf: VF to setup VSI for
 *
 * Returns pointer to the successfully allocated VSI struct on success,
 * otherwise returns NULL on failure.
 */
static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
{
	struct ice_port_info *pi = ice_vf_get_port_info(vf);
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id);

	if (!vsi) {
		dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
		ice_vf_invalidate_vsi(vf);
		return NULL;
	}

	vf->lan_vsi_idx = vsi->idx;
	vf->lan_vsi_num = vsi->vsi_num;

	return vsi;
}

/**
 * ice_vf_ctrl_vsi_setup - Set up a VF control VSI
 * @vf: VF to setup control VSI for
 *
 * Returns pointer to the successfully allocated VSI struct on success,
 * otherwise returns NULL on failure.
 */
struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
{
	struct ice_port_info *pi = ice_vf_get_port_info(vf);
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf->vf_id);
	if (!vsi) {
		dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
		ice_vf_ctrl_invalidate_vsi(vf);
	}

	return vsi;
}

/**
 * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
 * @pf: pointer to PF structure
 * @vf: pointer to VF that the first MSIX vector index is being calculated for
 *
 * This returns the first MSIX vector index in PF space that is used by this VF.
 * This index is used when accessing PF relative registers such as
 * GLINT_VECT2FUNC and GLINT_DYN_CTL.
 * This will always be the OICR index in the AVF driver so any functionality
 * using vf->first_vector_idx for queue configuration will have to increment by
 * 1 to avoid meddling with the OICR index.
 */
static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
{
	return pf->sriov_base_vector + vf->vf_id * pf->num_msix_per_vf;
}
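
/* Illustration (hypothetical values, not from the original source): with
 * pf->sriov_base_vector == 96 and pf->num_msix_per_vf == 17, VF 2's first
 * PF-based MSI-X vector index is 96 + 2 * 17 = 130.
 */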

/**
 * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
 * @vf: VF to add VLAN filters for
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * always re-adds either a VLAN 0 or port VLAN based filter after reset.
 */
static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	u16 vlan_id = 0;
	int err;

	if (vf->port_vlan_info) {
		err = ice_vsi_manage_pvid(vsi, vf->port_vlan_info, true);
		if (err) {
			dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
				vf->vf_id, err);
			return err;
		}

		vlan_id = vf->port_vlan_info & VLAN_VID_MASK;
	}

	/* vlan_id will either be 0 or the port VLAN number */
	err = ice_vsi_add_vlan(vsi, vlan_id, ICE_FWD_TO_VSI);
	if (err) {
		dev_err(dev, "failed to add %s VLAN %u filter for VF %u, error %d\n",
			vf->port_vlan_info ? "port" : "", vlan_id, vf->vf_id,
			err);
		return err;
	}

	return 0;
}

/**
 * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
 * @vf: VF to add MAC filters for
 *
 * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
 * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
 */
static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	enum ice_status status;
	u8 broadcast[ETH_ALEN];

	eth_broadcast_addr(broadcast);
	status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
	if (status) {
		dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %s\n",
			vf->vf_id, ice_stat_str(status));
		return ice_status_to_errno(status);
	}

	vf->num_mac++;

	if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
		status = ice_fltr_add_mac(vsi, vf->dflt_lan_addr.addr,
					  ICE_FWD_TO_VSI);
		if (status) {
			dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %s\n",
				&vf->dflt_lan_addr.addr[0], vf->vf_id,
				ice_stat_str(status));
			return ice_status_to_errno(status);
		}
		vf->num_mac++;
	}

	return 0;
}

/**
 * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
 * @vf: VF to configure trust setting for
 */
static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
{
	if (vf->trusted)
		set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
	else
		clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
}

/**
 * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
 * @vf: VF to enable MSIX mappings for
 *
 * Some of the registers need to be indexed/configured using hardware global
 * device values and other registers need 0-based values, which represent PF
 * based values.
 */
static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
{
	int device_based_first_msix, device_based_last_msix;
	int pf_based_first_msix, pf_based_last_msix, v;
	struct ice_pf *pf = vf->pf;
	int device_based_vf_id;
	struct ice_hw *hw;
	u32 reg;

	hw = &pf->hw;
	pf_based_first_msix = vf->first_vector_idx;
	pf_based_last_msix = (pf_based_first_msix + pf->num_msix_per_vf) - 1;

	device_based_first_msix = pf_based_first_msix +
		pf->hw.func_caps.common_cap.msix_vector_first_id;
	device_based_last_msix =
		(device_based_first_msix + pf->num_msix_per_vf) - 1;
	device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
		VPINT_ALLOC_FIRST_M) |
	       ((device_based_last_msix << VPINT_ALLOC_LAST_S) &
		VPINT_ALLOC_LAST_M) | VPINT_ALLOC_VALID_M);
	wr32(hw, VPINT_ALLOC(vf->vf_id), reg);

	reg = (((device_based_first_msix << VPINT_ALLOC_PCI_FIRST_S)
		& VPINT_ALLOC_PCI_FIRST_M) |
	       ((device_based_last_msix << VPINT_ALLOC_PCI_LAST_S) &
		VPINT_ALLOC_PCI_LAST_M) | VPINT_ALLOC_PCI_VALID_M);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);

	/* map the interrupts to its functions */
	for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
		reg = (((device_based_vf_id << GLINT_VECT2FUNC_VF_NUM_S) &
			GLINT_VECT2FUNC_VF_NUM_M) |
		       ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) &
			GLINT_VECT2FUNC_PF_NUM_M));
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	/* Map mailbox interrupt to VF MSI-X vector 0 */
	wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
}
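
/* Illustration (hypothetical values, not from the original source): if
 * msix_vector_first_id is 1, a PF-based first vector of 96 becomes
 * device-based vector 97, and with 17 vectors per VF the device-based last
 * vector is 97 + 17 - 1 = 113.
 */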

/**
 * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
 * @vf: VF to enable the mappings for
 * @max_txq: max Tx queues allowed on the VF's VSI
 * @max_rxq: max Rx queues allowed on the VF's VSI
 */
static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	struct ice_hw *hw = &vf->pf->hw;
	u32 reg;

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);

	/* VF Tx queues allocation */
	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Tx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->txq_map[0] << VPLAN_TX_QBASE_VFFIRSTQ_S) &
			VPLAN_TX_QBASE_VFFIRSTQ_M) |
		       (((max_txq - 1) << VPLAN_TX_QBASE_VFNUMQ_S) &
			VPLAN_TX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
	}

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);

	/* VF Rx queues allocation */
	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Rx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = (((vsi->rxq_map[0] << VPLAN_RX_QBASE_VFFIRSTQ_S) &
			VPLAN_RX_QBASE_VFFIRSTQ_M) |
		       (((max_rxq - 1) << VPLAN_RX_QBASE_VFNUMQ_S) &
			VPLAN_RX_QBASE_VFNUMQ_M));
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
	}
}
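
/* Illustration (hypothetical values, not from the original source): a VF VSI
 * whose first Tx queue is hardware queue 64 with max_txq == 4 programs
 * VFFIRSTQ = 64 and VFNUMQ = 4 - 1 = 3, since VFNUMQ encodes
 * (number of queues - 1).
 */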

/**
 * ice_ena_vf_mappings - enable VF MSIX and queue mapping
 * @vf: pointer to the VF structure
 */
static void ice_ena_vf_mappings(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	ice_ena_vf_msix_mappings(vf);
	ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
}

/**
 * ice_determine_res
 * @pf: pointer to the PF structure
 * @avail_res: available resources in the PF structure
 * @max_res: maximum resources that can be given per VF
 * @min_res: minimum resources that can be given per VF
 *
 * Returns non-zero value if resources (queues/vectors) are available or
 * returns zero if PF cannot accommodate all num_alloc_vfs.
 */
static int
ice_determine_res(struct ice_pf *pf, u16 avail_res, u16 max_res, u16 min_res)
{
	bool checked_min_res = false;
	int res;

	/* Start by checking if the PF can assign the max number of resources
	 * for all num_alloc_vfs.
	 * If yes, return the number per VF.
	 * If no, divide by 2 and round up, then check again.
	 * Repeat the loop until we reach a point where even the minimum
	 * resources are not available; in that case return 0.
	 */
	res = max_res;
	while ((res >= min_res) && !checked_min_res) {
		int num_all_res;

		num_all_res = pf->num_alloc_vfs * res;
		if (num_all_res <= avail_res)
			return res;

		if (res == min_res)
			checked_min_res = true;

		res = DIV_ROUND_UP(res, 2);
	}
	return 0;
}
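
/* Worked example (hypothetical values, not from the original source): with
 * 8 allocated VFs, avail_res == 40, max_res == 16 and min_res == 1, the loop
 * tries 16 * 8 = 128 (too many), then DIV_ROUND_UP(16, 2) = 8 giving 64
 * (still too many), then 4 giving 32 <= 40, so each VF gets 4.
 */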

/**
 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
 * @vf: VF to calculate the register index for
 * @q_vector: a q_vector associated to the VF
 */
int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
{
	struct ice_pf *pf;

	if (!vf || !q_vector)
		return -EINVAL;

	pf = vf->pf;

	/* always add one to account for the OICR being the first MSIX */
	return pf->sriov_base_vector + pf->num_msix_per_vf * vf->vf_id +
		q_vector->v_idx + 1;
}
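
/* Illustration (hypothetical values, not from the original source): with
 * sriov_base_vector == 96 and 17 vectors per VF, q_vector 0 of VF 1 maps to
 * register index 96 + 17 * 1 + 0 + 1 = 114; the "+ 1" skips the VF's OICR
 * vector.
 */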

/**
 * ice_get_max_valid_res_idx - Get the max valid resource index
 * @res: pointer to the resource to find the max valid index for
 *
 * Start from the end of the ice_res_tracker and return right when we find the
 * first res->list entry with the ICE_RES_VALID_BIT set. This function is only
 * valid for SR-IOV because it is the only consumer that manipulates the
 * res->end and this is always called when res->end is set to res->num_entries.
 */
static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
{
	int i;

	if (!res)
		return -EINVAL;

	for (i = res->num_entries - 1; i >= 0; i--)
		if (res->list[i] & ICE_RES_VALID_BIT)
			return i;

	return 0;
}

/**
 * ice_sriov_set_msix_res - Set any used MSIX resources
 * @pf: pointer to PF structure
 * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
 *
 * This function allows SR-IOV resources to be taken from the end of the PF's
 * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
 * just set the pf->sriov_base_vector and return success.
 *
 * If there are not enough resources available, return an error. This should
 * always be caught by ice_set_per_vf_res().
 *
 * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
 * in the PF's space available for SR-IOV.
 */
static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
{
	u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
	int vectors_used = pf->irq_tracker->num_entries;
	int sriov_base_vector;

	sriov_base_vector = total_vectors - num_msix_needed;

	/* make sure we only grab irq_tracker entries from the list end and
	 * that we have enough available MSIX vectors
	 */
	if (sriov_base_vector < vectors_used)
		return -EINVAL;

	pf->sriov_base_vector = sriov_base_vector;

	return 0;
}
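
/* Illustration (hypothetical values, not from the original source): with
 * 1024 total MSI-X vectors, 100 irq_tracker entries in use and 8 VFs needing
 * 17 vectors each (136 total), sriov_base_vector becomes 1024 - 136 = 888,
 * which is safely above the 100 vectors already used.
 */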

/**
 * ice_set_per_vf_res - check if vectors and queues are available
 * @pf: pointer to the PF structure
 *
 * First, determine HW interrupts from common pool. If we allocate fewer VFs, we
 * get more vectors and can enable more queues per VF. Note that this does not
 * grab any vectors from the SW pool already allocated. Also note, that all
 * vector counts include one for each VF's miscellaneous interrupt vector
 * (i.e. OICR).
 *
 * Minimum VFs - 2 vectors, 1 queue pair
 * Small VFs - 5 vectors, 4 queue pairs
 * Medium VFs - 17 vectors, 16 queue pairs
 *
 * Second, determine number of queue pairs per VF by starting with a pre-defined
 * maximum each VF supports. If this is not possible, then we adjust based on
 * queue pairs available on the device.
 *
 * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used
 * by each VF during VF initialization and reset.
 */
static int ice_set_per_vf_res(struct ice_pf *pf)
{
	int max_valid_res_idx = ice_get_max_valid_res_idx(pf->irq_tracker);
	int msix_avail_per_vf, msix_avail_for_sriov;
	struct device *dev = ice_pf_to_dev(pf);
	u16 num_msix_per_vf, num_txq, num_rxq;

	if (!pf->num_alloc_vfs || max_valid_res_idx < 0)
		return -EINVAL;

	/* determine MSI-X resources per VF */
	msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
		pf->irq_tracker->num_entries;
	msix_avail_per_vf = msix_avail_for_sriov / pf->num_alloc_vfs;
	if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
	} else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
		num_msix_per_vf = ICE_MIN_INTR_PER_VF;
	} else {
		dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
			msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
			pf->num_alloc_vfs);
		return -EIO;
	}

	/* determine queue resources per VF */
	num_txq = ice_determine_res(pf, ice_get_avail_txq_count(pf),
				    min_t(u16,
					  num_msix_per_vf - ICE_NONQ_VECS_VF,
					  ICE_MAX_RSS_QS_PER_VF),
				    ICE_MIN_QS_PER_VF);

	num_rxq = ice_determine_res(pf, ice_get_avail_rxq_count(pf),
				    min_t(u16,
					  num_msix_per_vf - ICE_NONQ_VECS_VF,
					  ICE_MAX_RSS_QS_PER_VF),
				    ICE_MIN_QS_PER_VF);

	if (!num_txq || !num_rxq) {
		dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
			ICE_MIN_QS_PER_VF, pf->num_alloc_vfs);
		return -EIO;
	}

	if (ice_sriov_set_msix_res(pf, num_msix_per_vf * pf->num_alloc_vfs)) {
		dev_err(dev, "Unable to set MSI-X resources for %d VFs\n",
			pf->num_alloc_vfs);
		return -EINVAL;
	}

	/* only allow equal Tx/Rx queue count (i.e. queue pairs) */
	pf->num_qps_per_vf = min_t(int, num_txq, num_rxq);
	pf->num_msix_per_vf = num_msix_per_vf;
	dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
		 pf->num_alloc_vfs, pf->num_msix_per_vf, pf->num_qps_per_vf);

	return 0;
}
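
/* Illustration (hypothetical values, not from the original source): 300
 * MSI-X vectors left for SR-IOV across 16 VFs gives 18 per VF, so the medium
 * tier (ICE_NUM_VF_MSIX_MED, 17 vectors per the comment above) is selected;
 * 16 of those can drive queue pairs once the ICE_NONQ_VECS_VF (OICR) vector
 * is reserved.
 */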
1279
1280 /**
1281 * ice_clear_vf_reset_trigger - enable VF to access hardware
1282 * @vf: VF to enabled hardware access for
1283 */
ice_clear_vf_reset_trigger(struct ice_vf * vf)1284 static void ice_clear_vf_reset_trigger(struct ice_vf *vf)
1285 {
1286 struct ice_hw *hw = &vf->pf->hw;
1287 u32 reg;
1288
1289 reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
1290 reg &= ~VPGEN_VFRTRIG_VFSWR_M;
1291 wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
1292 ice_flush(hw);
1293 }
1294
1295 /**
1296 * ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s)
1297 * @vf: pointer to the VF info
1298 * @vsi: the VSI being configured
1299 * @promisc_m: mask of promiscuous config bits
1300 * @rm_promisc: promisc flag request from the VF to remove or add filter
1301 *
1302 * This function configures VF VSI promiscuous mode, based on the VF requests,
1303 * for Unicast, Multicast and VLAN
1304 */
1305 static enum ice_status
ice_vf_set_vsi_promisc(struct ice_vf * vf,struct ice_vsi * vsi,u8 promisc_m,bool rm_promisc)1306 ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
1307 bool rm_promisc)
1308 {
1309 struct ice_pf *pf = vf->pf;
1310 enum ice_status status = 0;
1311 struct ice_hw *hw;
1312
1313 hw = &pf->hw;
1314 if (vsi->num_vlan) {
1315 status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
1316 rm_promisc);
1317 } else if (vf->port_vlan_info) {
1318 if (rm_promisc)
1319 status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
1320 vf->port_vlan_info);
1321 else
1322 status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
1323 vf->port_vlan_info);
1324 } else {
1325 if (rm_promisc)
1326 status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
1327 0);
1328 else
1329 status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
1330 0);
1331 }
1332
1333 return status;
1334 }
1335
ice_vf_clear_counters(struct ice_vf * vf)1336 static void ice_vf_clear_counters(struct ice_vf *vf)
1337 {
1338 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1339
1340 vf->num_mac = 0;
1341 vsi->num_vlan = 0;
1342 memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
1343 memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
1344 }
1345
1346 /**
1347 * ice_vf_pre_vsi_rebuild - tasks to be done prior to VSI rebuild
1348 * @vf: VF to perform pre VSI rebuild tasks
1349 *
1350 * These tasks are items that don't need to be amortized since they are most
1351 * likely called in a for loop with all VF(s) in the reset_all_vfs() case.
1352 */
ice_vf_pre_vsi_rebuild(struct ice_vf * vf)1353 static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
1354 {
1355 ice_vf_clear_counters(vf);
1356 ice_clear_vf_reset_trigger(vf);
1357 }
1358
1359 /**
1360 * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
1361 * @vsi: Pointer to VSI
1362 *
1363 * This function moves VSI into corresponding scheduler aggregator node
1364 * based on cached value of "aggregator node info" per VSI
1365 */
ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi * vsi)1366 static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
1367 {
1368 struct ice_pf *pf = vsi->back;
1369 enum ice_status status;
1370 struct device *dev;
1371
1372 if (!vsi->agg_node)
1373 return;
1374
1375 dev = ice_pf_to_dev(pf);
1376 if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
1377 dev_dbg(dev,
1378 "agg_id %u already has reached max_num_vsis %u\n",
1379 vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
1380 return;
1381 }
1382
1383 status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
1384 vsi->idx, vsi->tc_cfg.ena_tc);
1385 if (status)
1386 dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
1387 vsi->idx, vsi->agg_node->agg_id);
1388 else
1389 vsi->agg_node->num_vsis++;
1390 }
1391
1392 /**
1393 * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
1394 * @vf: VF to rebuild host configuration on
1395 */
ice_vf_rebuild_host_cfg(struct ice_vf * vf)1396 static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
1397 {
1398 struct device *dev = ice_pf_to_dev(vf->pf);
1399 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1400
1401 ice_vf_set_host_trust_cfg(vf);
1402
1403 if (ice_vf_rebuild_host_mac_cfg(vf))
1404 dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
1405 vf->vf_id);
1406
1407 if (ice_vf_rebuild_host_vlan_cfg(vf))
1408 dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
1409 vf->vf_id);
1410 /* rebuild aggregator node config for main VF VSI */
1411 ice_vf_rebuild_aggregator_node_cfg(vsi);
1412 }
1413
1414 /**
1415 * ice_vf_rebuild_vsi_with_release - release and setup the VF's VSI
1416 * @vf: VF to release and setup the VSI for
1417 *
1418 * This is only called when a single VF is being reset (i.e. VFR, VFLR, host VF
1419 * configuration change, etc.).
1420 */
ice_vf_rebuild_vsi_with_release(struct ice_vf * vf)1421 static int ice_vf_rebuild_vsi_with_release(struct ice_vf *vf)
1422 {
1423 ice_vf_vsi_release(vf);
1424 if (!ice_vf_vsi_setup(vf))
1425 return -ENOMEM;
1426
1427 return 0;
1428 }
1429
1430 /**
1431 * ice_vf_rebuild_vsi - rebuild the VF's VSI
1432 * @vf: VF to rebuild the VSI for
1433 *
1434 * This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
1435 * host, PFR, CORER, etc.).
1436 */
ice_vf_rebuild_vsi(struct ice_vf * vf)1437 static int ice_vf_rebuild_vsi(struct ice_vf *vf)
1438 {
1439 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
1440 struct ice_pf *pf = vf->pf;
1441
1442 if (ice_vsi_rebuild(vsi, true)) {
1443 dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
1444 vf->vf_id);
1445 return -EIO;
1446 }
1447 /* vsi->idx will remain the same in this case so don't update
1448 * vf->lan_vsi_idx
1449 */
1450 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
1451 vf->lan_vsi_num = vsi->vsi_num;
1452
1453 return 0;
1454 }
1455
1456 /**
1457 * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
1458 * @vf: VF to set in initialized state
1459 *
1460 * After this function the VF will be ready to receive/handle the
1461 * VIRTCHNL_OP_GET_VF_RESOURCES message
1462 */
ice_vf_set_initialized(struct ice_vf * vf)1463 static void ice_vf_set_initialized(struct ice_vf *vf)
1464 {
1465 ice_set_vf_state_qs_dis(vf);
1466 clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
1467 clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
1468 clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
1469 set_bit(ICE_VF_STATE_INIT, vf->vf_states);
1470 }
1471
1472 /**
1473 * ice_vf_post_vsi_rebuild - tasks to do after the VF's VSI have been rebuilt
1474 * @vf: VF to perform tasks on
1475 */
ice_vf_post_vsi_rebuild(struct ice_vf * vf)1476 static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
1477 {
1478 struct ice_pf *pf = vf->pf;
1479 struct ice_hw *hw;
1480
1481 hw = &pf->hw;
1482
1483 ice_vf_rebuild_host_cfg(vf);
1484
1485 ice_vf_set_initialized(vf);
1486 ice_ena_vf_mappings(vf);
1487 wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
1488 }
1489
1490 /**
1491 * ice_reset_all_vfs - reset all allocated VFs in one go
1492 * @pf: pointer to the PF structure
1493 * @is_vflr: true if VFLR was issued, false if not
1494 *
1495 * First, tell the hardware to reset each VF, then do all the waiting in one
1496 * chunk, and finally finish restoring each VF after the wait. This is useful
1497 * during PF routines which need to reset all VFs, as otherwise it must perform
1498 * these resets in a serialized fashion.
1499 *
1500 * Returns true if any VFs were reset, and false otherwise.
1501 */
ice_reset_all_vfs(struct ice_pf * pf,bool is_vflr)1502 bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
1503 {
1504 struct device *dev = ice_pf_to_dev(pf);
1505 struct ice_hw *hw = &pf->hw;
1506 struct ice_vf *vf;
1507 int v, i;
1508
1509 /* If we don't have any VFs, then there is nothing to reset */
1510 if (!pf->num_alloc_vfs)
1511 return false;
1512
1513 /* clear all malicious info if the VFs are getting reset */
1514 ice_for_each_vf(pf, i)
1515 if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, i))
1516 dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", i);
1517
1518 /* If VFs have been disabled, there is no need to reset */
1519 if (test_and_set_bit(ICE_VF_DIS, pf->state))
1520 return false;
1521
1522 /* Begin reset on all VFs at once */
1523 ice_for_each_vf(pf, v)
1524 ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);
1525
1526 /* HW requires some time to make sure it can flush the FIFO for a VF
1527 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
1528 * sequence to make sure that it has completed. We'll keep track of
1529 * the VFs using a simple iterator that increments once that VF has
1530 * finished resetting.
1531 */
1532 for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
1533 /* Check each VF in sequence */
1534 while (v < pf->num_alloc_vfs) {
1535 u32 reg;
1536
1537 vf = &pf->vf[v];
1538 reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
1539 if (!(reg & VPGEN_VFRSTAT_VFRD_M)) {
1540 /* only delay if the check failed */
1541 usleep_range(10, 20);
1542 break;
1543 }
1544
1545 /* If the current VF has finished resetting, move on
1546 * to the next VF in sequence.
1547 */
1548 v++;
1549 }
1550 }
1551
1552 /* Display a warning if at least one VF didn't manage to reset in
1553 * time, but continue on with the operation.
1554 */
1555 if (v < pf->num_alloc_vfs)
1556 dev_warn(dev, "VF reset check timeout\n");
1557
1558 /* free VF resources to begin resetting the VSI state */
1559 ice_for_each_vf(pf, v) {
1560 vf = &pf->vf[v];
1561
1562 vf->driver_caps = 0;
1563 ice_vc_set_default_allowlist(vf);
1564
1565 ice_vf_fdir_exit(vf);
1566 /* clean VF control VSI when resetting VFs since it should be
1567 * setup only when VF creates its first FDIR rule.
1568 */
1569 if (vf->ctrl_vsi_idx != ICE_NO_VSI)
1570 ice_vf_ctrl_invalidate_vsi(vf);
1571
1572 ice_vf_pre_vsi_rebuild(vf);
1573 ice_vf_rebuild_vsi(vf);
1574 ice_vf_post_vsi_rebuild(vf);
1575 }
1576
1577 ice_flush(hw);
1578 clear_bit(ICE_VF_DIS, pf->state);
1579
1580 return true;
1581 }
1582
1583 /**
1584 * ice_is_vf_disabled
1585 * @vf: pointer to the VF info
1586 *
1587 * Returns true if the PF or VF is disabled, false otherwise.
1588 */
ice_is_vf_disabled(struct ice_vf * vf)1589 static bool ice_is_vf_disabled(struct ice_vf *vf)
1590 {
1591 struct ice_pf *pf = vf->pf;
1592
1593 /* If the PF has been disabled, there is no need resetting VF until
1594 * PF is active again. Similarly, if the VF has been disabled, this
1595 * means something else is resetting the VF, so we shouldn't continue.
1596 * Otherwise, set disable VF state bit for actual reset, and continue.
1597 */
1598 return (test_bit(ICE_VF_DIS, pf->state) ||
1599 test_bit(ICE_VF_STATE_DIS, vf->vf_states));
1600 }
1601
1602 /**
1603 * ice_reset_vf - Reset a particular VF
1604 * @vf: pointer to the VF structure
1605 * @is_vflr: true if VFLR was issued, false if not
1606 *
1607 * Returns true if the VF is currently in reset, resets successfully, or resets
1608 * are disabled and false otherwise.
1609 */
1610 bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
1611 {
1612 struct ice_pf *pf = vf->pf;
1613 struct ice_vsi *vsi;
1614 struct device *dev;
1615 struct ice_hw *hw;
1616 bool rsd = false;
1617 u8 promisc_m;
1618 u32 reg;
1619 int i;
1620
1621 dev = ice_pf_to_dev(pf);
1622
1623 if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {
1624 dev_dbg(dev, "Trying to reset VF %d, but all VF resets are disabled\n",
1625 vf->vf_id);
1626 return true;
1627 }
1628
1629 if (ice_is_vf_disabled(vf)) {
1630 dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
1631 vf->vf_id);
1632 return true;
1633 }
1634
1635 /* Set VF disable bit state here, before triggering reset */
1636 set_bit(ICE_VF_STATE_DIS, vf->vf_states);
1637 ice_trigger_vf_reset(vf, is_vflr, false);
1638
1639 vsi = ice_get_vf_vsi(vf);
1640
1641 if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
1642 ice_dis_vf_qs(vf);
1643
1644 /* Call the Disable LAN Tx queue AQ command whether or not queues are
1645 * enabled. This is needed for successful completion of VFR.
1646 */
1647 ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
1648 NULL, ICE_VF_RESET, vf->vf_id, NULL);
1649
1650 hw = &pf->hw;
1651 /* poll VPGEN_VFRSTAT reg to make sure
1652 * that reset is complete
1653 */
1654 for (i = 0; i < 10; i++) {
1655 /* VF reset requires driver to first reset the VF and then
1656 * poll the status register to make sure that the reset
1657 * completed successfully.
1658 */
1659 reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
1660 if (reg & VPGEN_VFRSTAT_VFRD_M) {
1661 rsd = true;
1662 break;
1663 }
1664
1665 /* only sleep if the reset is not done */
1666 usleep_range(10, 20);
1667 }
1668
1669 vf->driver_caps = 0;
1670 ice_vc_set_default_allowlist(vf);
1671
1672 /* Display a warning if the VF didn't manage to reset in time, but
1673 * continue on with the operation.
1674 */
1675 if (!rsd)
1676 dev_warn(dev, "VF reset check timeout on VF %d\n", vf->vf_id);
1677
1678 /* disable promiscuous modes in case they were enabled
1679 * ignore any error if disabling process failed
1680 */
1681 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
1682 test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
1683 if (vf->port_vlan_info || vsi->num_vlan)
1684 promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
1685 else
1686 promisc_m = ICE_UCAST_PROMISC_BITS;
1687
1688 vsi = ice_get_vf_vsi(vf);
1689 if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
1690 dev_err(dev, "disabling promiscuous mode failed\n");
1691 }
1692
1693 ice_vf_fdir_exit(vf);
1694 /* clean VF control VSI when resetting the VF since it should be set
1695 * up only when the VF creates its first FDIR rule.
1696 */
1697 if (vf->ctrl_vsi_idx != ICE_NO_VSI)
1698 ice_vf_ctrl_vsi_release(vf);
1699
1700 ice_vf_pre_vsi_rebuild(vf);
1701 ice_vf_rebuild_vsi_with_release(vf);
1702 ice_vf_post_vsi_rebuild(vf);
1703
1704 /* if the VF has been reset allow it to come up again */
1705 if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, vf->vf_id))
1706 dev_dbg(dev, "failed to clear malicious VF state for VF %u\n", i);
1707
1708 return true;
1709 }
1710
1711 /**
1712 * ice_vc_notify_link_state - Inform all VFs on a PF of link status
1713 * @pf: pointer to the PF structure
1714 */
1715 void ice_vc_notify_link_state(struct ice_pf *pf)
1716 {
1717 int i;
1718
1719 ice_for_each_vf(pf, i)
1720 ice_vc_notify_vf_link_state(&pf->vf[i]);
1721 }
1722
1723 /**
1724 * ice_vc_notify_reset - Send pending reset message to all VFs
1725 * @pf: pointer to the PF structure
1726 *
1727 * indicate a pending reset to all VFs on a given PF
1728 */
1729 void ice_vc_notify_reset(struct ice_pf *pf)
1730 {
1731 struct virtchnl_pf_event pfe;
1732
1733 if (!pf->num_alloc_vfs)
1734 return;
1735
1736 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
1737 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
1738 ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
1739 (u8 *)&pfe, sizeof(struct virtchnl_pf_event));
1740 }
1741
1742 /**
1743 * ice_vc_notify_vf_reset - Notify VF of a reset event
1744 * @vf: pointer to the VF structure
1745 */
1746 static void ice_vc_notify_vf_reset(struct ice_vf *vf)
1747 {
1748 struct virtchnl_pf_event pfe;
1749 struct ice_pf *pf;
1750
1751 if (!vf)
1752 return;
1753
1754 pf = vf->pf;
1755 if (ice_validate_vf_id(pf, vf->vf_id))
1756 return;
1757
1758 /* Bail out if the VF is disabled, or is neither in the initialized
1759 * nor the active state - otherwise proceed with notifications
1760 */
1761 if ((!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
1762 !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) ||
1763 test_bit(ICE_VF_STATE_DIS, vf->vf_states))
1764 return;
1765
1766 pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
1767 pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
1768 ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
1769 VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
1770 NULL);
1771 }
1772
1773 /**
1774 * ice_init_vf_vsi_res - initialize/setup VF VSI resources
1775 * @vf: VF to initialize/setup the VSI for
1776 *
1777 * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up
1778 * the VF VSI's broadcast filter. It is only used during initial VF creation.
1779 */
1780 static int ice_init_vf_vsi_res(struct ice_vf *vf)
1781 {
1782 struct ice_pf *pf = vf->pf;
1783 u8 broadcast[ETH_ALEN];
1784 enum ice_status status;
1785 struct ice_vsi *vsi;
1786 struct device *dev;
1787 int err;
1788
1789 vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
1790
1791 dev = ice_pf_to_dev(pf);
1792 vsi = ice_vf_vsi_setup(vf);
1793 if (!vsi)
1794 return -ENOMEM;
1795
1796 err = ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI);
1797 if (err) {
1798 dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
1799 vf->vf_id);
1800 goto release_vsi;
1801 }
1802
1803 eth_broadcast_addr(broadcast);
1804 status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
1805 if (status) {
1806 dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %s\n",
1807 vf->vf_id, ice_stat_str(status));
1808 err = ice_status_to_errno(status);
1809 goto release_vsi;
1810 }
1811
1812 vf->num_mac = 1;
1813
1814 return 0;
1815
1816 release_vsi:
1817 ice_vf_vsi_release(vf);
1818 return err;
1819 }
1820
1821 /**
1822 * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
1823 * @pf: PF the VFs are associated with
1824 */
1825 static int ice_start_vfs(struct ice_pf *pf)
1826 {
1827 struct ice_hw *hw = &pf->hw;
1828 int retval, i;
1829
1830 ice_for_each_vf(pf, i) {
1831 struct ice_vf *vf = &pf->vf[i];
1832
1833 ice_clear_vf_reset_trigger(vf);
1834
1835 retval = ice_init_vf_vsi_res(vf);
1836 if (retval) {
1837 dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
1838 vf->vf_id, retval);
1839 goto teardown;
1840 }
1841
1842 set_bit(ICE_VF_STATE_INIT, vf->vf_states);
1843 ice_ena_vf_mappings(vf);
1844 wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
1845 }
1846
1847 ice_flush(hw);
1848 return 0;
1849
1850 teardown:
1851 for (i = i - 1; i >= 0; i--) {
1852 struct ice_vf *vf = &pf->vf[i];
1853
1854 ice_dis_vf_mappings(vf);
1855 ice_vf_vsi_release(vf);
1856 }
1857
1858 return retval;
1859 }
1860
1861 /**
1862 * ice_set_dflt_settings_vfs - set VF defaults during initialization/creation
1863 * @pf: PF holding reference to all VFs for default configuration
1864 */
1865 static void ice_set_dflt_settings_vfs(struct ice_pf *pf)
1866 {
1867 int i;
1868
1869 ice_for_each_vf(pf, i) {
1870 struct ice_vf *vf = &pf->vf[i];
1871
1872 vf->pf = pf;
1873 vf->vf_id = i;
1874 vf->vf_sw_id = pf->first_sw;
1875 /* assign default capabilities */
1876 set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vf->vf_caps);
1877 vf->spoofchk = true;
1878 vf->num_vf_qs = pf->num_qps_per_vf;
1879 ice_vc_set_default_allowlist(vf);
1880
1881 /* ctrl_vsi_idx will be set to a valid value only when VF
1882 * creates its first FDIR rule.
1883 */
1884 ice_vf_ctrl_invalidate_vsi(vf);
1885 ice_vf_fdir_init(vf);
1886 }
1887 }
1888
1889 /**
1890 * ice_alloc_vfs - allocate num_vfs in the PF structure
1891 * @pf: PF to store the allocated VFs in
1892 * @num_vfs: number of VFs to allocate
1893 */
1894 static int ice_alloc_vfs(struct ice_pf *pf, int num_vfs)
1895 {
1896 struct ice_vf *vfs;
1897
1898 vfs = devm_kcalloc(ice_pf_to_dev(pf), num_vfs, sizeof(*vfs),
1899 GFP_KERNEL);
1900 if (!vfs)
1901 return -ENOMEM;
1902
1903 pf->vf = vfs;
1904 pf->num_alloc_vfs = num_vfs;
1905
1906 return 0;
1907 }
1908
1909 /**
1910 * ice_ena_vfs - enable VFs so they are ready to be used
1911 * @pf: pointer to the PF structure
1912 * @num_vfs: number of VFs to enable
1913 */
1914 static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
1915 {
1916 struct device *dev = ice_pf_to_dev(pf);
1917 struct ice_hw *hw = &pf->hw;
1918 int ret;
1919
1920 /* Disable global interrupt 0 so we don't try to handle the VFLR. */
1921 wr32(hw, GLINT_DYN_CTL(pf->oicr_idx),
1922 ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
1923 set_bit(ICE_OICR_INTR_DIS, pf->state);
1924 ice_flush(hw);
1925
1926 ret = pci_enable_sriov(pf->pdev, num_vfs);
1927 if (ret) {
1928 pf->num_alloc_vfs = 0;
1929 goto err_unroll_intr;
1930 }
1931
1932 ret = ice_alloc_vfs(pf, num_vfs);
1933 if (ret)
1934 goto err_pci_disable_sriov;
1935
1936 if (ice_set_per_vf_res(pf)) {
1937 dev_err(dev, "Not enough resources for %d VFs, try with fewer number of VFs\n",
1938 num_vfs);
1939 ret = -ENOSPC;
1940 goto err_unroll_sriov;
1941 }
1942
1943 ice_set_dflt_settings_vfs(pf);
1944
1945 if (ice_start_vfs(pf)) {
1946 dev_err(dev, "Failed to start VF(s)\n");
1947 ret = -EAGAIN;
1948 goto err_unroll_sriov;
1949 }
1950
1951 clear_bit(ICE_VF_DIS, pf->state);
1952 return 0;
1953
1954 err_unroll_sriov:
1955 devm_kfree(dev, pf->vf);
1956 pf->vf = NULL;
1957 pf->num_alloc_vfs = 0;
1958 err_pci_disable_sriov:
1959 pci_disable_sriov(pf->pdev);
1960 err_unroll_intr:
1961 /* rearm interrupts here */
1962 ice_irq_dynamic_ena(hw, NULL, NULL);
1963 clear_bit(ICE_OICR_INTR_DIS, pf->state);
1964 return ret;
1965 }
1966
1967 /**
1968 * ice_pci_sriov_ena - Enable or change number of VFs
1969 * @pf: pointer to the PF structure
1970 * @num_vfs: number of VFs to allocate
1971 *
1972 * Returns 0 on success and negative on failure
1973 */
1974 static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
1975 {
1976 int pre_existing_vfs = pci_num_vf(pf->pdev);
1977 struct device *dev = ice_pf_to_dev(pf);
1978 int err;
1979
1980 if (pre_existing_vfs && pre_existing_vfs != num_vfs)
1981 ice_free_vfs(pf);
1982 else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
1983 return 0;
1984
1985 if (num_vfs > pf->num_vfs_supported) {
1986 dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
1987 num_vfs, pf->num_vfs_supported);
1988 return -EOPNOTSUPP;
1989 }
1990
1991 dev_info(dev, "Enabling %d VFs\n", num_vfs);
1992 err = ice_ena_vfs(pf, num_vfs);
1993 if (err) {
1994 dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
1995 return err;
1996 }
1997
1998 set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
1999 return 0;
2000 }
2001
2002 /**
2003 * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
2004 * @pf: PF to enable SR-IOV on
2005 */
2006 static int ice_check_sriov_allowed(struct ice_pf *pf)
2007 {
2008 struct device *dev = ice_pf_to_dev(pf);
2009
2010 if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
2011 dev_err(dev, "This device is not capable of SR-IOV\n");
2012 return -EOPNOTSUPP;
2013 }
2014
2015 if (ice_is_safe_mode(pf)) {
2016 dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
2017 return -EOPNOTSUPP;
2018 }
2019
2020 if (!ice_pf_state_is_nominal(pf)) {
2021 dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
2022 return -EBUSY;
2023 }
2024
2025 return 0;
2026 }
2027
2028 /**
2029 * ice_sriov_configure - Enable or change number of VFs via sysfs
2030 * @pdev: pointer to a pci_dev structure
2031 * @num_vfs: number of VFs to allocate or 0 to free VFs
2032 *
2033 * This function is called when the user updates the number of VFs in sysfs. On
2034 * success return whatever num_vfs was set to by the caller. Return negative on
2035 * failure.
2036 */
2037 int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
2038 {
2039 struct ice_pf *pf = pci_get_drvdata(pdev);
2040 struct device *dev = ice_pf_to_dev(pf);
2041 enum ice_status status;
2042 int err;
2043
2044 err = ice_check_sriov_allowed(pf);
2045 if (err)
2046 return err;
2047
2048 if (!num_vfs) {
2049 if (!pci_vfs_assigned(pdev)) {
2050 ice_mbx_deinit_snapshot(&pf->hw);
2051 ice_free_vfs(pf);
2052 if (pf->lag)
2053 ice_enable_lag(pf->lag);
2054 return 0;
2055 }
2056
2057 dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
2058 return -EBUSY;
2059 }
2060
2061 status = ice_mbx_init_snapshot(&pf->hw, num_vfs);
2062 if (status)
2063 return ice_status_to_errno(status);
2064
2065 err = ice_pci_sriov_ena(pf, num_vfs);
2066 if (err) {
2067 ice_mbx_deinit_snapshot(&pf->hw);
2068 return err;
2069 }
2070
2071 if (pf->lag)
2072 ice_disable_lag(pf->lag);
2073 return num_vfs;
2074 }
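/* Typical trigger from the host, via the standard PCI sysfs attribute
 * (paths illustrative):
 *   echo 4 > /sys/bus/pci/devices/<BDF>/sriov_numvfs   (enable 4 VFs)
 *   echo 0 > /sys/bus/pci/devices/<BDF>/sriov_numvfs   (free all VFs)
 */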
2075
2076 /**
2077 * ice_process_vflr_event - Free VF resources via IRQ calls
2078 * @pf: pointer to the PF structure
2079 *
2080 * called from the VFLR IRQ handler to
2081 * free up VF resources and state variables
2082 */
2083 void ice_process_vflr_event(struct ice_pf *pf)
2084 {
2085 struct ice_hw *hw = &pf->hw;
2086 unsigned int vf_id;
2087 u32 reg;
2088
2089 if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
2090 !pf->num_alloc_vfs)
2091 return;
2092
2093 ice_for_each_vf(pf, vf_id) {
2094 struct ice_vf *vf = &pf->vf[vf_id];
2095 u32 reg_idx, bit_idx;
2096
2097 reg_idx = (hw->func_caps.vf_base_id + vf_id) / 32;
2098 bit_idx = (hw->func_caps.vf_base_id + vf_id) % 32;
2099 /* read GLGEN_VFLRSTAT register to find out the flr VFs */
2100 reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
2101 if (reg & BIT(bit_idx))
2102 /* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
2103 ice_reset_vf(vf, true);
2104 }
2105 }
2106
2107 /**
2108 * ice_vc_reset_vf - Perform software reset on the VF after informing the AVF
2109 * @vf: pointer to the VF info
2110 */
2111 static void ice_vc_reset_vf(struct ice_vf *vf)
2112 {
2113 ice_vc_notify_vf_reset(vf);
2114 ice_reset_vf(vf, false);
2115 }
2116
2117 /**
2118 * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
2119 * @pf: PF used to index all VFs
2120 * @pfq: queue index relative to the PF's function space
2121 *
2122 * If no VF is found that owns the pfq then return NULL, otherwise return a
2123 * pointer to the VF that owns the pfq
2124 */
2125 static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
2126 {
2127 unsigned int vf_id;
2128
2129 ice_for_each_vf(pf, vf_id) {
2130 struct ice_vf *vf = &pf->vf[vf_id];
2131 struct ice_vsi *vsi;
2132 u16 rxq_idx;
2133
2134 vsi = ice_get_vf_vsi(vf);
2135
2136 ice_for_each_rxq(vsi, rxq_idx)
2137 if (vsi->rxq_map[rxq_idx] == pfq)
2138 return vf;
2139 }
2140
2141 return NULL;
2142 }
2143
2144 /**
2145 * ice_globalq_to_pfq - convert from global queue index to PF space queue index
2146 * @pf: PF used for conversion
2147 * @globalq: global queue index used to convert to PF space queue index
2148 */
2149 static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
2150 {
2151 return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
2152 }
2153
2154 /**
2155 * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
2156 * @pf: PF that the LAN overflow event happened on
2157 * @event: structure holding the event information for the LAN overflow event
2158 *
2159 * Determine if the LAN overflow event was caused by a VF queue. If it was not
2160 * caused by a VF, do nothing. If a VF caused this LAN overflow event,
2161 * trigger a reset on the offending VF.
2162 */
2163 void
2164 ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
2165 {
2166 u32 gldcb_rtctq, queue;
2167 struct ice_vf *vf;
2168
2169 gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
2170 dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
2171
2172 /* event returns device global Rx queue number */
2173 queue = (gldcb_rtctq & GLDCB_RTCTQ_RXQNUM_M) >>
2174 GLDCB_RTCTQ_RXQNUM_S;
2175
2176 vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
2177 if (!vf)
2178 return;
2179
2180 ice_vc_reset_vf(vf);
2181 }
2182
2183 /**
2184 * ice_vc_send_msg_to_vf - Send message to VF
2185 * @vf: pointer to the VF info
2186 * @v_opcode: virtual channel opcode
2187 * @v_retval: virtual channel return value
2188 * @msg: pointer to the msg buffer
2189 * @msglen: msg length
2190 *
2191 * send msg to VF
2192 */
2193 int
2194 ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
2195 enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
2196 {
2197 enum ice_status aq_ret;
2198 struct device *dev;
2199 struct ice_pf *pf;
2200
2201 if (!vf)
2202 return -EINVAL;
2203
2204 pf = vf->pf;
2205 if (ice_validate_vf_id(pf, vf->vf_id))
2206 return -EINVAL;
2207
2208 dev = ice_pf_to_dev(pf);
2209
2210 /* single place to detect unsuccessful return values */
2211 if (v_retval) {
2212 vf->num_inval_msgs++;
2213 dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id,
2214 v_opcode, v_retval);
2215 if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
2216 dev_err(dev, "Number of invalid messages exceeded for VF %d\n",
2217 vf->vf_id);
2218 dev_err(dev, "Use PF Control I/F to enable the VF\n");
2219 set_bit(ICE_VF_STATE_DIS, vf->vf_states);
2220 return -EIO;
2221 }
2222 } else {
2223 vf->num_valid_msgs++;
2224 /* reset the invalid counter, if a valid message is received. */
2225 vf->num_inval_msgs = 0;
2226 }
2227
2228 aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
2229 msg, msglen, NULL);
2230 if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
2231 dev_info(dev, "Unable to send the message to VF %d ret %s aq_err %s\n",
2232 vf->vf_id, ice_stat_str(aq_ret),
2233 ice_aq_str(pf->hw.mailboxq.sq_last_status));
2234 return -EIO;
2235 }
2236
2237 return 0;
2238 }
2239
2240 /**
2241 * ice_vc_get_ver_msg
2242 * @vf: pointer to the VF info
2243 * @msg: pointer to the msg buffer
2244 *
2245 * called from the VF to request the API version used by the PF
2246 */
2247 static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
2248 {
2249 struct virtchnl_version_info info = {
2250 VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR
2251 };
2252
2253 vf->vf_ver = *(struct virtchnl_version_info *)msg;
2254 /* VFs running the 1.0 API expect to get 1.0 back or they will cry. */
2255 if (VF_IS_V10(&vf->vf_ver))
2256 info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
2257
2258 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
2259 VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
2260 sizeof(struct virtchnl_version_info));
2261 }
2262
2263 /**
2264 * ice_vc_get_max_frame_size - get max frame size allowed for VF
2265 * @vf: VF used to determine max frame size
2266 *
2267 * Max frame size is determined based on the current port's max frame size and
2268 * whether a port VLAN is configured on this VF. The VF is not aware whether
2269 * it's in a port VLAN, so the PF needs to account for this in max frame size
2270 * checks and when sending the max frame size to the VF.
2271 */
2272 static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
2273 {
2274 struct ice_port_info *pi = ice_vf_get_port_info(vf);
2275 u16 max_frame_size;
2276
2277 max_frame_size = pi->phy.link_info.max_frame_size;
2278
2279 if (vf->port_vlan_info)
2280 max_frame_size -= VLAN_HLEN;
2281
2282 return max_frame_size;
2283 }
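/* Example (port max frame size assumed, VLAN_HLEN == 4): a port
 * max_frame_size of 9728 is reported to a VF in a port VLAN as 9724,
 * reserving room for the tag the VF cannot see.
 */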
2284
2285 /**
2286 * ice_vc_get_vf_res_msg
2287 * @vf: pointer to the VF info
2288 * @msg: pointer to the msg buffer
2289 *
2290 * called from the VF to request its resources
2291 */
2292 static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
2293 {
2294 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2295 struct virtchnl_vf_resource *vfres = NULL;
2296 struct ice_pf *pf = vf->pf;
2297 struct ice_vsi *vsi;
2298 int len = 0;
2299 int ret;
2300
2301 if (ice_check_vf_init(pf, vf)) {
2302 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2303 goto err;
2304 }
2305
2306 len = sizeof(struct virtchnl_vf_resource);
2307
2308 vfres = kzalloc(len, GFP_KERNEL);
2309 if (!vfres) {
2310 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
2311 len = 0;
2312 goto err;
2313 }
2314 if (VF_IS_V11(&vf->vf_ver))
2315 vf->driver_caps = *(u32 *)msg;
2316 else
2317 vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 |
2318 VIRTCHNL_VF_OFFLOAD_RSS_REG |
2319 VIRTCHNL_VF_OFFLOAD_VLAN;
2320
2321 vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
2322 vsi = ice_get_vf_vsi(vf);
2323 if (!vsi) {
2324 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2325 goto err;
2326 }
2327
2328 if (!vsi->info.pvid)
2329 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
2330
2331 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
2332 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF;
2333 } else {
2334 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ)
2335 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ;
2336 else
2337 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG;
2338 }
2339
2340 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
2341 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF;
2342
2343 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
2344 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
2345
2346 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP)
2347 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP;
2348
2349 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM)
2350 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM;
2351
2352 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_POLLING)
2353 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_POLLING;
2354
2355 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
2356 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
2357
2358 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
2359 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
2360
2361 if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
2362 vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
2363
2364 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF)
2365 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF;
2366
2367 if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO)
2368 vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_USO;
2369
2370 vfres->num_vsis = 1;
2371 /* Tx and Rx queues are equal for the VF */
2372 vfres->num_queue_pairs = vsi->num_txq;
2373 vfres->max_vectors = pf->num_msix_per_vf;
2374 vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
2375 vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
2376 vfres->max_mtu = ice_vc_get_max_frame_size(vf);
2377
2378 vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
2379 vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
2380 vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
2381 ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
2382 vf->dflt_lan_addr.addr);
2383
2384 /* match guest capabilities */
2385 vf->driver_caps = vfres->vf_cap_flags;
2386
2387 ice_vc_set_caps_allowlist(vf);
2388 ice_vc_set_working_allowlist(vf);
2389
2390 set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
2391
2392 err:
2393 /* send the response back to the VF */
2394 ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
2395 (u8 *)vfres, len);
2396
2397 kfree(vfres);
2398 return ret;
2399 }
2400
2401 /**
2402 * ice_vc_reset_vf_msg
2403 * @vf: pointer to the VF info
2404 *
2405 * called from the VF to reset itself; unlike
2406 * other virtchnl messages, the PF driver
2407 * doesn't send a response back to the VF
2408 */
2409 static void ice_vc_reset_vf_msg(struct ice_vf *vf)
2410 {
2411 if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
2412 ice_reset_vf(vf, false);
2413 }
2414
2415 /**
2416 * ice_find_vsi_from_id
2417 * @pf: the PF structure to search for the VSI
2418 * @id: ID of the VSI it is searching for
2419 *
2420 * searches for the VSI with the given ID
2421 */
2422 static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
2423 {
2424 int i;
2425
2426 ice_for_each_vsi(pf, i)
2427 if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
2428 return pf->vsi[i];
2429
2430 return NULL;
2431 }
2432
2433 /**
2434 * ice_vc_isvalid_vsi_id
2435 * @vf: pointer to the VF info
2436 * @vsi_id: VF relative VSI ID
2437 *
2438 * check for the valid VSI ID
2439 */
2440 bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
2441 {
2442 struct ice_pf *pf = vf->pf;
2443 struct ice_vsi *vsi;
2444
2445 vsi = ice_find_vsi_from_id(pf, vsi_id);
2446
2447 return (vsi && (vsi->vf_id == vf->vf_id));
2448 }
2449
2450 /**
2451 * ice_vc_isvalid_q_id
2452 * @vf: pointer to the VF info
2453 * @vsi_id: VSI ID
2454 * @qid: VSI relative queue ID
2455 *
2456 * check for the valid queue ID
2457 */
2458 static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
2459 {
2460 struct ice_vsi *vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
2461 /* allocated Tx and Rx queues should always be equal for a VF VSI */
2462 return (vsi && (qid < vsi->alloc_txq));
2463 }
2464
2465 /**
2466 * ice_vc_isvalid_ring_len
2467 * @ring_len: length of ring
2468 *
2469 * check for the valid ring count, should be multiple of ICE_REQ_DESC_MULTIPLE
2470 * or zero
2471 */
2472 static bool ice_vc_isvalid_ring_len(u16 ring_len)
2473 {
2474 return ring_len == 0 ||
2475 (ring_len >= ICE_MIN_NUM_DESC &&
2476 ring_len <= ICE_MAX_NUM_DESC &&
2477 !(ring_len % ICE_REQ_DESC_MULTIPLE));
2478 }
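/* Illustration, assuming the driver's usual descriptor limits
 * (ICE_MIN_NUM_DESC = 64, ICE_MAX_NUM_DESC = 8160,
 * ICE_REQ_DESC_MULTIPLE = 32): ring_len values of 0, 64, 512 and 8160
 * pass this check, while 100 (not a multiple of 32) and 8192 (too
 * large) are rejected.
 */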
2479
2480 /**
2481 * ice_vc_parse_rss_cfg - parses hash fields and headers from
2482 * a specific virtchnl RSS cfg
2483 * @hw: pointer to the hardware
2484 * @rss_cfg: pointer to the virtchnl RSS cfg
2485 * @addl_hdrs: pointer to the protocol header fields (ICE_FLOW_SEG_HDR_*)
2486 * to configure
2487 * @hash_flds: pointer to the hash bit fields (ICE_FLOW_HASH_*) to configure
2488 *
2489 * Return true if all the protocol header and hash fields in the RSS cfg could
2490 * be parsed, else return false
2491 *
2492 * This function parses the virtchnl RSS cfg into the intended
2493 * hash fields and the intended headers for RSS configuration
2494 */
2495 static bool
2496 ice_vc_parse_rss_cfg(struct ice_hw *hw, struct virtchnl_rss_cfg *rss_cfg,
2497 u32 *addl_hdrs, u64 *hash_flds)
2498 {
2499 const struct ice_vc_hash_field_match_type *hf_list;
2500 const struct ice_vc_hdr_match_type *hdr_list;
2501 int i, hf_list_len, hdr_list_len;
2502
2503 if (!strncmp(hw->active_pkg_name, "ICE COMMS Package",
2504 sizeof(hw->active_pkg_name))) {
2505 hf_list = ice_vc_hash_field_list_comms;
2506 hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list_comms);
2507 hdr_list = ice_vc_hdr_list_comms;
2508 hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list_comms);
2509 } else {
2510 hf_list = ice_vc_hash_field_list_os;
2511 hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list_os);
2512 hdr_list = ice_vc_hdr_list_os;
2513 hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list_os);
2514 }
2515
2516 for (i = 0; i < rss_cfg->proto_hdrs.count; i++) {
2517 struct virtchnl_proto_hdr *proto_hdr =
2518 &rss_cfg->proto_hdrs.proto_hdr[i];
2519 bool hdr_found = false;
2520 int j;
2521
2522 /* Find matched ice headers according to virtchnl headers. */
2523 for (j = 0; j < hdr_list_len; j++) {
2524 struct ice_vc_hdr_match_type hdr_map = hdr_list[j];
2525
2526 if (proto_hdr->type == hdr_map.vc_hdr) {
2527 *addl_hdrs |= hdr_map.ice_hdr;
2528 hdr_found = true;
2529 }
2530 }
2531
2532 if (!hdr_found)
2533 return false;
2534
2535 /* Find matched ice hash fields according to
2536 * virtchnl hash fields.
2537 */
2538 for (j = 0; j < hf_list_len; j++) {
2539 struct ice_vc_hash_field_match_type hf_map = hf_list[j];
2540
2541 if (proto_hdr->type == hf_map.vc_hdr &&
2542 proto_hdr->field_selector == hf_map.vc_hash_field) {
2543 *hash_flds |= hf_map.ice_hash_field;
2544 break;
2545 }
2546 }
2547 }
2548
2549 return true;
2550 }
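/* Illustrative outcome (assumed example, not an exhaustive mapping): a
 * cfg whose proto_hdr list names the virtchnl IPv4 and TCP headers with
 * source/destination selectors set accumulates the IPv4 and TCP flow
 * segment bits in *addl_hdrs and the matching address/port hash bits in
 * *hash_flds; any header with no match in the active hdr_list makes the
 * whole parse fail.
 */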
2551
2552 /**
2553 * ice_vf_adv_rss_offload_ena - determine if capabilities support advanced
2554 * RSS offloads
2555 * @caps: VF driver negotiated capabilities
2556 *
2557 * Return true if VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF capability is set,
2558 * else return false
2559 */
2560 static bool ice_vf_adv_rss_offload_ena(u32 caps)
2561 {
2562 return !!(caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF);
2563 }
2564
2565 /**
2566 * ice_vc_handle_rss_cfg
2567 * @vf: pointer to the VF info
2568 * @msg: pointer to the message buffer
2569 * @add: add a RSS config if true, otherwise delete a RSS config
2570 *
2571 * This function adds/deletes a RSS config
2572 */
2573 static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
2574 {
2575 u32 v_opcode = add ? VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG;
2576 struct virtchnl_rss_cfg *rss_cfg = (struct virtchnl_rss_cfg *)msg;
2577 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2578 struct device *dev = ice_pf_to_dev(vf->pf);
2579 struct ice_hw *hw = &vf->pf->hw;
2580 struct ice_vsi *vsi;
2581
2582 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
2583 dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n",
2584 vf->vf_id);
2585 v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
2586 goto error_param;
2587 }
2588
2589 if (!ice_vf_adv_rss_offload_ena(vf->driver_caps)) {
2590 dev_dbg(dev, "VF %d attempting to configure RSS, but Advanced RSS offload is not supported\n",
2591 vf->vf_id);
2592 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2593 goto error_param;
2594 }
2595
2596 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2597 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2598 goto error_param;
2599 }
2600
2601 if (rss_cfg->proto_hdrs.count > VIRTCHNL_MAX_NUM_PROTO_HDRS ||
2602 rss_cfg->rss_algorithm < VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC ||
2603 rss_cfg->rss_algorithm > VIRTCHNL_RSS_ALG_XOR_SYMMETRIC) {
2604 dev_dbg(dev, "VF %d attempting to configure RSS, but RSS configuration is not valid\n",
2605 vf->vf_id);
2606 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2607 goto error_param;
2608 }
2609
2610 vsi = ice_get_vf_vsi(vf);
2611 if (!vsi) {
2612 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2613 goto error_param;
2614 }
2615
2616 if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) {
2617 struct ice_vsi_ctx *ctx;
2618 enum ice_status status;
2619 u8 lut_type, hash_type;
2620
2621 lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
2622 hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_XOR :
2623 ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
2624
2625 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2626 if (!ctx) {
2627 v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
2628 goto error_param;
2629 }
2630
2631 ctx->info.q_opt_rss = ((lut_type <<
2632 ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
2633 ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
2634 (hash_type &
2635 ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
2636
2637 /* Preserve existing queueing option setting */
2638 ctx->info.q_opt_rss |= (vsi->info.q_opt_rss &
2639 ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M);
2640 ctx->info.q_opt_tc = vsi->info.q_opt_tc;
2641 ctx->info.q_opt_flags = vsi->info.q_opt_rss;
2642
2643 ctx->info.valid_sections =
2644 cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
2645
2646 status = ice_update_vsi(hw, vsi->idx, ctx, NULL);
2647 if (status) {
2648 dev_err(dev, "update VSI for RSS failed, err %s aq_err %s\n",
2649 ice_stat_str(status),
2650 ice_aq_str(hw->adminq.sq_last_status));
2651 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2652 } else {
2653 vsi->info.q_opt_rss = ctx->info.q_opt_rss;
2654 }
2655
2656 kfree(ctx);
2657 } else {
2658 u32 addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
2659 u64 hash_flds = ICE_HASH_INVALID;
2660
2661 if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &addl_hdrs,
2662 &hash_flds)) {
2663 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2664 goto error_param;
2665 }
2666
2667 if (add) {
2668 if (ice_add_rss_cfg(hw, vsi->idx, hash_flds,
2669 addl_hdrs)) {
2670 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2671 dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n",
2672 vsi->vsi_num, v_ret);
2673 }
2674 } else {
2675 enum ice_status status;
2676
2677 status = ice_rem_rss_cfg(hw, vsi->idx, hash_flds,
2678 addl_hdrs);
2679 /* We just ignore ICE_ERR_DOES_NOT_EXIST, because
2680 * if two configurations share the same profile, removing
2681 * one of them actually removes both, since the
2682 * profile is deleted.
2683 */
2684 if (status && status != ICE_ERR_DOES_NOT_EXIST) {
2685 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2686 dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%s\n",
2687 vf->vf_id, ice_stat_str(status));
2688 }
2689 }
2690 }
2691
2692 error_param:
2693 return ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0);
2694 }
2695
2696 /**
2697 * ice_vc_config_rss_key
2698 * @vf: pointer to the VF info
2699 * @msg: pointer to the msg buffer
2700 *
2701 * Configure the VF's RSS key
2702 */
2703 static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
2704 {
2705 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2706 struct virtchnl_rss_key *vrk =
2707 (struct virtchnl_rss_key *)msg;
2708 struct ice_vsi *vsi;
2709
2710 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2711 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2712 goto error_param;
2713 }
2714
2715 if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
2716 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2717 goto error_param;
2718 }
2719
2720 if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
2721 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2722 goto error_param;
2723 }
2724
2725 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
2726 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2727 goto error_param;
2728 }
2729
2730 vsi = ice_get_vf_vsi(vf);
2731 if (!vsi) {
2732 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2733 goto error_param;
2734 }
2735
2736 if (ice_set_rss_key(vsi, vrk->key))
2737 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2738 error_param:
2739 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
2740 NULL, 0);
2741 }
2742
2743 /**
2744 * ice_vc_config_rss_lut
2745 * @vf: pointer to the VF info
2746 * @msg: pointer to the msg buffer
2747 *
2748 * Configure the VF's RSS LUT
2749 */
2750 static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
2751 {
2752 struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
2753 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2754 struct ice_vsi *vsi;
2755
2756 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2757 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2758 goto error_param;
2759 }
2760
2761 if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
2762 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2763 goto error_param;
2764 }
2765
2766 if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
2767 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2768 goto error_param;
2769 }
2770
2771 if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
2772 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2773 goto error_param;
2774 }
2775
2776 vsi = ice_get_vf_vsi(vf);
2777 if (!vsi) {
2778 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2779 goto error_param;
2780 }
2781
2782 if (ice_set_rss_lut(vsi, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
2783 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
2784 error_param:
2785 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
2786 NULL, 0);
2787 }
2788
2789 /**
2790 * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
2791 * @vf: the VF being reset
2792 *
2793 * The max poll time is about 800 ms, which is roughly the maximum time it takes
2794 * for a VF to be reset and/or a VF driver to be removed.
2795 */
2796 static void ice_wait_on_vf_reset(struct ice_vf *vf)
2797 {
2798 int i;
2799
2800 for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
2801 if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
2802 break;
2803 msleep(ICE_MAX_VF_RESET_SLEEP_MS);
2804 }
2805 }
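/* The ~800 ms figure above assumes ICE_MAX_VF_RESET_TRIES == 40 and
 * ICE_MAX_VF_RESET_SLEEP_MS == 20 (40 * 20 ms = 800 ms).
 */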
2806
2807 /**
2808 * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
2809 * @vf: VF to check if it's ready to be configured/queried
2810 *
2811 * The purpose of this function is to make sure the VF is not in reset, not
2812 * disabled, and initialized so it can be configured and/or queried by a host
2813 * administrator.
2814 */
2815 static int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
2816 {
2817 struct ice_pf *pf;
2818
2819 ice_wait_on_vf_reset(vf);
2820
2821 if (ice_is_vf_disabled(vf))
2822 return -EINVAL;
2823
2824 pf = vf->pf;
2825 if (ice_check_vf_init(pf, vf))
2826 return -EBUSY;
2827
2828 return 0;
2829 }
2830
2831 /**
2832 * ice_set_vf_spoofchk
2833 * @netdev: network interface device structure
2834 * @vf_id: VF identifier
2835 * @ena: flag to enable or disable feature
2836 *
2837 * Enable or disable VF spoof checking
2838 */
2839 int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
2840 {
2841 struct ice_netdev_priv *np = netdev_priv(netdev);
2842 struct ice_pf *pf = np->vsi->back;
2843 struct ice_vsi_ctx *ctx;
2844 struct ice_vsi *vf_vsi;
2845 enum ice_status status;
2846 struct device *dev;
2847 struct ice_vf *vf;
2848 int ret;
2849
2850 dev = ice_pf_to_dev(pf);
2851 if (ice_validate_vf_id(pf, vf_id))
2852 return -EINVAL;
2853
2854 vf = &pf->vf[vf_id];
2855 ret = ice_check_vf_ready_for_cfg(vf);
2856 if (ret)
2857 return ret;
2858
2859 vf_vsi = ice_get_vf_vsi(vf);
2860 if (!vf_vsi) {
2861 netdev_err(netdev, "VSI %d for VF %d is null\n",
2862 vf->lan_vsi_idx, vf->vf_id);
2863 return -EINVAL;
2864 }
2865
2866 if (vf_vsi->type != ICE_VSI_VF) {
2867 netdev_err(netdev, "Type %d of VSI %d for VF %d is no ICE_VSI_VF\n",
2868 vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
2869 return -ENODEV;
2870 }
2871
2872 if (ena == vf->spoofchk) {
2873 dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
2874 return 0;
2875 }
2876
2877 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2878 if (!ctx)
2879 return -ENOMEM;
2880
2881 ctx->info.sec_flags = vf_vsi->info.sec_flags;
2882 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
2883 if (ena) {
2884 ctx->info.sec_flags |=
2885 ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
2886 (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2887 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
2888 } else {
2889 ctx->info.sec_flags &=
2890 ~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
2891 (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2892 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
2893 }
2894
2895 status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
2896 if (status) {
2897 dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %s\n",
2898 ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num,
2899 ice_stat_str(status));
2900 ret = -EIO;
2901 goto out;
2902 }
2903
2904 /* only update spoofchk state and VSI context on success */
2905 vf_vsi->info.sec_flags = ctx->info.sec_flags;
2906 vf->spoofchk = ena;
2907
2908 out:
2909 kfree(ctx);
2910 return ret;
2911 }
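/* Typically reached via the .ndo_set_vf_spoofchk callback, e.g.:
 *   ip link set dev <pf-netdev> vf 0 spoofchk on
 */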
2912
2913 /**
2914 * ice_is_any_vf_in_promisc - check if any VF(s) are in promiscuous mode
2915 * @pf: PF structure for accessing VF(s)
2916 *
2917 * Return false if no VF(s) are in unicast and/or multicast promiscuous mode,
2918 * else return true
2919 */
2920 bool ice_is_any_vf_in_promisc(struct ice_pf *pf)
2921 {
2922 int vf_idx;
2923
2924 ice_for_each_vf(pf, vf_idx) {
2925 struct ice_vf *vf = &pf->vf[vf_idx];
2926
2927 /* found a VF that has promiscuous mode configured */
2928 if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
2929 test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
2930 return true;
2931 }
2932
2933 return false;
2934 }
2935
2936 /**
2937 * ice_vc_cfg_promiscuous_mode_msg
2938 * @vf: pointer to the VF info
2939 * @msg: pointer to the msg buffer
2940 *
2941 * called from the VF to configure VF VSIs promiscuous mode
2942 */
2943 static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
2944 {
2945 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
2946 bool rm_promisc, alluni = false, allmulti = false;
2947 struct virtchnl_promisc_info *info =
2948 (struct virtchnl_promisc_info *)msg;
2949 struct ice_pf *pf = vf->pf;
2950 struct ice_vsi *vsi;
2951 struct device *dev;
2952 int ret = 0;
2953
2954 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
2955 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2956 goto error_param;
2957 }
2958
2959 if (!ice_vc_isvalid_vsi_id(vf, info->vsi_id)) {
2960 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2961 goto error_param;
2962 }
2963
2964 vsi = ice_get_vf_vsi(vf);
2965 if (!vsi) {
2966 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2967 goto error_param;
2968 }
2969
2970 dev = ice_pf_to_dev(pf);
2971 if (!test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
2972 dev_err(dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n",
2973 vf->vf_id);
2974 /* Leave v_ret alone, lie to the VF on purpose. */
2975 goto error_param;
2976 }
2977
2978 if (info->flags & FLAG_VF_UNICAST_PROMISC)
2979 alluni = true;
2980
2981 if (info->flags & FLAG_VF_MULTICAST_PROMISC)
2982 allmulti = true;
2983
2984 rm_promisc = !allmulti && !alluni;
2985
2986 if (vsi->num_vlan || vf->port_vlan_info) {
2987 struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
2988 struct net_device *pf_netdev;
2989
2990 if (!pf_vsi) {
2991 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
2992 goto error_param;
2993 }
2994
2995 pf_netdev = pf_vsi->netdev;
2996
2997 ret = ice_set_vf_spoofchk(pf_netdev, vf->vf_id, rm_promisc);
2998 if (ret) {
2999 dev_err(dev, "Failed to update spoofchk to %s for VF %d VSI %d when setting promiscuous mode\n",
3000 rm_promisc ? "ON" : "OFF", vf->vf_id,
3001 vsi->vsi_num);
3002 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3003 }
3004
3005 ret = ice_cfg_vlan_pruning(vsi, true, !rm_promisc);
3006 if (ret) {
3007 dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
3008 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3009 goto error_param;
3010 }
3011 }
3012
3013 if (!test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags)) {
3014 bool set_dflt_vsi = alluni || allmulti;
3015
3016 if (set_dflt_vsi && !ice_is_dflt_vsi_in_use(pf->first_sw))
3017 /* only attempt to set the default forwarding VSI if
3018 * it's not currently set
3019 */
3020 ret = ice_set_dflt_vsi(pf->first_sw, vsi);
3021 else if (!set_dflt_vsi &&
3022 ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
3023 /* only attempt to free the default forwarding VSI if we
3024 * are the owner
3025 */
3026 ret = ice_clear_dflt_vsi(pf->first_sw);
3027
3028 if (ret) {
3029 dev_err(dev, "%sable VF %d as the default VSI failed, error %d\n",
3030 set_dflt_vsi ? "en" : "dis", vf->vf_id, ret);
3031 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
3032 goto error_param;
3033 }
3034 } else {
3035 enum ice_status status;
3036 u8 promisc_m;
3037
3038 if (alluni) {
3039 if (vf->port_vlan_info || vsi->num_vlan)
3040 promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
3041 else
3042 promisc_m = ICE_UCAST_PROMISC_BITS;
3043 } else if (allmulti) {
3044 if (vf->port_vlan_info || vsi->num_vlan)
3045 promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
3046 else
3047 promisc_m = ICE_MCAST_PROMISC_BITS;
3048 } else {
3049 if (vf->port_vlan_info || vsi->num_vlan)
3050 promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
3051 else
3052 promisc_m = ICE_UCAST_PROMISC_BITS;
3053 }
3054
3055 /* Configure multicast/unicast with or without VLAN promiscuous
3056 * mode
3057 */
3058 status = ice_vf_set_vsi_promisc(vf, vsi, promisc_m, rm_promisc);
3059 if (status) {
3060 dev_err(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d failed, error: %s\n",
3061 rm_promisc ? "dis" : "en", vf->vf_id,
3062 ice_stat_str(status));
3063 v_ret = ice_err_to_virt_err(status);
3064 goto error_param;
3065 } else {
3066 dev_dbg(dev, "%sable Tx/Rx filter promiscuous mode on VF-%d succeeded\n",
3067 rm_promisc ? "dis" : "en", vf->vf_id);
3068 }
3069 }
3070
3071 if (allmulti &&
3072 !test_and_set_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
3073 dev_info(dev, "VF %u successfully set multicast promiscuous mode\n", vf->vf_id);
3074 else if (!allmulti && test_and_clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
3075 dev_info(dev, "VF %u successfully unset multicast promiscuous mode\n", vf->vf_id);
3076
3077 if (alluni && !test_and_set_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
3078 dev_info(dev, "VF %u successfully set unicast promiscuous mode\n", vf->vf_id);
3079 else if (!alluni && test_and_clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
3080 dev_info(dev, "VF %u successfully unset unicast promiscuous mode\n", vf->vf_id);
3081
3082 error_param:
3083 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
3084 v_ret, NULL, 0);
3085 }
3086
3087 /**
3088 * ice_vc_get_stats_msg
3089 * @vf: pointer to the VF info
3090 * @msg: pointer to the msg buffer
3091 *
3092 * called from the VF to get VSI stats
3093 */
3094 static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
3095 {
3096 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3097 struct virtchnl_queue_select *vqs =
3098 (struct virtchnl_queue_select *)msg;
3099 struct ice_eth_stats stats = { 0 };
3100 struct ice_vsi *vsi;
3101
3102 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3103 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3104 goto error_param;
3105 }
3106
3107 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
3108 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3109 goto error_param;
3110 }
3111
3112 vsi = ice_get_vf_vsi(vf);
3113 if (!vsi) {
3114 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3115 goto error_param;
3116 }
3117
3118 ice_update_eth_stats(vsi);
3119
3120 stats = vsi->eth_stats;
3121
3122 error_param:
3123 /* send the response to the VF */
3124 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
3125 (u8 *)&stats, sizeof(stats));
3126 }
3127
3128 /**
3129 * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
3130 * @vqs: virtchnl_queue_select structure containing bitmaps to validate
3131 *
3132 * Return true on successful validation, else false
3133 */
3134 static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
3135 {
3136 if ((!vqs->rx_queues && !vqs->tx_queues) ||
3137 vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
3138 vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
3139 return false;
3140
3141 return true;
3142 }
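/* Example, assuming ICE_MAX_RSS_QS_PER_VF == 16: rx_queues = 0x3
 * (queues 0-1) with tx_queues = 0 is valid, both bitmaps zero is
 * invalid, and any bit at position 16 or above is rejected.
 */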
3143
3144 /**
3145 * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
3146 * @vsi: VSI of the VF to configure
3147 * @q_idx: VF queue index used to determine the queue in the PF's space
3148 */
3149 static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
3150 {
3151 struct ice_hw *hw = &vsi->back->hw;
3152 u32 pfq = vsi->txq_map[q_idx];
3153 u32 reg;
3154
3155 reg = rd32(hw, QINT_TQCTL(pfq));
3156
3157 /* MSI-X index 0 in the VF's space is always for the OICR, which means
3158 * this is most likely a poll mode VF driver, so don't enable an
3159 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
3160 */
3161 if (!(reg & QINT_TQCTL_MSIX_INDX_M))
3162 return;
3163
3164 wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
3165 }
3166
3167 /**
3168 * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
3169 * @vsi: VSI of the VF to configure
3170 * @q_idx: VF queue index used to determine the queue in the PF's space
3171 */
3172 static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
3173 {
3174 struct ice_hw *hw = &vsi->back->hw;
3175 u32 pfq = vsi->rxq_map[q_idx];
3176 u32 reg;
3177
3178 reg = rd32(hw, QINT_RQCTL(pfq));
3179
3180 /* MSI-X index 0 in the VF's space is always for the OICR, which means
3181 * this is most likely a poll mode VF driver, so don't enable an
3182 * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
3183 */
3184 if (!(reg & QINT_RQCTL_MSIX_INDX_M))
3185 return;
3186
3187 wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
3188 }
3189
3190 /**
3191 * ice_vc_ena_qs_msg
3192 * @vf: pointer to the VF info
3193 * @msg: pointer to the msg buffer
3194 *
3195 * called from the VF to enable all or specific queue(s)
3196 */
3197 static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
3198 {
3199 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3200 struct virtchnl_queue_select *vqs =
3201 (struct virtchnl_queue_select *)msg;
3202 struct ice_vsi *vsi;
3203 unsigned long q_map;
3204 u16 vf_q_id;
3205
3206 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3207 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3208 goto error_param;
3209 }
3210
3211 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
3212 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3213 goto error_param;
3214 }
3215
3216 if (!ice_vc_validate_vqs_bitmaps(vqs)) {
3217 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3218 goto error_param;
3219 }
3220
3221 vsi = ice_get_vf_vsi(vf);
3222 if (!vsi) {
3223 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3224 goto error_param;
3225 }
3226
3227 /* Enable only Rx rings, Tx rings were enabled by the FW when the
3228 * Tx queue group list was configured and the context bits were
3229 * programmed using ice_vsi_cfg_txqs
3230 */
3231 q_map = vqs->rx_queues;
3232 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
3233 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
3234 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3235 goto error_param;
3236 }
3237
3238 /* Skip queue if enabled */
3239 if (test_bit(vf_q_id, vf->rxq_ena))
3240 continue;
3241
3242 if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
3243 dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
3244 vf_q_id, vsi->vsi_num);
3245 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3246 goto error_param;
3247 }
3248
3249 ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
3250 set_bit(vf_q_id, vf->rxq_ena);
3251 }
3252
3253 q_map = vqs->tx_queues;
3254 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
3255 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
3256 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3257 goto error_param;
3258 }
3259
3260 /* Skip queue if enabled */
3261 if (test_bit(vf_q_id, vf->txq_ena))
3262 continue;
3263
3264 ice_vf_ena_txq_interrupt(vsi, vf_q_id);
3265 set_bit(vf_q_id, vf->txq_ena);
3266 }
3267
3268 /* Set flag to indicate that queues are enabled */
3269 if (v_ret == VIRTCHNL_STATUS_SUCCESS)
3270 set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
3271
3272 error_param:
3273 /* send the response to the VF */
3274 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
3275 NULL, 0);
3276 }
3277
3278 /**
3279 * ice_vc_dis_qs_msg
3280 * @vf: pointer to the VF info
3281 * @msg: pointer to the msg buffer
3282 *
3283 * called from the VF to disable all or specific
3284 * queue(s)
3285 */
3286 static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
3287 {
3288 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3289 struct virtchnl_queue_select *vqs =
3290 (struct virtchnl_queue_select *)msg;
3291 struct ice_vsi *vsi;
3292 unsigned long q_map;
3293 u16 vf_q_id;
3294
3295 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
3296 !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
3297 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3298 goto error_param;
3299 }
3300
3301 if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
3302 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3303 goto error_param;
3304 }
3305
3306 if (!ice_vc_validate_vqs_bitmaps(vqs)) {
3307 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3308 goto error_param;
3309 }
3310
3311 vsi = ice_get_vf_vsi(vf);
3312 if (!vsi) {
3313 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3314 goto error_param;
3315 }
3316
3317 if (vqs->tx_queues) {
3318 q_map = vqs->tx_queues;
3319
3320 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
3321 struct ice_ring *ring = vsi->tx_rings[vf_q_id];
3322 struct ice_txq_meta txq_meta = { 0 };
3323
3324 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
3325 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3326 goto error_param;
3327 }
3328
3329 /* Skip queue if not enabled */
3330 if (!test_bit(vf_q_id, vf->txq_ena))
3331 continue;
3332
3333 ice_fill_txq_meta(vsi, ring, &txq_meta);
3334
3335 if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
3336 ring, &txq_meta)) {
3337 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
3338 vf_q_id, vsi->vsi_num);
3339 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3340 goto error_param;
3341 }
3342
3343 /* Clear enabled queues flag */
3344 clear_bit(vf_q_id, vf->txq_ena);
3345 }
3346 }
3347
3348 q_map = vqs->rx_queues;
3349 /* speed up Rx queue disable by batching them if possible */
3350 if (q_map &&
3351 bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
3352 if (ice_vsi_stop_all_rx_rings(vsi)) {
3353 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
3354 vsi->vsi_num);
3355 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3356 goto error_param;
3357 }
3358
3359 bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
3360 } else if (q_map) {
3361 for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
3362 if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
3363 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3364 goto error_param;
3365 }
3366
3367 /* Skip queue if not enabled */
3368 if (!test_bit(vf_q_id, vf->rxq_ena))
3369 continue;
3370
3371 if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
3372 true)) {
3373 dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
3374 vf_q_id, vsi->vsi_num);
3375 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3376 goto error_param;
3377 }
3378
3379 /* Clear enabled queues flag */
3380 clear_bit(vf_q_id, vf->rxq_ena);
3381 }
3382 }
3383
3384 /* Clear enabled queues flag */
3385 if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
3386 clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
3387
3388 error_param:
3389 /* send the response to the VF */
3390 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
3391 NULL, 0);
3392 }
3393
3394 /**
3395 * ice_cfg_interrupt
3396 * @vf: pointer to the VF info
3397 * @vsi: the VSI being configured
3398 * @vector_id: vector ID
3399 * @map: vector map for mapping vectors to queues
3400 * @q_vector: structure for interrupt vector
3401 * configure the IRQ to queue map
3402 */
3403 static int
3404 ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
3405 struct virtchnl_vector_map *map,
3406 struct ice_q_vector *q_vector)
3407 {
3408 u16 vsi_q_id, vsi_q_id_idx;
3409 unsigned long qmap;
3410
3411 q_vector->num_ring_rx = 0;
3412 q_vector->num_ring_tx = 0;
3413
3414 qmap = map->rxq_map;
3415 for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
3416 vsi_q_id = vsi_q_id_idx;
3417
3418 if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
3419 return VIRTCHNL_STATUS_ERR_PARAM;
3420
3421 q_vector->num_ring_rx++;
3422 q_vector->rx.itr_idx = map->rxitr_idx;
3423 vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
3424 ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
3425 q_vector->rx.itr_idx);
3426 }
3427
3428 qmap = map->txq_map;
3429 for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
3430 vsi_q_id = vsi_q_id_idx;
3431
3432 if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id))
3433 return VIRTCHNL_STATUS_ERR_PARAM;
3434
3435 q_vector->num_ring_tx++;
3436 q_vector->tx.itr_idx = map->txitr_idx;
3437 vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
3438 ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
3439 q_vector->tx.itr_idx);
3440 }
3441
3442 return VIRTCHNL_STATUS_SUCCESS;
3443 }
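
/* Editorial sketch: the bitmap-walk pattern ice_cfg_interrupt() applies to
 * both the Rx and Tx queue maps above. for_each_set_bit() visits only the
 * bits set in 'qmap', so queues the VF never mapped are skipped entirely.
 * The example_* name is hypothetical and the block is compiled out.
 */
#if 0
static void example_walk_qmap(unsigned long qmap)
{
	unsigned int q;

	for_each_set_bit(q, &qmap, ICE_MAX_RSS_QS_PER_VF)
		pr_info("VF queue %u is mapped to this vector\n", q);
}
#endif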
3444
3445 /**
3446 * ice_vc_cfg_irq_map_msg
3447 * @vf: pointer to the VF info
3448 * @msg: pointer to the msg buffer
3449 *
3450 * called from the VF to configure the IRQ to queue map
3451 */
3452 static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
3453 {
3454 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3455 u16 num_q_vectors_mapped, vsi_id, vector_id;
3456 struct virtchnl_irq_map_info *irqmap_info;
3457 struct virtchnl_vector_map *map;
3458 struct ice_pf *pf = vf->pf;
3459 struct ice_vsi *vsi;
3460 int i;
3461
3462 irqmap_info = (struct virtchnl_irq_map_info *)msg;
3463 num_q_vectors_mapped = irqmap_info->num_vectors;
3464
3465 	/* Check to make sure the number of VF vectors mapped is not greater
3466 	 * than the number of VF vectors originally allocated, and that
3467 	 * there is actually at least a single VF queue vector mapped
3468 	 */
3469 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
3470 pf->num_msix_per_vf < num_q_vectors_mapped ||
3471 !num_q_vectors_mapped) {
3472 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3473 goto error_param;
3474 }
3475
3476 vsi = ice_get_vf_vsi(vf);
3477 if (!vsi) {
3478 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3479 goto error_param;
3480 }
3481
3482 for (i = 0; i < num_q_vectors_mapped; i++) {
3483 struct ice_q_vector *q_vector;
3484
3485 map = &irqmap_info->vecmap[i];
3486
3487 vector_id = map->vector_id;
3488 vsi_id = map->vsi_id;
3489 /* vector_id is always 0-based for each VF, and can never be
3490 * larger than or equal to the max allowed interrupts per VF
3491 */
3492 if (!(vector_id < pf->num_msix_per_vf) ||
3493 !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
3494 (!vector_id && (map->rxq_map || map->txq_map))) {
3495 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3496 goto error_param;
3497 }
3498
3499 /* No need to map VF miscellaneous or rogue vector */
3500 if (!vector_id)
3501 continue;
3502
3503 		/* Subtract the non-queue vector from the vector_id passed by
3504 		 * the VF to get the actual VSI queue vector array index
3505 		 */
3506 q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
3507 if (!q_vector) {
3508 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3509 goto error_param;
3510 }
3511
3512 		/* look out for an invalid queue index */
3513 v_ret = (enum virtchnl_status_code)
3514 ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
3515 if (v_ret)
3516 goto error_param;
3517 }
3518
3519 error_param:
3520 /* send the response to the VF */
3521 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
3522 NULL, 0);
3523 }
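
/* Editorial sketch: the vector-index translation performed above. Vector 0
 * is the VF's mailbox/miscellaneous vector and is never mapped to queues,
 * so data vectors index vsi->q_vectors[] starting at
 * vector_id - ICE_NONQ_VECS_VF. The example_* name is hypothetical and the
 * block is compiled out.
 */
#if 0
static u16 example_vf_vector_to_qv_idx(u16 vector_id)
{
	/* callers must already have rejected vector_id == 0 */
	return vector_id - ICE_NONQ_VECS_VF;
}
#endif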
3524
3525 /**
3526 * ice_vc_cfg_qs_msg
3527 * @vf: pointer to the VF info
3528 * @msg: pointer to the msg buffer
3529 *
3530 * called from the VF to configure the Rx/Tx queues
3531 */
3532 static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
3533 {
3534 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3535 struct virtchnl_vsi_queue_config_info *qci =
3536 (struct virtchnl_vsi_queue_config_info *)msg;
3537 struct virtchnl_queue_pair_info *qpi;
3538 u16 num_rxq = 0, num_txq = 0;
3539 struct ice_pf *pf = vf->pf;
3540 struct ice_vsi *vsi;
3541 int i;
3542
3543 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3544 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3545 goto error_param;
3546 }
3547
3548 if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
3549 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3550 goto error_param;
3551 }
3552
3553 vsi = ice_get_vf_vsi(vf);
3554 if (!vsi) {
3555 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3556 goto error_param;
3557 }
3558
3559 if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
3560 qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
3561 dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
3562 vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
3563 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3564 goto error_param;
3565 }
3566
3567 for (i = 0; i < qci->num_queue_pairs; i++) {
3568 qpi = &qci->qpair[i];
3569 if (qpi->txq.vsi_id != qci->vsi_id ||
3570 qpi->rxq.vsi_id != qci->vsi_id ||
3571 qpi->rxq.queue_id != qpi->txq.queue_id ||
3572 qpi->txq.headwb_enabled ||
3573 !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
3574 !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
3575 !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
3576 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3577 goto error_param;
3578 }
3579 /* copy Tx queue info from VF into VSI */
3580 if (qpi->txq.ring_len > 0) {
3581 num_txq++;
3582 vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
3583 vsi->tx_rings[i]->count = qpi->txq.ring_len;
3584 }
3585
3586 /* copy Rx queue info from VF into VSI */
3587 if (qpi->rxq.ring_len > 0) {
3588 u16 max_frame_size = ice_vc_get_max_frame_size(vf);
3589
3590 num_rxq++;
3591 vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
3592 vsi->rx_rings[i]->count = qpi->rxq.ring_len;
3593
3594 if (qpi->rxq.databuffer_size != 0 &&
3595 (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
3596 qpi->rxq.databuffer_size < 1024)) {
3597 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3598 goto error_param;
3599 }
3600 vsi->rx_buf_len = qpi->rxq.databuffer_size;
3601 vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
3602 if (qpi->rxq.max_pkt_size > max_frame_size ||
3603 qpi->rxq.max_pkt_size < 64) {
3604 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3605 goto error_param;
3606 }
3607 }
3608
3609 vsi->max_frame = qpi->rxq.max_pkt_size;
3610 /* add space for the port VLAN since the VF driver is not
3611 * expected to account for it in the MTU calculation
3612 */
3613 if (vf->port_vlan_info)
3614 vsi->max_frame += VLAN_HLEN;
3615 }
3616
3617 	/* The VF can request to configure fewer than the allocated or default
3618 	 * number of queues, so update the VSI with the new count
3619 	 */
3620 vsi->num_txq = num_txq;
3621 vsi->num_rxq = num_rxq;
3622 /* All queues of VF VSI are in TC 0 */
3623 vsi->tc_cfg.tc_info[0].qcount_tx = num_txq;
3624 vsi->tc_cfg.tc_info[0].qcount_rx = num_rxq;
3625
3626 if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi))
3627 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
3628
3629 error_param:
3630 /* send the response to the VF */
3631 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
3632 NULL, 0);
3633 }
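
/* Editorial sketch: the Rx ring parameter bounds enforced above, factored
 * out for illustration. 'max_frame' stands in for the value returned by
 * ice_vc_get_max_frame_size(). The example_* name is hypothetical and the
 * block is compiled out.
 */
#if 0
static bool example_rxq_params_ok(u32 databuf_size, u32 max_pkt_size,
				  u16 max_frame)
{
	/* a zero buffer size is allowed; otherwise 1024..(16K - 128) bytes */
	if (databuf_size &&
	    (databuf_size < 1024 || databuf_size > (16 * 1024) - 128))
		return false;

	/* frames must be at least 64 bytes and fit the negotiated maximum */
	return max_pkt_size >= 64 && max_pkt_size <= max_frame;
}
#endif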
3634
3635 /**
3636 * ice_is_vf_trusted
3637 * @vf: pointer to the VF info
3638 */
3639 static bool ice_is_vf_trusted(struct ice_vf *vf)
3640 {
3641 return test_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
3642 }
3643
3644 /**
3645 * ice_can_vf_change_mac
3646 * @vf: pointer to the VF info
3647 *
3648 * Return true if the VF is allowed to change its MAC filters, false otherwise
3649 */
3650 static bool ice_can_vf_change_mac(struct ice_vf *vf)
3651 {
3652 /* If the VF MAC address has been set administratively (via the
3653 * ndo_set_vf_mac command), then deny permission to the VF to
3654 * add/delete unicast MAC addresses, unless the VF is trusted
3655 */
3656 if (vf->pf_set_mac && !ice_is_vf_trusted(vf))
3657 return false;
3658
3659 return true;
3660 }
3661
3662 /**
3663 * ice_vc_add_mac_addr - attempt to add the MAC address passed in
3664 * @vf: pointer to the VF info
3665 * @vsi: pointer to the VF's VSI
3666 * @mac_addr: MAC address to add
3667 */
3668 static int
3669 ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
3670 {
3671 struct device *dev = ice_pf_to_dev(vf->pf);
3672 enum ice_status status;
3673
3674 /* default unicast MAC already added */
3675 if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3676 return 0;
3677
3678 if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
3679 dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
3680 return -EPERM;
3681 }
3682
3683 status = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
3684 if (status == ICE_ERR_ALREADY_EXISTS) {
3685 dev_err(dev, "MAC %pM already exists for VF %d\n", mac_addr,
3686 vf->vf_id);
3687 return -EEXIST;
3688 } else if (status) {
3689 		dev_err(dev, "Failed to add MAC %pM for VF %d, error %s\n",
3690 mac_addr, vf->vf_id, ice_stat_str(status));
3691 return -EIO;
3692 }
3693
3694 /* Set the default LAN address to the latest unicast MAC address added
3695 * by the VF. The default LAN address is reported by the PF via
3696 * ndo_get_vf_config.
3697 */
3698 if (is_unicast_ether_addr(mac_addr))
3699 ether_addr_copy(vf->dflt_lan_addr.addr, mac_addr);
3700
3701 vf->num_mac++;
3702
3703 return 0;
3704 }
3705
3706 /**
3707 * ice_vc_del_mac_addr - attempt to delete the MAC address passed in
3708 * @vf: pointer to the VF info
3709 * @vsi: pointer to the VF's VSI
3710 * @mac_addr: MAC address to delete
3711 */
3712 static int
3713 ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
3714 {
3715 struct device *dev = ice_pf_to_dev(vf->pf);
3716 enum ice_status status;
3717
3718 if (!ice_can_vf_change_mac(vf) &&
3719 ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3720 return 0;
3721
3722 status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
3723 if (status == ICE_ERR_DOES_NOT_EXIST) {
3724 dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
3725 vf->vf_id);
3726 return -ENOENT;
3727 } else if (status) {
3728 dev_err(dev, "Failed to delete MAC %pM for VF %d, error %s\n",
3729 mac_addr, vf->vf_id, ice_stat_str(status));
3730 return -EIO;
3731 }
3732
3733 if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
3734 eth_zero_addr(vf->dflt_lan_addr.addr);
3735
3736 vf->num_mac--;
3737
3738 return 0;
3739 }
3740
3741 /**
3742 * ice_vc_handle_mac_addr_msg
3743 * @vf: pointer to the VF info
3744 * @msg: pointer to the msg buffer
3745 * @set: true if MAC filters are being set, false otherwise
3746 *
3747  * add or remove guest MAC address filters
3748 */
3749 static int
3750 ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
3751 {
3752 int (*ice_vc_cfg_mac)
3753 (struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr);
3754 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3755 struct virtchnl_ether_addr_list *al =
3756 (struct virtchnl_ether_addr_list *)msg;
3757 struct ice_pf *pf = vf->pf;
3758 enum virtchnl_ops vc_op;
3759 struct ice_vsi *vsi;
3760 int i;
3761
3762 if (set) {
3763 vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
3764 ice_vc_cfg_mac = ice_vc_add_mac_addr;
3765 } else {
3766 vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
3767 ice_vc_cfg_mac = ice_vc_del_mac_addr;
3768 }
3769
3770 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
3771 !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
3772 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3773 goto handle_mac_exit;
3774 }
3775
3776 /* If this VF is not privileged, then we can't add more than a
3777 * limited number of addresses. Check to make sure that the
3778 * additions do not push us over the limit.
3779 */
3780 if (set && !ice_is_vf_trusted(vf) &&
3781 (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
3782 dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
3783 vf->vf_id);
3784 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3785 goto handle_mac_exit;
3786 }
3787
3788 vsi = ice_get_vf_vsi(vf);
3789 if (!vsi) {
3790 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3791 goto handle_mac_exit;
3792 }
3793
3794 for (i = 0; i < al->num_elements; i++) {
3795 u8 *mac_addr = al->list[i].addr;
3796 int result;
3797
3798 if (is_broadcast_ether_addr(mac_addr) ||
3799 is_zero_ether_addr(mac_addr))
3800 continue;
3801
3802 result = ice_vc_cfg_mac(vf, vsi, mac_addr);
3803 if (result == -EEXIST || result == -ENOENT) {
3804 continue;
3805 } else if (result) {
3806 v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
3807 goto handle_mac_exit;
3808 }
3809 }
3810
3811 handle_mac_exit:
3812 /* send the response to the VF */
3813 return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
3814 }
3815
3816 /**
3817 * ice_vc_add_mac_addr_msg
3818 * @vf: pointer to the VF info
3819 * @msg: pointer to the msg buffer
3820 *
3821 * add guest MAC address filter
3822 */
3823 static int ice_vc_add_mac_addr_msg(struct ice_vf *vf, u8 *msg)
3824 {
3825 return ice_vc_handle_mac_addr_msg(vf, msg, true);
3826 }
3827
3828 /**
3829 * ice_vc_del_mac_addr_msg
3830 * @vf: pointer to the VF info
3831 * @msg: pointer to the msg buffer
3832 *
3833 * remove guest MAC address filter
3834 */
3835 static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
3836 {
3837 return ice_vc_handle_mac_addr_msg(vf, msg, false);
3838 }
3839
3840 /**
3841 * ice_vc_request_qs_msg
3842 * @vf: pointer to the VF info
3843 * @msg: pointer to the msg buffer
3844 *
3845 * VFs get a default number of queues but can use this message to request a
3846 * different number. If the request is successful, PF will reset the VF and
3847  * return 0. If unsuccessful, PF will send a virtchnl response informing the
3848  * VF of the number of available queue pairs.
3849 */
3850 static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
3851 {
3852 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3853 struct virtchnl_vf_res_request *vfres =
3854 (struct virtchnl_vf_res_request *)msg;
3855 u16 req_queues = vfres->num_queue_pairs;
3856 struct ice_pf *pf = vf->pf;
3857 u16 max_allowed_vf_queues;
3858 u16 tx_rx_queue_left;
3859 struct device *dev;
3860 u16 cur_queues;
3861
3862 dev = ice_pf_to_dev(pf);
3863 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3864 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3865 goto error_param;
3866 }
3867
3868 cur_queues = vf->num_vf_qs;
3869 tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
3870 ice_get_avail_rxq_count(pf));
3871 max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
3872 if (!req_queues) {
3873 dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
3874 vf->vf_id);
3875 } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
3876 dev_err(dev, "VF %d tried to request more than %d queues.\n",
3877 vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
3878 vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
3879 } else if (req_queues > cur_queues &&
3880 req_queues - cur_queues > tx_rx_queue_left) {
3881 dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
3882 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
3883 vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
3884 ICE_MAX_RSS_QS_PER_VF);
3885 } else {
3886 		/* request is successful, so reset the VF */
3887 vf->num_req_qs = req_queues;
3888 ice_vc_reset_vf(vf);
3889 dev_info(dev, "VF %d granted request of %u queues.\n",
3890 vf->vf_id, req_queues);
3891 return 0;
3892 }
3893
3894 error_param:
3895 /* send the response to the VF */
3896 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
3897 v_ret, (u8 *)vfres, sizeof(*vfres));
3898 }
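
/* Editorial sketch: the arithmetic behind the checks above. A VF can grow
 * only by the number of queue pairs the PF still has free, and can never
 * exceed ICE_MAX_RSS_QS_PER_VF. The example_* name is hypothetical and the
 * block is compiled out.
 */
#if 0
static u16 example_max_grantable_queues(u16 cur_queues, u16 tx_rx_queue_left)
{
	return min_t(u16, cur_queues + tx_rx_queue_left,
		     ICE_MAX_RSS_QS_PER_VF);
}
#endif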
3899
3900 /**
3901 * ice_set_vf_port_vlan
3902 * @netdev: network interface device structure
3903 * @vf_id: VF identifier
3904 * @vlan_id: VLAN ID being set
3905 * @qos: priority setting
3906 * @vlan_proto: VLAN protocol
3907 *
3908 * program VF Port VLAN ID and/or QoS
3909 */
3910 int
3911 ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
3912 __be16 vlan_proto)
3913 {
3914 struct ice_pf *pf = ice_netdev_to_pf(netdev);
3915 struct device *dev;
3916 struct ice_vf *vf;
3917 u16 vlanprio;
3918 int ret;
3919
3920 dev = ice_pf_to_dev(pf);
3921 if (ice_validate_vf_id(pf, vf_id))
3922 return -EINVAL;
3923
3924 if (vlan_id >= VLAN_N_VID || qos > 7) {
3925 dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
3926 vf_id, vlan_id, qos);
3927 return -EINVAL;
3928 }
3929
3930 if (vlan_proto != htons(ETH_P_8021Q)) {
3931 dev_err(dev, "VF VLAN protocol is not supported\n");
3932 return -EPROTONOSUPPORT;
3933 }
3934
3935 vf = &pf->vf[vf_id];
3936 ret = ice_check_vf_ready_for_cfg(vf);
3937 if (ret)
3938 return ret;
3939
3940 vlanprio = vlan_id | (qos << VLAN_PRIO_SHIFT);
3941
3942 if (vf->port_vlan_info == vlanprio) {
3943 /* duplicate request, so just return success */
3944 dev_dbg(dev, "Duplicate pvid %d request\n", vlanprio);
3945 return 0;
3946 }
3947
3948 vf->port_vlan_info = vlanprio;
3949
3950 if (vf->port_vlan_info)
3951 dev_info(dev, "Setting VLAN %d, QoS 0x%x on VF %d\n",
3952 vlan_id, qos, vf_id);
3953 else
3954 dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);
3955
3956 ice_vc_reset_vf(vf);
3957
3958 return 0;
3959 }
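
/* Editorial sketch: the 802.1Q TCI-style encoding used for
 * vf->port_vlan_info above and decoded again in ice_get_vf_cfg(): VLAN ID
 * in bits 0-11, QoS/priority in bits 13-15 (VLAN_PRIO_SHIFT and the masks
 * come from <linux/if_vlan.h>). The example_* names are hypothetical and
 * the block is compiled out.
 */
#if 0
static u16 example_encode_pvid(u16 vlan_id, u8 qos)
{
	return vlan_id | (qos << VLAN_PRIO_SHIFT);
}

static void example_decode_pvid(u16 pvid, u16 *vlan_id, u8 *qos)
{
	*vlan_id = pvid & VLAN_VID_MASK;
	*qos = (pvid & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
}
#endif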
3960
3961 /**
3962 * ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
3963 * @caps: VF driver negotiated capabilities
3964 *
3965 * Return true if VIRTCHNL_VF_OFFLOAD_VLAN capability is set, else return false
3966 */
3967 static bool ice_vf_vlan_offload_ena(u32 caps)
3968 {
3969 return !!(caps & VIRTCHNL_VF_OFFLOAD_VLAN);
3970 }
3971
3972 /**
3973 * ice_vc_process_vlan_msg
3974 * @vf: pointer to the VF info
3975 * @msg: pointer to the msg buffer
3976 * @add_v: Add VLAN if true, otherwise delete VLAN
3977 *
3978 * Process virtchnl op to add or remove programmed guest VLAN ID
3979 */
3980 static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
3981 {
3982 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
3983 struct virtchnl_vlan_filter_list *vfl =
3984 (struct virtchnl_vlan_filter_list *)msg;
3985 struct ice_pf *pf = vf->pf;
3986 bool vlan_promisc = false;
3987 struct ice_vsi *vsi;
3988 struct device *dev;
3989 struct ice_hw *hw;
3990 int status = 0;
3991 u8 promisc_m;
3992 int i;
3993
3994 dev = ice_pf_to_dev(pf);
3995 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
3996 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
3997 goto error_param;
3998 }
3999
4000 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
4001 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4002 goto error_param;
4003 }
4004
4005 if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
4006 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4007 goto error_param;
4008 }
4009
4010 for (i = 0; i < vfl->num_elements; i++) {
4011 if (vfl->vlan_id[i] >= VLAN_N_VID) {
4012 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4013 dev_err(dev, "invalid VF VLAN id %d\n",
4014 vfl->vlan_id[i]);
4015 goto error_param;
4016 }
4017 }
4018
4019 hw = &pf->hw;
4020 vsi = ice_get_vf_vsi(vf);
4021 if (!vsi) {
4022 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4023 goto error_param;
4024 }
4025
4026 if (add_v && !ice_is_vf_trusted(vf) &&
4027 vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
4028 dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
4029 vf->vf_id);
4030 		/* There is no need to let the VF know that it is not trusted,
4031 		 * so we can just return a success message here
4032 		 */
4033 goto error_param;
4034 }
4035
4036 if (vsi->info.pvid) {
4037 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4038 goto error_param;
4039 }
4040
4041 if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
4042 test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
4043 test_bit(ICE_FLAG_VF_TRUE_PROMISC_ENA, pf->flags))
4044 vlan_promisc = true;
4045
4046 if (add_v) {
4047 for (i = 0; i < vfl->num_elements; i++) {
4048 u16 vid = vfl->vlan_id[i];
4049
4050 if (!ice_is_vf_trusted(vf) &&
4051 vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
4052 dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
4053 vf->vf_id);
4054 			/* There is no need to let the VF know that it
4055 			 * is not trusted, so we can just return a
4056 			 * success message here as well.
4057 			 */
4058 goto error_param;
4059 }
4060
4061 /* we add VLAN 0 by default for each VF so we can enable
4062 * Tx VLAN anti-spoof without triggering MDD events so
4063 * we don't need to add it again here
4064 */
4065 if (!vid)
4066 continue;
4067
4068 status = ice_vsi_add_vlan(vsi, vid, ICE_FWD_TO_VSI);
4069 if (status) {
4070 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4071 goto error_param;
4072 }
4073
4074 /* Enable VLAN pruning when non-zero VLAN is added */
4075 if (!vlan_promisc && vid &&
4076 !ice_vsi_is_vlan_pruning_ena(vsi)) {
4077 status = ice_cfg_vlan_pruning(vsi, true, false);
4078 if (status) {
4079 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4080 dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
4081 vid, status);
4082 goto error_param;
4083 }
4084 } else if (vlan_promisc) {
4085 /* Enable Ucast/Mcast VLAN promiscuous mode */
4086 promisc_m = ICE_PROMISC_VLAN_TX |
4087 ICE_PROMISC_VLAN_RX;
4088
4089 status = ice_set_vsi_promisc(hw, vsi->idx,
4090 promisc_m, vid);
4091 if (status) {
4092 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4093 dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
4094 vid, status);
4095 }
4096 }
4097 }
4098 } else {
4099 		/* For an untrusted VF, the number of VLAN elements passed to
4100 		 * the PF for removal might be greater than the number of VLAN
4101 		 * filters programmed for that VF, so use the actual number of
4102 		 * VLANs added earlier with the add VLAN opcode. This avoids
4103 		 * removing a VLAN that doesn't exist, which would result in
4104 		 * sending an erroneous failure message back to the VF.
4105 		 */
4106 int num_vf_vlan;
4107
4108 num_vf_vlan = vsi->num_vlan;
4109 for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
4110 u16 vid = vfl->vlan_id[i];
4111
4112 /* we add VLAN 0 by default for each VF so we can enable
4113 * Tx VLAN anti-spoof without triggering MDD events so
4114 * we don't want a VIRTCHNL request to remove it
4115 */
4116 if (!vid)
4117 continue;
4118
4119 /* Make sure ice_vsi_kill_vlan is successful before
4120 * updating VLAN information
4121 */
4122 status = ice_vsi_kill_vlan(vsi, vid);
4123 if (status) {
4124 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4125 goto error_param;
4126 }
4127
4128 /* Disable VLAN pruning when only VLAN 0 is left */
4129 if (vsi->num_vlan == 1 &&
4130 ice_vsi_is_vlan_pruning_ena(vsi))
4131 ice_cfg_vlan_pruning(vsi, false, false);
4132
4133 /* Disable Unicast/Multicast VLAN promiscuous mode */
4134 if (vlan_promisc) {
4135 promisc_m = ICE_PROMISC_VLAN_TX |
4136 ICE_PROMISC_VLAN_RX;
4137
4138 ice_clear_vsi_promisc(hw, vsi->idx,
4139 promisc_m, vid);
4140 }
4141 }
4142 }
4143
4144 error_param:
4145 /* send the response to the VF */
4146 if (add_v)
4147 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
4148 NULL, 0);
4149 else
4150 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
4151 NULL, 0);
4152 }
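
/* Editorial sketch: when the add/remove paths above toggle VLAN pruning.
 * Pruning is enabled once any non-zero VLAN exists (outside promiscuous
 * mode) and disabled again when only the always-present default VLAN 0
 * remains. The example_* name is hypothetical and the block is compiled
 * out.
 */
#if 0
static bool example_want_vlan_pruning(u16 num_vlan, bool vlan_promisc)
{
	/* num_vlan includes VLAN 0, which every VF VSI carries by default */
	return !vlan_promisc && num_vlan > 1;
}
#endif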
4153
4154 /**
4155 * ice_vc_add_vlan_msg
4156 * @vf: pointer to the VF info
4157 * @msg: pointer to the msg buffer
4158 *
4159 * Add and program guest VLAN ID
4160 */
4161 static int ice_vc_add_vlan_msg(struct ice_vf *vf, u8 *msg)
4162 {
4163 return ice_vc_process_vlan_msg(vf, msg, true);
4164 }
4165
4166 /**
4167 * ice_vc_remove_vlan_msg
4168 * @vf: pointer to the VF info
4169 * @msg: pointer to the msg buffer
4170 *
4171 * remove programmed guest VLAN ID
4172 */
4173 static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
4174 {
4175 return ice_vc_process_vlan_msg(vf, msg, false);
4176 }
4177
4178 /**
4179 * ice_vc_ena_vlan_stripping
4180 * @vf: pointer to the VF info
4181 *
4182 * Enable VLAN header stripping for a given VF
4183 */
4184 static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
4185 {
4186 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
4187 struct ice_vsi *vsi;
4188
4189 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
4190 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4191 goto error_param;
4192 }
4193
4194 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
4195 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4196 goto error_param;
4197 }
4198
4199 vsi = ice_get_vf_vsi(vf);
4200 if (ice_vsi_manage_vlan_stripping(vsi, true))
4201 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4202
4203 error_param:
4204 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
4205 v_ret, NULL, 0);
4206 }
4207
4208 /**
4209 * ice_vc_dis_vlan_stripping
4210 * @vf: pointer to the VF info
4211 *
4212 * Disable VLAN header stripping for a given VF
4213 */
4214 static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
4215 {
4216 enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
4217 struct ice_vsi *vsi;
4218
4219 if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
4220 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4221 goto error_param;
4222 }
4223
4224 if (!ice_vf_vlan_offload_ena(vf->driver_caps)) {
4225 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4226 goto error_param;
4227 }
4228
4229 vsi = ice_get_vf_vsi(vf);
4230 if (!vsi) {
4231 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4232 goto error_param;
4233 }
4234
4235 if (ice_vsi_manage_vlan_stripping(vsi, false))
4236 v_ret = VIRTCHNL_STATUS_ERR_PARAM;
4237
4238 error_param:
4239 return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
4240 v_ret, NULL, 0);
4241 }
4242
4243 /**
4244 * ice_vf_init_vlan_stripping - enable/disable VLAN stripping on initialization
4245 * @vf: VF to enable/disable VLAN stripping for on initialization
4246 *
4247  * If the VIRTCHNL_VF_OFFLOAD_VLAN flag is set, enable VLAN stripping; if
4248  * the flag is cleared, disable stripping. For example, the flag
4249 * will be cleared when port VLANs are configured by the administrator before
4250 * passing the VF to the guest or if the AVF driver doesn't support VLAN
4251 * offloads.
4252 */
4253 static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
4254 {
4255 struct ice_vsi *vsi = ice_get_vf_vsi(vf);
4256
4257 if (!vsi)
4258 return -EINVAL;
4259
4260 /* don't modify stripping if port VLAN is configured */
4261 if (vsi->info.pvid)
4262 return 0;
4263
4264 if (ice_vf_vlan_offload_ena(vf->driver_caps))
4265 return ice_vsi_manage_vlan_stripping(vsi, true);
4266 else
4267 return ice_vsi_manage_vlan_stripping(vsi, false);
4268 }
4269
4270 /**
4271 * ice_vc_process_vf_msg - Process request from VF
4272 * @pf: pointer to the PF structure
4273 * @event: pointer to the AQ event
4274 *
4275 * called from the common asq/arq handler to
4276 * process request from VF
4277 */
4278 void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
4279 {
4280 u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
4281 s16 vf_id = le16_to_cpu(event->desc.retval);
4282 u16 msglen = event->msg_len;
4283 u8 *msg = event->msg_buf;
4284 struct ice_vf *vf = NULL;
4285 struct device *dev;
4286 int err = 0;
4287
4288 dev = ice_pf_to_dev(pf);
4289 if (ice_validate_vf_id(pf, vf_id)) {
4290 err = -EINVAL;
4291 goto error_handler;
4292 }
4293
4294 vf = &pf->vf[vf_id];
4295
4296 /* Check if VF is disabled. */
4297 if (test_bit(ICE_VF_STATE_DIS, vf->vf_states)) {
4298 err = -EPERM;
4299 goto error_handler;
4300 }
4301
4302 /* Perform basic checks on the msg */
4303 err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
4304 if (err) {
4305 if (err == VIRTCHNL_STATUS_ERR_PARAM)
4306 err = -EPERM;
4307 else
4308 err = -EINVAL;
4309 }
4310
4311 if (!ice_vc_is_opcode_allowed(vf, v_opcode)) {
4312 ice_vc_send_msg_to_vf(vf, v_opcode,
4313 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL,
4314 0);
4315 return;
4316 }
4317
4318 error_handler:
4319 if (err) {
4320 ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
4321 NULL, 0);
4322 dev_err(dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
4323 vf_id, v_opcode, msglen, err);
4324 return;
4325 }
4326
4327 switch (v_opcode) {
4328 case VIRTCHNL_OP_VERSION:
4329 err = ice_vc_get_ver_msg(vf, msg);
4330 break;
4331 case VIRTCHNL_OP_GET_VF_RESOURCES:
4332 err = ice_vc_get_vf_res_msg(vf, msg);
4333 if (ice_vf_init_vlan_stripping(vf))
4334 dev_err(dev, "Failed to initialize VLAN stripping for VF %d\n",
4335 vf->vf_id);
4336 ice_vc_notify_vf_link_state(vf);
4337 break;
4338 case VIRTCHNL_OP_RESET_VF:
4339 ice_vc_reset_vf_msg(vf);
4340 break;
4341 case VIRTCHNL_OP_ADD_ETH_ADDR:
4342 err = ice_vc_add_mac_addr_msg(vf, msg);
4343 break;
4344 case VIRTCHNL_OP_DEL_ETH_ADDR:
4345 err = ice_vc_del_mac_addr_msg(vf, msg);
4346 break;
4347 case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
4348 err = ice_vc_cfg_qs_msg(vf, msg);
4349 break;
4350 case VIRTCHNL_OP_ENABLE_QUEUES:
4351 err = ice_vc_ena_qs_msg(vf, msg);
4352 ice_vc_notify_vf_link_state(vf);
4353 break;
4354 case VIRTCHNL_OP_DISABLE_QUEUES:
4355 err = ice_vc_dis_qs_msg(vf, msg);
4356 break;
4357 case VIRTCHNL_OP_REQUEST_QUEUES:
4358 err = ice_vc_request_qs_msg(vf, msg);
4359 break;
4360 case VIRTCHNL_OP_CONFIG_IRQ_MAP:
4361 err = ice_vc_cfg_irq_map_msg(vf, msg);
4362 break;
4363 case VIRTCHNL_OP_CONFIG_RSS_KEY:
4364 err = ice_vc_config_rss_key(vf, msg);
4365 break;
4366 case VIRTCHNL_OP_CONFIG_RSS_LUT:
4367 err = ice_vc_config_rss_lut(vf, msg);
4368 break;
4369 case VIRTCHNL_OP_GET_STATS:
4370 err = ice_vc_get_stats_msg(vf, msg);
4371 break;
4372 case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
4373 err = ice_vc_cfg_promiscuous_mode_msg(vf, msg);
4374 break;
4375 case VIRTCHNL_OP_ADD_VLAN:
4376 err = ice_vc_add_vlan_msg(vf, msg);
4377 break;
4378 case VIRTCHNL_OP_DEL_VLAN:
4379 err = ice_vc_remove_vlan_msg(vf, msg);
4380 break;
4381 case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
4382 err = ice_vc_ena_vlan_stripping(vf);
4383 break;
4384 case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
4385 err = ice_vc_dis_vlan_stripping(vf);
4386 break;
4387 case VIRTCHNL_OP_ADD_FDIR_FILTER:
4388 err = ice_vc_add_fdir_fltr(vf, msg);
4389 break;
4390 case VIRTCHNL_OP_DEL_FDIR_FILTER:
4391 err = ice_vc_del_fdir_fltr(vf, msg);
4392 break;
4393 case VIRTCHNL_OP_ADD_RSS_CFG:
4394 err = ice_vc_handle_rss_cfg(vf, msg, true);
4395 break;
4396 case VIRTCHNL_OP_DEL_RSS_CFG:
4397 err = ice_vc_handle_rss_cfg(vf, msg, false);
4398 break;
4399 case VIRTCHNL_OP_UNKNOWN:
4400 default:
4401 dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
4402 vf_id);
4403 err = ice_vc_send_msg_to_vf(vf, v_opcode,
4404 VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
4405 NULL, 0);
4406 break;
4407 }
4408 if (err) {
4409 		/* The helper function cares less about the error return value
4410 		 * here, as it is busy with pending work.
4411 		 */
4412 dev_info(dev, "PF failed to honor VF %d, opcode %d, error %d\n",
4413 vf_id, v_opcode, err);
4414 }
4415 }
4416
4417 /**
4418 * ice_get_vf_cfg
4419 * @netdev: network interface device structure
4420 * @vf_id: VF identifier
4421 * @ivi: VF configuration structure
4422 *
4423 * return VF configuration
4424 */
4425 int
4426 ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
4427 {
4428 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4429 struct ice_vf *vf;
4430
4431 if (ice_validate_vf_id(pf, vf_id))
4432 return -EINVAL;
4433
4434 vf = &pf->vf[vf_id];
4435
4436 if (ice_check_vf_init(pf, vf))
4437 return -EBUSY;
4438
4439 ivi->vf = vf_id;
4440 ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);
4441
4442 /* VF configuration for VLAN and applicable QoS */
4443 ivi->vlan = vf->port_vlan_info & VLAN_VID_MASK;
4444 ivi->qos = (vf->port_vlan_info & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
4445
4446 ivi->trusted = vf->trusted;
4447 ivi->spoofchk = vf->spoofchk;
4448 if (!vf->link_forced)
4449 ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
4450 else if (vf->link_up)
4451 ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
4452 else
4453 ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
4454 ivi->max_tx_rate = vf->tx_rate;
4455 ivi->min_tx_rate = 0;
4456 return 0;
4457 }
4458
4459 /**
4460 * ice_unicast_mac_exists - check if the unicast MAC exists on the PF's switch
4461 * @pf: PF used to reference the switch's rules
4462 * @umac: unicast MAC to compare against existing switch rules
4463 *
4464 * Return true on the first/any match, else return false
4465 */
4466 static bool ice_unicast_mac_exists(struct ice_pf *pf, u8 *umac)
4467 {
4468 struct ice_sw_recipe *mac_recipe_list =
4469 &pf->hw.switch_info->recp_list[ICE_SW_LKUP_MAC];
4470 struct ice_fltr_mgmt_list_entry *list_itr;
4471 struct list_head *rule_head;
4472 struct mutex *rule_lock; /* protect MAC filter list access */
4473
4474 rule_head = &mac_recipe_list->filt_rules;
4475 rule_lock = &mac_recipe_list->filt_rule_lock;
4476
4477 mutex_lock(rule_lock);
4478 list_for_each_entry(list_itr, rule_head, list_entry) {
4479 u8 *existing_mac = &list_itr->fltr_info.l_data.mac.mac_addr[0];
4480
4481 if (ether_addr_equal(existing_mac, umac)) {
4482 mutex_unlock(rule_lock);
4483 return true;
4484 }
4485 }
4486
4487 mutex_unlock(rule_lock);
4488
4489 return false;
4490 }
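
/* Editorial sketch: the lock-protected list scan ice_unicast_mac_exists()
 * performs, shown with a hypothetical entry type so the pattern stands on
 * its own. All example_* names are hypothetical and the block is compiled
 * out.
 */
#if 0
struct example_mac_node {
	struct list_head list_entry;
	u8 mac[ETH_ALEN];
};

static bool example_mac_in_list(struct list_head *head, struct mutex *lock,
				const u8 *umac)
{
	struct example_mac_node *node;
	bool found = false;

	mutex_lock(lock);
	list_for_each_entry(node, head, list_entry) {
		if (ether_addr_equal(node->mac, umac)) {
			found = true;
			break;
		}
	}
	mutex_unlock(lock);

	return found;
}
#endif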
4491
4492 /**
4493 * ice_set_vf_mac
4494 * @netdev: network interface device structure
4495 * @vf_id: VF identifier
4496 * @mac: MAC address
4497 *
4498 * program VF MAC address
4499 */
4500 int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
4501 {
4502 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4503 struct ice_vf *vf;
4504 int ret;
4505
4506 if (ice_validate_vf_id(pf, vf_id))
4507 return -EINVAL;
4508
4509 if (is_multicast_ether_addr(mac)) {
4510 netdev_err(netdev, "%pM not a valid unicast address\n", mac);
4511 return -EINVAL;
4512 }
4513
4514 vf = &pf->vf[vf_id];
4515 /* nothing left to do, unicast MAC already set */
4516 if (ether_addr_equal(vf->dflt_lan_addr.addr, mac))
4517 return 0;
4518
4519 ret = ice_check_vf_ready_for_cfg(vf);
4520 if (ret)
4521 return ret;
4522
4523 if (ice_unicast_mac_exists(pf, mac)) {
4524 netdev_err(netdev, "Unicast MAC %pM already exists on this PF. Preventing setting VF %u unicast MAC address to %pM\n",
4525 mac, vf_id, mac);
4526 return -EINVAL;
4527 }
4528
4529 /* VF is notified of its new MAC via the PF's response to the
4530 * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
4531 */
4532 ether_addr_copy(vf->dflt_lan_addr.addr, mac);
4533 if (is_zero_ether_addr(mac)) {
4534 /* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
4535 vf->pf_set_mac = false;
4536 netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
4537 vf->vf_id);
4538 } else {
4539 /* PF will add MAC rule for the VF */
4540 vf->pf_set_mac = true;
4541 netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
4542 mac, vf_id);
4543 }
4544
4545 ice_vc_reset_vf(vf);
4546 return 0;
4547 }
4548
4549 /**
4550 * ice_set_vf_trust
4551 * @netdev: network interface device structure
4552 * @vf_id: VF identifier
4553 * @trusted: Boolean value to enable/disable trusted VF
4554 *
4555 * Enable or disable a given VF as trusted
4556 */
4557 int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
4558 {
4559 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4560 struct ice_vf *vf;
4561 int ret;
4562
4563 if (ice_validate_vf_id(pf, vf_id))
4564 return -EINVAL;
4565
4566 vf = &pf->vf[vf_id];
4567 ret = ice_check_vf_ready_for_cfg(vf);
4568 if (ret)
4569 return ret;
4570
4571 /* Check if already trusted */
4572 if (trusted == vf->trusted)
4573 return 0;
4574
4575 vf->trusted = trusted;
4576 ice_vc_reset_vf(vf);
4577 dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
4578 vf_id, trusted ? "" : "un");
4579
4580 return 0;
4581 }
4582
4583 /**
4584 * ice_set_vf_link_state
4585 * @netdev: network interface device structure
4586 * @vf_id: VF identifier
4587 * @link_state: required link state
4588 *
4589 * Set VF's link state, irrespective of physical link state status
4590 */
4591 int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
4592 {
4593 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4594 struct ice_vf *vf;
4595 int ret;
4596
4597 if (ice_validate_vf_id(pf, vf_id))
4598 return -EINVAL;
4599
4600 vf = &pf->vf[vf_id];
4601 ret = ice_check_vf_ready_for_cfg(vf);
4602 if (ret)
4603 return ret;
4604
4605 switch (link_state) {
4606 case IFLA_VF_LINK_STATE_AUTO:
4607 vf->link_forced = false;
4608 break;
4609 case IFLA_VF_LINK_STATE_ENABLE:
4610 vf->link_forced = true;
4611 vf->link_up = true;
4612 break;
4613 case IFLA_VF_LINK_STATE_DISABLE:
4614 vf->link_forced = true;
4615 vf->link_up = false;
4616 break;
4617 default:
4618 return -EINVAL;
4619 }
4620
4621 ice_vc_notify_vf_link_state(vf);
4622
4623 return 0;
4624 }
4625
4626 /**
4627 * ice_get_vf_stats - populate some stats for the VF
4628 * @netdev: the netdev of the PF
4629 * @vf_id: the host OS identifier (0-255)
4630 * @vf_stats: pointer to the OS memory to be initialized
4631 */
4632 int ice_get_vf_stats(struct net_device *netdev, int vf_id,
4633 struct ifla_vf_stats *vf_stats)
4634 {
4635 struct ice_pf *pf = ice_netdev_to_pf(netdev);
4636 struct ice_eth_stats *stats;
4637 struct ice_vsi *vsi;
4638 struct ice_vf *vf;
4639 int ret;
4640
4641 if (ice_validate_vf_id(pf, vf_id))
4642 return -EINVAL;
4643
4644 vf = &pf->vf[vf_id];
4645 ret = ice_check_vf_ready_for_cfg(vf);
4646 if (ret)
4647 return ret;
4648
4649 vsi = ice_get_vf_vsi(vf);
4650 if (!vsi)
4651 return -EINVAL;
4652
4653 ice_update_eth_stats(vsi);
4654 stats = &vsi->eth_stats;
4655
4656 memset(vf_stats, 0, sizeof(*vf_stats));
4657
4658 vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
4659 stats->rx_multicast;
4660 vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
4661 stats->tx_multicast;
4662 vf_stats->rx_bytes = stats->rx_bytes;
4663 vf_stats->tx_bytes = stats->tx_bytes;
4664 vf_stats->broadcast = stats->rx_broadcast;
4665 vf_stats->multicast = stats->rx_multicast;
4666 vf_stats->rx_dropped = stats->rx_discards;
4667 vf_stats->tx_dropped = stats->tx_discards;
4668
4669 return 0;
4670 }
4671
4672 /**
4673 * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
4674 * @vf: pointer to the VF structure
4675 */
4676 void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
4677 {
4678 struct ice_pf *pf = vf->pf;
4679 struct device *dev;
4680
4681 dev = ice_pf_to_dev(pf);
4682
4683 dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
4684 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
4685 vf->dflt_lan_addr.addr,
4686 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
4687 ? "on" : "off");
4688 }
4689
4690 /**
4691 * ice_print_vfs_mdd_events - print VFs malicious driver detect event
4692 * @pf: pointer to the PF structure
4693 *
4694 * Called from ice_handle_mdd_event to rate limit and print VFs MDD events.
4695 */
4696 void ice_print_vfs_mdd_events(struct ice_pf *pf)
4697 {
4698 struct device *dev = ice_pf_to_dev(pf);
4699 struct ice_hw *hw = &pf->hw;
4700 int i;
4701
4702 /* check that there are pending MDD events to print */
4703 if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state))
4704 return;
4705
4706 /* VF MDD event logs are rate limited to one second intervals */
4707 if (time_is_after_jiffies(pf->last_printed_mdd_jiffies + HZ * 1))
4708 return;
4709
4710 pf->last_printed_mdd_jiffies = jiffies;
4711
4712 ice_for_each_vf(pf, i) {
4713 struct ice_vf *vf = &pf->vf[i];
4714
4715 /* only print Rx MDD event message if there are new events */
4716 if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
4717 vf->mdd_rx_events.last_printed =
4718 vf->mdd_rx_events.count;
4719 ice_print_vf_rx_mdd_event(vf);
4720 }
4721
4722 /* only print Tx MDD event message if there are new events */
4723 if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
4724 vf->mdd_tx_events.last_printed =
4725 vf->mdd_tx_events.count;
4726
4727 dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
4728 vf->mdd_tx_events.count, hw->pf_id, i,
4729 vf->dflt_lan_addr.addr);
4730 }
4731 }
4732 }
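
/* Editorial sketch: the one-second rate limit applied above. A new batch
 * of VF MDD messages is printed only once at least HZ jiffies have passed
 * since the previous batch. The example_* name is hypothetical and the
 * block is compiled out.
 */
#if 0
static bool example_mdd_print_allowed(unsigned long last_printed_jiffies)
{
	return time_is_before_jiffies(last_printed_jiffies + HZ);
}
#endif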
4733
4734 /**
4735 * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
4736 * @pdev: pointer to a pci_dev structure
4737 *
4738 * Called when recovering from a PF FLR to restore interrupt capability to
4739 * the VFs.
4740 */
4741 void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
4742 {
4743 u16 vf_id;
4744 int pos;
4745
4746 if (!pci_num_vf(pdev))
4747 return;
4748
4749 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
4750 if (pos) {
4751 struct pci_dev *vfdev;
4752
4753 pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
4754 &vf_id);
4755 vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
4756 while (vfdev) {
4757 if (vfdev->is_virtfn && vfdev->physfn == pdev)
4758 pci_restore_msi_state(vfdev);
4759 vfdev = pci_get_device(pdev->vendor, vf_id,
4760 vfdev);
4761 }
4762 }
4763 }
4764
4765 /**
4766 * ice_is_malicious_vf - helper function to detect a malicious VF
4767 * @pf: ptr to struct ice_pf
4768 * @event: pointer to the AQ event
4769 * @num_msg_proc: the number of messages processed so far
4770  * @num_msg_pending: the number of messages pending in the admin queue
4771 */
4772 bool
4773 ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
4774 u16 num_msg_proc, u16 num_msg_pending)
4775 {
4776 s16 vf_id = le16_to_cpu(event->desc.retval);
4777 struct device *dev = ice_pf_to_dev(pf);
4778 struct ice_mbx_data mbxdata;
4779 enum ice_status status;
4780 bool malvf = false;
4781 struct ice_vf *vf;
4782
4783 if (ice_validate_vf_id(pf, vf_id))
4784 return false;
4785
4786 vf = &pf->vf[vf_id];
4787 /* Check if VF is disabled. */
4788 if (test_bit(ICE_VF_STATE_DIS, vf->vf_states))
4789 return false;
4790
4791 mbxdata.num_msg_proc = num_msg_proc;
4792 mbxdata.num_pending_arq = num_msg_pending;
4793 mbxdata.max_num_msgs_mbx = pf->hw.mailboxq.num_rq_entries;
4794 #define ICE_MBX_OVERFLOW_WATERMARK 64
4795 mbxdata.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;
4796
4797 /* check to see if we have a malicious VF */
4798 status = ice_mbx_vf_state_handler(&pf->hw, &mbxdata, vf_id, &malvf);
4799 if (status)
4800 return false;
4801
4802 if (malvf) {
4803 bool report_vf = false;
4804
4805 /* if the VF is malicious and we haven't let the user
4806 * know about it, then let them know now
4807 */
4808 status = ice_mbx_report_malvf(&pf->hw, pf->malvfs,
4809 ICE_MAX_VF_COUNT, vf_id,
4810 &report_vf);
4811 if (status)
4812 dev_dbg(dev, "Error reporting malicious VF\n");
4813
4814 if (report_vf) {
4815 struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
4816
4817 if (pf_vsi)
4818 dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n",
4819 &vf->dflt_lan_addr.addr[0],
4820 pf_vsi->netdev->dev_addr);
4821 }
4822
4823 return true;
4824 }
4825
4826 /* if there was an error in detection or the VF is not malicious then
4827 * return false
4828 */
4829 return false;
4830 }
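
/* Editorial sketch: the mailbox snapshot handed to
 * ice_mbx_vf_state_handler() above. The watermark is the pending-message
 * depth at which asynchronous VF traffic starts being treated as
 * suspicious. The example_* name is hypothetical and the block is
 * compiled out.
 */
#if 0
static void example_fill_mbx_snapshot(struct ice_mbx_data *data,
				      u16 num_msg_proc, u16 num_msg_pending,
				      u16 mbx_size)
{
	data->num_msg_proc = num_msg_proc;
	data->num_pending_arq = num_msg_pending;
	data->max_num_msgs_mbx = mbx_size;
	data->async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK;
}
#endif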
4831