/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <linux/etherdevice.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/vport.h>
#include <dev/mlx5/mlx5_core/mlx5_core.h>

static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
					 int inlen);

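/*
 * Common helper for the vport state queries below.  Vport 0 is the calling
 * function's own vport; for any other vport number the other_vport bit must
 * be set, which requires the caller to be the vport group manager.
 */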
static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
				   u16 vport, u32 *out, int outlen)
{
	int err;
	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {0};

	MLX5_SET(query_vport_state_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_STATE);
	MLX5_SET(query_vport_state_in, in, op_mod, opmod);
	MLX5_SET(query_vport_state_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_vport_state_in, in, other_vport, 1);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
	if (err)
		mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");

	return err;
}

u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};

	_mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));

	return MLX5_GET(query_vport_state_out, out, state);
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_state);

u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};

	_mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));

	return MLX5_GET(query_vport_state_out, out, admin_state);
}
EXPORT_SYMBOL(mlx5_query_vport_admin_state);

int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
				  u16 vport, u8 state)
{
	u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)] = {0};
	int err;

	MLX5_SET(modify_vport_state_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_VPORT_STATE);
	MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
	MLX5_SET(modify_vport_state_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(modify_vport_state_in, in, other_vport, 1);

	MLX5_SET(modify_vport_state_in, in, admin_state, state);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err)
		mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_VPORT_STATE failed\n");

	return err;
}
EXPORT_SYMBOL(mlx5_modify_vport_admin_state);

static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
					u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);

	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

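/*
 * The device exposes MLX5_CAP_GEN(mdev, max_qp_cnt) queue counter sets in
 * total.  They are budgeted per interface client: the Ethernet client may
 * allocate up to MLX5_QCOUNTER_SETS_NETDEV sets, the InfiniBand client the
 * remainder.
 */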
static u32 mlx5_vport_max_q_counter_allocator(struct mlx5_core_dev *mdev,
					      int client_id)
{
	switch (client_id) {
	case MLX5_INTERFACE_PROTOCOL_IB:
		return (MLX5_CAP_GEN(mdev, max_qp_cnt) -
			MLX5_QCOUNTER_SETS_NETDEV);
	case MLX5_INTERFACE_PROTOCOL_ETH:
		return MLX5_QCOUNTER_SETS_NETDEV;
	default:
		mlx5_core_warn(mdev, "Unknown Client: %d\n", client_id);
		return 0;
	}
}

int mlx5_vport_alloc_q_counter(struct mlx5_core_dev *mdev,
			       int client_id, u16 *counter_set_id)
{
	u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
	int err;

	if (mdev->num_q_counter_allocated[client_id] >=
	    mlx5_vport_max_q_counter_allocator(mdev, client_id))
		return -EINVAL;

	MLX5_SET(alloc_q_counter_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_Q_COUNTER);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));

	/* only account for counter sets that were actually allocated */
	if (!err) {
		*counter_set_id = MLX5_GET(alloc_q_counter_out, out,
					   counter_set_id);
		mdev->num_q_counter_allocated[client_id]++;
	}

	return err;
}

int mlx5_vport_dealloc_q_counter(struct mlx5_core_dev *mdev,
				 int client_id, u16 counter_set_id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)] = {0};
	int err;

	if (mdev->num_q_counter_allocated[client_id] <= 0)
		return -EINVAL;

	MLX5_SET(dealloc_q_counter_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
	MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
		 counter_set_id);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));

	if (!err)
		mdev->num_q_counter_allocated[client_id]--;

	return err;
}

int mlx5_vport_query_q_counter(struct mlx5_core_dev *mdev,
			       u16 counter_set_id,
			       int reset,
			       void *out,
			       int out_size)
{
	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};

	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
	MLX5_SET(query_q_counter_in, in, clear, reset);
	MLX5_SET(query_q_counter_in, in, counter_set_id, counter_set_id);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
}

int mlx5_vport_query_out_of_rx_buffer(struct mlx5_core_dev *mdev,
				      u16 counter_set_id,
				      u32 *out_of_rx_buffer)
{
	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {0};
	int err;

	err = mlx5_vport_query_q_counter(mdev, counter_set_id, 0, out,
					 sizeof(out));

	if (err)
		return err;

	*out_of_rx_buffer = MLX5_GET(query_q_counter_out, out,
				     out_of_buffer);
	return err;
}

int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
				    u16 vport, u8 *min_inline)
{
	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
	int err;

	err = mlx5_query_nic_vport_context(mdev, vport, out, sizeof(out));
	if (!err)
		*min_inline = MLX5_GET(query_nic_vport_context_out, out,
				       nic_vport_context.min_wqe_inline_mode);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);

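/*
 * Resolve the effective minimum WQE inline mode for vport 0.  Depending on
 * the wqe_inline_mode capability it is either fixed by the device (L2),
 * configurable per vport (and therefore read from the NIC vport context),
 * or not required at all.
 */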
int mlx5_query_min_inline(struct mlx5_core_dev *mdev,
			  u8 *min_inline_mode)
{
	int err;

	switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_L2:
		*min_inline_mode = MLX5_INLINE_MODE_L2;
		err = 0;
		break;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		err = mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode);
		break;
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		*min_inline_mode = MLX5_INLINE_MODE_NONE;
		err = 0;
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_min_inline);

int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
				     u16 vport, u8 min_inline)
{
	u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *nic_vport_ctx;

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.min_wqe_inline_mode, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 min_wqe_inline_mode, min_inline);

	return mlx5_modify_nic_vport_context(mdev, in, inlen);
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_min_inline);

int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				     u16 vport, u8 *addr)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u8 *out_addr;
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				nic_vport_context.permanent_address);

	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
	if (err)
		goto out;

	/* the MAC occupies the lower 6 bytes of the 8-byte address field */
	ether_addr_copy(addr, &out_addr[2]);

out:
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);

int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				      u16 vport, u8 *addr)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;
	void *nic_vport_ctx;
	u8 *perm_mac;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.permanent_address, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);
	perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
				permanent_address);

	/* skip the 2 reserved bytes of the 8-byte address field */
	ether_addr_copy(&perm_mac[2], addr);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL(mlx5_modify_nic_vport_mac_address);

int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
					   u64 *system_image_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (err)
		goto out;

	*system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
					nic_vport_context.system_image_guid);
out:
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);

int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (err)
		goto out;

	*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
				nic_vport_context.node_guid);

out:
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);

static int mlx5_query_nic_vport_port_guid(struct mlx5_core_dev *mdev,
					  u64 *port_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (err)
		goto out;

	*port_guid = MLX5_GET64(query_nic_vport_context_out, out,
				nic_vport_context.port_guid);

out:
	kvfree(out);
	return err;
}

int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
					u16 *qkey_viol_cntr)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (err)
		goto out;

	*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
				   nic_vport_context.qkey_violation_counter);

out:
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);

static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
					 int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);

	return mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
}

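/*
 * RoCE is turned on or off for the whole device by toggling roce_en in the
 * NIC vport context of the function's own vport (vport 0).
 */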
static int mlx5_nic_vport_enable_disable_roce(struct mlx5_core_dev *mdev,
					      int enable_disable)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
		 enable_disable);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}

int mlx5_set_nic_vport_current_mac(struct mlx5_core_dev *mdev, int vport,
				   bool other_vport, u8 *addr)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
		  + MLX5_ST_SZ_BYTES(mac_address_layout);
	u8 *mac_layout;
	u8 *mac_ptr;
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in,
		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in,
		 other_vport, other_vport);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.allowed_list_type,
		 MLX5_NIC_VPORT_LIST_TYPE_UC);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.allowed_list_size, 1);

	mac_layout = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
		nic_vport_context.current_uc_mac_address);
	mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_layout,
		mac_addr_47_32);
	ether_addr_copy(mac_ptr, addr);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_current_mac);

int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
				    u32 vport, u64 node_guid)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;
	void *nic_vport_context;

	if (!vport)
		return -EINVAL;
	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
		return -EPERM;
	if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
		return -ENOTSUPP;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.node_guid, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);

	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
					 in, nic_vport_context);
	MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL(mlx5_modify_nic_vport_node_guid);

int mlx5_modify_nic_vport_port_guid(struct mlx5_core_dev *mdev,
				    u32 vport, u64 port_guid)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;
	void *nic_vport_context;

	if (!vport)
		return -EINVAL;
	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
		return -EPERM;
	if (!MLX5_CAP_ESW(mdev, nic_vport_port_guid_modify))
		return -ENOTSUPP;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.port_guid, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);

	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
					 in, nic_vport_context);
	MLX5_SET64(nic_vport_context, nic_vport_context, port_guid, port_guid);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL(mlx5_modify_nic_vport_port_guid);

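/*
 * The allowed-list entries in the NIC vport context are a union: the same
 * current_uc_mac_address[] slots hold mac_address_layout entries for the
 * UC/MC lists and vlan_layout entries for the VLAN list, which is why the
 * VLAN functions index them with MLX5_ADDR_OF(..., current_uc_mac_address[i]).
 */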
int mlx5_set_nic_vport_vlan_list(struct mlx5_core_dev *dev, u16 vport,
				 u16 *vlan_list, int list_len)
{
	void *in, *ctx;
	int i, err;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
		+ MLX5_ST_SZ_BYTES(vlan_layout) * (int)list_len;

	int max_list_size = 1 << MLX5_CAP_GEN_MAX(dev, log_max_vlan_list);

	if (list_len > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
			       list_len, max_list_size);
		return -ENOSPC;
	}

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(dev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in,
			 other_vport, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);

	MLX5_SET(nic_vport_context, ctx, allowed_list_type,
		 MLX5_NIC_VPORT_LIST_TYPE_VLAN);
	MLX5_SET(nic_vport_context, ctx, allowed_list_size, list_len);

	for (i = 0; i < list_len; i++) {
		u8 *vlan_lout = MLX5_ADDR_OF(nic_vport_context, ctx,
					     current_uc_mac_address[i]);
		MLX5_SET(vlan_layout, vlan_lout, vlan, vlan_list[i]);
	}

	err = mlx5_modify_nic_vport_context(dev, in, inlen);

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_vlan_list);

int mlx5_set_nic_vport_mc_list(struct mlx5_core_dev *mdev, int vport,
			       u64 *addr_list, size_t addr_list_len)
{
	void *in, *ctx;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
		  + MLX5_ST_SZ_BYTES(mac_address_layout) * (int)addr_list_len;
	int err;
	size_t i;
	int max_list_sz = 1 << MLX5_CAP_GEN_MAX(mdev, log_max_current_mc_list);

	if ((int)addr_list_len > max_list_sz) {
		mlx5_core_warn(mdev, "Requested list size (%d) > (%d) max_list_size\n",
			       (int)addr_list_len, max_list_sz);
		return -ENOSPC;
	}

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in,
			 other_vport, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);

	MLX5_SET(nic_vport_context, ctx, allowed_list_type,
		 MLX5_NIC_VPORT_LIST_TYPE_MC);
	MLX5_SET(nic_vport_context, ctx, allowed_list_size, addr_list_len);

	for (i = 0; i < addr_list_len; i++) {
		u8 *mac_lout = (u8 *)MLX5_ADDR_OF(nic_vport_context, ctx,
						  current_uc_mac_address[i]);
		u8 *mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_lout,
						 mac_addr_47_32);
		ether_addr_copy(mac_ptr, (u8 *)&addr_list[i]);
	}

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_mc_list);

int mlx5_set_nic_vport_promisc(struct mlx5_core_dev *mdev, int vport,
			       bool promisc_mc, bool promisc_uc,
			       bool promisc_all)
{
	u8 in[MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)] = {0};
	u8 *ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
			       nic_vport_context);

	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in,
			 other_vport, 1);
	MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
	if (promisc_mc)
		MLX5_SET(nic_vport_context, ctx, promisc_mc, 1);
	if (promisc_uc)
		MLX5_SET(nic_vport_context, ctx, promisc_uc, 1);
	if (promisc_all)
		MLX5_SET(nic_vport_context, ctx, promisc_all, 1);

	return mlx5_modify_nic_vport_context(mdev, in, sizeof(in));
}
EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_promisc);

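/*
 * On input *list_size holds the capacity of addr_list; on successful return
 * it is updated to the number of entries the firmware actually reported.
 */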
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
				  u16 vport,
				  enum mlx5_list_type list_type,
				  u8 addr_list[][ETH_ALEN],
				  int *list_size)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
	void *nic_vport_ctx;
	int max_list_size;
	int req_list_size;
	int out_sz;
	void *out;
	int err;
	int i;

	req_list_size = *list_size;

	max_list_size = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC) ?
			1 << MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list) :
			1 << MLX5_CAP_GEN_MAX(dev, log_max_current_mc_list);

	if (req_list_size > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
			       req_list_size, max_list_size);
		req_list_size = max_list_size;
	}

	out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_out) +
		 req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				     nic_vport_context);
	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
				 allowed_list_size);

	*list_size = req_list_size;
	for (i = 0; i < req_list_size; i++) {
		/* skip the 2 reserved bytes of the 8-byte address field */
		u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
					    nic_vport_ctx,
					    current_uc_mac_address[i]) + 2;
		ether_addr_copy(addr_list[i], mac_addr);
	}
out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);

int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
				   enum mlx5_list_type list_type,
				   u8 addr_list[][ETH_ALEN],
				   int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	max_list_size = list_type == MLX5_NIC_VPORT_LIST_TYPE_UC ?
		 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
		 1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, list_type);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context,
					    nic_vport_ctx,
					    current_uc_mac_address[i]) + 2;
		ether_addr_copy(curr_mac, addr_list[i]);
	}

	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);

int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
			       u16 vport,
			       u16 vlans[],
			       int *size)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
	void *nic_vport_ctx;
	int req_list_size;
	int max_list_size;
	int out_sz;
	void *out;
	int err;
	int i;

	req_list_size = *size;
	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
	if (req_list_size > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max list size\n",
			       req_list_size, max_list_size);
		req_list_size = max_list_size;
	}

	out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_out) +
		 req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);

	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
		 MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_VLAN_LIST);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				     nic_vport_context);
	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
				 allowed_list_size);

	*size = req_list_size;
	for (i = 0; i < req_list_size; i++) {
		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
					       nic_vport_ctx,
					       current_uc_mac_address[i]);
		vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
	}
out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans);

int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
				u16 vlans[],
				int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(vlan_layout);

	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, MLX5_NIC_VPORT_LIST_TYPE_VLAN);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
					       nic_vport_ctx,
					       current_uc_mac_address[i]);
		MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
	}

	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);

int mlx5_query_nic_vport_roce_en(struct mlx5_core_dev *mdev, u8 *enable)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (err)
		goto out;

	*enable = MLX5_GET(query_nic_vport_context_out, out,
			   nic_vport_context.roce_en);

out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_roce_en);

int mlx5_set_nic_vport_permanent_mac(struct mlx5_core_dev *mdev, int vport,
				     u8 *addr)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	u8 *mac_ptr;
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in,
		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.permanent_address, 1);
	mac_ptr = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
		nic_vport_context.permanent_address.mac_addr_47_32);
	ether_addr_copy(mac_ptr, addr);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_permanent_mac);

int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
{
	return mlx5_nic_vport_enable_disable_roce(mdev, 1);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);

int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
{
	return mlx5_nic_vport_enable_disable_roce(mdev, 0);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);

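/*
 * Querying counters of another vport (other_vport set, VF number vf maps to
 * vport vf + 1) is only permitted when this function is the vport group
 * manager; otherwise -EPERM is returned.  port_num is only meaningful on
 * dual-port devices.
 */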
int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
				  int vf, u8 port_num, void *out,
				  size_t out_sz)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
	int is_group_manager;
	void *in;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	in = mlx5_vzalloc(in_sz);
	if (!in)
		return -ENOMEM;

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_vport_counter_in, in, other_vport, 1);
			MLX5_SET(query_vport_counter_in, in, vport_number, vf + 1);
		} else {
			err = -EPERM;
			goto free;
		}
	}
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_vport_counter_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
free:
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);

int mlx5_query_hca_vport_context(struct mlx5_core_dev *mdev,
				 u8 port_num, u8 vport_num, u32 *out,
				 int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {0};
	int is_group_manager;

	is_group_manager = MLX5_CAP_GEN(mdev, vport_group_manager);

	MLX5_SET(query_hca_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);

	if (vport_num) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_context_in, in, other_vport,
				 1);
			MLX5_SET(query_hca_vport_context_in, in, vport_number,
				 vport_num);
		} else {
			return -EPERM;
		}
	}

	if (MLX5_CAP_GEN(mdev, num_ports) == 2)
		MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *mdev,
					   u64 *system_image_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
	if (err)
		goto out;

	*system_image_guid = MLX5_GET64(query_hca_vport_context_out, out,
					hca_vport_context.system_image_guid);

out:
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);

int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
	if (err)
		goto out;

	*node_guid = MLX5_GET64(query_hca_vport_context_out, out,
				hca_vport_context.node_guid);

out:
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);

static int mlx5_query_hca_vport_port_guid(struct mlx5_core_dev *mdev,
					  u64 *port_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
	if (err)
		goto out;

	*port_guid = MLX5_GET64(query_hca_vport_context_out, out,
				hca_vport_context.port_guid);

out:
	kvfree(out);
	return err;
}

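/*
 * gid_index selects a single entry; the special value 0xffff requests the
 * whole GID table, in which case the output buffer is sized for
 * gid_table_size entries.
 */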
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 port_num,
			     u16 vport_num, u16 gid_index, union ib_gid *gid)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	union ib_gid *tmp;
	int tbsz;
	int nout;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));

	if (gid_index > tbsz && gid_index != 0xffff)
		return -EINVAL;

	if (gid_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	out_sz += nout * sizeof(*gid);

	in = mlx5_vzalloc(in_sz);
	out = mlx5_vzalloc(out_sz);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_gid_in, in, opcode,
		 MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
	if (vport_num) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_gid_in, in, vport_number,
				 vport_num);
			MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}

	MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	tmp = (union ib_gid *)MLX5_ADDR_OF(query_hca_vport_gid_out, out, gid);
	gid->global.subnet_prefix = tmp->global.subnet_prefix;
	gid->global.interface_id = tmp->global.interface_id;

out:
	kvfree(in);
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);

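/*
 * Like the GID query above, pkey_index 0xffff requests the whole P_Key
 * table; any other index returns a single entry.
 */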
int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
			      u8 port_num, u16 vf_num, u16 pkey_index,
			      u16 *pkey)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	void *pkarr;
	int nout;
	int tbsz;
	int err;
	int i;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
	if (pkey_index > tbsz && pkey_index != 0xffff)
		return -EINVAL;

	if (pkey_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	out_sz += nout * MLX5_ST_SZ_BYTES(pkey);

	in = kzalloc(in_sz, GFP_KERNEL);
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_pkey_in, in, opcode,
		 MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_pkey_in, in, vport_number,
				 vf_num);
			MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
	for (i = 0; i < nout; i++, pkey++,
	     pkarr += MLX5_ST_SZ_BYTES(pkey))
		*pkey = MLX5_GET_PR(pkey, pkarr, pkey);

out:
	kfree(in);
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);

static int mlx5_query_hca_min_wqe_header(struct mlx5_core_dev *mdev,
					 int *min_header)
{
	u32 *out;
	u32 outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
	if (err)
		goto out;

	*min_header = MLX5_GET(query_hca_vport_context_out, out,
			       hca_vport_context.min_wqe_inline_mode);

out:
	kvfree(out);
	return err;
}

static int mlx5_modify_eswitch_vport_context(struct mlx5_core_dev *mdev,
					     u16 vport, void *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};
	int err;

	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);

	MLX5_SET(modify_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);

	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
	if (err)
		mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT failed\n");

	return err;
}

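/*
 * Configure e-switch CVLAN insertion and stripping for a vport.  The VLAN
 * id/cfi/pcp fields are only written when insertion is enabled; the insert
 * and strip selections themselves are always updated via field_select.
 */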
int mlx5_set_eswitch_cvlan_info(struct mlx5_core_dev *mdev, u8 vport,
				u8 insert_mode, u8 strip_mode,
				u16 vlan, u8 cfi, u8 pcp)
{
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};

	if (insert_mode != MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_NONE) {
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_cfi, cfi);
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_pcp, pcp);
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_id, vlan);
	}

	MLX5_SET(modify_esw_vport_context_in, in,
		 esw_vport_context.vport_cvlan_insert, insert_mode);

	MLX5_SET(modify_esw_vport_context_in, in,
		 esw_vport_context.vport_cvlan_strip, strip_mode);

	MLX5_SET(modify_esw_vport_context_in, in, field_select,
		 MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_STRIP |
		 MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_INSERT);

	return mlx5_modify_eswitch_vport_context(mdev, vport, in, sizeof(in));
}
EXPORT_SYMBOL_GPL(mlx5_set_eswitch_cvlan_info);

int mlx5_query_vport_mtu(struct mlx5_core_dev *mdev, int *mtu)
{
	u32 *out;
	u32 outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (err)
		goto out;

	*mtu = MLX5_GET(query_nic_vport_context_out, out,
			nic_vport_context.mtu);

out:
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_mtu);

int mlx5_set_vport_mtu(struct mlx5_core_dev *mdev, int mtu)
{
	u32 *in;
	u32 inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_vport_mtu);

static int mlx5_query_vport_min_wqe_header(struct mlx5_core_dev *mdev,
					   int *min_header)
{
	u32 *out;
	u32 outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (err)
		goto out;

	*min_header = MLX5_GET(query_nic_vport_context_out, out,
			       nic_vport_context.min_wqe_inline_mode);

out:
	kvfree(out);
	return err;
}

int mlx5_set_vport_min_wqe_header(struct mlx5_core_dev *mdev,
				  u8 vport, int min_header)
{
	u32 *in;
	u32 inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.min_wqe_inline_mode, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.min_wqe_inline_mode, min_header);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_vport_min_wqe_header);

int mlx5_query_min_wqe_header(struct mlx5_core_dev *dev, int *min_header)
{
	switch (MLX5_CAP_GEN(dev, port_type)) {
	case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
		return mlx5_query_hca_min_wqe_header(dev, min_header);

	case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
		return mlx5_query_vport_min_wqe_header(dev, min_header);

	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mlx5_query_min_wqe_header);

int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
				 u16 vport,
				 int *promisc_uc,
				 int *promisc_mc,
				 int *promisc_all)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
	if (err)
		goto out;

	*promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
			       nic_vport_context.promisc_uc);
	*promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
			       nic_vport_context.promisc_mc);
	*promisc_all = MLX5_GET(query_nic_vport_context_out, out,
				nic_vport_context.promisc_all);

out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);

int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
				  int promisc_uc,
				  int promisc_mc,
				  int promisc_all)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_err(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_uc, promisc_uc);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_mc, promisc_mc);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_all, promisc_all);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);

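/*
 * Local loopback control: the disable_mc_local_lb/disable_uc_local_lb bits
 * are inverted relative to "enable", and each one is only selected for
 * update when the corresponding disable_local_lb_mc/uc capability exists.
 */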
int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	if (!MLX5_CAP_GEN(mdev, disable_local_lb_mc) &&
	    !MLX5_CAP_GEN(mdev, disable_local_lb_uc))
		return 0;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.disable_mc_local_lb, !enable);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.disable_uc_local_lb, !enable);

	if (MLX5_CAP_GEN(mdev, disable_local_lb_mc))
		MLX5_SET(modify_nic_vport_context_in, in,
			 field_select.disable_mc_local_lb, 1);

	if (MLX5_CAP_GEN(mdev, disable_local_lb_uc))
		MLX5_SET(modify_nic_vport_context_in, in,
			 field_select.disable_uc_local_lb, 1);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	if (!err)
		mlx5_core_dbg(mdev, "%s local_lb\n",
			      enable ? "enable" : "disable");

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_update_local_lb);

int mlx5_nic_vport_modify_local_lb(struct mlx5_core_dev *mdev,
				   enum mlx5_local_lb_selection selection,
				   u8 value)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in, vport_number, 0);

	if (selection == MLX5_LOCAL_MC_LB) {
		MLX5_SET(modify_nic_vport_context_in, in,
			 field_select.disable_mc_local_lb, 1);
		MLX5_SET(modify_nic_vport_context_in, in,
			 nic_vport_context.disable_mc_local_lb,
			 value);
	} else {
		MLX5_SET(modify_nic_vport_context_in, in,
			 field_select.disable_uc_local_lb, 1);
		MLX5_SET(modify_nic_vport_context_in, in,
			 nic_vport_context.disable_uc_local_lb,
			 value);
	}

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_modify_local_lb);

int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev,
				  enum mlx5_local_lb_selection selection,
				  u8 *value)
{
	void *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (err)
		goto done;

	if (selection == MLX5_LOCAL_MC_LB)
		*value = MLX5_GET(query_nic_vport_context_out, out,
				  nic_vport_context.disable_mc_local_lb);
	else
		*value = MLX5_GET(query_nic_vport_context_out, out,
				  nic_vport_context.disable_uc_local_lb);

done:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_query_local_lb);

int mlx5_query_vport_counter(struct mlx5_core_dev *dev,
			     u8 port_num, u16 vport_num,
			     void *out, int out_size)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
	int is_group_manager;
	void *in;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	in = mlx5_vzalloc(in_sz);
	if (!in)
		return -ENOMEM;

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	if (vport_num) {
		if (is_group_manager) {
			MLX5_SET(query_vport_counter_in, in, other_vport, 1);
			MLX5_SET(query_vport_counter_in, in, vport_number,
				 vport_num);
		} else {
			err = -EPERM;
			goto ex;
		}
	}
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_vport_counter_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_size);

ex:
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_counter);

int mlx5_get_vport_counters(struct mlx5_core_dev *dev, u8 port_num,
			    struct mlx5_vport_counters *vc)
{
	int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	void *out;
	int err;

	out = mlx5_vzalloc(out_sz);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_vport_counter(dev, port_num, 0, out, out_sz);
	if (err)
		goto ex;

	vc->received_errors.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_errors.packets);
	vc->received_errors.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_errors.octets);
	vc->transmit_errors.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmit_errors.packets);
	vc->transmit_errors.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmit_errors.octets);
	vc->received_ib_unicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_ib_unicast.packets);
	vc->received_ib_unicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_ib_unicast.octets);
	vc->transmitted_ib_unicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_ib_unicast.packets);
	vc->transmitted_ib_unicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_ib_unicast.octets);
	vc->received_ib_multicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_ib_multicast.packets);
	vc->received_ib_multicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_ib_multicast.octets);
	vc->transmitted_ib_multicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_ib_multicast.packets);
	vc->transmitted_ib_multicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_ib_multicast.octets);
	vc->received_eth_broadcast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_eth_broadcast.packets);
	vc->received_eth_broadcast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_eth_broadcast.octets);
	vc->transmitted_eth_broadcast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_eth_broadcast.packets);
	vc->transmitted_eth_broadcast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_eth_broadcast.octets);
	vc->received_eth_unicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_eth_unicast.octets);
	vc->received_eth_unicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_eth_unicast.packets);
	vc->transmitted_eth_unicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_eth_unicast.octets);
	vc->transmitted_eth_unicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_eth_unicast.packets);
	vc->received_eth_multicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_eth_multicast.octets);
	vc->received_eth_multicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_eth_multicast.packets);
	vc->transmitted_eth_multicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_eth_multicast.octets);
	vc->transmitted_eth_multicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_eth_multicast.packets);

ex:
	kvfree(out);
	return err;
}

int mlx5_query_vport_system_image_guid(struct mlx5_core_dev *dev,
				       u64 *sys_image_guid)
{
	switch (MLX5_CAP_GEN(dev, port_type)) {
	case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
		return mlx5_query_hca_vport_system_image_guid(dev,
							      sys_image_guid);

	case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
		return mlx5_query_nic_vport_system_image_guid(dev,
							      sys_image_guid);

	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_system_image_guid);

int mlx5_query_vport_node_guid(struct mlx5_core_dev *dev, u64 *node_guid)
{
	switch (MLX5_CAP_GEN(dev, port_type)) {
	case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
		return mlx5_query_hca_vport_node_guid(dev, node_guid);

	case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
		return mlx5_query_nic_vport_node_guid(dev, node_guid);

	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_node_guid);

int mlx5_query_vport_port_guid(struct mlx5_core_dev *dev, u64 *port_guid)
{
	switch (MLX5_CAP_GEN(dev, port_type)) {
	case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
		return mlx5_query_hca_vport_port_guid(dev, port_guid);

	case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
		return mlx5_query_nic_vport_port_guid(dev, port_guid);

	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_port_guid);

int mlx5_query_hca_vport_state(struct mlx5_core_dev *dev, u8 *vport_state)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(dev, 1, 0, out, outlen);
	if (err)
		goto out;

	*vport_state = MLX5_GET(query_hca_vport_context_out, out,
				hca_vport_context.vport_state);

out:
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_state);

int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
			     u8 port_num, void *out, size_t sz)
{
	u32 *in;
	int err;

	in = mlx5_vzalloc(sz);
	if (!in)
		return -ENOMEM;

	MLX5_SET(ppcnt_reg, in, local_port, port_num);

	MLX5_SET(ppcnt_reg, in, grp, MLX5_INFINIBAND_PORT_COUNTERS_GROUP);
	err = mlx5_core_access_reg(dev, in, sz, out,
				   sz, MLX5_REG_PPCNT, 0, 0);

	kvfree(in);
	return err;
}