xref: /freebsd/sys/dev/mlx5/mlx5_core/mlx5_vport.c (revision 0957b409)
1 /*-
2  * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD$
26  */
27 
28 #include <linux/etherdevice.h>
29 #include <dev/mlx5/driver.h>
30 #include <dev/mlx5/vport.h>
31 #include "mlx5_core.h"
32 
33 static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
34 					 int inlen);
35 
36 static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
37 				   u16 vport, u32 *out, int outlen)
38 {
39 	int err;
40 	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {0};
41 
42 	MLX5_SET(query_vport_state_in, in, opcode,
43 		 MLX5_CMD_OP_QUERY_VPORT_STATE);
44 	MLX5_SET(query_vport_state_in, in, op_mod, opmod);
45 	MLX5_SET(query_vport_state_in, in, vport_number, vport);
46 	if (vport)
47 		MLX5_SET(query_vport_state_in, in, other_vport, 1);
48 
49 	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
50 	if (err)
51 		mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");
52 
53 	return err;
54 }
55 
/*
 * Return the operational state of @vport as reported by firmware.
 * NOTE(review): the command status is deliberately ignored here; on a
 * command failure the zero-initialized buffer yields state 0.
 */
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};

	_mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));

	return MLX5_GET(query_vport_state_out, out, state);
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_state);
65 
/*
 * Return the administrative state of @vport.  As with
 * mlx5_query_vport_state(), a command failure is swallowed and results
 * in admin_state 0 from the zeroed buffer.
 */
u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};

	_mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));

	return MLX5_GET(query_vport_state_out, out, admin_state);
}
EXPORT_SYMBOL(mlx5_query_vport_admin_state);
75 
76 int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
77 				  u16 vport, u8 state)
78 {
79 	u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {0};
80 	u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)] = {0};
81 	int err;
82 
83 	MLX5_SET(modify_vport_state_in, in, opcode,
84 		 MLX5_CMD_OP_MODIFY_VPORT_STATE);
85 	MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
86 	MLX5_SET(modify_vport_state_in, in, vport_number, vport);
87 
88 	if (vport)
89 		MLX5_SET(modify_vport_state_in, in, other_vport, 1);
90 
91 	MLX5_SET(modify_vport_state_in, in, admin_state, state);
92 
93 	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
94 	if (err)
95 		mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_VPORT_STATE failed\n");
96 
97 	return err;
98 }
99 EXPORT_SYMBOL(mlx5_modify_vport_admin_state);
100 
/*
 * Fetch the NIC vport context of @vport into @out (@outlen bytes).
 * Non-zero @vport requires the "other_vport" flag.  Returns the raw
 * command status.
 */
static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
					u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);

	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}
115 
116 static u32 mlx5_vport_max_q_counter_allocator(struct mlx5_core_dev *mdev,
117 					      int client_id)
118 {
119 	switch (client_id) {
120 	case MLX5_INTERFACE_PROTOCOL_IB:
121 		return (MLX5_CAP_GEN(mdev, max_qp_cnt) -
122 			MLX5_QCOUNTER_SETS_NETDEV);
123 	case MLX5_INTERFACE_PROTOCOL_ETH:
124 		return MLX5_QCOUNTER_SETS_NETDEV;
125 	default:
126 		mlx5_core_warn(mdev, "Unknown Client: %d\n", client_id);
127 		return 0;
128 	}
129 }
130 
131 int mlx5_vport_alloc_q_counter(struct mlx5_core_dev *mdev,
132 			       int client_id, u16 *counter_set_id)
133 {
134 	u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0};
135 	u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
136 	int err;
137 
138 	if (mdev->num_q_counter_allocated[client_id] >
139 	    mlx5_vport_max_q_counter_allocator(mdev, client_id))
140 		return -EINVAL;
141 
142 	MLX5_SET(alloc_q_counter_in, in, opcode,
143 		 MLX5_CMD_OP_ALLOC_Q_COUNTER);
144 
145 	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
146 
147 	if (!err)
148 		*counter_set_id = MLX5_GET(alloc_q_counter_out, out,
149 					   counter_set_id);
150 
151 	mdev->num_q_counter_allocated[client_id]++;
152 
153 	return err;
154 }
155 
156 int mlx5_vport_dealloc_q_counter(struct mlx5_core_dev *mdev,
157 				 int client_id, u16 counter_set_id)
158 {
159 	u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {0};
160 	u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)] = {0};
161 	int err;
162 
163 	if (mdev->num_q_counter_allocated[client_id] <= 0)
164 		return -EINVAL;
165 
166 	MLX5_SET(dealloc_q_counter_in, in, opcode,
167 		 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
168 	MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
169 		 counter_set_id);
170 
171 	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
172 
173 	mdev->num_q_counter_allocated[client_id]--;
174 
175 	return err;
176 }
177 
/*
 * Read the counters of queue counter set @counter_set_id into @out
 * (@out_size bytes).  When @reset is non-zero the "clear" flag is set
 * so the counters are zeroed after being read.
 */
int mlx5_vport_query_q_counter(struct mlx5_core_dev *mdev,
				      u16 counter_set_id,
				      int reset,
				      void *out,
				      int out_size)
{
	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};

	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
	MLX5_SET(query_q_counter_in, in, clear, reset);
	MLX5_SET(query_q_counter_in, in, counter_set_id, counter_set_id);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
}
192 
193 int mlx5_vport_query_out_of_rx_buffer(struct mlx5_core_dev *mdev,
194 				      u16 counter_set_id,
195 				      u32 *out_of_rx_buffer)
196 {
197 	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {0};
198 	int err;
199 
200 	err = mlx5_vport_query_q_counter(mdev, counter_set_id, 0, out,
201 					 sizeof(out));
202 
203 	if (err)
204 		return err;
205 
206 	*out_of_rx_buffer = MLX5_GET(query_q_counter_out, out,
207 				     out_of_buffer);
208 	return err;
209 }
210 
/*
 * Read the minimum WQE inline mode configured in @vport's NIC vport
 * context into @min_inline.  Returns the command status; @min_inline
 * is only written on success.
 */
int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
				    u16 vport, u8 *min_inline)
{
	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
	int err;

	err = mlx5_query_nic_vport_context(mdev, vport, out, sizeof(out));
	if (!err)
		*min_inline = MLX5_GET(query_nic_vport_context_out, out,
				       nic_vport_context.min_wqe_inline_mode);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);
224 
225 int mlx5_query_min_inline(struct mlx5_core_dev *mdev,
226 			  u8 *min_inline_mode)
227 {
228 	int err;
229 
230 	switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
231 	case MLX5_CAP_INLINE_MODE_L2:
232 		*min_inline_mode = MLX5_INLINE_MODE_L2;
233 		err = 0;
234 		break;
235 	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
236 		err = mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode);
237 		break;
238 	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
239 		*min_inline_mode = MLX5_INLINE_MODE_NONE;
240 		err = 0;
241 		break;
242 	default:
243 		err = -EINVAL;
244 		break;
245 	}
246 	return err;
247 }
248 EXPORT_SYMBOL_GPL(mlx5_query_min_inline);
249 
/*
 * Set the minimum WQE inline mode in @vport's NIC vport context.
 * NOTE(review): unlike most helpers in this file, other_vport is set
 * unconditionally (not guarded by "if (vport)"), so calling this with
 * vport == 0 addresses vport 0 as an "other" vport — presumably this
 * is only invoked by the eswitch manager for VF vports; confirm
 * against callers before changing.
 */
int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
				     u16 vport, u8 min_inline)
{
	u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *nic_vport_ctx;

	/* select only the min_wqe_inline_mode field for modification */
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.min_wqe_inline_mode, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 min_wqe_inline_mode, min_inline);

	return mlx5_modify_nic_vport_context(mdev, in, inlen);
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_min_inline);
270 
271 int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
272 				     u16 vport, u8 *addr)
273 {
274 	u32 *out;
275 	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
276 	u8 *out_addr;
277 	int err;
278 
279 	out = mlx5_vzalloc(outlen);
280 	if (!out)
281 		return -ENOMEM;
282 
283 	out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
284 				nic_vport_context.permanent_address);
285 
286 	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
287 	if (err)
288 		goto out;
289 
290 	ether_addr_copy(addr, &out_addr[2]);
291 
292 out:
293 	kvfree(out);
294 	return err;
295 }
296 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);
297 
/*
 * Program @addr as the permanent MAC address of @vport's NIC vport
 * context.  Returns 0, -ENOMEM, or the command error.
 */
int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				      u16 vport, u8 *addr)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;
	void *nic_vport_ctx;
	u8 *perm_mac;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	/* select only the permanent_address field for modification */
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.permanent_address, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);
	perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
				permanent_address);

	/* the 6-byte MAC occupies the tail of the 8-byte field */
	ether_addr_copy(&perm_mac[2], addr);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL(mlx5_modify_nic_vport_mac_address);
334 
335 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
336 					   u64 *system_image_guid)
337 {
338 	u32 *out;
339 	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
340 	int err;
341 
342 	out = mlx5_vzalloc(outlen);
343 	if (!out)
344 		return -ENOMEM;
345 
346 	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
347 	if (err)
348 		goto out;
349 
350 	*system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
351 					nic_vport_context.system_image_guid);
352 out:
353 	kvfree(out);
354 	return err;
355 }
356 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
357 
/*
 * Read the node GUID from vport 0's NIC vport context into
 * @node_guid.  Returns 0, -ENOMEM, or the command error.
 */
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (err)
		goto out;

	*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
				nic_vport_context.node_guid);

out:
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
380 
/*
 * Read the port GUID from vport 0's NIC vport context into @port_guid.
 * Returns 0, -ENOMEM, or the command error.
 * NOTE(review): static and not referenced in this part of the file;
 * may be dead code — verify before removing.
 */
static int mlx5_query_nic_vport_port_guid(struct mlx5_core_dev *mdev,
					  u64 *port_guid)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (err)
		goto out;

	*port_guid = MLX5_GET64(query_nic_vport_context_out, out,
				nic_vport_context.port_guid);

out:
	kvfree(out);
	return err;
}
403 
/*
 * Read the Q_Key violation counter from vport 0's NIC vport context
 * into @qkey_viol_cntr.  Returns 0, -ENOMEM, or the command error.
 */
int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
					u16 *qkey_viol_cntr)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (err)
		goto out;

	*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
				nic_vport_context.qkey_violation_counter);

out:
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);
427 
/*
 * Execute MODIFY_NIC_VPORT_CONTEXT with the caller-prepared inbox
 * @in (@inlen bytes).  The opcode is stamped here, so callers only
 * need to fill field_select, vport selection and the context fields.
 */
static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
					 int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);

	return mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
}
438 
/*
 * Set the roce_en bit of the local (vport 0) NIC vport context to
 * @enable_disable (1 = enable, 0 = disable).  Returns 0, -ENOMEM, or
 * the command error.
 */
static int mlx5_nic_vport_enable_disable_roce(struct mlx5_core_dev *mdev,
					      int enable_disable)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
		 enable_disable);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
462 
/*
 * Install @addr as the single entry of @vport's allowed unicast MAC
 * list (allowed_list_size = 1, replacing any previous list).  When
 * @other_vport is true the command targets another vport on behalf of
 * the caller.  Returns 0, -ENOMEM, or the command error.
 */
int mlx5_set_nic_vport_current_mac(struct mlx5_core_dev *mdev, int vport,
				   bool other_vport, u8 *addr)
{
	void *in;
	/* inbox needs room for one mac_address_layout list entry */
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
		  + MLX5_ST_SZ_BYTES(mac_address_layout);
	u8  *mac_layout;
	u8  *mac_ptr;
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in,
		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in,
		 other_vport, other_vport);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.allowed_list_type,
		 MLX5_NIC_VPORT_LIST_TYPE_UC);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.allowed_list_size, 1);

	mac_layout = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
		nic_vport_context.current_uc_mac_address);
	mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_layout,
		mac_addr_47_32);
	ether_addr_copy(mac_ptr, addr);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_current_mac);
506 
/*
 * Set the node GUID of another vport (@vport != 0) — eswitch-manager
 * only.  Returns -EINVAL for vport 0, -EPERM without the
 * vport_group_manager capability, -ENOTSUPP when the ESW capability is
 * missing, -ENOMEM, or the command error.
 */
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
				    u32 vport, u64 node_guid)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;
	void *nic_vport_context;

	if (!vport)
		return -EINVAL;
	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
		return -EPERM;
	if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
		return -ENOTSUPP;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.node_guid, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);

	/* always an "other" vport — vport 0 was rejected above */
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
					 in, nic_vport_context);
	MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL(mlx5_modify_nic_vport_node_guid);
545 
/*
 * Set the port GUID of another vport (@vport != 0) — eswitch-manager
 * only.  Mirrors mlx5_modify_nic_vport_node_guid(); same error
 * conventions.
 */
int mlx5_modify_nic_vport_port_guid(struct mlx5_core_dev *mdev,
				    u32 vport, u64 port_guid)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;
	void *nic_vport_context;

	if (!vport)
		return -EINVAL;
	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
		return -EPERM;
	if (!MLX5_CAP_ESW(mdev, nic_vport_port_guid_modify))
		return -ENOTSUPP;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.port_guid, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);

	/* always an "other" vport — vport 0 was rejected above */
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
					 in, nic_vport_context);
	MLX5_SET64(nic_vport_context, nic_vport_context, port_guid, port_guid);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL(mlx5_modify_nic_vport_port_guid);
584 
/*
 * Replace @vport's allowed VLAN list with @vlan_list (@list_len
 * entries).  Returns -ENOSPC when @list_len exceeds the device limit,
 * -ENOMEM, or the command error.
 */
int mlx5_set_nic_vport_vlan_list(struct mlx5_core_dev *dev, u16 vport,
				 u16 *vlan_list, int list_len)
{
	void *in, *ctx;
	int i, err;
	int  inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
		+ MLX5_ST_SZ_BYTES(vlan_layout) * (int)list_len;

	int max_list_size = 1 << MLX5_CAP_GEN_MAX(dev, log_max_vlan_list);

	if (list_len > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
			       list_len, max_list_size);
		return -ENOSPC;
	}

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(dev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in,
			 other_vport, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);

	MLX5_SET(nic_vport_context, ctx, allowed_list_type,
		 MLX5_NIC_VPORT_LIST_TYPE_VLAN);
	MLX5_SET(nic_vport_context, ctx, allowed_list_size, list_len);

	/*
	 * VLAN entries are written through current_uc_mac_address[]:
	 * the allowed-list entries share one slot in the context layout
	 * regardless of list type.
	 */
	for (i = 0; i < list_len; i++) {
		u8 *vlan_lout = MLX5_ADDR_OF(nic_vport_context, ctx,
					 current_uc_mac_address[i]);
		MLX5_SET(vlan_layout, vlan_lout, vlan, vlan_list[i]);
	}

	err = mlx5_modify_nic_vport_context(dev, in, inlen);

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_vlan_list);
632 
/*
 * Replace @vport's allowed multicast MAC list with @addr_list
 * (@addr_list_len entries, each a MAC packed into a u64).  Returns
 * -ENOSPC when the list exceeds the device limit, -ENOMEM, or the
 * command error.
 */
int mlx5_set_nic_vport_mc_list(struct mlx5_core_dev *mdev, int vport,
			       u64 *addr_list, size_t addr_list_len)
{
	void *in, *ctx;
	int  inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
		  + MLX5_ST_SZ_BYTES(mac_address_layout) * (int)addr_list_len;
	int err;
	size_t i;
	int max_list_sz = 1 << MLX5_CAP_GEN_MAX(mdev, log_max_current_mc_list);

	if ((int)addr_list_len > max_list_sz) {
		mlx5_core_warn(mdev, "Requested list size (%d) > (%d) max_list_size\n",
			       (int)addr_list_len, max_list_sz);
		return -ENOSPC;
	}

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in,
			 other_vport, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);

	MLX5_SET(nic_vport_context, ctx, allowed_list_type,
		 MLX5_NIC_VPORT_LIST_TYPE_MC);
	MLX5_SET(nic_vport_context, ctx, allowed_list_size, addr_list_len);

	/* MC entries reuse the current_uc_mac_address[] layout slot */
	for (i = 0; i < addr_list_len; i++) {
		u8 *mac_lout = (u8 *)MLX5_ADDR_OF(nic_vport_context, ctx,
						  current_uc_mac_address[i]);
		u8 *mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_lout,
						 mac_addr_47_32);
		ether_addr_copy(mac_ptr, (u8 *)&addr_list[i]);
	}

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_mc_list);
683 
684 int mlx5_set_nic_vport_promisc(struct mlx5_core_dev *mdev, int vport,
685 			       bool promisc_mc, bool promisc_uc,
686 			       bool promisc_all)
687 {
688 	u8  in[MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)];
689 	u8 *ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
690 			       nic_vport_context);
691 
692 	memset(in, 0, MLX5_ST_SZ_BYTES(modify_nic_vport_context_in));
693 
694 	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
695 	if (vport)
696 		MLX5_SET(modify_nic_vport_context_in, in,
697 			 other_vport, 1);
698 	MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
699 	if (promisc_mc)
700 		MLX5_SET(nic_vport_context, ctx, promisc_mc, 1);
701 	if (promisc_uc)
702 		MLX5_SET(nic_vport_context, ctx, promisc_uc, 1);
703 	if (promisc_all)
704 		MLX5_SET(nic_vport_context, ctx, promisc_all, 1);
705 
706 	return mlx5_modify_nic_vport_context(mdev, in, sizeof(in));
707 }
708 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_promisc);
709 
/*
 * Read @vport's allowed MAC list of @list_type into @addr_list.
 * On input *@list_size is the caller's capacity; it is clamped to the
 * device maximum and on success rewritten to the number of entries
 * actually reported by firmware.  Returns 0, -ENOMEM, or the command
 * error.
 */
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
				  u16 vport,
				  enum mlx5_list_type list_type,
				  u8 addr_list[][ETH_ALEN],
				  int *list_size)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
	void *nic_vport_ctx;
	int max_list_size;
	int req_list_size;
	int out_sz;
	void *out;
	int err;
	int i;

	req_list_size = *list_size;

	max_list_size = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC) ?
			1 << MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list) :
			1 << MLX5_CAP_GEN_MAX(dev, log_max_current_mc_list);

	if (req_list_size > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
			       req_list_size, max_list_size);
		req_list_size = max_list_size;
	}

	/*
	 * NOTE(review): output buffer is sized from the *modify* inbox
	 * struct rather than query_nic_vport_context_out — presumably
	 * the modify inbox is at least as large, so this over-allocates
	 * harmlessly; confirm against the PRM before changing.
	 */
	out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		 req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				     nic_vport_context);
	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
				 allowed_list_size);

	*list_size = req_list_size;
	for (i = 0; i < req_list_size; i++) {
		/* +2 skips the pad; MAC is the tail of the 8-byte field */
		u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
					nic_vport_ctx,
					current_uc_mac_address[i]) + 2;
		ether_addr_copy(addr_list[i], mac_addr);
	}
out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);
773 
/*
 * Replace the local vport's allowed MAC list of @list_type with
 * @addr_list (@list_size entries).  Returns -ENOSPC when the list
 * exceeds the device limit, -ENOMEM, or the command error.
 */
int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
				   enum mlx5_list_type list_type,
				   u8 addr_list[][ETH_ALEN],
				   int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	max_list_size = list_type == MLX5_NIC_VPORT_LIST_TYPE_UC ?
		 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
		 1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, list_type);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		/* +2 skips the pad; MAC is the tail of the 8-byte field */
		u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context,
					    nic_vport_ctx,
					    current_uc_mac_address[i]) + 2;
		ether_addr_copy(curr_mac, addr_list[i]);
	}

	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);
826 
/*
 * Read @vport's allowed VLAN list into @vlans.  On input *@size is the
 * caller's capacity; it is clamped to the device maximum and on
 * success rewritten to the number of entries reported by firmware.
 * Returns 0, -ENOMEM, or the command error.
 */
int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
			       u16 vport,
			       u16 vlans[],
			       int *size)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
	void *nic_vport_ctx;
	int req_list_size;
	int max_list_size;
	int out_sz;
	void *out;
	int err;
	int i;

	req_list_size = *size;
	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
	if (req_list_size > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max list size\n",
			       req_list_size, max_list_size);
		req_list_size = max_list_size;
	}

	/*
	 * NOTE(review): output buffer is sized from the *modify* inbox
	 * struct rather than query_nic_vport_context_out — presumably
	 * over-allocates harmlessly; confirm against the PRM.
	 */
	out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		 req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);

	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
		 MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_VLAN_LIST);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				     nic_vport_context);
	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
				 allowed_list_size);

	*size = req_list_size;
	for (i = 0; i < req_list_size; i++) {
		/* VLAN entries share the current_uc_mac_address[] slot */
		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
					       nic_vport_ctx,
					 current_uc_mac_address[i]);
		vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
	}
out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans);
886 
/*
 * Replace the local vport's allowed VLAN list with @vlans
 * (@list_size entries).  Returns -ENOSPC when the list exceeds the
 * device limit, -ENOMEM, or the command error.
 */
int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
				u16 vlans[],
				int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(vlan_layout);

	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, MLX5_NIC_VPORT_LIST_TYPE_VLAN);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		/* VLAN entries share the current_uc_mac_address[] slot */
		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
					       nic_vport_ctx,
					       current_uc_mac_address[i]);
		MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
	}

	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);
936 
937 int mlx5_query_nic_vport_roce_en(struct mlx5_core_dev *mdev, u8 *enable)
938 {
939 	u32 *out;
940 	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
941 	int err;
942 
943 	out = kzalloc(outlen, GFP_KERNEL);
944 	if (!out)
945 		return -ENOMEM;
946 
947 	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
948 	if (err)
949 		goto out;
950 
951 	*enable = MLX5_GET(query_nic_vport_context_out, out,
952 				nic_vport_context.roce_en);
953 
954 out:
955 	kfree(out);
956 	return err;
957 }
958 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_roce_en);
959 
/*
 * Program @addr as the permanent MAC of another vport.  Unlike
 * mlx5_modify_nic_vport_mac_address(), other_vport is set
 * unconditionally, so this always targets @vport as an "other" vport.
 * Returns 0, -ENOMEM, or the command error.
 */
int mlx5_set_nic_vport_permanent_mac(struct mlx5_core_dev *mdev, int vport,
				     u8 *addr)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	u8  *mac_ptr;
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in,
		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.permanent_address, 1);
	/* mac_addr_47_32 addresses the MAC within the 8-byte field */
	mac_ptr = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
		nic_vport_context.permanent_address.mac_addr_47_32);
	ether_addr_copy(mac_ptr, addr);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_permanent_mac);
991 
/* Enable RoCE on the local NIC vport context. */
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
{
	return mlx5_nic_vport_enable_disable_roce(mdev, 1);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
997 
/* Disable RoCE on the local NIC vport context. */
int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
{
	return mlx5_nic_vport_enable_disable_roce(mdev, 0);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
1003 
/*
 * Run QUERY_VPORT_COUNTER and copy the response to @out (@out_sz
 * bytes).  When @other_vport is set the target is VF index @vf
 * (vport @vf + 1), which requires the vport_group_manager capability
 * (-EPERM otherwise).  @port_num is only meaningful on dual-port
 * devices.  Returns 0, -ENOMEM, -EPERM, or the command error.
 */
int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
				  int vf, u8 port_num, void *out,
				  size_t out_sz)
{
	int	in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
	int	is_group_manager;
	void   *in;
	int	err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	in = mlx5_vzalloc(in_sz);
	if (!in) {
		err = -ENOMEM;
		return err;
	}

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_vport_counter_in, in, other_vport, 1);
			/* VF index is 0-based; vport numbers start at 1 */
			MLX5_SET(query_vport_counter_in, in, vport_number, vf + 1);
		} else {
			err = -EPERM;
			goto free;
		}
	}
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_vport_counter_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out,  out_sz);
free:
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);
1040 
/*
 * Query the HCA vport context of @vport_num on @port_num into @out.
 *
 * A non-zero @vport_num targets another vport (other_vport=1) and is
 * only permitted for a vport group manager; otherwise -EPERM.
 * port_num is only set in the command on dual-port devices.
 *
 * Returns 0 on success or a negative errno.
 */
int mlx5_query_hca_vport_context(struct mlx5_core_dev *mdev,
				 u8 port_num, u8 vport_num, u32 *out,
				 int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {0};
	int is_group_manager;

	is_group_manager = MLX5_CAP_GEN(mdev, vport_group_manager);

	MLX5_SET(query_hca_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);

	if (vport_num) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_context_in, in, other_vport,
				 1);
			MLX5_SET(query_hca_vport_context_in, in, vport_number,
				 vport_num);
		} else {
			return -EPERM;
		}
	}

	if (MLX5_CAP_GEN(mdev, num_ports) == 2)
		MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}
1069 
1070 int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *mdev,
1071 					   u64 *system_image_guid)
1072 {
1073 	u32 *out;
1074 	int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1075 	int err;
1076 
1077 	out = mlx5_vzalloc(outlen);
1078 	if (!out)
1079 		return -ENOMEM;
1080 
1081 	err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1082 	if (err)
1083 		goto out;
1084 
1085 	*system_image_guid = MLX5_GET64(query_hca_vport_context_out, out,
1086 					hca_vport_context.system_image_guid);
1087 
1088 out:
1089 	kvfree(out);
1090 	return err;
1091 }
1092 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
1093 
1094 int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
1095 {
1096 	u32 *out;
1097 	int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1098 	int err;
1099 
1100 	out = mlx5_vzalloc(outlen);
1101 	if (!out)
1102 		return -ENOMEM;
1103 
1104 	err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1105 	if (err)
1106 		goto out;
1107 
1108 	*node_guid = MLX5_GET64(query_hca_vport_context_out, out,
1109 				hca_vport_context.node_guid);
1110 
1111 out:
1112 	kvfree(out);
1113 	return err;
1114 }
1115 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
1116 
1117 static int mlx5_query_hca_vport_port_guid(struct mlx5_core_dev *mdev,
1118 					  u64 *port_guid)
1119 {
1120 	u32 *out;
1121 	int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1122 	int err;
1123 
1124 	out = mlx5_vzalloc(outlen);
1125 	if (!out)
1126 		return -ENOMEM;
1127 
1128 	err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1129 	if (err)
1130 		goto out;
1131 
1132 	*port_guid = MLX5_GET64(query_hca_vport_context_out, out,
1133 				hca_vport_context.port_guid);
1134 
1135 out:
1136 	kvfree(out);
1137 	return err;
1138 }
1139 
/*
 * Query a GID (or, with gid_index == 0xffff, the whole GID table) of an
 * HCA vport. Only a single GID — the first entry of the returned data —
 * is copied into @gid; the full-table query merely sizes the output
 * buffer accordingly.
 *
 * A non-zero @vport_num targets another vport and requires the
 * vport_group_manager capability (-EPERM otherwise).
 *
 * Returns 0 on success or a negative errno.
 */
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 port_num,
			     u16 vport_num, u16 gid_index, union ib_gid *gid)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	union ib_gid *tmp;
	int tbsz;
	int nout;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));

	/* NOTE(review): `>` admits gid_index == tbsz even though valid
	 * indices look 0-based; confirm against firmware semantics. */
	if (gid_index > tbsz && gid_index != 0xffff)
		return -EINVAL;

	/* 0xffff is the "query entire table" wildcard. */
	if (gid_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	out_sz += nout * sizeof(*gid);

	in = mlx5_vzalloc(in_sz);
	out = mlx5_vzalloc(out_sz);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_gid_in, in, opcode,
		 MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
	if (vport_num) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_gid_in, in, vport_number,
				 vport_num);
			MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}

	MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	/* Copy only the first returned GID entry to the caller. */
	tmp = (union ib_gid *)MLX5_ADDR_OF(query_hca_vport_gid_out, out, gid);
	gid->global.subnet_prefix = tmp->global.subnet_prefix;
	gid->global.interface_id = tmp->global.interface_id;

out:
	kvfree(in);
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
1205 
1206 int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
1207 			      u8 port_num, u16 vf_num, u16 pkey_index,
1208 			      u16 *pkey)
1209 {
1210 	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
1211 	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
1212 	int is_group_manager;
1213 	void *out = NULL;
1214 	void *in = NULL;
1215 	void *pkarr;
1216 	int nout;
1217 	int tbsz;
1218 	int err;
1219 	int i;
1220 
1221 	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
1222 
1223 	tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
1224 	if (pkey_index > tbsz && pkey_index != 0xffff)
1225 		return -EINVAL;
1226 
1227 	if (pkey_index == 0xffff)
1228 		nout = tbsz;
1229 	else
1230 		nout = 1;
1231 
1232 	out_sz += nout * MLX5_ST_SZ_BYTES(pkey);
1233 
1234 	in = kzalloc(in_sz, GFP_KERNEL);
1235 	out = kzalloc(out_sz, GFP_KERNEL);
1236 
1237 	MLX5_SET(query_hca_vport_pkey_in, in, opcode,
1238 		 MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
1239 	if (other_vport) {
1240 		if (is_group_manager) {
1241 			MLX5_SET(query_hca_vport_pkey_in, in, vport_number,
1242 				 vf_num);
1243 			MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
1244 		} else {
1245 			err = -EPERM;
1246 			goto out;
1247 		}
1248 	}
1249 	MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);
1250 
1251 	if (MLX5_CAP_GEN(dev, num_ports) == 2)
1252 		MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);
1253 
1254 	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
1255 	if (err)
1256 		goto out;
1257 
1258 	pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
1259 	for (i = 0; i < nout; i++, pkey++,
1260 	     pkarr += MLX5_ST_SZ_BYTES(pkey))
1261 		*pkey = MLX5_GET_PR(pkey, pkarr, pkey);
1262 
1263 out:
1264 	kfree(in);
1265 	kfree(out);
1266 	return err;
1267 }
1268 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
1269 
1270 static int mlx5_query_hca_min_wqe_header(struct mlx5_core_dev *mdev,
1271 					 int *min_header)
1272 {
1273 	u32 *out;
1274 	u32 outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1275 	int err;
1276 
1277 	out = mlx5_vzalloc(outlen);
1278 	if (!out)
1279 		return -ENOMEM;
1280 
1281 	err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1282 	if (err)
1283 		goto out;
1284 
1285 	*min_header = MLX5_GET(query_hca_vport_context_out, out,
1286 			       hca_vport_context.min_wqe_inline_mode);
1287 
1288 out:
1289 	kvfree(out);
1290 	return err;
1291 }
1292 
/*
 * Execute MODIFY_ESW_VPORT_CONTEXT for @vport using the caller-prepared
 * command buffer @in (opcode and vport fields are filled in here).
 * A non-zero @vport sets other_vport. Returns 0 or a negative errno.
 */
static int mlx5_modify_eswitch_vport_context(struct mlx5_core_dev *mdev,
					     u16 vport, void *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};
	int err;

	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);

	MLX5_SET(modify_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);

	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
	if (err)
		mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT failed\n");

	return err;
}
1312 
1313 int mlx5_set_eswitch_cvlan_info(struct mlx5_core_dev *mdev, u8 vport,
1314 				u8 insert_mode, u8 strip_mode,
1315 				u16 vlan, u8 cfi, u8 pcp)
1316 {
1317 	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)];
1318 
1319 	memset(in, 0, sizeof(in));
1320 
1321 	if (insert_mode != MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_NONE) {
1322 		MLX5_SET(modify_esw_vport_context_in, in,
1323 			 esw_vport_context.cvlan_cfi, cfi);
1324 		MLX5_SET(modify_esw_vport_context_in, in,
1325 			 esw_vport_context.cvlan_pcp, pcp);
1326 		MLX5_SET(modify_esw_vport_context_in, in,
1327 			 esw_vport_context.cvlan_id, vlan);
1328 	}
1329 
1330 	MLX5_SET(modify_esw_vport_context_in, in,
1331 		 esw_vport_context.vport_cvlan_insert, insert_mode);
1332 
1333 	MLX5_SET(modify_esw_vport_context_in, in,
1334 		 esw_vport_context.vport_cvlan_strip, strip_mode);
1335 
1336 	MLX5_SET(modify_esw_vport_context_in, in, field_select,
1337 		 MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_STRIP |
1338 		 MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_INSERT);
1339 
1340 	return mlx5_modify_eswitch_vport_context(mdev, vport, in, sizeof(in));
1341 }
1342 EXPORT_SYMBOL_GPL(mlx5_set_eswitch_cvlan_info);
1343 
1344 int mlx5_query_vport_mtu(struct mlx5_core_dev *mdev, int *mtu)
1345 {
1346 	u32 *out;
1347 	u32 outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1348 	int err;
1349 
1350 	out = mlx5_vzalloc(outlen);
1351 	if (!out)
1352 		return -ENOMEM;
1353 
1354 	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
1355 	if (err)
1356 		goto out;
1357 
1358 	*mtu = MLX5_GET(query_nic_vport_context_out, out,
1359 			nic_vport_context.mtu);
1360 
1361 out:
1362 	kvfree(out);
1363 	return err;
1364 }
1365 EXPORT_SYMBOL_GPL(mlx5_query_vport_mtu);
1366 
1367 int mlx5_set_vport_mtu(struct mlx5_core_dev *mdev, int mtu)
1368 {
1369 	u32 *in;
1370 	u32 inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1371 	int err;
1372 
1373 	in = mlx5_vzalloc(inlen);
1374 	if (!in)
1375 		return -ENOMEM;
1376 
1377 	MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
1378 	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
1379 
1380 	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1381 
1382 	kvfree(in);
1383 	return err;
1384 }
1385 EXPORT_SYMBOL_GPL(mlx5_set_vport_mtu);
1386 
1387 static int mlx5_query_vport_min_wqe_header(struct mlx5_core_dev *mdev,
1388 					   int *min_header)
1389 {
1390 	u32 *out;
1391 	u32 outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1392 	int err;
1393 
1394 	out = mlx5_vzalloc(outlen);
1395 	if (!out)
1396 		return -ENOMEM;
1397 
1398 	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
1399 	if (err)
1400 		goto out;
1401 
1402 	*min_header = MLX5_GET(query_nic_vport_context_out, out,
1403 			       nic_vport_context.min_wqe_inline_mode);
1404 
1405 out:
1406 	kvfree(out);
1407 	return err;
1408 }
1409 
1410 int mlx5_set_vport_min_wqe_header(struct mlx5_core_dev *mdev,
1411 				  u8 vport, int min_header)
1412 {
1413 	u32 *in;
1414 	u32 inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1415 	int err;
1416 
1417 	in = mlx5_vzalloc(inlen);
1418 	if (!in)
1419 		return -ENOMEM;
1420 
1421 	MLX5_SET(modify_nic_vport_context_in, in,
1422 		 field_select.min_wqe_inline_mode, 1);
1423 	MLX5_SET(modify_nic_vport_context_in, in,
1424 		 nic_vport_context.min_wqe_inline_mode, min_header);
1425 	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
1426 	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
1427 
1428 	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1429 
1430 	kvfree(in);
1431 	return err;
1432 }
1433 EXPORT_SYMBOL_GPL(mlx5_set_vport_min_wqe_header);
1434 
1435 int mlx5_query_min_wqe_header(struct mlx5_core_dev *dev, int *min_header)
1436 {
1437 	switch (MLX5_CAP_GEN(dev, port_type)) {
1438 	case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1439 		return mlx5_query_hca_min_wqe_header(dev, min_header);
1440 
1441 	case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1442 		return mlx5_query_vport_min_wqe_header(dev, min_header);
1443 
1444 	default:
1445 		return -EINVAL;
1446 	}
1447 }
1448 EXPORT_SYMBOL_GPL(mlx5_query_min_wqe_header);
1449 
/*
 * Read the three promiscuous-mode flags (unicast, multicast, all)
 * from @vport's NIC vport context. Returns 0 or a negative errno;
 * the output parameters are only valid on success.
 */
int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
				 u16 vport,
				 int *promisc_uc,
				 int *promisc_mc,
				 int *promisc_all)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
	if (err)
		goto out;

	*promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
			       nic_vport_context.promisc_uc);
	*promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
			       nic_vport_context.promisc_mc);
	*promisc_all = MLX5_GET(query_nic_vport_context_out, out,
				nic_vport_context.promisc_all);

out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);
1480 
/*
 * Program the three promiscuous-mode flags (unicast, multicast, all)
 * into this device's own NIC vport context in a single command.
 * Returns 0 or a negative errno.
 */
int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
				  int promisc_uc,
				  int promisc_mc,
				  int promisc_all)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_err(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_uc, promisc_uc);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_mc, promisc_mc);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.promisc_all, promisc_all);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);
1509 
/*
 * Set one of the local-loopback disable bits on vport 0.
 *
 * @selection: MLX5_LOCAL_MC_LB updates disable_mc_local_lb; any other
 *             value updates disable_uc_local_lb.
 * @value:     new value for the selected disable bit.
 *
 * Returns 0 or a negative errno.
 */
int mlx5_nic_vport_modify_local_lb(struct mlx5_core_dev *mdev,
				   enum mlx5_local_lb_selection selection,
				   u8 value)
{
	void *in;
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in, vport_number, 0);

	if (selection == MLX5_LOCAL_MC_LB) {
		MLX5_SET(modify_nic_vport_context_in, in,
			 field_select.disable_mc_local_lb, 1);
		MLX5_SET(modify_nic_vport_context_in, in,
			 nic_vport_context.disable_mc_local_lb,
			 value);
	} else {
		MLX5_SET(modify_nic_vport_context_in, in,
			 field_select.disable_uc_local_lb, 1);
		MLX5_SET(modify_nic_vport_context_in, in,
			 nic_vport_context.disable_uc_local_lb,
			 value);
	}

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_modify_local_lb);
1546 
1547 int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev,
1548 				  enum mlx5_local_lb_selection selection,
1549 				  u8 *value)
1550 {
1551 	void *out;
1552 	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1553 	int err;
1554 
1555 	out = kzalloc(outlen, GFP_KERNEL);
1556 	if (!out)
1557 		return -ENOMEM;
1558 
1559 	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
1560 	if (err)
1561 		goto done;
1562 
1563 	if (selection == MLX5_LOCAL_MC_LB)
1564 		*value = MLX5_GET(query_nic_vport_context_out, out,
1565 				  nic_vport_context.disable_mc_local_lb);
1566 	else
1567 		*value = MLX5_GET(query_nic_vport_context_out, out,
1568 				  nic_vport_context.disable_uc_local_lb);
1569 
1570 done:
1571 	kfree(out);
1572 	return err;
1573 }
1574 EXPORT_SYMBOL_GPL(mlx5_nic_vport_query_local_lb);
1575 
/*
 * Query the traffic counters of @vport_num on @port_num.
 *
 * A non-zero @vport_num targets another vport (other_vport=1) and is
 * only permitted for a vport group manager (-EPERM otherwise).
 * Unlike mlx5_core_query_vport_counter(), the vport number is used
 * as-is (no VF-index translation).
 *
 * Returns 0 or a negative errno.
 */
int mlx5_query_vport_counter(struct mlx5_core_dev *dev,
			     u8 port_num, u16 vport_num,
			     void *out, int out_size)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
	int is_group_manager;
	void *in;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	in = mlx5_vzalloc(in_sz);
	if (!in)
		return -ENOMEM;

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	if (vport_num) {
		if (is_group_manager) {
			MLX5_SET(query_vport_counter_in, in, other_vport, 1);
			MLX5_SET(query_vport_counter_in, in, vport_number,
				 vport_num);
		} else {
			err = -EPERM;
			goto ex;
		}
	}
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_vport_counter_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out,  out_size);

ex:
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_counter);
1613 
/*
 * Query this device's own vport counters (vport 0) on @port_num and
 * unpack every packet/octet pair from the firmware layout into the
 * caller's struct mlx5_vport_counters. Returns 0 or a negative errno;
 * @vc is only valid on success.
 */
int mlx5_get_vport_counters(struct mlx5_core_dev *dev, u8 port_num,
			    struct mlx5_vport_counters *vc)
{
	int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	void *out;
	int err;

	out = mlx5_vzalloc(out_sz);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_vport_counter(dev, port_num, 0, out, out_sz);
	if (err)
		goto ex;

	vc->received_errors.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_errors.packets);
	vc->received_errors.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_errors.octets);
	vc->transmit_errors.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmit_errors.packets);
	vc->transmit_errors.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmit_errors.octets);
	vc->received_ib_unicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_ib_unicast.packets);
	vc->received_ib_unicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_ib_unicast.octets);
	vc->transmitted_ib_unicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_ib_unicast.packets);
	vc->transmitted_ib_unicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_ib_unicast.octets);
	vc->received_ib_multicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_ib_multicast.packets);
	vc->received_ib_multicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_ib_multicast.octets);
	vc->transmitted_ib_multicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_ib_multicast.packets);
	vc->transmitted_ib_multicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_ib_multicast.octets);
	vc->received_eth_broadcast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_eth_broadcast.packets);
	vc->received_eth_broadcast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_eth_broadcast.octets);
	vc->transmitted_eth_broadcast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_eth_broadcast.packets);
	vc->transmitted_eth_broadcast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_eth_broadcast.octets);
	vc->received_eth_unicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_eth_unicast.octets);
	vc->received_eth_unicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_eth_unicast.packets);
	vc->transmitted_eth_unicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_eth_unicast.octets);
	vc->transmitted_eth_unicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_eth_unicast.packets);
	vc->received_eth_multicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_eth_multicast.octets);
	vc->received_eth_multicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_eth_multicast.packets);
	vc->transmitted_eth_multicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_eth_multicast.octets);
	vc->transmitted_eth_multicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_eth_multicast.packets);

ex:
	kvfree(out);
	return err;
}
1706 
1707 int mlx5_query_vport_system_image_guid(struct mlx5_core_dev *dev,
1708 				       u64 *sys_image_guid)
1709 {
1710 	switch (MLX5_CAP_GEN(dev, port_type)) {
1711 	case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1712 		return mlx5_query_hca_vport_system_image_guid(dev,
1713 							      sys_image_guid);
1714 
1715 	case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1716 		return mlx5_query_nic_vport_system_image_guid(dev,
1717 							      sys_image_guid);
1718 
1719 	default:
1720 		return -EINVAL;
1721 	}
1722 }
1723 EXPORT_SYMBOL_GPL(mlx5_query_vport_system_image_guid);
1724 
1725 int mlx5_query_vport_node_guid(struct mlx5_core_dev *dev, u64 *node_guid)
1726 {
1727 	switch (MLX5_CAP_GEN(dev, port_type)) {
1728 	case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1729 		return mlx5_query_hca_vport_node_guid(dev, node_guid);
1730 
1731 	case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1732 		return mlx5_query_nic_vport_node_guid(dev, node_guid);
1733 
1734 	default:
1735 		return -EINVAL;
1736 	}
1737 }
1738 EXPORT_SYMBOL_GPL(mlx5_query_vport_node_guid);
1739 
1740 int mlx5_query_vport_port_guid(struct mlx5_core_dev *dev, u64 *port_guid)
1741 {
1742 	switch (MLX5_CAP_GEN(dev, port_type)) {
1743 	case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1744 		return mlx5_query_hca_vport_port_guid(dev, port_guid);
1745 
1746 	case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1747 		return mlx5_query_nic_vport_port_guid(dev, port_guid);
1748 
1749 	default:
1750 		return -EINVAL;
1751 	}
1752 }
1753 EXPORT_SYMBOL_GPL(mlx5_query_vport_port_guid);
1754 
1755 int mlx5_query_hca_vport_state(struct mlx5_core_dev *dev, u8 *vport_state)
1756 {
1757 	u32 *out;
1758 	int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1759 	int err;
1760 
1761 	out = mlx5_vzalloc(outlen);
1762 	if (!out)
1763 		return -ENOMEM;
1764 
1765 	err = mlx5_query_hca_vport_context(dev, 1, 0, out, outlen);
1766 	if (err)
1767 		goto out;
1768 
1769 	*vport_state = MLX5_GET(query_hca_vport_context_out, out,
1770 				hca_vport_context.vport_state);
1771 
1772 out:
1773 	kvfree(out);
1774 	return err;
1775 }
1776 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_state);
1777 
1778 int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
1779 			     u8 port_num, void *out, size_t sz)
1780 {
1781 	u32 *in;
1782 	int err;
1783 
1784 	in  = mlx5_vzalloc(sz);
1785 	if (!in) {
1786 		err = -ENOMEM;
1787 		return err;
1788 	}
1789 
1790 	MLX5_SET(ppcnt_reg, in, local_port, port_num);
1791 
1792 	MLX5_SET(ppcnt_reg, in, grp, MLX5_INFINIBAND_PORT_COUNTERS_GROUP);
1793 	err = mlx5_core_access_reg(dev, in, sz, out,
1794 				   sz, MLX5_REG_PPCNT, 0, 0);
1795 
1796 	kvfree(in);
1797 	return err;
1798 }
1799