xref: /freebsd/sys/dev/mlx5/mlx5_core/mlx5_vport.c (revision 780fb4a2)
1 /*-
2  * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $FreeBSD$
26  */
27 
28 #include <linux/etherdevice.h>
29 #include <dev/mlx5/driver.h>
30 #include <dev/mlx5/vport.h>
31 #include "mlx5_core.h"
32 
33 static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
34 					 int inlen);
35 
36 static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
37 				   u16 vport, u32 *out, int outlen)
38 {
39 	int err;
40 	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {0};
41 
42 	MLX5_SET(query_vport_state_in, in, opcode,
43 		 MLX5_CMD_OP_QUERY_VPORT_STATE);
44 	MLX5_SET(query_vport_state_in, in, op_mod, opmod);
45 	MLX5_SET(query_vport_state_in, in, vport_number, vport);
46 	if (vport)
47 		MLX5_SET(query_vport_state_in, in, other_vport, 1);
48 
49 	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
50 	if (err)
51 		mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");
52 
53 	return err;
54 }
55 
56 u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
57 {
58 	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};
59 
60 	_mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));
61 
62 	return MLX5_GET(query_vport_state_out, out, state);
63 }
64 EXPORT_SYMBOL_GPL(mlx5_query_vport_state);
65 
66 u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
67 {
68 	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};
69 
70 	_mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));
71 
72 	return MLX5_GET(query_vport_state_out, out, admin_state);
73 }
74 EXPORT_SYMBOL(mlx5_query_vport_admin_state);
75 
76 int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
77 				  u16 vport, u8 state)
78 {
79 	u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {0};
80 	u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)] = {0};
81 	int err;
82 
83 	MLX5_SET(modify_vport_state_in, in, opcode,
84 		 MLX5_CMD_OP_MODIFY_VPORT_STATE);
85 	MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
86 	MLX5_SET(modify_vport_state_in, in, vport_number, vport);
87 
88 	if (vport)
89 		MLX5_SET(modify_vport_state_in, in, other_vport, 1);
90 
91 	MLX5_SET(modify_vport_state_in, in, admin_state, state);
92 
93 	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
94 	if (err)
95 		mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_VPORT_STATE failed\n");
96 
97 	return err;
98 }
99 EXPORT_SYMBOL(mlx5_modify_vport_admin_state);
100 
101 static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
102 					u32 *out, int outlen)
103 {
104 	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
105 
106 	MLX5_SET(query_nic_vport_context_in, in, opcode,
107 		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
108 
109 	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
110 	if (vport)
111 		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
112 
113 	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
114 }
115 
116 static u32 mlx5_vport_max_q_counter_allocator(struct mlx5_core_dev *mdev,
117 					      int client_id)
118 {
119 	switch (client_id) {
120 	case MLX5_INTERFACE_PROTOCOL_IB:
121 		return (MLX5_CAP_GEN(mdev, max_qp_cnt) -
122 			MLX5_QCOUNTER_SETS_NETDEV);
123 	case MLX5_INTERFACE_PROTOCOL_ETH:
124 		return MLX5_QCOUNTER_SETS_NETDEV;
125 	default:
126 		mlx5_core_warn(mdev, "Unknown Client: %d\n", client_id);
127 		return 0;
128 	}
129 }
130 
131 int mlx5_vport_alloc_q_counter(struct mlx5_core_dev *mdev,
132 			       int client_id, u16 *counter_set_id)
133 {
134 	u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0};
135 	u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
136 	int err;
137 
138 	if (mdev->num_q_counter_allocated[client_id] >
139 	    mlx5_vport_max_q_counter_allocator(mdev, client_id))
140 		return -EINVAL;
141 
142 	MLX5_SET(alloc_q_counter_in, in, opcode,
143 		 MLX5_CMD_OP_ALLOC_Q_COUNTER);
144 
145 	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
146 
147 	if (!err)
148 		*counter_set_id = MLX5_GET(alloc_q_counter_out, out,
149 					   counter_set_id);
150 
151 	mdev->num_q_counter_allocated[client_id]++;
152 
153 	return err;
154 }
155 
156 int mlx5_vport_dealloc_q_counter(struct mlx5_core_dev *mdev,
157 				 int client_id, u16 counter_set_id)
158 {
159 	u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {0};
160 	u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)] = {0};
161 	int err;
162 
163 	if (mdev->num_q_counter_allocated[client_id] <= 0)
164 		return -EINVAL;
165 
166 	MLX5_SET(dealloc_q_counter_in, in, opcode,
167 		 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
168 	MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
169 		 counter_set_id);
170 
171 	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
172 
173 	mdev->num_q_counter_allocated[client_id]--;
174 
175 	return err;
176 }
177 
178 int mlx5_vport_query_q_counter(struct mlx5_core_dev *mdev,
179 				      u16 counter_set_id,
180 				      int reset,
181 				      void *out,
182 				      int out_size)
183 {
184 	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};
185 
186 	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
187 	MLX5_SET(query_q_counter_in, in, clear, reset);
188 	MLX5_SET(query_q_counter_in, in, counter_set_id, counter_set_id);
189 
190 	return mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size);
191 }
192 
193 int mlx5_vport_query_out_of_rx_buffer(struct mlx5_core_dev *mdev,
194 				      u16 counter_set_id,
195 				      u32 *out_of_rx_buffer)
196 {
197 	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {0};
198 	int err;
199 
200 	err = mlx5_vport_query_q_counter(mdev, counter_set_id, 0, out,
201 					 sizeof(out));
202 
203 	if (err)
204 		return err;
205 
206 	*out_of_rx_buffer = MLX5_GET(query_q_counter_out, out,
207 				     out_of_buffer);
208 	return err;
209 }
210 
211 int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
212 				     u16 vport, u8 *addr)
213 {
214 	u32 *out;
215 	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
216 	u8 *out_addr;
217 	int err;
218 
219 	out = mlx5_vzalloc(outlen);
220 	if (!out)
221 		return -ENOMEM;
222 
223 	out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
224 				nic_vport_context.permanent_address);
225 
226 	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
227 	if (err)
228 		goto out;
229 
230 	ether_addr_copy(addr, &out_addr[2]);
231 
232 out:
233 	kvfree(out);
234 	return err;
235 }
236 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);
237 
238 int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
239 				      u16 vport, u8 *addr)
240 {
241 	void *in;
242 	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
243 	int err;
244 	void *nic_vport_ctx;
245 	u8 *perm_mac;
246 
247 	in = mlx5_vzalloc(inlen);
248 	if (!in) {
249 		mlx5_core_warn(mdev, "failed to allocate inbox\n");
250 		return -ENOMEM;
251 	}
252 
253 	MLX5_SET(modify_nic_vport_context_in, in,
254 		 field_select.permanent_address, 1);
255 	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
256 
257 	if (vport)
258 		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
259 
260 	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
261 				     in, nic_vport_context);
262 	perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
263 				permanent_address);
264 
265 	ether_addr_copy(&perm_mac[2], addr);
266 
267 	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
268 
269 	kvfree(in);
270 
271 	return err;
272 }
273 EXPORT_SYMBOL(mlx5_modify_nic_vport_mac_address);
274 
275 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
276 					   u64 *system_image_guid)
277 {
278 	u32 *out;
279 	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
280 	int err;
281 
282 	out = mlx5_vzalloc(outlen);
283 	if (!out)
284 		return -ENOMEM;
285 
286 	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
287 	if (err)
288 		goto out;
289 
290 	*system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
291 					nic_vport_context.system_image_guid);
292 out:
293 	kvfree(out);
294 	return err;
295 }
296 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
297 
298 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
299 {
300 	u32 *out;
301 	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
302 	int err;
303 
304 	out = mlx5_vzalloc(outlen);
305 	if (!out)
306 		return -ENOMEM;
307 
308 	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
309 	if (err)
310 		goto out;
311 
312 	*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
313 				nic_vport_context.node_guid);
314 
315 out:
316 	kvfree(out);
317 	return err;
318 }
319 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
320 
321 static int mlx5_query_nic_vport_port_guid(struct mlx5_core_dev *mdev,
322 					  u64 *port_guid)
323 {
324 	u32 *out;
325 	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
326 	int err;
327 
328 	out = mlx5_vzalloc(outlen);
329 	if (!out)
330 		return -ENOMEM;
331 
332 	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
333 	if (err)
334 		goto out;
335 
336 	*port_guid = MLX5_GET64(query_nic_vport_context_out, out,
337 				nic_vport_context.port_guid);
338 
339 out:
340 	kvfree(out);
341 	return err;
342 }
343 
344 int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
345 					u16 *qkey_viol_cntr)
346 {
347 	u32 *out;
348 	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
349 	int err;
350 
351 	out = mlx5_vzalloc(outlen);
352 	if (!out)
353 		return -ENOMEM;
354 
355 	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
356 	if (err)
357 		goto out;
358 
359 	*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
360 				nic_vport_context.qkey_violation_counter);
361 
362 out:
363 	kvfree(out);
364 	return err;
365 }
366 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);
367 
368 static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
369 					 int inlen)
370 {
371 	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
372 
373 	MLX5_SET(modify_nic_vport_context_in, in, opcode,
374 		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
375 
376 	return mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
377 }
378 
379 static int mlx5_nic_vport_enable_disable_roce(struct mlx5_core_dev *mdev,
380 					      int enable_disable)
381 {
382 	void *in;
383 	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
384 	int err;
385 
386 	in = mlx5_vzalloc(inlen);
387 	if (!in) {
388 		mlx5_core_warn(mdev, "failed to allocate inbox\n");
389 		return -ENOMEM;
390 	}
391 
392 	MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
393 	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
394 		 enable_disable);
395 
396 	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
397 
398 	kvfree(in);
399 
400 	return err;
401 }
402 
403 int mlx5_set_nic_vport_current_mac(struct mlx5_core_dev *mdev, int vport,
404 				   bool other_vport, u8 *addr)
405 {
406 	void *in;
407 	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
408 		  + MLX5_ST_SZ_BYTES(mac_address_layout);
409 	u8  *mac_layout;
410 	u8  *mac_ptr;
411 	int err;
412 
413 	in = mlx5_vzalloc(inlen);
414 	if (!in) {
415 		mlx5_core_warn(mdev, "failed to allocate inbox\n");
416 		return -ENOMEM;
417 	}
418 
419 	MLX5_SET(modify_nic_vport_context_in, in,
420 		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
421 	MLX5_SET(modify_nic_vport_context_in, in,
422 		 vport_number, vport);
423 	MLX5_SET(modify_nic_vport_context_in, in,
424 		 other_vport, other_vport);
425 	MLX5_SET(modify_nic_vport_context_in, in,
426 		 field_select.addresses_list, 1);
427 	MLX5_SET(modify_nic_vport_context_in, in,
428 		 nic_vport_context.allowed_list_type,
429 		 MLX5_NIC_VPORT_LIST_TYPE_UC);
430 	MLX5_SET(modify_nic_vport_context_in, in,
431 		 nic_vport_context.allowed_list_size, 1);
432 
433 	mac_layout = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
434 		nic_vport_context.current_uc_mac_address);
435 	mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_layout,
436 		mac_addr_47_32);
437 	ether_addr_copy(mac_ptr, addr);
438 
439 	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
440 
441 	kvfree(in);
442 
443 	return err;
444 }
445 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_current_mac);
446 
447 int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
448 				    u32 vport, u64 node_guid)
449 {
450 	void *in;
451 	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
452 	int err;
453 	void *nic_vport_context;
454 
455 	if (!vport)
456 		return -EINVAL;
457 	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
458 		return -EPERM;
459 	if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
460 		return -ENOTSUPP;
461 
462 	in = mlx5_vzalloc(inlen);
463 	if (!in) {
464 		mlx5_core_warn(mdev, "failed to allocate inbox\n");
465 		return -ENOMEM;
466 	}
467 
468 	MLX5_SET(modify_nic_vport_context_in, in,
469 		 field_select.node_guid, 1);
470 	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
471 
472 	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
473 
474 	nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
475 					 in, nic_vport_context);
476 	MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
477 
478 	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
479 
480 	kvfree(in);
481 
482 	return err;
483 }
484 EXPORT_SYMBOL(mlx5_modify_nic_vport_node_guid);
485 
486 int mlx5_modify_nic_vport_port_guid(struct mlx5_core_dev *mdev,
487 				    u32 vport, u64 port_guid)
488 {
489 	void *in;
490 	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
491 	int err;
492 	void *nic_vport_context;
493 
494 	if (!vport)
495 		return -EINVAL;
496 	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
497 		return -EPERM;
498 	if (!MLX5_CAP_ESW(mdev, nic_vport_port_guid_modify))
499 		return -ENOTSUPP;
500 
501 	in = mlx5_vzalloc(inlen);
502 	if (!in) {
503 		mlx5_core_warn(mdev, "failed to allocate inbox\n");
504 		return -ENOMEM;
505 	}
506 
507 	MLX5_SET(modify_nic_vport_context_in, in,
508 		 field_select.port_guid, 1);
509 	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
510 
511 	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
512 
513 	nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
514 					 in, nic_vport_context);
515 	MLX5_SET64(nic_vport_context, nic_vport_context, port_guid, port_guid);
516 
517 	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
518 
519 	kvfree(in);
520 
521 	return err;
522 }
523 EXPORT_SYMBOL(mlx5_modify_nic_vport_port_guid);
524 
/*
 * Replace the VLAN allowed-list of "vport" with "vlan_list" (list_len
 * entries).  Returns -ENOSPC when list_len exceeds the device limit,
 * -ENOMEM on mailbox allocation failure, otherwise the command status.
 */
int mlx5_set_nic_vport_vlan_list(struct mlx5_core_dev *dev, u16 vport,
				 u16 *vlan_list, int list_len)
{
	void *in, *ctx;
	int i, err;
	/* Mailbox: fixed command layout plus one vlan_layout per entry. */
	int  inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
		+ MLX5_ST_SZ_BYTES(vlan_layout) * (int)list_len;

	int max_list_size = 1 << MLX5_CAP_GEN_MAX(dev, log_max_vlan_list);

	if (list_len > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
			       list_len, max_list_size);
		return -ENOSPC;
	}

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(dev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in,
			 other_vport, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);

	MLX5_SET(nic_vport_context, ctx, allowed_list_type,
		 MLX5_NIC_VPORT_LIST_TYPE_VLAN);
	MLX5_SET(nic_vport_context, ctx, allowed_list_size, list_len);

	/*
	 * VLAN entries are written through the current_uc_mac_address[]
	 * fields — the allowed-list array apparently holds either MACs or
	 * VLANs depending on allowed_list_type (see vlan_layout use here).
	 */
	for (i = 0; i < list_len; i++) {
		u8 *vlan_lout = MLX5_ADDR_OF(nic_vport_context, ctx,
					 current_uc_mac_address[i]);
		MLX5_SET(vlan_layout, vlan_lout, vlan, vlan_list[i]);
	}

	/* Opcode is stamped by mlx5_modify_nic_vport_context(). */
	err = mlx5_modify_nic_vport_context(dev, in, inlen);

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_vlan_list);
572 
/*
 * Replace the multicast allowed-address list of "vport" with
 * "addr_list" (addr_list_len entries, each a MAC packed into a u64).
 * Returns -ENOSPC when the list exceeds the device limit, -ENOMEM on
 * mailbox allocation failure, otherwise the command status.
 */
int mlx5_set_nic_vport_mc_list(struct mlx5_core_dev *mdev, int vport,
			       u64 *addr_list, size_t addr_list_len)
{
	void *in, *ctx;
	/* Mailbox: fixed command layout plus one MAC entry per address. */
	int  inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
		  + MLX5_ST_SZ_BYTES(mac_address_layout) * (int)addr_list_len;
	int err;
	size_t i;
	int max_list_sz = 1 << MLX5_CAP_GEN_MAX(mdev, log_max_current_mc_list);

	if ((int)addr_list_len > max_list_sz) {
		mlx5_core_warn(mdev, "Requested list size (%d) > (%d) max_list_size\n",
			       (int)addr_list_len, max_list_sz);
		return -ENOSPC;
	}

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in,
			 other_vport, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);

	MLX5_SET(nic_vport_context, ctx, allowed_list_type,
		 MLX5_NIC_VPORT_LIST_TYPE_MC);
	MLX5_SET(nic_vport_context, ctx, allowed_list_size, addr_list_len);

	/*
	 * MC entries go into the same allowed-list array exposed as
	 * current_uc_mac_address[]; each entry's MAC starts at
	 * mac_addr_47_32 within its mac_address_layout.
	 */
	for (i = 0; i < addr_list_len; i++) {
		u8 *mac_lout = (u8 *)MLX5_ADDR_OF(nic_vport_context, ctx,
						  current_uc_mac_address[i]);
		u8 *mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_lout,
						 mac_addr_47_32);
		ether_addr_copy(mac_ptr, (u8 *)&addr_list[i]);
	}

	/* Opcode is stamped by mlx5_modify_nic_vport_context(). */
	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_mc_list);
623 
624 int mlx5_set_nic_vport_promisc(struct mlx5_core_dev *mdev, int vport,
625 			       bool promisc_mc, bool promisc_uc,
626 			       bool promisc_all)
627 {
628 	u8  in[MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)];
629 	u8 *ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
630 			       nic_vport_context);
631 
632 	memset(in, 0, MLX5_ST_SZ_BYTES(modify_nic_vport_context_in));
633 
634 	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
635 	if (vport)
636 		MLX5_SET(modify_nic_vport_context_in, in,
637 			 other_vport, 1);
638 	MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
639 	if (promisc_mc)
640 		MLX5_SET(nic_vport_context, ctx, promisc_mc, 1);
641 	if (promisc_uc)
642 		MLX5_SET(nic_vport_context, ctx, promisc_uc, 1);
643 	if (promisc_all)
644 		MLX5_SET(nic_vport_context, ctx, promisc_all, 1);
645 
646 	return mlx5_modify_nic_vport_context(mdev, in, sizeof(in));
647 }
648 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_promisc);
649 
650 int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
651 				  u16 vport,
652 				  enum mlx5_list_type list_type,
653 				  u8 addr_list[][ETH_ALEN],
654 				  int *list_size)
655 {
656 	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
657 	void *nic_vport_ctx;
658 	int max_list_size;
659 	int req_list_size;
660 	int out_sz;
661 	void *out;
662 	int err;
663 	int i;
664 
665 	req_list_size = *list_size;
666 
667 	max_list_size = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC) ?
668 			1 << MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list) :
669 			1 << MLX5_CAP_GEN_MAX(dev, log_max_current_mc_list);
670 
671 	if (req_list_size > max_list_size) {
672 		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
673 			       req_list_size, max_list_size);
674 		req_list_size = max_list_size;
675 	}
676 
677 	out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
678 		 req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
679 
680 	out = kzalloc(out_sz, GFP_KERNEL);
681 	if (!out)
682 		return -ENOMEM;
683 
684 	MLX5_SET(query_nic_vport_context_in, in, opcode,
685 		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
686 	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
687 	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
688 
689 	if (vport)
690 		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
691 
692 	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
693 	if (err)
694 		goto out;
695 
696 	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
697 				     nic_vport_context);
698 	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
699 				 allowed_list_size);
700 
701 	*list_size = req_list_size;
702 	for (i = 0; i < req_list_size; i++) {
703 		u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
704 					nic_vport_ctx,
705 					current_uc_mac_address[i]) + 2;
706 		ether_addr_copy(addr_list[i], mac_addr);
707 	}
708 out:
709 	kfree(out);
710 	return err;
711 }
712 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);
713 
/*
 * Program the allowed-address list of the function's own vport.
 * "list_type" selects UC or MC; "list_size" entries from addr_list[]
 * are copied in.  Returns -ENOSPC when list_size exceeds the device
 * limit, -ENOMEM on allocation failure, otherwise the command status.
 */
int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
				   enum mlx5_list_type list_type,
				   u8 addr_list[][ETH_ALEN],
				   int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	max_list_size = list_type == MLX5_NIC_VPORT_LIST_TYPE_UC ?
		 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
		 1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	/* Mailbox: fixed command layout plus one MAC entry per address. */
	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, list_type);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		/* MAC is right-aligned in the 8-byte entry, hence +2. */
		u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context,
					    nic_vport_ctx,
					    current_uc_mac_address[i]) + 2;
		ether_addr_copy(curr_mac, addr_list[i]);
	}

	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);
766 
767 int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
768 			       u16 vport,
769 			       u16 vlans[],
770 			       int *size)
771 {
772 	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
773 	void *nic_vport_ctx;
774 	int req_list_size;
775 	int max_list_size;
776 	int out_sz;
777 	void *out;
778 	int err;
779 	int i;
780 
781 	req_list_size = *size;
782 	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
783 	if (req_list_size > max_list_size) {
784 		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max list size\n",
785 			       req_list_size, max_list_size);
786 		req_list_size = max_list_size;
787 	}
788 
789 	out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
790 		 req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);
791 
792 	out = kzalloc(out_sz, GFP_KERNEL);
793 	if (!out)
794 		return -ENOMEM;
795 
796 	MLX5_SET(query_nic_vport_context_in, in, opcode,
797 		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
798 	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
799 		 MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_VLAN_LIST);
800 	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
801 
802 	if (vport)
803 		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
804 
805 	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
806 	if (err)
807 		goto out;
808 
809 	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
810 				     nic_vport_context);
811 	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
812 				 allowed_list_size);
813 
814 	*size = req_list_size;
815 	for (i = 0; i < req_list_size; i++) {
816 		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
817 					       nic_vport_ctx,
818 					 current_uc_mac_address[i]);
819 		vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
820 	}
821 out:
822 	kfree(out);
823 	return err;
824 }
825 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans);
826 
/*
 * Program the VLAN allowed-list of the function's own vport with
 * "list_size" entries from vlans[].  Returns -ENOSPC when list_size
 * exceeds the device limit, -ENOMEM on allocation failure, otherwise
 * the command status.
 */
int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
				u16 vlans[],
				int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	/* Mailbox: fixed command layout plus one vlan_layout per entry. */
	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(vlan_layout);

	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, MLX5_NIC_VPORT_LIST_TYPE_VLAN);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	/* VLAN entries share the allowed-list array with MAC entries. */
	for (i = 0; i < list_size; i++) {
		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
					       nic_vport_ctx,
					       current_uc_mac_address[i]);
		MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
	}

	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);
876 
877 int mlx5_query_nic_vport_roce_en(struct mlx5_core_dev *mdev, u8 *enable)
878 {
879 	u32 *out;
880 	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
881 	int err;
882 
883 	out = kzalloc(outlen, GFP_KERNEL);
884 	if (!out)
885 		return -ENOMEM;
886 
887 	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
888 	if (err)
889 		goto out;
890 
891 	*enable = MLX5_GET(query_nic_vport_context_out, out,
892 				nic_vport_context.roce_en);
893 
894 out:
895 	kfree(out);
896 	return err;
897 }
898 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_roce_en);
899 
900 int mlx5_set_nic_vport_permanent_mac(struct mlx5_core_dev *mdev, int vport,
901 				     u8 *addr)
902 {
903 	void *in;
904 	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
905 	u8  *mac_ptr;
906 	int err;
907 
908 	in = mlx5_vzalloc(inlen);
909 	if (!in) {
910 		mlx5_core_warn(mdev, "failed to allocate inbox\n");
911 		return -ENOMEM;
912 	}
913 
914 	MLX5_SET(modify_nic_vport_context_in, in,
915 		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
916 	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
917 	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
918 	MLX5_SET(modify_nic_vport_context_in, in,
919 		 field_select.permanent_address, 1);
920 	mac_ptr = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
921 		nic_vport_context.permanent_address.mac_addr_47_32);
922 	ether_addr_copy(mac_ptr, addr);
923 
924 	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
925 
926 	kvfree(in);
927 
928 	return err;
929 }
930 EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_permanent_mac);
931 
/* Set roce_en = 1 in the function's own NIC vport context. */
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
{
	return mlx5_nic_vport_enable_disable_roce(mdev, 1);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
937 
/* Set roce_en = 0 in the function's own NIC vport context. */
int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
{
	return mlx5_nic_vport_enable_disable_roce(mdev, 0);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
943 
944 int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
945 				  int vf, u8 port_num, void *out,
946 				  size_t out_sz)
947 {
948 	int	in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
949 	int	is_group_manager;
950 	void   *in;
951 	int	err;
952 
953 	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
954 	in = mlx5_vzalloc(in_sz);
955 	if (!in) {
956 		err = -ENOMEM;
957 		return err;
958 	}
959 
960 	MLX5_SET(query_vport_counter_in, in, opcode,
961 		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
962 	if (other_vport) {
963 		if (is_group_manager) {
964 			MLX5_SET(query_vport_counter_in, in, other_vport, 1);
965 			MLX5_SET(query_vport_counter_in, in, vport_number, vf + 1);
966 		} else {
967 			err = -EPERM;
968 			goto free;
969 		}
970 	}
971 	if (MLX5_CAP_GEN(dev, num_ports) == 2)
972 		MLX5_SET(query_vport_counter_in, in, port_num, port_num);
973 
974 	err = mlx5_cmd_exec(dev, in, in_sz, out,  out_sz);
975 free:
976 	kvfree(in);
977 	return err;
978 }
979 EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);
980 
981 int mlx5_query_hca_vport_context(struct mlx5_core_dev *mdev,
982 				 u8 port_num, u8 vport_num, u32 *out,
983 				 int outlen)
984 {
985 	u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {0};
986 	int is_group_manager;
987 
988 	is_group_manager = MLX5_CAP_GEN(mdev, vport_group_manager);
989 
990 	MLX5_SET(query_hca_vport_context_in, in, opcode,
991 		 MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);
992 
993 	if (vport_num) {
994 		if (is_group_manager) {
995 			MLX5_SET(query_hca_vport_context_in, in, other_vport,
996 				 1);
997 			MLX5_SET(query_hca_vport_context_in, in, vport_number,
998 				 vport_num);
999 		} else {
1000 			return -EPERM;
1001 		}
1002 	}
1003 
1004 	if (MLX5_CAP_GEN(mdev, num_ports) == 2)
1005 		MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);
1006 
1007 	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
1008 }
1009 
1010 int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *mdev,
1011 					   u64 *system_image_guid)
1012 {
1013 	u32 *out;
1014 	int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1015 	int err;
1016 
1017 	out = mlx5_vzalloc(outlen);
1018 	if (!out)
1019 		return -ENOMEM;
1020 
1021 	err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1022 	if (err)
1023 		goto out;
1024 
1025 	*system_image_guid = MLX5_GET64(query_hca_vport_context_out, out,
1026 					hca_vport_context.system_image_guid);
1027 
1028 out:
1029 	kvfree(out);
1030 	return err;
1031 }
1032 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
1033 
1034 int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
1035 {
1036 	u32 *out;
1037 	int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1038 	int err;
1039 
1040 	out = mlx5_vzalloc(outlen);
1041 	if (!out)
1042 		return -ENOMEM;
1043 
1044 	err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1045 	if (err)
1046 		goto out;
1047 
1048 	*node_guid = MLX5_GET64(query_hca_vport_context_out, out,
1049 				hca_vport_context.node_guid);
1050 
1051 out:
1052 	kvfree(out);
1053 	return err;
1054 }
1055 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
1056 
1057 static int mlx5_query_hca_vport_port_guid(struct mlx5_core_dev *mdev,
1058 					  u64 *port_guid)
1059 {
1060 	u32 *out;
1061 	int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1062 	int err;
1063 
1064 	out = mlx5_vzalloc(outlen);
1065 	if (!out)
1066 		return -ENOMEM;
1067 
1068 	err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1069 	if (err)
1070 		goto out;
1071 
1072 	*port_guid = MLX5_GET64(query_hca_vport_context_out, out,
1073 				hca_vport_context.port_guid);
1074 
1075 out:
1076 	kvfree(out);
1077 	return err;
1078 }
1079 
/*
 * Query one GID (or, with gid_index == 0xffff, reserve output room for the
 * whole GID table) of an HCA vport.
 *
 * Only the first returned GID is copied into *gid.  Querying a vport other
 * than our own requires the vport group manager capability; otherwise
 * -EPERM is returned.
 */
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 port_num,
			     u16 vport_num, u16 gid_index, union ib_gid *gid)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	union ib_gid *tmp;
	int tbsz;
	int nout;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));

	/* 0xffff is the "whole table" wildcard; anything else must fit. */
	if (gid_index > tbsz && gid_index != 0xffff)
		return -EINVAL;

	if (gid_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	/* Grow the output mailbox to hold the requested GID entries. */
	out_sz += nout * sizeof(*gid);

	in = mlx5_vzalloc(in_sz);
	out = mlx5_vzalloc(out_sz);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_gid_in, in, opcode,
		 MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
	if (vport_num) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_gid_in, in, vport_number,
				 vport_num);
			MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}

	MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);

	/* port_num is only meaningful on dual-port devices. */
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	/* Copy the first GID from the mailbox into the caller's buffer. */
	tmp = (union ib_gid *)MLX5_ADDR_OF(query_hca_vport_gid_out, out, gid);
	gid->global.subnet_prefix = tmp->global.subnet_prefix;
	gid->global.interface_id = tmp->global.interface_id;

out:
	kvfree(in);
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
1145 
1146 int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
1147 			      u8 port_num, u16 vf_num, u16 pkey_index,
1148 			      u16 *pkey)
1149 {
1150 	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
1151 	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
1152 	int is_group_manager;
1153 	void *out = NULL;
1154 	void *in = NULL;
1155 	void *pkarr;
1156 	int nout;
1157 	int tbsz;
1158 	int err;
1159 	int i;
1160 
1161 	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
1162 
1163 	tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
1164 	if (pkey_index > tbsz && pkey_index != 0xffff)
1165 		return -EINVAL;
1166 
1167 	if (pkey_index == 0xffff)
1168 		nout = tbsz;
1169 	else
1170 		nout = 1;
1171 
1172 	out_sz += nout * MLX5_ST_SZ_BYTES(pkey);
1173 
1174 	in = kzalloc(in_sz, GFP_KERNEL);
1175 	out = kzalloc(out_sz, GFP_KERNEL);
1176 
1177 	MLX5_SET(query_hca_vport_pkey_in, in, opcode,
1178 		 MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
1179 	if (other_vport) {
1180 		if (is_group_manager) {
1181 			MLX5_SET(query_hca_vport_pkey_in, in, vport_number,
1182 				 vf_num);
1183 			MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
1184 		} else {
1185 			err = -EPERM;
1186 			goto out;
1187 		}
1188 	}
1189 	MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);
1190 
1191 	if (MLX5_CAP_GEN(dev, num_ports) == 2)
1192 		MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);
1193 
1194 	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
1195 	if (err)
1196 		goto out;
1197 
1198 	pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
1199 	for (i = 0; i < nout; i++, pkey++,
1200 	     pkarr += MLX5_ST_SZ_BYTES(pkey))
1201 		*pkey = MLX5_GET_PR(pkey, pkarr, pkey);
1202 
1203 out:
1204 	kfree(in);
1205 	kfree(out);
1206 	return err;
1207 }
1208 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
1209 
1210 static int mlx5_query_hca_min_wqe_header(struct mlx5_core_dev *mdev,
1211 					 int *min_header)
1212 {
1213 	u32 *out;
1214 	u32 outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1215 	int err;
1216 
1217 	out = mlx5_vzalloc(outlen);
1218 	if (!out)
1219 		return -ENOMEM;
1220 
1221 	err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1222 	if (err)
1223 		goto out;
1224 
1225 	*min_header = MLX5_GET(query_hca_vport_context_out, out,
1226 			       hca_vport_context.min_wqe_inline_mode);
1227 
1228 out:
1229 	kvfree(out);
1230 	return err;
1231 }
1232 
/*
 * Issue MODIFY_ESW_VPORT_CONTEXT for the given vport.
 *
 * NOTE: this writes the vport number, other_vport flag and opcode into the
 * caller-supplied 'in' mailbox before executing the command, so the caller
 * only needs to fill in the context fields and field_select.
 */
static int mlx5_modify_eswitch_vport_context(struct mlx5_core_dev *mdev,
					     u16 vport, void *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};
	int err;

	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
	/* Vport 0 is our own vport; anything else is "other vport". */
	if (vport)
		MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);

	MLX5_SET(modify_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);

	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
	if (err)
		mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT failed\n");

	return err;
}
1252 
1253 int mlx5_set_eswitch_cvlan_info(struct mlx5_core_dev *mdev, u8 vport,
1254 				u8 insert_mode, u8 strip_mode,
1255 				u16 vlan, u8 cfi, u8 pcp)
1256 {
1257 	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)];
1258 
1259 	memset(in, 0, sizeof(in));
1260 
1261 	if (insert_mode != MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_NONE) {
1262 		MLX5_SET(modify_esw_vport_context_in, in,
1263 			 esw_vport_context.cvlan_cfi, cfi);
1264 		MLX5_SET(modify_esw_vport_context_in, in,
1265 			 esw_vport_context.cvlan_pcp, pcp);
1266 		MLX5_SET(modify_esw_vport_context_in, in,
1267 			 esw_vport_context.cvlan_id, vlan);
1268 	}
1269 
1270 	MLX5_SET(modify_esw_vport_context_in, in,
1271 		 esw_vport_context.vport_cvlan_insert, insert_mode);
1272 
1273 	MLX5_SET(modify_esw_vport_context_in, in,
1274 		 esw_vport_context.vport_cvlan_strip, strip_mode);
1275 
1276 	MLX5_SET(modify_esw_vport_context_in, in, field_select,
1277 		 MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_STRIP |
1278 		 MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_INSERT);
1279 
1280 	return mlx5_modify_eswitch_vport_context(mdev, vport, in, sizeof(in));
1281 }
1282 EXPORT_SYMBOL_GPL(mlx5_set_eswitch_cvlan_info);
1283 
1284 int mlx5_query_vport_mtu(struct mlx5_core_dev *mdev, int *mtu)
1285 {
1286 	u32 *out;
1287 	u32 outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1288 	int err;
1289 
1290 	out = mlx5_vzalloc(outlen);
1291 	if (!out)
1292 		return -ENOMEM;
1293 
1294 	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
1295 	if (err)
1296 		goto out;
1297 
1298 	*mtu = MLX5_GET(query_nic_vport_context_out, out,
1299 			nic_vport_context.mtu);
1300 
1301 out:
1302 	kvfree(out);
1303 	return err;
1304 }
1305 EXPORT_SYMBOL_GPL(mlx5_query_vport_mtu);
1306 
1307 int mlx5_set_vport_mtu(struct mlx5_core_dev *mdev, int mtu)
1308 {
1309 	u32 *in;
1310 	u32 inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1311 	int err;
1312 
1313 	in = mlx5_vzalloc(inlen);
1314 	if (!in)
1315 		return -ENOMEM;
1316 
1317 	MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
1318 	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
1319 
1320 	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1321 
1322 	kvfree(in);
1323 	return err;
1324 }
1325 EXPORT_SYMBOL_GPL(mlx5_set_vport_mtu);
1326 
1327 static int mlx5_query_vport_min_wqe_header(struct mlx5_core_dev *mdev,
1328 					   int *min_header)
1329 {
1330 	u32 *out;
1331 	u32 outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1332 	int err;
1333 
1334 	out = mlx5_vzalloc(outlen);
1335 	if (!out)
1336 		return -ENOMEM;
1337 
1338 	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
1339 	if (err)
1340 		goto out;
1341 
1342 	*min_header = MLX5_GET(query_nic_vport_context_out, out,
1343 			       nic_vport_context.min_wqe_inline_mode);
1344 
1345 out:
1346 	kvfree(out);
1347 	return err;
1348 }
1349 
1350 int mlx5_set_vport_min_wqe_header(struct mlx5_core_dev *mdev,
1351 				  u8 vport, int min_header)
1352 {
1353 	u32 *in;
1354 	u32 inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1355 	int err;
1356 
1357 	in = mlx5_vzalloc(inlen);
1358 	if (!in)
1359 		return -ENOMEM;
1360 
1361 	MLX5_SET(modify_nic_vport_context_in, in,
1362 		 field_select.min_wqe_inline_mode, 1);
1363 	MLX5_SET(modify_nic_vport_context_in, in,
1364 		 nic_vport_context.min_wqe_inline_mode, min_header);
1365 	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
1366 	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
1367 
1368 	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1369 
1370 	kvfree(in);
1371 	return err;
1372 }
1373 EXPORT_SYMBOL_GPL(mlx5_set_vport_min_wqe_header);
1374 
1375 int mlx5_query_min_wqe_header(struct mlx5_core_dev *dev, int *min_header)
1376 {
1377 	switch (MLX5_CAP_GEN(dev, port_type)) {
1378 	case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1379 		return mlx5_query_hca_min_wqe_header(dev, min_header);
1380 
1381 	case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1382 		return mlx5_query_vport_min_wqe_header(dev, min_header);
1383 
1384 	default:
1385 		return -EINVAL;
1386 	}
1387 }
1388 EXPORT_SYMBOL_GPL(mlx5_query_min_wqe_header);
1389 
1390 int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
1391 				 u16 vport,
1392 				 int *promisc_uc,
1393 				 int *promisc_mc,
1394 				 int *promisc_all)
1395 {
1396 	u32 *out;
1397 	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1398 	int err;
1399 
1400 	out = kzalloc(outlen, GFP_KERNEL);
1401 	if (!out)
1402 		return -ENOMEM;
1403 
1404 	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
1405 	if (err)
1406 		goto out;
1407 
1408 	*promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
1409 			       nic_vport_context.promisc_uc);
1410 	*promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
1411 			       nic_vport_context.promisc_mc);
1412 	*promisc_all = MLX5_GET(query_nic_vport_context_out, out,
1413 				nic_vport_context.promisc_all);
1414 
1415 out:
1416 	kfree(out);
1417 	return err;
1418 }
1419 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);
1420 
1421 int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
1422 				  int promisc_uc,
1423 				  int promisc_mc,
1424 				  int promisc_all)
1425 {
1426 	void *in;
1427 	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1428 	int err;
1429 
1430 	in = mlx5_vzalloc(inlen);
1431 	if (!in) {
1432 		mlx5_core_err(mdev, "failed to allocate inbox\n");
1433 		return -ENOMEM;
1434 	}
1435 
1436 	MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
1437 	MLX5_SET(modify_nic_vport_context_in, in,
1438 		 nic_vport_context.promisc_uc, promisc_uc);
1439 	MLX5_SET(modify_nic_vport_context_in, in,
1440 		 nic_vport_context.promisc_mc, promisc_mc);
1441 	MLX5_SET(modify_nic_vport_context_in, in,
1442 		 nic_vport_context.promisc_all, promisc_all);
1443 
1444 	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1445 	kvfree(in);
1446 	return err;
1447 }
1448 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);
1449 
1450 int mlx5_nic_vport_modify_local_lb(struct mlx5_core_dev *mdev,
1451 				   enum mlx5_local_lb_selection selection,
1452 				   u8 value)
1453 {
1454 	void *in;
1455 	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1456 	int err;
1457 
1458 	in = mlx5_vzalloc(inlen);
1459 	if (!in) {
1460 		mlx5_core_warn(mdev, "failed to allocate inbox\n");
1461 		return -ENOMEM;
1462 	}
1463 
1464 	MLX5_SET(modify_nic_vport_context_in, in, vport_number, 0);
1465 
1466 	if (selection == MLX5_LOCAL_MC_LB) {
1467 		MLX5_SET(modify_nic_vport_context_in, in,
1468 			 field_select.disable_mc_local_lb, 1);
1469 		MLX5_SET(modify_nic_vport_context_in, in,
1470 			 nic_vport_context.disable_mc_local_lb,
1471 			 value);
1472 	} else {
1473 		MLX5_SET(modify_nic_vport_context_in, in,
1474 			 field_select.disable_uc_local_lb, 1);
1475 		MLX5_SET(modify_nic_vport_context_in, in,
1476 			 nic_vport_context.disable_uc_local_lb,
1477 			 value);
1478 	}
1479 
1480 	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1481 
1482 	kvfree(in);
1483 	return err;
1484 }
1485 EXPORT_SYMBOL_GPL(mlx5_nic_vport_modify_local_lb);
1486 
1487 int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev,
1488 				  enum mlx5_local_lb_selection selection,
1489 				  u8 *value)
1490 {
1491 	void *out;
1492 	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1493 	int err;
1494 
1495 	out = kzalloc(outlen, GFP_KERNEL);
1496 	if (!out)
1497 		return -ENOMEM;
1498 
1499 	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
1500 	if (err)
1501 		goto done;
1502 
1503 	if (selection == MLX5_LOCAL_MC_LB)
1504 		*value = MLX5_GET(query_nic_vport_context_out, out,
1505 				  nic_vport_context.disable_mc_local_lb);
1506 	else
1507 		*value = MLX5_GET(query_nic_vport_context_out, out,
1508 				  nic_vport_context.disable_uc_local_lb);
1509 
1510 done:
1511 	kfree(out);
1512 	return err;
1513 }
1514 EXPORT_SYMBOL_GPL(mlx5_nic_vport_query_local_lb);
1515 
1516 int mlx5_query_vport_counter(struct mlx5_core_dev *dev,
1517 			     u8 port_num, u16 vport_num,
1518 			     void *out, int out_size)
1519 {
1520 	int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
1521 	int is_group_manager;
1522 	void *in;
1523 	int err;
1524 
1525 	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
1526 
1527 	in = mlx5_vzalloc(in_sz);
1528 	if (!in)
1529 		return -ENOMEM;
1530 
1531 	MLX5_SET(query_vport_counter_in, in, opcode,
1532 		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
1533 	if (vport_num) {
1534 		if (is_group_manager) {
1535 			MLX5_SET(query_vport_counter_in, in, other_vport, 1);
1536 			MLX5_SET(query_vport_counter_in, in, vport_number,
1537 				 vport_num);
1538 		} else {
1539 			err = -EPERM;
1540 			goto ex;
1541 		}
1542 	}
1543 	if (MLX5_CAP_GEN(dev, num_ports) == 2)
1544 		MLX5_SET(query_vport_counter_in, in, port_num, port_num);
1545 
1546 	err = mlx5_cmd_exec(dev, in, in_sz, out,  out_size);
1547 
1548 	kvfree(in);
1549 ex:
1550 	return err;
1551 }
1552 EXPORT_SYMBOL_GPL(mlx5_query_vport_counter);
1553 
/*
 * Query the counter set of our own vport (vport 0) and unpack every field
 * from the command mailbox into the caller's struct mlx5_vport_counters.
 * On command failure *vc is left untouched and the error is returned.
 */
int mlx5_get_vport_counters(struct mlx5_core_dev *dev, u8 port_num,
			    struct mlx5_vport_counters *vc)
{
	int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	void *out;
	int err;

	out = mlx5_vzalloc(out_sz);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_vport_counter(dev, port_num, 0, out, out_sz);
	if (err)
		goto ex;

	/* Mechanical field-by-field copy out of the mailbox layout. */
	vc->received_errors.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_errors.packets);
	vc->received_errors.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_errors.octets);
	vc->transmit_errors.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmit_errors.packets);
	vc->transmit_errors.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmit_errors.octets);
	vc->received_ib_unicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_ib_unicast.packets);
	vc->received_ib_unicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_ib_unicast.octets);
	vc->transmitted_ib_unicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_ib_unicast.packets);
	vc->transmitted_ib_unicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_ib_unicast.octets);
	vc->received_ib_multicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_ib_multicast.packets);
	vc->received_ib_multicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_ib_multicast.octets);
	vc->transmitted_ib_multicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_ib_multicast.packets);
	vc->transmitted_ib_multicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_ib_multicast.octets);
	vc->received_eth_broadcast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_eth_broadcast.packets);
	vc->received_eth_broadcast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_eth_broadcast.octets);
	vc->transmitted_eth_broadcast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_eth_broadcast.packets);
	vc->transmitted_eth_broadcast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_eth_broadcast.octets);
	vc->received_eth_unicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_eth_unicast.octets);
	vc->received_eth_unicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_eth_unicast.packets);
	vc->transmitted_eth_unicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_eth_unicast.octets);
	vc->transmitted_eth_unicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_eth_unicast.packets);
	vc->received_eth_multicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_eth_multicast.octets);
	vc->received_eth_multicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, received_eth_multicast.packets);
	vc->transmitted_eth_multicast.octets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_eth_multicast.octets);
	vc->transmitted_eth_multicast.packets =
		MLX5_GET64(query_vport_counter_out,
			   out, transmitted_eth_multicast.packets);

ex:
	kvfree(out);
	return err;
}
1646 
1647 int mlx5_query_vport_system_image_guid(struct mlx5_core_dev *dev,
1648 				       u64 *sys_image_guid)
1649 {
1650 	switch (MLX5_CAP_GEN(dev, port_type)) {
1651 	case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1652 		return mlx5_query_hca_vport_system_image_guid(dev,
1653 							      sys_image_guid);
1654 
1655 	case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1656 		return mlx5_query_nic_vport_system_image_guid(dev,
1657 							      sys_image_guid);
1658 
1659 	default:
1660 		return -EINVAL;
1661 	}
1662 }
1663 EXPORT_SYMBOL_GPL(mlx5_query_vport_system_image_guid);
1664 
1665 int mlx5_query_vport_node_guid(struct mlx5_core_dev *dev, u64 *node_guid)
1666 {
1667 	switch (MLX5_CAP_GEN(dev, port_type)) {
1668 	case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1669 		return mlx5_query_hca_vport_node_guid(dev, node_guid);
1670 
1671 	case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1672 		return mlx5_query_nic_vport_node_guid(dev, node_guid);
1673 
1674 	default:
1675 		return -EINVAL;
1676 	}
1677 }
1678 EXPORT_SYMBOL_GPL(mlx5_query_vport_node_guid);
1679 
1680 int mlx5_query_vport_port_guid(struct mlx5_core_dev *dev, u64 *port_guid)
1681 {
1682 	switch (MLX5_CAP_GEN(dev, port_type)) {
1683 	case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1684 		return mlx5_query_hca_vport_port_guid(dev, port_guid);
1685 
1686 	case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1687 		return mlx5_query_nic_vport_port_guid(dev, port_guid);
1688 
1689 	default:
1690 		return -EINVAL;
1691 	}
1692 }
1693 EXPORT_SYMBOL_GPL(mlx5_query_vport_port_guid);
1694 
1695 int mlx5_query_hca_vport_state(struct mlx5_core_dev *dev, u8 *vport_state)
1696 {
1697 	u32 *out;
1698 	int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1699 	int err;
1700 
1701 	out = mlx5_vzalloc(outlen);
1702 	if (!out)
1703 		return -ENOMEM;
1704 
1705 	err = mlx5_query_hca_vport_context(dev, 1, 0, out, outlen);
1706 	if (err)
1707 		goto out;
1708 
1709 	*vport_state = MLX5_GET(query_hca_vport_context_out, out,
1710 				hca_vport_context.vport_state);
1711 
1712 out:
1713 	kvfree(out);
1714 	return err;
1715 }
1716 EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_state);
1717 
1718 int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
1719 			     u8 port_num, void *out, size_t sz)
1720 {
1721 	u32 *in;
1722 	int err;
1723 
1724 	in  = mlx5_vzalloc(sz);
1725 	if (!in) {
1726 		err = -ENOMEM;
1727 		return err;
1728 	}
1729 
1730 	MLX5_SET(ppcnt_reg, in, local_port, port_num);
1731 
1732 	MLX5_SET(ppcnt_reg, in, grp, MLX5_INFINIBAND_PORT_COUNTERS_GROUP);
1733 	err = mlx5_core_access_reg(dev, in, sz, out,
1734 				   sz, MLX5_REG_PPCNT, 0, 0);
1735 
1736 	kvfree(in);
1737 	return err;
1738 }
1739