/* xref: /freebsd/sys/dev/mlx4/mlx4_core/mlx4_cmd.c (revision 780fb4a2) */
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/delay.h>

#include <dev/mlx4/cmd.h>
#include <dev/mlx4/device.h>
#include <linux/semaphore.h>
#include <rdma/ib_smi.h>

#include <asm/io.h>
#include <linux/ktime.h>

#include "mlx4.h"
#include "fw.h"
#include "fw_qos.h"

#define CMD_POLL_TOKEN 0xffff
#define INBOX_MASK	0xffffffffffffff00ULL

#define CMD_CHAN_VER 1
#define CMD_CHAN_IF_REV 1

enum {
	/* command completed successfully: */
	CMD_STAT_OK		= 0x00,
	/* Internal error (such as a bus error) occurred while processing command: */
	CMD_STAT_INTERNAL_ERR	= 0x01,
	/* Operation/command not supported or opcode modifier not supported: */
	CMD_STAT_BAD_OP		= 0x02,
	/* Parameter not supported or parameter out of range: */
	CMD_STAT_BAD_PARAM	= 0x03,
	/* System not enabled or bad system state: */
	CMD_STAT_BAD_SYS_STATE	= 0x04,
	/* Attempt to access reserved or unallocated resource: */
	CMD_STAT_BAD_RESOURCE	= 0x05,
	/* Requested resource is currently executing a command, or is otherwise busy: */
	CMD_STAT_RESOURCE_BUSY	= 0x06,
	/* Required capability exceeds device limits: */
	CMD_STAT_EXCEED_LIM	= 0x08,
	/* Resource is not in the appropriate state or ownership: */
	CMD_STAT_BAD_RES_STATE	= 0x09,
	/* Index out of range: */
	CMD_STAT_BAD_INDEX	= 0x0a,
	/* FW image corrupted: */
	CMD_STAT_BAD_NVMEM	= 0x0b,
	/* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
	CMD_STAT_ICM_ERROR	= 0x0c,
	/* Attempt to modify a QP/EE which is not in the presumed state: */
	CMD_STAT_BAD_QP_STATE   = 0x10,
	/* Bad segment parameters (Address/Size): */
	CMD_STAT_BAD_SEG_PARAM	= 0x20,
	/* Memory Region has Memory Windows bound to it: */
	CMD_STAT_REG_BOUND	= 0x21,
	/* HCA local attached memory not present: */
	CMD_STAT_LAM_NOT_PRE	= 0x22,
	/* Bad management packet (silently discarded): */
	CMD_STAT_BAD_PKT	= 0x30,
	/* More outstanding CQEs in CQ than new CQ size: */
	CMD_STAT_BAD_SIZE	= 0x40,
	/* Multi Function device support required: */
	CMD_STAT_MULTI_FUNC_REQ	= 0x50,
};

enum {
	HCR_IN_PARAM_OFFSET	= 0x00,
	HCR_IN_MODIFIER_OFFSET	= 0x08,
	HCR_OUT_PARAM_OFFSET	= 0x0c,
	HCR_TOKEN_OFFSET	= 0x14,
	HCR_STATUS_OFFSET	= 0x18,

	HCR_OPMOD_SHIFT		= 12,
	HCR_T_BIT		= 21,
	HCR_E_BIT		= 22,
	HCR_GO_BIT		= 23
};

enum {
	GO_BIT_TIMEOUT_MSECS	= 10000
};

enum mlx4_vlan_transition {
	MLX4_VLAN_TRANSITION_VST_VST = 0,
	MLX4_VLAN_TRANSITION_VST_VGT = 1,
	MLX4_VLAN_TRANSITION_VGT_VST = 2,
	MLX4_VLAN_TRANSITION_VGT_VGT = 3,
};

struct mlx4_cmd_context {
	struct completion	done;
	int			result;
	int			next;
	u64			out_param;
	u16			token;
	u8			fw_status;
};

static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
				    struct mlx4_vhcr_cmd *in_vhcr);

static int mlx4_status_to_errno(u8 status)
{
	static const int trans_table[] = {
		[CMD_STAT_INTERNAL_ERR]	  = -EIO,
		[CMD_STAT_BAD_OP]	  = -EPERM,
		[CMD_STAT_BAD_PARAM]	  = -EINVAL,
		[CMD_STAT_BAD_SYS_STATE]  = -ENXIO,
		[CMD_STAT_BAD_RESOURCE]	  = -EBADF,
		[CMD_STAT_RESOURCE_BUSY]  = -EBUSY,
		[CMD_STAT_EXCEED_LIM]	  = -ENOMEM,
		[CMD_STAT_BAD_RES_STATE]  = -EBADF,
		[CMD_STAT_BAD_INDEX]	  = -EBADF,
		[CMD_STAT_BAD_NVMEM]	  = -EFAULT,
		[CMD_STAT_ICM_ERROR]	  = -ENFILE,
		[CMD_STAT_BAD_QP_STATE]   = -EINVAL,
		[CMD_STAT_BAD_SEG_PARAM]  = -EFAULT,
		[CMD_STAT_REG_BOUND]	  = -EBUSY,
		[CMD_STAT_LAM_NOT_PRE]	  = -EAGAIN,
		[CMD_STAT_BAD_PKT]	  = -EINVAL,
		[CMD_STAT_BAD_SIZE]	  = -ENOMEM,
		[CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
	};

	if (status >= ARRAY_SIZE(trans_table) ||
	    (status != CMD_STAT_OK && trans_table[status] == 0))
		return -EIO;

	return trans_table[status];
}

static u8 mlx4_errno_to_status(int errno)
{
	switch (errno) {
	case -EPERM:
		return CMD_STAT_BAD_OP;
	case -EINVAL:
		return CMD_STAT_BAD_PARAM;
	case -ENXIO:
		return CMD_STAT_BAD_SYS_STATE;
	case -EBUSY:
		return CMD_STAT_RESOURCE_BUSY;
	case -ENOMEM:
		return CMD_STAT_EXCEED_LIM;
	case -ENFILE:
		return CMD_STAT_ICM_ERROR;
	default:
		return CMD_STAT_INTERNAL_ERR;
	}
}

static int mlx4_internal_err_ret_value(struct mlx4_dev *dev, u16 op,
				       u8 op_modifier)
{
	switch (op) {
	case MLX4_CMD_UNMAP_ICM:
	case MLX4_CMD_UNMAP_ICM_AUX:
	case MLX4_CMD_UNMAP_FA:
	case MLX4_CMD_2RST_QP:
	case MLX4_CMD_HW2SW_EQ:
	case MLX4_CMD_HW2SW_CQ:
	case MLX4_CMD_HW2SW_SRQ:
	case MLX4_CMD_HW2SW_MPT:
	case MLX4_CMD_CLOSE_HCA:
	case MLX4_QP_FLOW_STEERING_DETACH:
	case MLX4_CMD_FREE_RES:
	case MLX4_CMD_CLOSE_PORT:
		return CMD_STAT_OK;

	case MLX4_CMD_QP_ATTACH:
		/* In the detach case, return success */
		if (op_modifier == 0)
			return CMD_STAT_OK;
		return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);

	default:
		return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
	}
}

static int mlx4_closing_cmd_fatal_error(u16 op, u8 fw_status)
{
	/* Any error during the closing commands below is considered fatal */
	if (op == MLX4_CMD_CLOSE_HCA ||
	    op == MLX4_CMD_HW2SW_EQ ||
	    op == MLX4_CMD_HW2SW_CQ ||
	    op == MLX4_CMD_2RST_QP ||
	    op == MLX4_CMD_HW2SW_SRQ ||
	    op == MLX4_CMD_SYNC_TPT ||
	    op == MLX4_CMD_UNMAP_ICM ||
	    op == MLX4_CMD_UNMAP_ICM_AUX ||
	    op == MLX4_CMD_UNMAP_FA)
		return 1;
	/* An error on MLX4_CMD_HW2SW_MPT is fatal except when the fw status
	 * equals CMD_STAT_REG_BOUND.
	 * That status indicates that the memory region has memory windows
	 * bound to it, which may result from invalid user-space usage and
	 * is not fatal.
	 */
	if (op == MLX4_CMD_HW2SW_MPT && fw_status != CMD_STAT_REG_BOUND)
		return 1;
	return 0;
}

static int mlx4_cmd_reset_flow(struct mlx4_dev *dev, u16 op, u8 op_modifier,
			       int err)
{
	/* Only if the reset flow is really active is the return code based
	 * on the command; otherwise the current error code is returned.
	 */
	if (mlx4_internal_err_reset) {
		mlx4_enter_error_state(dev->persist);
		err = mlx4_internal_err_ret_value(dev, op, op_modifier);
	}

	return err;
}

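/* A posted comm-channel command is still pending until the master echoes
 * the toggle bit (bit 31 of slave_read) back to the value the slave last
 * wrote.
 */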
static int comm_pending(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 status = readl(&priv->mfunc.comm->slave_read);

	return (swab32(status) >> 31) != priv->cmd.comm_toggle;
}

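/* Post a command on the comm channel: the 32-bit word written to
 * slave_write carries param in bits 0-15, cmd in bits 16-23 and the
 * toggle in bit 31.
 */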
static int mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u32 val;

	/* To avoid writing to unknown addresses after the device state was
	 * changed to internal error and the function was reset,
	 * check the INTERNAL_ERROR flag which is updated under
	 * device_state_mutex lock.
	 */
	mutex_lock(&dev->persist->device_state_mutex);

	if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		mutex_unlock(&dev->persist->device_state_mutex);
		return -EIO;
	}

	priv->cmd.comm_toggle ^= 1;
	val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
	__raw_writel((__force u32) cpu_to_be32(val),
		     &priv->mfunc.comm->slave_write);
	mmiowb();
	mutex_unlock(&dev->persist->device_state_mutex);
	return 0;
}

static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
		       unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	unsigned long end;
	int err = 0;
	int ret_from_pending = 0;

	/* First, verify that the master reports correct status */
	if (comm_pending(dev)) {
		mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
			  priv->cmd.comm_toggle, cmd);
		return -EAGAIN;
	}

	/* Write command */
	down(&priv->cmd.poll_sem);
	if (mlx4_comm_cmd_post(dev, cmd, param)) {
		/* Only in case the device state is INTERNAL_ERROR does
		 * mlx4_comm_cmd_post return with an error
		 */
		err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
		goto out;
	}

	end = msecs_to_jiffies(timeout) + jiffies;
	while (comm_pending(dev) && time_before(jiffies, end))
		cond_resched();
	ret_from_pending = comm_pending(dev);
	if (ret_from_pending) {
		/* Check if the slave is trying to boot in the middle of the
		 * FLR process. The only non-zero result in the RESET command
		 * is MLX4_DELAY_RESET_SLAVE.
		 */
		if (cmd == MLX4_COMM_CMD_RESET) {
			err = MLX4_DELAY_RESET_SLAVE;
			goto out;
		} else {
			mlx4_warn(dev, "Communication channel command 0x%x timed out\n",
				  cmd);
			err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
		}
	}

	if (err)
		mlx4_enter_error_state(dev->persist);
out:
	up(&priv->cmd.poll_sem);
	return err;
}

static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 vhcr_cmd,
			      u16 param, u16 op, unsigned long timeout)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_context *context;
	unsigned long end;
	int err = 0;

	down(&cmd->event_sem);

	spin_lock(&cmd->context_lock);
	BUG_ON(cmd->free_head < 0);
	context = &cmd->context[cmd->free_head];
	context->token += cmd->token_mask + 1;
	cmd->free_head = context->next;
	spin_unlock(&cmd->context_lock);

	reinit_completion(&context->done);

	if (mlx4_comm_cmd_post(dev, vhcr_cmd, param)) {
		/* Only in case the device state is INTERNAL_ERROR does
		 * mlx4_comm_cmd_post return with an error
		 */
		err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
		goto out;
	}

	if (!wait_for_completion_timeout(&context->done,
					 msecs_to_jiffies(timeout))) {
		mlx4_warn(dev, "communication channel command 0x%x (op=0x%x) timed out\n",
			  vhcr_cmd, op);
		goto out_reset;
	}

	err = context->result;
	if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
			 vhcr_cmd, context->fw_status);
		if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
			goto out_reset;
	}

	/* Wait for the comm channel to become ready. This is necessary to
	 * prevent a race when switching between event and polling mode.
	 * Skip this section if the device is in FATAL_ERROR state: in that
	 * state, no commands are sent via the comm channel until
	 * the device has returned from reset.
	 */
	if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
		end = msecs_to_jiffies(timeout) + jiffies;
		while (comm_pending(dev) && time_before(jiffies, end))
			cond_resched();
	}
	goto out;

out_reset:
	err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
	mlx4_enter_error_state(dev->persist);
out:
	spin_lock(&cmd->context_lock);
	context->next = cmd->free_head;
	cmd->free_head = context - cmd->context;
	spin_unlock(&cmd->context_lock);

	up(&cmd->event_sem);
	return err;
}

int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
		  u16 op, unsigned long timeout)
{
	if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
		return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);

	if (mlx4_priv(dev)->cmd.use_events)
		return mlx4_comm_cmd_wait(dev, cmd, param, op, timeout);
	return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
}

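/* The HCR is busy as long as firmware still reports the GO bit set, or
 * while the status toggle bit has not yet caught up with the driver's
 * toggle value.
 */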
static int cmd_pending(struct mlx4_dev *dev)
{
	u32 status;

	if (pci_channel_offline(dev->persist->pdev))
		return -EIO;

	status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);

	return (status & swab32(1 << HCR_GO_BIT)) ||
		(mlx4_priv(dev)->cmd.toggle ==
		 !!(status & swab32(1 << HCR_T_BIT)));
}

static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
			 u32 in_modifier, u8 op_modifier, u16 op, u16 token,
			 int event)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	u32 __iomem *hcr = cmd->hcr;
	int ret = -EIO;
	unsigned long end;

	mutex_lock(&dev->persist->device_state_mutex);
	/* To avoid writing to unknown addresses after the device state was
	 * changed to internal error and the chip was reset,
	 * check the INTERNAL_ERROR flag which is updated under
	 * device_state_mutex lock.
	 */
	if (pci_channel_offline(dev->persist->pdev) ||
	    (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
		/*
		 * Device is going through error recovery
		 * and cannot accept commands.
		 */
		goto out;
	}

	end = jiffies;
	if (event)
		end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);

	while (cmd_pending(dev)) {
		if (pci_channel_offline(dev->persist->pdev)) {
			/*
			 * Device is going through error recovery
			 * and cannot accept commands.
			 */
			goto out;
		}

		if (time_after_eq(jiffies, end)) {
			mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
			goto out;
		}
		cond_resched();
	}

	/*
	 * We use writel (instead of something like memcpy_toio)
	 * because writes of less than 32 bits to the HCR don't work
	 * (and some architectures such as ia64 implement memcpy_toio
	 * in terms of writeb).
	 */
	__raw_writel((__force u32) cpu_to_be32(in_param >> 32),		  hcr + 0);
	__raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful),  hcr + 1);
	__raw_writel((__force u32) cpu_to_be32(in_modifier),		  hcr + 2);
	__raw_writel((__force u32) cpu_to_be32(out_param >> 32),	  hcr + 3);
	__raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
	__raw_writel((__force u32) cpu_to_be32(token << 16),		  hcr + 5);

	/* __raw_writel may not order writes. */
	wmb();

	__raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)		|
					       (cmd->toggle << HCR_T_BIT)	|
					       (event ? (1 << HCR_E_BIT) : 0)	|
					       (op_modifier << HCR_OPMOD_SHIFT) |
					       op), hcr + 6);

	/*
	 * Make sure that our HCR writes don't get mixed in with
	 * writes from another CPU starting a FW command.
	 */
	mmiowb();

	cmd->toggle = cmd->toggle ^ 1;

	ret = 0;

out:
	if (ret)
		mlx4_warn(dev, "Could not post command 0x%x: ret=%d, in_param=0x%llx, in_mod=0x%x, op_mod=0x%x\n",
			  op, ret, (long long)in_param, in_modifier, op_modifier);
	mutex_unlock(&dev->persist->device_state_mutex);

	return ret;
}

static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			  int out_is_imm, u32 in_modifier, u8 op_modifier,
			  u16 op, unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
	int ret;

	mutex_lock(&priv->cmd.slave_cmd_mutex);

	vhcr->in_param = cpu_to_be64(in_param);
	vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
	vhcr->in_modifier = cpu_to_be32(in_modifier);
	vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
	vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
	vhcr->status = 0;
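	/* Bit 6 of the flags is the e_bit: it tells the master whether to
	 * signal command completion to this slave via an EQE. */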
	vhcr->flags = !!(priv->cmd.use_events) << 6;

	if (mlx4_is_master(dev)) {
		ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
		if (!ret) {
			if (out_is_imm) {
				if (out_param)
					*out_param =
						be64_to_cpu(vhcr->out_param);
				else {
					mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
						 op);
					vhcr->status = CMD_STAT_BAD_PARAM;
				}
			}
			ret = mlx4_status_to_errno(vhcr->status);
		}
		if (ret &&
		    dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
			ret = mlx4_internal_err_ret_value(dev, op, op_modifier);
	} else {
		ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0, op,
				    MLX4_COMM_TIME + timeout);
		if (!ret) {
			if (out_is_imm) {
				if (out_param)
					*out_param =
						be64_to_cpu(vhcr->out_param);
				else {
					mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
						 op);
					vhcr->status = CMD_STAT_BAD_PARAM;
				}
			}
			ret = mlx4_status_to_errno(vhcr->status);
		} else {
			if (dev->persist->state &
			    MLX4_DEVICE_STATE_INTERNAL_ERROR)
				ret = mlx4_internal_err_ret_value(dev, op,
								  op_modifier);
			else
				mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n", op);
		}
	}

	mutex_unlock(&priv->cmd.slave_cmd_mutex);
	return ret;
}

static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			 int out_is_imm, u32 in_modifier, u8 op_modifier,
			 u16 op, unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	void __iomem *hcr = priv->cmd.hcr;
	int err = 0;
	unsigned long end;
	u32 stat;

	down(&priv->cmd.poll_sem);

	if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
		/*
		 * Device is going through error recovery
		 * and cannot accept commands.
		 */
		err = mlx4_internal_err_ret_value(dev, op, op_modifier);
		goto out;
	}

	if (out_is_imm && !out_param) {
		mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
			 op);
		err = -EINVAL;
		goto out;
	}

	err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
			    in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
	if (err)
		goto out_reset;

	end = msecs_to_jiffies(timeout) + jiffies;
	while (cmd_pending(dev) && time_before(jiffies, end)) {
		if (pci_channel_offline(dev->persist->pdev)) {
			/*
			 * Device is going through error recovery
			 * and cannot accept commands.
			 */
			err = -EIO;
			goto out_reset;
		}

		if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
			err = mlx4_internal_err_ret_value(dev, op, op_modifier);
			goto out;
		}

		cond_resched();
	}

	if (cmd_pending(dev)) {
		mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
			  op);
		err = -EIO;
		goto out_reset;
	}

	if (out_is_imm)
		*out_param =
			(u64) be32_to_cpu((__force __be32)
					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
			(u64) be32_to_cpu((__force __be32)
					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
	stat = be32_to_cpu((__force __be32)
			   __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
	err = mlx4_status_to_errno(stat);
	if (err) {
		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
			 op, stat);
		if (mlx4_closing_cmd_fatal_error(op, stat))
			goto out_reset;
		goto out;
	}

out_reset:
	if (err)
		err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
out:
	up(&priv->cmd.poll_sem);
	return err;
}

void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_context *context =
		&priv->cmd.context[token & priv->cmd.token_mask];

	/* previously timed out command completing at long last */
	if (token != context->token)
		return;

	context->fw_status = status;
	context->result    = mlx4_status_to_errno(status);
	context->out_param = out_param;

	complete(&context->done);
}

static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			 int out_is_imm, u32 in_modifier, u8 op_modifier,
			 u16 op, unsigned long timeout)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_context *context;
	long ret_wait;
	int err = 0;

	down(&cmd->event_sem);

	spin_lock(&cmd->context_lock);
	BUG_ON(cmd->free_head < 0);
	context = &cmd->context[cmd->free_head];
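	/* Advance the token's upper bits so that a late completion of a
	 * previously timed-out command in this slot is ignored by
	 * mlx4_cmd_event(). */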
	context->token += cmd->token_mask + 1;
	cmd->free_head = context->next;
	spin_unlock(&cmd->context_lock);

	if (out_is_imm && !out_param) {
		mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
			 op);
		err = -EINVAL;
		goto out;
	}

	reinit_completion(&context->done);

	err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
			    in_modifier, op_modifier, op, context->token, 1);
	if (err)
		goto out_reset;

	if (op == MLX4_CMD_SENSE_PORT) {
		ret_wait =
			wait_for_completion_interruptible_timeout(&context->done,
								  msecs_to_jiffies(timeout));
		if (ret_wait < 0) {
			context->fw_status = 0;
			context->out_param = 0;
			context->result = 0;
		}
	} else {
		ret_wait = (long)wait_for_completion_timeout(&context->done,
							     msecs_to_jiffies(timeout));
	}
	if (!ret_wait) {
		mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
			  op);
		if (op == MLX4_CMD_NOP) {
			err = -EBUSY;
			goto out;
		} else {
			err = -EIO;
			goto out_reset;
		}
	}

	err = context->result;
	if (err) {
		/* Since we do not want this error message always displayed
		 * at driver start when there are ConnectX2 HCAs on the host,
		 * we demote the message for this specific
		 * command/input_mod/opcode_mod/fw-status combination to
		 * debug level.
		 */
		if (op == MLX4_CMD_SET_PORT &&
		    (in_modifier == 1 || in_modifier == 2) &&
		    op_modifier == MLX4_SET_PORT_IB_OPCODE &&
		    context->fw_status == CMD_STAT_BAD_SIZE)
			mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n",
				 op, context->fw_status);
		else
			mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
				 op, context->fw_status);
		if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
			err = mlx4_internal_err_ret_value(dev, op, op_modifier);
		else if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
			goto out_reset;

		goto out;
	}

	if (out_is_imm)
		*out_param = context->out_param;

out_reset:
	if (err)
		err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
out:
	spin_lock(&cmd->context_lock);
	context->next = cmd->free_head;
	cmd->free_head = context - cmd->context;
	spin_unlock(&cmd->context_lock);

	up(&cmd->event_sem);
	return err;
}

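/* Common entry point for FW commands: native commands from the PF (or on a
 * non-multifunction device) go to the HCR directly, in event or polling
 * mode; everything else is funneled through the comm channel as a slave
 * command.
 */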
int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
	       int out_is_imm, u32 in_modifier, u8 op_modifier,
	       u16 op, unsigned long timeout, int native)
{
	if (pci_channel_offline(dev->persist->pdev))
		return mlx4_cmd_reset_flow(dev, op, op_modifier, -EIO);

	if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
		int ret;

		if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
			return mlx4_internal_err_ret_value(dev, op,
							   op_modifier);
		down_read(&mlx4_priv(dev)->cmd.switch_sem);
		if (mlx4_priv(dev)->cmd.use_events)
			ret = mlx4_cmd_wait(dev, in_param, out_param,
					    out_is_imm, in_modifier,
					    op_modifier, op, timeout);
		else
			ret = mlx4_cmd_poll(dev, in_param, out_param,
					    out_is_imm, in_modifier,
					    op_modifier, op, timeout);

		up_read(&mlx4_priv(dev)->cmd.switch_sem);
		return ret;
	}
	return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
			      in_modifier, op_modifier, op, timeout);
}
EXPORT_SYMBOL_GPL(__mlx4_cmd);

int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

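/* DMA a buffer between the master's and a slave's address space. Both
 * addresses must be page (4K) aligned, the size a multiple of 256 bytes,
 * and the slave id below 128, as enforced by the parameter check below.
 */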
static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
			   int slave, u64 slave_addr,
			   int size, int is_read)
{
	u64 in_param;
	u64 out_param;

	if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
	    (slave & ~0x7f) | (size & 0xff)) {
		mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx "
			      "master_addr:0x%llx slave_id:%d size:%d\n",
			      (unsigned long long)slave_addr,
			      (unsigned long long)master_addr, slave, size);
		return -EINVAL;
	}

	if (is_read) {
		in_param = (u64) slave | slave_addr;
		out_param = (u64) dev->caps.function | master_addr;
	} else {
		in_param = (u64) dev->caps.function | master_addr;
		out_param = (u64) slave | slave_addr;
	}

	return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
			    MLX4_CMD_ACCESS_MEM,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}

static int query_pkey_block(struct mlx4_dev *dev, u8 port, u16 index, u16 *pkey,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox)
{
	struct ib_smp *in_mad = (struct ib_smp *)(inbox->buf);
	struct ib_smp *out_mad = (struct ib_smp *)(outbox->buf);
	int err;
	int i;

	if (index & 0x1f)
		return -EINVAL;

	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);
	if (err)
		return err;

	for (i = 0; i < 32; ++i)
		pkey[i] = be16_to_cpu(((__be16 *) out_mad->data)[i]);

	return err;
}

static int get_full_pkey_table(struct mlx4_dev *dev, u8 port, u16 *table,
			       struct mlx4_cmd_mailbox *inbox,
			       struct mlx4_cmd_mailbox *outbox)
{
	int i;
	int err;

	for (i = 0; i < dev->caps.pkey_table_len[port]; i += 32) {
		err = query_pkey_block(dev, port, i, table + i, inbox, outbox);
		if (err)
			return err;
	}

	return 0;
}

#define PORT_CAPABILITY_LOCATION_IN_SMP 20
#define PORT_STATE_OFFSET 32

static enum ib_port_state vf_port_state(struct mlx4_dev *dev, int port, int vf)
{
	if (mlx4_get_slave_port_state(dev, vf, port) == SLAVE_PORT_UP)
		return IB_PORT_ACTIVE;
	else
		return IB_PORT_DOWN;
}

static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	struct ib_smp *smp = inbox->buf;
	u32 index;
	u8 port, slave_port;
	u8 opcode_modifier;
	u16 *table;
	int err;
	int vidx, pidx;
	int network_view;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct ib_smp *outsmp = outbox->buf;
	__be16 *outtab = (__be16 *)(outsmp->data);
	__be32 slave_cap_mask;
	__be64 slave_node_guid;

	slave_port = vhcr->in_modifier;
	port = mlx4_slave_convert_port(dev, slave, slave_port);

	/* The network-view bit is for driver use only, and must not be passed to FW */
	opcode_modifier = vhcr->op_modifier & ~0x8; /* clear netw view bit */
	network_view = !!(vhcr->op_modifier & 0x8);

	if (smp->base_version == 1 &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
	    smp->class_version == 1) {
		/* host view is paravirtualized */
		if (!network_view && smp->method == IB_MGMT_METHOD_GET) {
			if (smp->attr_id == IB_SMP_ATTR_PKEY_TABLE) {
				index = be32_to_cpu(smp->attr_mod);
				if (port < 1 || port > dev->caps.num_ports)
					return -EINVAL;
				table = kcalloc((dev->caps.pkey_table_len[port] / 32) + 1,
						sizeof(*table) * 32, GFP_KERNEL);

				if (!table)
					return -ENOMEM;
				/* We need to get the full pkey table because the
				 * paravirtualized pkeys may be scattered among
				 * several pkey blocks.
				 */
				err = get_full_pkey_table(dev, port, table, inbox, outbox);
				if (!err) {
					for (vidx = index * 32; vidx < (index + 1) * 32; ++vidx) {
						pidx = priv->virt2phys_pkey[slave][port - 1][vidx];
						outtab[vidx % 32] = cpu_to_be16(table[pidx]);
					}
				}
				kfree(table);
				return err;
			}
			if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) {
				/* Get the slave-specific caps: */
				/* Do the command */
				smp->attr_mod = cpu_to_be32(port);
				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
					    port, opcode_modifier,
					    vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
				/* modify the response for slaves */
				if (!err && slave != mlx4_master_func_num(dev)) {
					u8 *state = outsmp->data + PORT_STATE_OFFSET;

					*state = (*state & 0xf0) | vf_port_state(dev, port, slave);
					slave_cap_mask = priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
					memcpy(outsmp->data + PORT_CAPABILITY_LOCATION_IN_SMP, &slave_cap_mask, 4);
				}
				return err;
			}
			if (smp->attr_id == IB_SMP_ATTR_GUID_INFO) {
				__be64 guid = mlx4_get_admin_guid(dev, slave,
								  port);

				/* set the PF admin guid to the FW/HW burned
				 * GUID, if it wasn't yet set
				 */
				if (slave == 0 && guid == 0) {
					smp->attr_mod = 0;
					err = mlx4_cmd_box(dev,
							   inbox->dma,
							   outbox->dma,
							   vhcr->in_modifier,
							   opcode_modifier,
							   vhcr->op,
							   MLX4_CMD_TIME_CLASS_C,
							   MLX4_CMD_NATIVE);
					if (err)
						return err;
					mlx4_set_admin_guid(dev,
							    *(__be64 *)outsmp->data,
							    slave, port);
				} else {
					memcpy(outsmp->data, &guid, 8);
				}

				/* clean all other gids */
				memset(outsmp->data + 8, 0, 56);
				return 0;
			}
			if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
					     port, opcode_modifier,
					     vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
				if (!err) {
					slave_node_guid =  mlx4_get_slave_node_guid(dev, slave);
					memcpy(outsmp->data + 12, &slave_node_guid, 8);
				}
				return err;
			}
		}
	}

	/* Non-privileged VFs are only allowed "host" view LID-routed 'Get' MADs.
	 * These are the MADs used by ib verbs (such as ib_query_gids).
	 */
	if (slave != mlx4_master_func_num(dev) &&
	    !mlx4_vf_smi_enabled(dev, slave, port)) {
		if (!(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
		      smp->method == IB_MGMT_METHOD_GET) || network_view) {
			mlx4_err(dev, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x, view=%s for attr 0x%x. Rejecting\n",
				 slave, smp->mgmt_class, smp->method,
				 network_view ? "Network" : "Host",
				 be16_to_cpu(smp->attr_id));
			return -EPERM;
		}
	}

	return mlx4_cmd_box(dev, inbox->dma, outbox->dma,
				    vhcr->in_modifier, opcode_modifier,
				    vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
}

static int mlx4_CMD_EPERM_wrapper(struct mlx4_dev *dev, int slave,
		     struct mlx4_vhcr *vhcr,
		     struct mlx4_cmd_mailbox *inbox,
		     struct mlx4_cmd_mailbox *outbox,
		     struct mlx4_cmd_info *cmd)
{
	return -EPERM;
}

int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
		     struct mlx4_vhcr *vhcr,
		     struct mlx4_cmd_mailbox *inbox,
		     struct mlx4_cmd_mailbox *outbox,
		     struct mlx4_cmd_info *cmd)
{
	u64 in_param;
	u64 out_param;
	int err;

	in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
	out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
	if (cmd->encode_slave_id) {
		in_param &= 0xffffffffffffff00ll;
		in_param |= slave;
	}

	err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
			 vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
			 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	if (cmd->out_is_imm)
		vhcr->out_param = out_param;

	return err;
}

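/* Dispatch table for commands received from slaves. A NULL wrapper means
 * the command is passed through to firmware unchanged, while
 * mlx4_CMD_EPERM_wrapper rejects the command for guests altogether.
 */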
static struct mlx4_cmd_info cmd_info[] = {
	{
		.opcode = MLX4_CMD_QUERY_FW,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_FW_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_HCA,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_QUERY_DEV_CAP,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_DEV_CAP_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_FUNC_CAP,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_FUNC_CAP_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_ADAPTER,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_INIT_PORT,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_INIT_PORT_wrapper
	},
	{
		.opcode = MLX4_CMD_CLOSE_PORT,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CLOSE_PORT_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_PORT,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_PORT_wrapper
	},
	{
		.opcode = MLX4_CMD_SET_PORT,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SET_PORT_wrapper
	},
	{
		.opcode = MLX4_CMD_MAP_EQ,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_MAP_EQ_wrapper
	},
	{
		.opcode = MLX4_CMD_SW2HW_EQ,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_SW2HW_EQ_wrapper
	},
	{
		.opcode = MLX4_CMD_HW_HEALTH_CHECK,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_NOP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_CONFIG_DEV,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CONFIG_DEV_wrapper
	},
	{
		.opcode = MLX4_CMD_ALLOC_RES,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = true,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_ALLOC_RES_wrapper
	},
	{
		.opcode = MLX4_CMD_FREE_RES,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_FREE_RES_wrapper
	},
	{
		.opcode = MLX4_CMD_SW2HW_MPT,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_SW2HW_MPT_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_MPT,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_MPT_wrapper
	},
	{
		.opcode = MLX4_CMD_HW2SW_MPT,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_HW2SW_MPT_wrapper
	},
	{
		.opcode = MLX4_CMD_READ_MTT,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_WRITE_MTT,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_WRITE_MTT_wrapper
	},
	{
		.opcode = MLX4_CMD_SYNC_TPT,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_HW2SW_EQ,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_HW2SW_EQ_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_EQ,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_QUERY_EQ_wrapper
	},
	{
		.opcode = MLX4_CMD_SW2HW_CQ,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_SW2HW_CQ_wrapper
	},
	{
		.opcode = MLX4_CMD_HW2SW_CQ,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_HW2SW_CQ_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_CQ,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_CQ_wrapper
	},
	{
		.opcode = MLX4_CMD_MODIFY_CQ,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = true,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_MODIFY_CQ_wrapper
	},
	{
		.opcode = MLX4_CMD_SW2HW_SRQ,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_SW2HW_SRQ_wrapper
	},
	{
		.opcode = MLX4_CMD_HW2SW_SRQ,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_HW2SW_SRQ_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_SRQ,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_SRQ_wrapper
	},
	{
		.opcode = MLX4_CMD_ARM_SRQ,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_ARM_SRQ_wrapper
	},
	{
		.opcode = MLX4_CMD_RST2INIT_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = true,
		.verify = NULL,
		.wrapper = mlx4_RST2INIT_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_INIT2INIT_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_INIT2INIT_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_INIT2RTR_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_INIT2RTR_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_RTR2RTS_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_RTR2RTS_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_RTS2RTS_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_RTS2RTS_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_SQERR2RTS_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SQERR2RTS_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_2ERR_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_RTS2SQD_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_SQD2SQD_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SQD2SQD_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_SQD2RTS_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SQD2RTS_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_2RST_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_2RST_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_QP,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_SUSPEND_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_UNSUSPEND_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_GEN_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_UPDATE_QP,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_UPDATE_QP_wrapper
	},
	{
		.opcode = MLX4_CMD_GET_OP_REQ,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CMD_EPERM_wrapper,
	},
	{
		.opcode = MLX4_CMD_ALLOCATE_VPP,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CMD_EPERM_wrapper,
	},
	{
		.opcode = MLX4_CMD_SET_VPORT_QOS,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CMD_EPERM_wrapper,
	},
	{
		.opcode = MLX4_CMD_CONF_SPECIAL_QP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL, /* XXX verify: only demux can do this */
		.wrapper = NULL
	},
	{
		.opcode = MLX4_CMD_MAD_IFC,
		.has_inbox = true,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_MAD_IFC_wrapper
	},
	{
		.opcode = MLX4_CMD_MAD_DEMUX,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CMD_EPERM_wrapper
	},
	{
		.opcode = MLX4_CMD_QUERY_IF_STAT,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QUERY_IF_STAT_wrapper
	},
	{
		.opcode = MLX4_CMD_ACCESS_REG,
		.has_inbox = true,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_ACCESS_REG_wrapper,
	},
	{
		.opcode = MLX4_CMD_CONGESTION_CTRL_OPCODE,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CMD_EPERM_wrapper,
	},
	/* Native multicast commands are not available for guests */
	{
		.opcode = MLX4_CMD_QP_ATTACH,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QP_ATTACH_wrapper
	},
	{
		.opcode = MLX4_CMD_PROMISC,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_PROMISC_wrapper
	},
	/* Ethernet specific commands */
	{
		.opcode = MLX4_CMD_SET_VLAN_FLTR,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SET_VLAN_FLTR_wrapper
	},
	{
		.opcode = MLX4_CMD_SET_MCAST_FLTR,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_SET_MCAST_FLTR_wrapper
	},
	{
		.opcode = MLX4_CMD_DUMP_ETH_STATS,
		.has_inbox = false,
		.has_outbox = true,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_DUMP_ETH_STATS_wrapper
	},
	{
		.opcode = MLX4_CMD_INFORM_FLR_DONE,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = NULL
	},
	/* flow steering commands */
	{
		.opcode = MLX4_QP_FLOW_STEERING_ATTACH,
		.has_inbox = true,
		.has_outbox = false,
		.out_is_imm = true,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QP_FLOW_STEERING_ATTACH_wrapper
	},
	{
		.opcode = MLX4_QP_FLOW_STEERING_DETACH,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper
	},
	{
		.opcode = MLX4_FLOW_STEERING_IB_UC_QP_RANGE,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CMD_EPERM_wrapper
	},
	{
		.opcode = MLX4_CMD_VIRT_PORT_MAP,
		.has_inbox = false,
		.has_outbox = false,
		.out_is_imm = false,
		.encode_slave_id = false,
		.verify = NULL,
		.wrapper = mlx4_CMD_EPERM_wrapper
	},
};

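/* Execute a single virtual-HCR command on behalf of a slave: DMA in the
 * vHCR (and inbox, if any), look the opcode up in cmd_info[], run the
 * wrapper or pass the command through to firmware, then DMA the outbox
 * and status back to the slave.
 */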
static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
				    struct mlx4_vhcr_cmd *in_vhcr)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_info *cmd = NULL;
	struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
	struct mlx4_vhcr *vhcr;
	struct mlx4_cmd_mailbox *inbox = NULL;
	struct mlx4_cmd_mailbox *outbox = NULL;
	u64 in_param;
	u64 out_param;
	int ret = 0;
	int i;
	int err = 0;

	/* Create sw representation of Virtual HCR */
	vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
	if (!vhcr)
		return -ENOMEM;

	/* DMA in the vHCR */
	if (!in_vhcr) {
		ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
				      priv->mfunc.master.slave_state[slave].vhcr_dma,
				      ALIGN(sizeof(struct mlx4_vhcr_cmd),
					    MLX4_ACCESS_MEM_ALIGN), 1);
		if (ret) {
			if (!(dev->persist->state &
			    MLX4_DEVICE_STATE_INTERNAL_ERROR))
				mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
					 __func__, ret);
			kfree(vhcr);
			return ret;
		}
	}

	/* Fill SW VHCR fields */
	vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
	vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
	vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
	vhcr->token = be16_to_cpu(vhcr_cmd->token);
	vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
	vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
	vhcr->e_bit = vhcr_cmd->flags & (1 << 6);

	/* Lookup command */
	for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
		if (vhcr->op == cmd_info[i].opcode) {
			cmd = &cmd_info[i];
			break;
		}
	}
	if (!cmd) {
		mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
			 vhcr->op, slave);
		vhcr_cmd->status = CMD_STAT_BAD_PARAM;
		goto out_status;
	}

	/* Read inbox */
	if (cmd->has_inbox) {
		vhcr->in_param &= INBOX_MASK;
		inbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(inbox)) {
			vhcr_cmd->status = CMD_STAT_BAD_SIZE;
			inbox = NULL;
			goto out_status;
		}

		ret = mlx4_ACCESS_MEM(dev, inbox->dma, slave,
				      vhcr->in_param,
				      MLX4_MAILBOX_SIZE, 1);
		if (ret) {
			if (!(dev->persist->state &
			    MLX4_DEVICE_STATE_INTERNAL_ERROR))
				mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
					 __func__, cmd->opcode);
			vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
			goto out_status;
		}
	}

	/* Apply permission and bound checks if applicable */
	if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
		mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n",
			  vhcr->op, slave, vhcr->in_modifier);
		vhcr_cmd->status = CMD_STAT_BAD_OP;
		goto out_status;
	}

	/* Allocate outbox */
	if (cmd->has_outbox) {
		outbox = mlx4_alloc_cmd_mailbox(dev);
		if (IS_ERR(outbox)) {
			vhcr_cmd->status = CMD_STAT_BAD_SIZE;
			outbox = NULL;
			goto out_status;
		}
	}

	/* Execute the command! */
	if (cmd->wrapper) {
		err = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
				   cmd);
		if (cmd->out_is_imm)
			vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
	} else {
		in_param = cmd->has_inbox ? (u64) inbox->dma :
			vhcr->in_param;
		out_param = cmd->has_outbox ? (u64) outbox->dma :
			vhcr->out_param;
		err = __mlx4_cmd(dev, in_param, &out_param,
				 cmd->out_is_imm, vhcr->in_modifier,
				 vhcr->op_modifier, vhcr->op,
				 MLX4_CMD_TIME_CLASS_A,
				 MLX4_CMD_NATIVE);

		if (cmd->out_is_imm) {
			vhcr->out_param = out_param;
			vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
		}
	}

	if (err) {
		if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR))
			mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
				  vhcr->op, slave, vhcr->errno, err);
		vhcr_cmd->status = mlx4_errno_to_status(err);
		goto out_status;
	}

	/* Write outbox if command completed successfully */
	if (cmd->has_outbox && !vhcr_cmd->status) {
		ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
				      vhcr->out_param,
				      MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
		if (ret) {
			/* If we failed to write back the outbox after the
			 * command was successfully executed, we must fail this
			 * slave, as it is now in an undefined state.
			 */
			if (!(dev->persist->state &
			    MLX4_DEVICE_STATE_INTERNAL_ERROR))
				mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
			goto out;
		}
	}

out_status:
	/* DMA back vhcr result */
	if (!in_vhcr) {
		ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
				      priv->mfunc.master.slave_state[slave].vhcr_dma,
				      ALIGN(sizeof(struct mlx4_vhcr),
					    MLX4_ACCESS_MEM_ALIGN),
				      MLX4_CMD_WRAPPED);
		if (ret)
			mlx4_err(dev, "%s:Failed writing vhcr result\n",
				 __func__);
		else if (vhcr->e_bit &&
			 mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
				mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n",
					  slave);
	}

out:
	kfree(vhcr);
	mlx4_free_cmd_mailbox(dev, inbox);
	mlx4_free_cmd_mailbox(dev, outbox);
	return ret;
}

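/* Apply changed VLAN/QoS admin settings to an active VF immediately by
 * queueing a work item that updates the VF's QPs via UPDATE_QP.
 */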
1841 static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
1842 					    int slave, int port)
1843 {
1844 	struct mlx4_vport_oper_state *vp_oper;
1845 	struct mlx4_vport_state *vp_admin;
1846 	struct mlx4_vf_immed_vlan_work *work;
1847 	struct mlx4_dev *dev = &priv->dev;
1848 	int err;
1849 	int admin_vlan_ix = NO_INDX;
1850 
1851 	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1852 	vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
1853 
1854 	if (vp_oper->state.default_vlan == vp_admin->default_vlan &&
1855 	    vp_oper->state.default_qos == vp_admin->default_qos &&
1856 	    vp_oper->state.vlan_proto == vp_admin->vlan_proto &&
1857 	    vp_oper->state.qos_vport == vp_admin->qos_vport)
1858 		return 0;
1859 
1860 	if (!(priv->mfunc.master.slave_state[slave].active &&
1861 	      dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)) {
1862 		/* even if the UPDATE_QP command isn't supported, we still want
1863 		 * to set this VF link according to the admin directive
1864 		 */
1865 		return -1;
1866 	}
1867 
1868 	mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n",
1869 		 slave, port);
1870 	mlx4_dbg(dev, "vlan %d QoS %d link down\n",
1871 		 vp_admin->default_vlan, vp_admin->default_qos);
1872 
1873 	work = kzalloc(sizeof(*work), GFP_KERNEL);
1874 	if (!work)
1875 		return -ENOMEM;
1876 
1877 	if (vp_oper->state.default_vlan != vp_admin->default_vlan) {
1878 		if (MLX4_VGT != vp_admin->default_vlan) {
1879 			err = __mlx4_register_vlan(&priv->dev, port,
1880 						   vp_admin->default_vlan,
1881 						   &admin_vlan_ix);
1882 			if (err) {
1883 				kfree(work);
1884 				mlx4_warn(&priv->dev,
1885 					  "No vlan resources slave %d, port %d\n",
1886 					  slave, port);
1887 				return err;
1888 			}
1889 		} else {
1890 			admin_vlan_ix = NO_INDX;
1891 		}
1892 		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
1893 		mlx4_dbg(&priv->dev,
1894 			 "alloc vlan %d idx  %d slave %d port %d\n",
1895 			 (int)(vp_admin->default_vlan),
1896 			 admin_vlan_ix, slave, port);
1897 	}
1898 
1899 	/* save original vlan ix and vlan id */
1900 	work->orig_vlan_id = vp_oper->state.default_vlan;
1901 	work->orig_vlan_ix = vp_oper->vlan_idx;
1902 
1903 	/* handle new qos */
1904 	if (vp_oper->state.default_qos != vp_admin->default_qos)
1905 		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_QOS;
1906 
1907 	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN)
1908 		vp_oper->vlan_idx = admin_vlan_ix;
1909 
1910 	vp_oper->state.default_vlan = vp_admin->default_vlan;
1911 	vp_oper->state.default_qos = vp_admin->default_qos;
1912 	vp_oper->state.vlan_proto = vp_admin->vlan_proto;
1913 	vp_oper->state.qos_vport = vp_admin->qos_vport;
1914 
1915 	if (1 /* vp_admin->link_state == IFLA_VF_LINK_STATE_DISABLE */)
1916 		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE;
1917 
1918 	/* iterate over QPs owned by this slave, using UPDATE_QP */
1919 	work->port = port;
1920 	work->slave = slave;
1921 	work->qos = vp_oper->state.default_qos;
1922 	work->qos_vport = vp_oper->state.qos_vport;
1923 	work->vlan_id = vp_oper->state.default_vlan;
1924 	work->vlan_ix = vp_oper->vlan_idx;
1925 	work->vlan_proto = vp_oper->state.vlan_proto;
1926 	work->priv = priv;
1927 	INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler);
1928 	queue_work(priv->mfunc.master.comm_wq, &work->work);
1929 
1930 	return 0;
1931 }
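/*
 * Note: the QP updates themselves are deferred; mlx4_vf_immed_vlan_work_handler()
 * runs later on the comm workqueue and iterates over the QPs owned by the
 * slave using UPDATE_QP, with the work item queued above carrying both the
 * old vlan id/index and the new admin state it needs.
 */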
1932 
1933 static void mlx4_set_default_port_qos(struct mlx4_dev *dev, int port)
1934 {
1935 	struct mlx4_qos_manager *port_qos_ctl;
1936 	struct mlx4_priv *priv = mlx4_priv(dev);
1937 
1938 	port_qos_ctl = &priv->mfunc.master.qos_ctl[port];
1939 	bitmap_zero(port_qos_ctl->priority_bm, MLX4_NUM_UP);
1940 
1941 	/* Enable only default prio at PF init routine */
1942 	set_bit(MLX4_DEFAULT_QOS_PRIO, port_qos_ctl->priority_bm);
1943 }
1944 
1945 static void mlx4_allocate_port_vpps(struct mlx4_dev *dev, int port)
1946 {
1947 	int i;
1948 	int err;
1949 	int num_vfs;
1950 	u16 available_vpp;
1951 	u8 vpp_param[MLX4_NUM_UP];
1952 	struct mlx4_qos_manager *port_qos;
1953 	struct mlx4_priv *priv = mlx4_priv(dev);
1954 
1955 	err = mlx4_ALLOCATE_VPP_get(dev, port, &available_vpp, vpp_param);
1956 	if (err) {
1957 		mlx4_info(dev, "Failed to query available VPPs\n");
1958 		return;
1959 	}
1960 
1961 	port_qos = &priv->mfunc.master.qos_ctl[port];
1962 	num_vfs = (available_vpp /
1963 		   bitmap_weight(port_qos->priority_bm, MLX4_NUM_UP));
1964 
1965 	for (i = 0; i < MLX4_NUM_UP; i++) {
1966 		if (test_bit(i, port_qos->priority_bm))
1967 			vpp_param[i] = num_vfs;
1968 	}
1969 
1970 	err = mlx4_ALLOCATE_VPP_set(dev, port, vpp_param);
1971 	if (err) {
1972 		mlx4_info(dev, "Failed allocating VPPs\n");
1973 		return;
1974 	}
1975 
1976 	/* Query actual allocated VPP, just to make sure */
1977 	err = mlx4_ALLOCATE_VPP_get(dev, port, &available_vpp, vpp_param);
1978 	if (err) {
1979 		mlx4_info(dev, "Failed to query available VPPs\n");
1980 		return;
1981 	}
1982 
1983 	port_qos->num_of_qos_vfs = num_vfs;
1984 	mlx4_dbg(dev, "Port %d Available VPPs %d\n", port, available_vpp);
1985 
1986 	for (i = 0; i < MLX4_NUM_UP; i++)
1987 		mlx4_dbg(dev, "Port %d UP %d Allocated %d VPPs\n", port, i,
1988 			 vpp_param[i]);
1989 }
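/*
 * Illustrative example of the split above (hypothetical numbers): with 63
 * available VPPs on the port and only MLX4_DEFAULT_QOS_PRIO set in
 * priority_bm, num_vfs = 63 / 1 = 63, so vpp_param[MLX4_DEFAULT_QOS_PRIO]
 * becomes 63 while the other user priorities keep 0 VPPs.
 */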
1990 
1991 static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
1992 {
1993 	int port, err;
1994 	struct mlx4_vport_state *vp_admin;
1995 	struct mlx4_vport_oper_state *vp_oper;
1996 	struct mlx4_slave_state *slave_state =
1997 		&priv->mfunc.master.slave_state[slave];
1998 	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
1999 			&priv->dev, slave);
2000 	int min_port = find_first_bit(actv_ports.ports,
2001 				      priv->dev.caps.num_ports) + 1;
2002 	int max_port = min_port - 1 +
2003 		bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
2004 
2005 	for (port = min_port; port <= max_port; port++) {
2006 		if (!test_bit(port - 1, actv_ports.ports))
2007 			continue;
2008 		priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
2009 			priv->mfunc.master.vf_admin[slave].enable_smi[port];
2010 		vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
2011 		vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
2012 		if (vp_admin->vlan_proto != htons(ETH_P_8021AD) ||
2013 		    slave_state->vst_qinq_supported) {
2014 			vp_oper->state.vlan_proto   = vp_admin->vlan_proto;
2015 			vp_oper->state.default_vlan = vp_admin->default_vlan;
2016 			vp_oper->state.default_qos  = vp_admin->default_qos;
2017 		}
2018 		vp_oper->state.mac        = vp_admin->mac;
2019 		vp_oper->state.spoofchk   = vp_admin->spoofchk;
2020 		vp_oper->state.tx_rate    = vp_admin->tx_rate;
2021 		vp_oper->state.qos_vport  = vp_admin->qos_vport;
2022 		vp_oper->state.guid       = vp_admin->guid;
2023 
2024 		if (MLX4_VGT != vp_admin->default_vlan) {
2025 			err = __mlx4_register_vlan(&priv->dev, port,
2026 						   vp_admin->default_vlan, &(vp_oper->vlan_idx));
2027 			if (err) {
2028 				vp_oper->vlan_idx = NO_INDX;
2029 				vp_oper->state.default_vlan = MLX4_VGT;
2030 				vp_oper->state.vlan_proto = htons(ETH_P_8021Q);
2031 				mlx4_warn(&priv->dev,
2032 					  "No vlan resources slave %d, port %d\n",
2033 					  slave, port);
2034 				return err;
2035 			}
2036 			mlx4_dbg(&priv->dev, "alloc vlan %d idx  %d slave %d port %d\n",
2037 				 (int)(vp_oper->state.default_vlan),
2038 				 vp_oper->vlan_idx, slave, port);
2039 		}
2040 		if (vp_admin->spoofchk) {
2041 			vp_oper->mac_idx = __mlx4_register_mac(&priv->dev,
2042 							       port,
2043 							       vp_admin->mac);
2044 			if (0 > vp_oper->mac_idx) {
2045 				err = vp_oper->mac_idx;
2046 				vp_oper->mac_idx = NO_INDX;
2047 				mlx4_warn(&priv->dev,
2048 					  "No mac resources slave %d, port %d\n",
2049 					  slave, port);
2050 				return err;
2051 			}
2052 			mlx4_dbg(&priv->dev, "alloc mac %llx idx  %d slave %d port %d\n",
2053 				 (unsigned long long) vp_oper->state.mac, vp_oper->mac_idx, slave, port);
2054 		}
2055 	}
2056 	return 0;
2057 }
2058 
2059 static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave)
2060 {
2061 	int port;
2062 	struct mlx4_vport_oper_state *vp_oper;
2063 	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
2064 			&priv->dev, slave);
2065 	int min_port = find_first_bit(actv_ports.ports,
2066 				      priv->dev.caps.num_ports) + 1;
2067 	int max_port = min_port - 1 +
2068 		bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
2069 
2070 
2071 	for (port = min_port; port <= max_port; port++) {
2072 		if (!test_bit(port - 1, actv_ports.ports))
2073 			continue;
2074 		priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
2075 			MLX4_VF_SMI_DISABLED;
2076 		vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
2077 		if (NO_INDX != vp_oper->vlan_idx) {
2078 			__mlx4_unregister_vlan(&priv->dev,
2079 					       port, vp_oper->state.default_vlan);
2080 			vp_oper->vlan_idx = NO_INDX;
2081 		}
2082 		if (NO_INDX != vp_oper->mac_idx) {
2083 			__mlx4_unregister_mac(&priv->dev, port, vp_oper->state.mac);
2084 			vp_oper->mac_idx = NO_INDX;
2085 		}
2086 	}
2087 	return;
2088 }
2089 
2090 static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
2091 			       u16 param, u8 toggle)
2092 {
2093 	struct mlx4_priv *priv = mlx4_priv(dev);
2094 	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2095 	u32 reply;
2096 	u8 is_going_down = 0;
2097 	int i;
2098 	unsigned long flags;
2099 
2100 	slave_state[slave].comm_toggle ^= 1;
2101 	reply = (u32) slave_state[slave].comm_toggle << 31;
2102 	if (toggle != slave_state[slave].comm_toggle) {
2103 		mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n",
2104 			  toggle, slave);
2105 		goto reset_slave;
2106 	}
2107 	if (cmd == MLX4_COMM_CMD_RESET) {
2108 		mlx4_warn(dev, "Received reset from slave:%d\n", slave);
2109 		slave_state[slave].active = false;
2110 		slave_state[slave].old_vlan_api = false;
2111 		slave_state[slave].vst_qinq_supported = false;
2112 		mlx4_master_deactivate_admin_state(priv, slave);
2113 		for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
2114 			slave_state[slave].event_eq[i].eqn = -1;
2115 			slave_state[slave].event_eq[i].token = 0;
2116 		}
2117 		/* Check if we are in the middle of the FLR process;
2118 		 * if so, return "retry" status to the slave. */
2119 		if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd)
2120 			goto inform_slave_state;
2121 
2122 		mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, slave);
2123 
2124 		/* write the version in the event field */
2125 		reply |= mlx4_comm_get_version();
2126 
2127 		goto reset_slave;
2128 	}
2129 	/* Command from slave in the middle of FLR */
2130 	if (cmd != MLX4_COMM_CMD_RESET &&
2131 	    MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
2132 		mlx4_warn(dev, "slave:%d is trying to run cmd(0x%x) in the middle of FLR\n",
2133 			  slave, cmd);
2134 		return;
2135 	}
2136 
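	/*
	 * The slave delivers its 64-bit VHCR DMA address to the master in
	 * 16-bit chunks, one comm command at a time, most significant chunk
	 * first.  Sketch of the accumulation performed by the cases below,
	 * assuming a hypothetical address 0x1122334455660000:
	 *
	 *   VHCR0:   vhcr_dma  = 0x1122ULL << 48
	 *   VHCR1:   vhcr_dma |= 0x3344ULL << 32
	 *   VHCR2:   vhcr_dma |= 0x5566ULL << 16
	 *   VHCR_EN: vhcr_dma |= 0x0000, then the slave is activated
	 */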
2137 	switch (cmd) {
2138 	case MLX4_COMM_CMD_VHCR0:
2139 		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
2140 			goto reset_slave;
2141 		slave_state[slave].vhcr_dma = ((u64) param) << 48;
2142 		priv->mfunc.master.slave_state[slave].cookie = 0;
2143 		break;
2144 	case MLX4_COMM_CMD_VHCR1:
2145 		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
2146 			goto reset_slave;
2147 		slave_state[slave].vhcr_dma |= ((u64) param) << 32;
2148 		break;
2149 	case MLX4_COMM_CMD_VHCR2:
2150 		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
2151 			goto reset_slave;
2152 		slave_state[slave].vhcr_dma |= ((u64) param) << 16;
2153 		break;
2154 	case MLX4_COMM_CMD_VHCR_EN:
2155 		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
2156 			goto reset_slave;
2157 		slave_state[slave].vhcr_dma |= param;
2158 		if (mlx4_master_activate_admin_state(priv, slave))
2159 			goto reset_slave;
2160 		slave_state[slave].active = true;
2161 		mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave);
2162 		break;
2163 	case MLX4_COMM_CMD_VHCR_POST:
2164 		if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
2165 		    (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST)) {
2166 			mlx4_warn(dev, "slave:%d is out of sync, cmd=0x%x, last command=0x%x, reset is needed\n",
2167 				  slave, cmd, slave_state[slave].last_cmd);
2168 			goto reset_slave;
2169 		}
2170 
2171 		mutex_lock(&priv->cmd.slave_cmd_mutex);
2172 		if (mlx4_master_process_vhcr(dev, slave, NULL)) {
2173 			mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n",
2174 				 slave);
2175 			mutex_unlock(&priv->cmd.slave_cmd_mutex);
2176 			goto reset_slave;
2177 		}
2178 		mutex_unlock(&priv->cmd.slave_cmd_mutex);
2179 		break;
2180 	default:
2181 		mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
2182 		goto reset_slave;
2183 	}
2184 	spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
2185 	if (!slave_state[slave].is_slave_going_down)
2186 		slave_state[slave].last_cmd = cmd;
2187 	else
2188 		is_going_down = 1;
2189 	spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
2190 	if (is_going_down) {
2191 		mlx4_warn(dev, "Slave is going down, aborting command(%d) executing from slave:%d\n",
2192 			  cmd, slave);
2193 		return;
2194 	}
2195 	__raw_writel((__force u32) cpu_to_be32(reply),
2196 		     &priv->mfunc.comm[slave].slave_read);
2197 	mmiowb();
2198 
2199 	return;
2200 
2201 reset_slave:
2202 	/* cleanup any slave resources */
2203 	if (dev->persist->interface_state & MLX4_INTERFACE_STATE_UP)
2204 		mlx4_delete_all_resources_for_slave(dev, slave);
2205 
2206 	if (cmd != MLX4_COMM_CMD_RESET) {
2207 		mlx4_warn(dev, "Turn on internal error to force reset, slave=%d, cmd=0x%x\n",
2208 			  slave, cmd);
2209 		/* Turn on internal error, letting the slave reset itself
2210 		 * immediately; otherwise it might take until the command timeout passes.
2211 		 */
2212 		reply |= ((u32)COMM_CHAN_EVENT_INTERNAL_ERR);
2213 	}
2214 
2215 	spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
2216 	if (!slave_state[slave].is_slave_going_down)
2217 		slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
2218 	spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
2219 	/* With the slave in the middle of FLR, no need to clean resources again. */
2220 inform_slave_state:
2221 	memset(&slave_state[slave].event_eq, 0,
2222 	       sizeof(struct mlx4_slave_event_eq_info));
2223 	__raw_writel((__force u32) cpu_to_be32(reply),
2224 		     &priv->mfunc.comm[slave].slave_read);
2225 	wmb();
2226 }
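/*
 * Layout of the 32-bit comm-channel word as decoded above and in
 * mlx4_master_comm_channel() below (inferred from the shifts and masks
 * used, not from a formal spec):
 *
 *   bit  31     - toggle, flipped by the slave for every new command
 *   bits 23..16 - command opcode ((comm_cmd >> 16) & 0xff)
 *   bits 15..0  - parameter, e.g. one 16-bit chunk of the VHCR address
 */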
2227 
2228 /* master command processing */
2229 void mlx4_master_comm_channel(struct work_struct *work)
2230 {
2231 	struct mlx4_mfunc_master_ctx *master =
2232 		container_of(work,
2233 			     struct mlx4_mfunc_master_ctx,
2234 			     comm_work);
2235 	struct mlx4_mfunc *mfunc =
2236 		container_of(master, struct mlx4_mfunc, master);
2237 	struct mlx4_priv *priv =
2238 		container_of(mfunc, struct mlx4_priv, mfunc);
2239 	struct mlx4_dev *dev = &priv->dev;
2240 	__be32 *bit_vec;
2241 	u32 comm_cmd;
2242 	u32 vec;
2243 	int i, j, slave;
2244 	int toggle;
2245 	int served = 0;
2246 	int reported = 0;
2247 	u32 slt;
2248 
2249 	bit_vec = master->comm_arm_bit_vector;
2250 	for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) {
2251 		vec = be32_to_cpu(bit_vec[i]);
2252 		for (j = 0; j < 32; j++) {
2253 			if (!(vec & (1 << j)))
2254 				continue;
2255 			++reported;
2256 			slave = (i * 32) + j;
2257 			comm_cmd = swab32(readl(
2258 					  &mfunc->comm[slave].slave_write));
2259 			slt = swab32(readl(&mfunc->comm[slave].slave_read))
2260 				     >> 31;
2261 			toggle = comm_cmd >> 31;
2262 			if (toggle != slt) {
2263 				if (master->slave_state[slave].comm_toggle
2264 				    != slt) {
2265 					pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resyncing.\n",
2266 						slave, slt,
2267 						master->slave_state[slave].comm_toggle);
2268 					master->slave_state[slave].comm_toggle =
2269 						slt;
2270 				}
2271 				mlx4_master_do_cmd(dev, slave,
2272 						   comm_cmd >> 16 & 0xff,
2273 						   comm_cmd & 0xffff, toggle);
2274 				++served;
2275 			}
2276 		}
2277 	}
2278 
2279 	if (reported && reported != served)
2280 		mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n",
2281 			  reported, served);
2282 
2283 	if (mlx4_ARM_COMM_CHANNEL(dev))
2284 		mlx4_warn(dev, "Failed to arm comm channel events\n");
2285 }
2286 
2287 static int sync_toggles(struct mlx4_dev *dev)
2288 {
2289 	struct mlx4_priv *priv = mlx4_priv(dev);
2290 	u32 wr_toggle;
2291 	u32 rd_toggle;
2292 	unsigned long end;
2293 
2294 	wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write));
2295 	if (wr_toggle == 0xffffffff)
2296 		end = jiffies + msecs_to_jiffies(30000);
2297 	else
2298 		end = jiffies + msecs_to_jiffies(5000);
2299 
2300 	while (time_before(jiffies, end)) {
2301 		rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read));
2302 		if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) {
2303 			/* PCI might be offline */
2304 			msleep(100);
2305 			wr_toggle = swab32(readl(&priv->mfunc.comm->
2306 					   slave_write));
2307 			continue;
2308 		}
2309 
2310 		if (rd_toggle >> 31 == wr_toggle >> 31) {
2311 			priv->cmd.comm_toggle = rd_toggle >> 31;
2312 			return 0;
2313 		}
2314 
2315 		cond_resched();
2316 	}
2317 
2318 	/*
2319 	 * We could reach here if, for example, the previous VM using this
2320 	 * function misbehaved and left the channel in an unsynced state. We
2321 	 * should fix this here and give this VM a chance to use a properly
2322 	 * synced channel.
2323 	 */
2324 	mlx4_warn(dev, "recovering from a previously misbehaved VM\n");
2325 	__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
2326 	__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
2327 	priv->cmd.comm_toggle = 0;
2328 
2329 	return 0;
2330 }
2331 
2332 int mlx4_multi_func_init(struct mlx4_dev *dev)
2333 {
2334 	struct mlx4_priv *priv = mlx4_priv(dev);
2335 	struct mlx4_slave_state *s_state;
2336 	int i, j, err, port;
2337 
2338 	if (mlx4_is_master(dev))
2339 		priv->mfunc.comm =
2340 		ioremap(pci_resource_start(dev->persist->pdev,
2341 					   priv->fw.comm_bar) +
2342 			priv->fw.comm_base, MLX4_COMM_PAGESIZE);
2343 	else
2344 		priv->mfunc.comm =
2345 		ioremap(pci_resource_start(dev->persist->pdev, 2) +
2346 			MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
2347 	if (!priv->mfunc.comm) {
2348 		mlx4_err(dev, "Couldn't map communication vector\n");
2349 		goto err_vhcr;
2350 	}
2351 
2352 	if (mlx4_is_master(dev)) {
2353 		struct mlx4_vf_oper_state *vf_oper;
2354 		struct mlx4_vf_admin_state *vf_admin;
2355 
2356 		priv->mfunc.master.slave_state =
2357 			kzalloc(dev->num_slaves *
2358 				sizeof(struct mlx4_slave_state), GFP_KERNEL);
2359 		if (!priv->mfunc.master.slave_state)
2360 			goto err_comm;
2361 
2362 		priv->mfunc.master.vf_admin =
2363 			kzalloc(dev->num_slaves *
2364 				sizeof(struct mlx4_vf_admin_state), GFP_KERNEL);
2365 		if (!priv->mfunc.master.vf_admin)
2366 			goto err_comm_admin;
2367 
2368 		priv->mfunc.master.vf_oper =
2369 			kzalloc(dev->num_slaves *
2370 				sizeof(struct mlx4_vf_oper_state), GFP_KERNEL);
2371 		if (!priv->mfunc.master.vf_oper)
2372 			goto err_comm_oper;
2373 
2374 		for (i = 0; i < dev->num_slaves; ++i) {
2375 			vf_admin = &priv->mfunc.master.vf_admin[i];
2376 			vf_oper = &priv->mfunc.master.vf_oper[i];
2377 			s_state = &priv->mfunc.master.slave_state[i];
2378 			s_state->last_cmd = MLX4_COMM_CMD_RESET;
2379 			s_state->vst_qinq_supported = false;
2380 			mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]);
2381 			for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
2382 				s_state->event_eq[j].eqn = -1;
2383 			__raw_writel((__force u32) 0,
2384 				     &priv->mfunc.comm[i].slave_write);
2385 			__raw_writel((__force u32) 0,
2386 				     &priv->mfunc.comm[i].slave_read);
2387 			mmiowb();
2388 			for (port = 1; port <= MLX4_MAX_PORTS; port++) {
2389 				struct mlx4_vport_state *admin_vport;
2390 				struct mlx4_vport_state *oper_vport;
2391 
2392 				s_state->vlan_filter[port] =
2393 					kzalloc(sizeof(struct mlx4_vlan_fltr),
2394 						GFP_KERNEL);
2395 				if (!s_state->vlan_filter[port]) {
2396 					if (--port)
2397 						kfree(s_state->vlan_filter[port]);
2398 					goto err_slaves;
2399 				}
2400 
2401 				admin_vport = &vf_admin->vport[port];
2402 				oper_vport = &vf_oper->vport[port].state;
2403 				INIT_LIST_HEAD(&s_state->mcast_filters[port]);
2404 				admin_vport->default_vlan = MLX4_VGT;
2405 				oper_vport->default_vlan = MLX4_VGT;
2406 				admin_vport->qos_vport =
2407 						MLX4_VPP_DEFAULT_VPORT;
2408 				oper_vport->qos_vport = MLX4_VPP_DEFAULT_VPORT;
2409 				admin_vport->vlan_proto = htons(ETH_P_8021Q);
2410 				oper_vport->vlan_proto = htons(ETH_P_8021Q);
2411 				vf_oper->vport[port].vlan_idx = NO_INDX;
2412 				vf_oper->vport[port].mac_idx = NO_INDX;
2413 				mlx4_set_random_admin_guid(dev, i, port);
2414 			}
2415 			spin_lock_init(&s_state->lock);
2416 		}
2417 
2418 		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP) {
2419 			for (port = 1; port <= dev->caps.num_ports; port++) {
2420 				if (mlx4_is_eth(dev, port)) {
2421 					mlx4_set_default_port_qos(dev, port);
2422 					mlx4_allocate_port_vpps(dev, port);
2423 				}
2424 			}
2425 		}
2426 
2427 		memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
2428 		priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
2429 		INIT_WORK(&priv->mfunc.master.comm_work,
2430 			  mlx4_master_comm_channel);
2431 		INIT_WORK(&priv->mfunc.master.slave_event_work,
2432 			  mlx4_gen_slave_eqe);
2433 		INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
2434 			  mlx4_master_handle_slave_flr);
2435 		spin_lock_init(&priv->mfunc.master.slave_state_lock);
2436 		spin_lock_init(&priv->mfunc.master.slave_eq.event_lock);
2437 		priv->mfunc.master.comm_wq =
2438 			create_singlethread_workqueue("mlx4_comm");
2439 		if (!priv->mfunc.master.comm_wq)
2440 			goto err_slaves;
2441 
2442 		if (mlx4_init_resource_tracker(dev))
2443 			goto err_thread;
2444 
2445 	} else {
2446 		err = sync_toggles(dev);
2447 		if (err) {
2448 			mlx4_err(dev, "Couldn't sync toggles\n");
2449 			goto err_comm;
2450 		}
2451 	}
2452 	return 0;
2453 
2454 err_thread:
2455 	flush_workqueue(priv->mfunc.master.comm_wq);
2456 	destroy_workqueue(priv->mfunc.master.comm_wq);
2457 err_slaves:
2458 	while (i--) {
2459 		for (port = 1; port <= MLX4_MAX_PORTS; port++)
2460 			kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
2461 	}
2462 	kfree(priv->mfunc.master.vf_oper);
2463 err_comm_oper:
2464 	kfree(priv->mfunc.master.vf_admin);
2465 err_comm_admin:
2466 	kfree(priv->mfunc.master.slave_state);
2467 err_comm:
2468 	iounmap(priv->mfunc.comm);
2469 	priv->mfunc.comm = NULL;
2470 err_vhcr:
2471 	dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
2472 			  priv->mfunc.vhcr,
2473 			  priv->mfunc.vhcr_dma);
2474 	priv->mfunc.vhcr = NULL;
2475 	return -ENOMEM;
2476 }
2477 
2478 int mlx4_cmd_init(struct mlx4_dev *dev)
2479 {
2480 	struct mlx4_priv *priv = mlx4_priv(dev);
2481 	int flags = 0;
2482 
2483 	if (!priv->cmd.initialized) {
2484 		init_rwsem(&priv->cmd.switch_sem);
2485 		mutex_init(&priv->cmd.slave_cmd_mutex);
2486 		sema_init(&priv->cmd.poll_sem, 1);
2487 		priv->cmd.use_events = 0;
2488 		priv->cmd.toggle     = 1;
2489 		priv->cmd.initialized = 1;
2490 		flags |= MLX4_CMD_CLEANUP_STRUCT;
2491 	}
2492 
2493 	if (!mlx4_is_slave(dev) && !priv->cmd.hcr) {
2494 		priv->cmd.hcr = ioremap(pci_resource_start(dev->persist->pdev,
2495 					0) + MLX4_HCR_BASE, MLX4_HCR_SIZE);
2496 		if (!priv->cmd.hcr) {
2497 			mlx4_err(dev, "Couldn't map command register\n");
2498 			goto err;
2499 		}
2500 		flags |= MLX4_CMD_CLEANUP_HCR;
2501 	}
2502 
2503 	if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) {
2504 		priv->mfunc.vhcr = dma_alloc_coherent(&dev->persist->pdev->dev,
2505 						      PAGE_SIZE,
2506 						      &priv->mfunc.vhcr_dma,
2507 						      GFP_KERNEL);
2508 		if (!priv->mfunc.vhcr)
2509 			goto err;
2510 
2511 		flags |= MLX4_CMD_CLEANUP_VHCR;
2512 	}
2513 
2514 	if (!priv->cmd.pool) {
2515 		priv->cmd.pool = pci_pool_create("mlx4_cmd",
2516 						 dev->persist->pdev,
2517 						 MLX4_MAILBOX_SIZE,
2518 						 MLX4_MAILBOX_SIZE, 0);
2519 		if (!priv->cmd.pool)
2520 			goto err;
2521 
2522 		flags |= MLX4_CMD_CLEANUP_POOL;
2523 	}
2524 
2525 	return 0;
2526 
2527 err:
2528 	mlx4_cmd_cleanup(dev, flags);
2529 	return -ENOMEM;
2530 }
2531 
2532 void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev)
2533 {
2534 	struct mlx4_priv *priv = mlx4_priv(dev);
2535 	int slave;
2536 	u32 slave_read;
2537 
2538 	/* If the comm channel has not yet been initialized,
2539 	 * skip reporting the internal error event to all
2540 	 * the communication channels.
2541 	 */
2542 	if (!priv->mfunc.comm)
2543 		return;
2544 
2545 	/* Report an internal error event to all
2546 	 * communication channels.
2547 	 */
2548 	for (slave = 0; slave < dev->num_slaves; slave++) {
2549 		slave_read = swab32(readl(&priv->mfunc.comm[slave].slave_read));
2550 		slave_read |= (u32)COMM_CHAN_EVENT_INTERNAL_ERR;
2551 		__raw_writel((__force u32)cpu_to_be32(slave_read),
2552 			     &priv->mfunc.comm[slave].slave_read);
2553 		/* Make sure that our comm channel write doesn't
2554 		 * get mixed in with writes from another CPU.
2555 		 */
2556 		mmiowb();
2557 	}
2558 }
2559 
2560 void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
2561 {
2562 	struct mlx4_priv *priv = mlx4_priv(dev);
2563 	int i, port;
2564 
2565 	if (mlx4_is_master(dev)) {
2566 		flush_workqueue(priv->mfunc.master.comm_wq);
2567 		destroy_workqueue(priv->mfunc.master.comm_wq);
2568 		for (i = 0; i < dev->num_slaves; i++) {
2569 			for (port = 1; port <= MLX4_MAX_PORTS; port++)
2570 				kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
2571 		}
2572 		kfree(priv->mfunc.master.slave_state);
2573 		kfree(priv->mfunc.master.vf_admin);
2574 		kfree(priv->mfunc.master.vf_oper);
2575 		dev->num_slaves = 0;
2576 	}
2577 
2578 	iounmap(priv->mfunc.comm);
2579 	priv->mfunc.comm = NULL;
2580 }
2581 
2582 void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask)
2583 {
2584 	struct mlx4_priv *priv = mlx4_priv(dev);
2585 
2586 	if (priv->cmd.pool && (cleanup_mask & MLX4_CMD_CLEANUP_POOL)) {
2587 		pci_pool_destroy(priv->cmd.pool);
2588 		priv->cmd.pool = NULL;
2589 	}
2590 
2591 	if (!mlx4_is_slave(dev) && priv->cmd.hcr &&
2592 	    (cleanup_mask & MLX4_CMD_CLEANUP_HCR)) {
2593 		iounmap(priv->cmd.hcr);
2594 		priv->cmd.hcr = NULL;
2595 	}
2596 	if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr &&
2597 	    (cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) {
2598 		dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
2599 				  priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
2600 		priv->mfunc.vhcr = NULL;
2601 	}
2602 	if (priv->cmd.initialized && (cleanup_mask & MLX4_CMD_CLEANUP_STRUCT))
2603 		priv->cmd.initialized = 0;
2604 }
2605 
2606 /*
2607  * Switch to using events to issue FW commands (can only be called
2608  * after event queue for command events has been initialized).
2609  */
2610 int mlx4_cmd_use_events(struct mlx4_dev *dev)
2611 {
2612 	struct mlx4_priv *priv = mlx4_priv(dev);
2613 	int i;
2614 	int err = 0;
2615 
2616 	priv->cmd.context = kmalloc(priv->cmd.max_cmds *
2617 				   sizeof (struct mlx4_cmd_context),
2618 				   GFP_KERNEL);
2619 	if (!priv->cmd.context)
2620 		return -ENOMEM;
2621 
2622 	down_write(&priv->cmd.switch_sem);
2623 	for (i = 0; i < priv->cmd.max_cmds; ++i) {
2624 		priv->cmd.context[i].token = i;
2625 		priv->cmd.context[i].next  = i + 1;
2626 		/* To support fatal error flow, initialize all
2627 		 * cmd contexts to allow simulating completions
2628 		 * with complete() at any time.
2629 		 */
2630 		init_completion(&priv->cmd.context[i].done);
2631 	}
2632 
2633 	priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
2634 	priv->cmd.free_head = 0;
2635 
2636 	sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
2637 
2638 	for (priv->cmd.token_mask = 1;
2639 	     priv->cmd.token_mask < priv->cmd.max_cmds;
2640 	     priv->cmd.token_mask <<= 1)
2641 		; /* nothing */
2642 	--priv->cmd.token_mask;
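	/* token_mask is now the smallest power of two >= max_cmds, minus one;
	 * e.g. a hypothetical max_cmds of 10 walks 1, 2, 4, 8, 16 and yields
	 * token_mask = 0xf.
	 */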
2643 
2644 	down(&priv->cmd.poll_sem);
2645 	priv->cmd.use_events = 1;
2646 	up_write(&priv->cmd.switch_sem);
2647 
2648 	return err;
2649 }
2650 
2651 /*
2652  * Switch back to polling (used when shutting down the device)
2653  */
2654 void mlx4_cmd_use_polling(struct mlx4_dev *dev)
2655 {
2656 	struct mlx4_priv *priv = mlx4_priv(dev);
2657 	int i;
2658 
2659 	down_write(&priv->cmd.switch_sem);
2660 	priv->cmd.use_events = 0;
2661 
2662 	for (i = 0; i < priv->cmd.max_cmds; ++i)
2663 		down(&priv->cmd.event_sem);
2664 
2665 	kfree(priv->cmd.context);
2666 
2667 	up(&priv->cmd.poll_sem);
2668 	up_write(&priv->cmd.switch_sem);
2669 }
2670 
2671 struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
2672 {
2673 	struct mlx4_cmd_mailbox *mailbox;
2674 
2675 	mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL);
2676 	if (!mailbox)
2677 		return ERR_PTR(-ENOMEM);
2678 
2679 	mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
2680 				      &mailbox->dma);
2681 	if (!mailbox->buf) {
2682 		kfree(mailbox);
2683 		return ERR_PTR(-ENOMEM);
2684 	}
2685 
2686 	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
2687 
2688 	return mailbox;
2689 }
2690 EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
2691 
2692 void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
2693 			   struct mlx4_cmd_mailbox *mailbox)
2694 {
2695 	if (!mailbox)
2696 		return;
2697 
2698 	pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
2699 	kfree(mailbox);
2700 }
2701 EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
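/*
 * Minimal usage sketch for the mailbox API above, assuming a caller with a
 * valid mlx4_dev; the opcode and output handling are placeholders, not a
 * real command:
 *
 *	struct mlx4_cmd_mailbox *mailbox = mlx4_alloc_cmd_mailbox(dev);
 *
 *	if (IS_ERR(mailbox))
 *		return PTR_ERR(mailbox);
 *	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, opcode,
 *			   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 *	if (!err)
 *		memcpy(out_buf, mailbox->buf, out_len);
 *	mlx4_free_cmd_mailbox(dev, mailbox);
 */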
2702 
2703 u32 mlx4_comm_get_version(void)
2704 {
2705 	 return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
2706 }
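/*
 * With the current definitions (CMD_CHAN_IF_REV == 1, CMD_CHAN_VER == 1),
 * the function above returns 0x0101: interface revision in bits 15..8,
 * channel version in bits 7..0.
 */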
2707 
2708 static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
2709 {
2710 	if ((vf < 0) || (vf >= dev->persist->num_vfs)) {
2711 		mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n",
2712 			 vf, dev->persist->num_vfs);
2713 		return -EINVAL;
2714 	}
2715 
2716 	return vf+1;
2717 }
2718 
2719 int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
2720 {
2721 	if (slave < 1 || slave > dev->persist->num_vfs) {
2722 		mlx4_err(dev,
2723 			 "Bad slave number:%d (number of activated slaves: %lu)\n",
2724 			 slave, dev->num_slaves);
2725 		return -EINVAL;
2726 	}
2727 	return slave - 1;
2728 }
2729 
2730 void mlx4_cmd_wake_completions(struct mlx4_dev *dev)
2731 {
2732 	struct mlx4_priv *priv = mlx4_priv(dev);
2733 	struct mlx4_cmd_context *context;
2734 	int i;
2735 
2736 	spin_lock(&priv->cmd.context_lock);
2737 	if (priv->cmd.context) {
2738 		for (i = 0; i < priv->cmd.max_cmds; ++i) {
2739 			context = &priv->cmd.context[i];
2740 			context->fw_status = CMD_STAT_INTERNAL_ERR;
2741 			context->result    =
2742 				mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
2743 			complete(&context->done);
2744 		}
2745 	}
2746 	spin_unlock(&priv->cmd.context_lock);
2747 }
2748 
2749 struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave)
2750 {
2751 	struct mlx4_active_ports actv_ports;
2752 	int vf;
2753 
2754 	bitmap_zero(actv_ports.ports, MLX4_MAX_PORTS);
2755 
2756 	if (slave == 0) {
2757 		bitmap_fill(actv_ports.ports, dev->caps.num_ports);
2758 		return actv_ports;
2759 	}
2760 
2761 	vf = mlx4_get_vf_indx(dev, slave);
2762 	if (vf < 0)
2763 		return actv_ports;
2764 
2765 	bitmap_set(actv_ports.ports, dev->dev_vfs[vf].min_port - 1,
2766 		   min((int)dev->dev_vfs[vf].n_ports,
2767 		   dev->caps.num_ports));
2768 
2769 	return actv_ports;
2770 }
2771 EXPORT_SYMBOL_GPL(mlx4_get_active_ports);
2772 
2773 int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port)
2774 {
2775 	unsigned n;
2776 	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2777 	unsigned m = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2778 
2779 	if (port <= 0 || port > m)
2780 		return -EINVAL;
2781 
2782 	n = find_first_bit(actv_ports.ports, dev->caps.num_ports);
2783 	if (port <= n)
2784 		port = n + 1;
2785 
2786 	return port;
2787 }
2788 EXPORT_SYMBOL_GPL(mlx4_slave_convert_port);
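/*
 * Illustrative example: for a hypothetical slave whose active-port bitmap
 * contains only physical port 2, m == 1 and n == 1 above, so the only
 * valid slave port, 1, converts to physical port 2.
 */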
2789 
2790 int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port)
2791 {
2792 	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2793 	if (test_bit(port - 1, actv_ports.ports))
2794 		return port -
2795 			find_first_bit(actv_ports.ports, dev->caps.num_ports);
2796 
2797 	return -1;
2798 }
2799 EXPORT_SYMBOL_GPL(mlx4_phys_to_slave_port);
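/*
 * The inverse mapping: with the same single-port example (only physical
 * port 2 active), mlx4_phys_to_slave_port() returns 2 - 1 = 1 for port 2
 * and -1 for port 1, which the slave does not own.
 */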
2800 
2801 struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
2802 						   int port)
2803 {
2804 	unsigned i;
2805 	struct mlx4_slaves_pport slaves_pport;
2806 
2807 	bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
2808 
2809 	if (port <= 0 || port > dev->caps.num_ports)
2810 		return slaves_pport;
2811 
2812 	for (i = 0; i < dev->persist->num_vfs + 1; i++) {
2813 		struct mlx4_active_ports actv_ports =
2814 			mlx4_get_active_ports(dev, i);
2815 		if (test_bit(port - 1, actv_ports.ports))
2816 			set_bit(i, slaves_pport.slaves);
2817 	}
2818 
2819 	return slaves_pport;
2820 }
2821 EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport);
2822 
2823 struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
2824 		struct mlx4_dev *dev,
2825 		const struct mlx4_active_ports *crit_ports)
2826 {
2827 	unsigned i;
2828 	struct mlx4_slaves_pport slaves_pport;
2829 
2830 	bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
2831 
2832 	for (i = 0; i < dev->persist->num_vfs + 1; i++) {
2833 		struct mlx4_active_ports actv_ports =
2834 			mlx4_get_active_ports(dev, i);
2835 		if (bitmap_equal(crit_ports->ports, actv_ports.ports,
2836 				 dev->caps.num_ports))
2837 			set_bit(i, slaves_pport.slaves);
2838 	}
2839 
2840 	return slaves_pport;
2841 }
2842 EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);
2843 
2844 static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
2845 {
2846 	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2847 	int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports)
2848 			+ 1;
2849 	int max_port = min_port +
2850 		bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2851 
2852 	if (port < min_port)
2853 		port = min_port;
2854 	else if (port >= max_port)
2855 		port = max_port - 1;
2856 
2857 	return port;
2858 }
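/*
 * Clamping sketch, continuing the single-port example (only physical
 * port 2 active): min_port == 2 and max_port == 3, so any requested port
 * is clamped to physical port 2 by the function above.
 */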
2859 
2860 static int mlx4_set_vport_qos(struct mlx4_priv *priv, int slave, int port,
2861 			      int max_tx_rate)
2862 {
2863 	int i;
2864 	int err;
2865 	struct mlx4_qos_manager *port_qos;
2866 	struct mlx4_dev *dev = &priv->dev;
2867 	struct mlx4_vport_qos_param vpp_qos[MLX4_NUM_UP];
2868 
2869 	port_qos = &priv->mfunc.master.qos_ctl[port];
2870 	memset(vpp_qos, 0, sizeof(struct mlx4_vport_qos_param) * MLX4_NUM_UP);
2871 
2872 	if (slave > port_qos->num_of_qos_vfs) {
2873 		mlx4_info(dev, "No available VPP resources for this VF\n");
2874 		return -EINVAL;
2875 	}
2876 
2877 	/* A query for the default QoS values from Vport 0 is needed */
2878 	err = mlx4_SET_VPORT_QOS_get(dev, port, 0, vpp_qos);
2879 	if (err) {
2880 		mlx4_info(dev, "Failed to query Vport 0 QoS values\n");
2881 		return err;
2882 	}
2883 
2884 	for (i = 0; i < MLX4_NUM_UP; i++) {
2885 		if (test_bit(i, port_qos->priority_bm) && max_tx_rate) {
2886 			vpp_qos[i].max_avg_bw = max_tx_rate;
2887 			vpp_qos[i].enable = 1;
2888 		} else {
2889 			/* If the user supplied tx_rate == 0, no rate limit
2890 			 * configuration is required, so we leave the
2891 			 * value of max_avg_bw as queried from Vport 0.
2892 			 */
2893 			vpp_qos[i].enable = 0;
2894 		}
2895 	}
2896 
2897 	err = mlx4_SET_VPORT_QOS_set(dev, port, slave, vpp_qos);
2898 	if (err) {
2899 		mlx4_info(dev, "Failed to set Vport %d QoS values\n", slave);
2900 		return err;
2901 	}
2902 
2903 	return 0;
2904 }
2905 
2906 static bool mlx4_is_vf_vst_and_prio_qos(struct mlx4_dev *dev, int port,
2907 					struct mlx4_vport_state *vf_admin)
2908 {
2909 	struct mlx4_qos_manager *info;
2910 	struct mlx4_priv *priv = mlx4_priv(dev);
2911 
2912 	if (!mlx4_is_master(dev) ||
2913 	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
2914 		return false;
2915 
2916 	info = &priv->mfunc.master.qos_ctl[port];
2917 
2918 	if (vf_admin->default_vlan != MLX4_VGT &&
2919 	    test_bit(vf_admin->default_qos, info->priority_bm))
2920 		return true;
2921 
2922 	return false;
2923 }
2924 
2925 static bool mlx4_valid_vf_state_change(struct mlx4_dev *dev, int port,
2926 				       struct mlx4_vport_state *vf_admin,
2927 				       int vlan, int qos)
2928 {
2929 	struct mlx4_vport_state dummy_admin = {0};
2930 
2931 	if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) ||
2932 	    !vf_admin->tx_rate)
2933 		return true;
2934 
2935 	dummy_admin.default_qos = qos;
2936 	dummy_admin.default_vlan = vlan;
2937 
2938 	/* The VF wants to move to another VST state which is valid with the
2939 	 * current rate limit: either a different default vlan in VST or
2940 	 * another supported QoS priority. Otherwise we don't allow this
2941 	 * change while the TX rate is still configured.
2942 	 */
2943 	if (mlx4_is_vf_vst_and_prio_qos(dev, port, &dummy_admin))
2944 		return true;
2945 
2946 	mlx4_info(dev, "Cannot change VF state to %s while rate is set\n",
2947 		  (vlan == MLX4_VGT) ? "VGT" : "VST");
2948 
2949 	if (vlan != MLX4_VGT)
2950 		mlx4_info(dev, "VST priority %d not supported for QoS\n", qos);
2951 
2952 	mlx4_info(dev, "Please set rate to 0 prior to this VF state change\n");
2953 
2954 	return false;
2955 }
2956 
2957 int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
2958 {
2959 	struct mlx4_priv *priv = mlx4_priv(dev);
2960 	struct mlx4_vport_state *s_info;
2961 	int slave;
2962 
2963 	if (!mlx4_is_master(dev))
2964 		return -EPROTONOSUPPORT;
2965 
2966 	slave = mlx4_get_slave_indx(dev, vf);
2967 	if (slave < 0)
2968 		return -EINVAL;
2969 
2970 	port = mlx4_slaves_closest_port(dev, slave, port);
2971 	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2972 	s_info->mac = mac;
2973 	mlx4_info(dev, "default mac on vf %d port %d set to %llX; will take effect only after vf restart\n",
2974 		  vf, port, (unsigned long long)s_info->mac);
2975 	return 0;
2976 }
2977 EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
2978 
2979 
2980 int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos,
2981 		     __be16 proto)
2982 {
2983 	struct mlx4_priv *priv = mlx4_priv(dev);
2984 	struct mlx4_vport_state *vf_admin;
2985 	struct mlx4_slave_state *slave_state;
2986 	struct mlx4_vport_oper_state *vf_oper;
2987 	int slave;
2988 
2989 	if ((!mlx4_is_master(dev)) ||
2990 	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL))
2991 		return -EPROTONOSUPPORT;
2992 
2993 	if ((vlan > 4095) || (qos > 7))
2994 		return -EINVAL;
2995 
2996 	if (proto == htons(ETH_P_8021AD) &&
2997 	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP))
2998 		return -EPROTONOSUPPORT;
2999 
3000 	if (proto != htons(ETH_P_8021Q) &&
3001 	    proto != htons(ETH_P_8021AD))
3002 		return -EINVAL;
3003 
3004 	if ((proto == htons(ETH_P_8021AD)) &&
3005 	    ((vlan == 0) || (vlan == MLX4_VGT)))
3006 		return -EINVAL;
3007 
3008 	slave = mlx4_get_slave_indx(dev, vf);
3009 	if (slave < 0)
3010 		return -EINVAL;
3011 
3012 	slave_state = &priv->mfunc.master.slave_state[slave];
3013 	if ((proto == htons(ETH_P_8021AD)) && (slave_state->active) &&
3014 	    (!slave_state->vst_qinq_supported)) {
3015 		mlx4_err(dev, "vf %d does not support VST QinQ mode\n", vf);
3016 		return -EPROTONOSUPPORT;
3017 	}
3018 	port = mlx4_slaves_closest_port(dev, slave, port);
3019 	vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
3020 	vf_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
3021 
3022 	if (!mlx4_valid_vf_state_change(dev, port, vf_admin, vlan, qos))
3023 		return -EPERM;
3024 
3025 	if ((0 == vlan) && (0 == qos))
3026 		vf_admin->default_vlan = MLX4_VGT;
3027 	else
3028 		vf_admin->default_vlan = vlan;
3029 	vf_admin->default_qos = qos;
3030 	vf_admin->vlan_proto = proto;
3031 
3032 	/* If a rate was configured prior to VST, we saved the configured rate
3033 	 * in vf_admin->tx_rate; now, if the priority is supported, we enforce the QoS.
3034 	 */
3035 	if (mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) &&
3036 	    vf_admin->tx_rate)
3037 		vf_admin->qos_vport = slave;
3038 
3039 	/* Try to activate new vf state without restart,
3040 	 * this option is not supported while moving to VST QinQ mode.
3041 	 */
3042 	if ((proto == htons(ETH_P_8021AD) &&
3043 	     vf_oper->state.vlan_proto != proto) ||
3044 	    mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
3045 		mlx4_info(dev,
3046 			  "updating vf %d port %d config will take effect on next VF restart\n",
3047 			  vf, port);
3048 	return 0;
3049 }
3050 EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
3051 
3052 int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate,
3053 		     int max_tx_rate)
3054 {
3055 	int err;
3056 	int slave;
3057 	struct mlx4_vport_state *vf_admin;
3058 	struct mlx4_priv *priv = mlx4_priv(dev);
3059 
3060 	if (!mlx4_is_master(dev) ||
3061 	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
3062 		return -EPROTONOSUPPORT;
3063 
3064 	if (min_tx_rate) {
3065 		mlx4_info(dev, "Minimum BW share not supported\n");
3066 		return -EPROTONOSUPPORT;
3067 	}
3068 
3069 	slave = mlx4_get_slave_indx(dev, vf);
3070 	if (slave < 0)
3071 		return -EINVAL;
3072 
3073 	port = mlx4_slaves_closest_port(dev, slave, port);
3074 	vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
3075 
3076 	err = mlx4_set_vport_qos(priv, slave, port, max_tx_rate);
3077 	if (err) {
3078 		mlx4_info(dev, "vf %d failed to set rate %d\n", vf,
3079 			  max_tx_rate);
3080 		return err;
3081 	}
3082 
3083 	vf_admin->tx_rate = max_tx_rate;
3084 	/* If the VF is not in a supported mode (VST with a supported prio),
3085 	 * we do not change the vport configuration for its QPs, but save
3086 	 * the rate, so it will be enforced when it moves to a supported
3087 	 * mode next time.
3088 	 */
3089 	if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin)) {
3090 		mlx4_info(dev,
3091 			  "rate set for VF %d when not in valid state\n", vf);
3092 
3093 		if (vf_admin->default_vlan != MLX4_VGT)
3094 			mlx4_info(dev, "VST priority not supported by QoS\n");
3095 		else
3096 			mlx4_info(dev, "VF in VGT mode (needed VST)\n");
3097 
3098 		mlx4_info(dev,
3099 			  "rate %d takes effect when VF moves to valid state\n",
3100 			  max_tx_rate);
3101 		return 0;
3102 	}
3103 
3104 	/* If user sets rate 0 assigning default vport for its QPs */
3105 	/* If the user sets rate 0, assign the default vport to its QPs */
3106 
3107 	if (priv->mfunc.master.slave_state[slave].active &&
3108 	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)
3109 		mlx4_master_immediate_activate_vlan_qos(priv, slave, port);
3110 
3111 	return 0;
3112 }
3113 EXPORT_SYMBOL_GPL(mlx4_set_vf_rate);
3114 
3115 /* mlx4_get_slave_default_vlan -
3116  * returns true if VST (default vlan);
3117  * if VST, returns vlan & qos (if not NULL).
3118  */
3119 bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
3120 				 u16 *vlan, u8 *qos)
3121 {
3122 	struct mlx4_vport_oper_state *vp_oper;
3123 	struct mlx4_priv *priv;
3124 
3125 	priv = mlx4_priv(dev);
3126 	port = mlx4_slaves_closest_port(dev, slave, port);
3127 	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
3128 
3129 	if (MLX4_VGT != vp_oper->state.default_vlan) {
3130 		if (vlan)
3131 			*vlan = vp_oper->state.default_vlan;
3132 		if (qos)
3133 			*qos = vp_oper->state.default_qos;
3134 		return true;
3135 	}
3136 	return false;
3137 }
3138 EXPORT_SYMBOL_GPL(mlx4_get_slave_default_vlan);
3139 
3140 int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
3141 {
3142 	struct mlx4_priv *priv = mlx4_priv(dev);
3143 	struct mlx4_vport_state *s_info;
3144 	int slave;
3145 
3146 	if ((!mlx4_is_master(dev)) ||
3147 	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FSM))
3148 		return -EPROTONOSUPPORT;
3149 
3150 	slave = mlx4_get_slave_indx(dev, vf);
3151 	if (slave < 0)
3152 		return -EINVAL;
3153 
3154 	port = mlx4_slaves_closest_port(dev, slave, port);
3155 	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
3156 	s_info->spoofchk = setting;
3157 
3158 	return 0;
3159 }
3160 EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk);
3161 
3162 int mlx4_get_counter_stats(struct mlx4_dev *dev, int counter_index,
3163 			   struct mlx4_counter *counter_stats, int reset)
3164 {
3165 	struct mlx4_cmd_mailbox *mailbox = NULL;
3166 	struct mlx4_counter *tmp_counter;
3167 	int err;
3168 	u32 if_stat_in_mod;
3169 
3170 	if (!counter_stats)
3171 		return -EINVAL;
3172 
3173 	if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
3174 		return 0;
3175 
3176 	mailbox = mlx4_alloc_cmd_mailbox(dev);
3177 	if (IS_ERR(mailbox))
3178 		return PTR_ERR(mailbox);
3179 
3180 	memset(mailbox->buf, 0, sizeof(struct mlx4_counter));
3181 	if_stat_in_mod = counter_index;
3182 	if (reset)
3183 		if_stat_in_mod |= MLX4_QUERY_IF_STAT_RESET;
3184 	err = mlx4_cmd_box(dev, 0, mailbox->dma,
3185 			   if_stat_in_mod, 0,
3186 			   MLX4_CMD_QUERY_IF_STAT,
3187 			   MLX4_CMD_TIME_CLASS_C,
3188 			   MLX4_CMD_NATIVE);
3189 	if (err) {
3190 		mlx4_dbg(dev, "%s: failed to read statistics for counter index %d\n",
3191 			 __func__, counter_index);
3192 		goto if_stat_out;
3193 	}
3194 	tmp_counter = (struct mlx4_counter *)mailbox->buf;
3195 	counter_stats->counter_mode = tmp_counter->counter_mode;
3196 	if (counter_stats->counter_mode == 0) {
3197 		counter_stats->rx_frames =
3198 			cpu_to_be64(be64_to_cpu(counter_stats->rx_frames) +
3199 				    be64_to_cpu(tmp_counter->rx_frames));
3200 		counter_stats->tx_frames =
3201 			cpu_to_be64(be64_to_cpu(counter_stats->tx_frames) +
3202 				    be64_to_cpu(tmp_counter->tx_frames));
3203 		counter_stats->rx_bytes =
3204 			cpu_to_be64(be64_to_cpu(counter_stats->rx_bytes) +
3205 				    be64_to_cpu(tmp_counter->rx_bytes));
3206 		counter_stats->tx_bytes =
3207 			cpu_to_be64(be64_to_cpu(counter_stats->tx_bytes) +
3208 				    be64_to_cpu(tmp_counter->tx_bytes));
3209 	}
3210 
3211 if_stat_out:
3212 	mlx4_free_cmd_mailbox(dev, mailbox);
3213 
3214 	return err;
3215 }
3216 EXPORT_SYMBOL_GPL(mlx4_get_counter_stats);
3217 
3218 int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port)
3219 {
3220 	struct mlx4_priv *priv = mlx4_priv(dev);
3221 
3222 	if (slave < 1 || slave >= dev->num_slaves ||
3223 	    port < 1 || port > MLX4_MAX_PORTS)
3224 		return 0;
3225 
3226 	return priv->mfunc.master.vf_oper[slave].smi_enabled[port] ==
3227 		MLX4_VF_SMI_ENABLED;
3228 }
3229 EXPORT_SYMBOL_GPL(mlx4_vf_smi_enabled);
3230 
3231 int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port)
3232 {
3233 	struct mlx4_priv *priv = mlx4_priv(dev);
3234 
3235 	if (slave == mlx4_master_func_num(dev))
3236 		return 1;
3237 
3238 	if (slave < 1 || slave >= dev->num_slaves ||
3239 	    port < 1 || port > MLX4_MAX_PORTS)
3240 		return 0;
3241 
3242 	return priv->mfunc.master.vf_admin[slave].enable_smi[port] ==
3243 		MLX4_VF_SMI_ENABLED;
3244 }
3245 EXPORT_SYMBOL_GPL(mlx4_vf_get_enable_smi_admin);
3246 
3247 int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
3248 				 int enabled)
3249 {
3250 	struct mlx4_priv *priv = mlx4_priv(dev);
3251 	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
3252 			&priv->dev, slave);
3253 	int min_port = find_first_bit(actv_ports.ports,
3254 				      priv->dev.caps.num_ports) + 1;
3255 	int max_port = min_port - 1 +
3256 		bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
3257 
3258 	if (slave == mlx4_master_func_num(dev))
3259 		return 0;
3260 
3261 	if (slave < 1 || slave >= dev->num_slaves ||
3262 	    port < 1 || port > MLX4_MAX_PORTS ||
3263 	    enabled < 0 || enabled > 1)
3264 		return -EINVAL;
3265 
3266 	if (min_port == max_port && dev->caps.num_ports > 1) {
3267 		mlx4_info(dev, "SMI access disallowed for single-ported VFs\n");
3268 		return -EPROTONOSUPPORT;
3269 	}
3270 
3271 	priv->mfunc.master.vf_admin[slave].enable_smi[port] = enabled;
3272 	return 0;
3273 }
3274 EXPORT_SYMBOL_GPL(mlx4_vf_set_enable_smi_admin);
3275 
3276