1 /*
2  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies.
4  * All rights reserved.
5  * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  */
35 
36 #include <linux/sched.h>
37 #include <linux/pci.h>
38 #include <linux/errno.h>
39 #include <linux/kernel.h>
40 #include <linux/io.h>
41 #include <linux/slab.h>
42 #include <dev/mlx4/cmd.h>
43 #include <dev/mlx4/qp.h>
44 #include <linux/if_ether.h>
45 #include <linux/etherdevice.h>
46 
47 #include "mlx4.h"
48 #include "fw.h"
49 
50 #define MLX4_MAC_VALID		(1ull << 63)
51 #define MLX4_PF_COUNTERS_PER_PORT	2
52 #define MLX4_VF_COUNTERS_PER_PORT	1
53 
54 struct mac_res {
55 	struct list_head list;
56 	u64 mac;
57 	int ref_count;
58 	u8 smac_index;
59 	u8 port;
60 };
61 
62 struct vlan_res {
63 	struct list_head list;
64 	u16 vlan;
65 	int ref_count;
66 	int vlan_index;
67 	u8 port;
68 };
69 
70 struct res_common {
71 	struct list_head	list;
72 	struct rb_node		node;
73 	u64		        res_id;
74 	int			owner;
75 	int			state;
76 	int			from_state;
77 	int			to_state;
78 	int			removing;
79 };
80 
81 enum {
82 	RES_ANY_BUSY = 1
83 };
84 
85 struct res_gid {
86 	struct list_head	list;
87 	u8			gid[16];
88 	enum mlx4_protocol	prot;
89 	enum mlx4_steer_type	steer;
90 	u64			reg_id;
91 };
92 
93 enum res_qp_states {
94 	RES_QP_BUSY = RES_ANY_BUSY,
95 
96 	/* QP number was allocated */
97 	RES_QP_RESERVED,
98 
99 	/* ICM memory for QP context was mapped */
100 	RES_QP_MAPPED,
101 
102 	/* QP is in hw ownership */
103 	RES_QP_HW
104 };
105 
106 struct res_qp {
107 	struct res_common	com;
108 	struct res_mtt	       *mtt;
109 	struct res_cq	       *rcq;
110 	struct res_cq	       *scq;
111 	struct res_srq	       *srq;
112 	struct list_head	mcg_list;
113 	spinlock_t		mcg_spl;
114 	int			local_qpn;
115 	atomic_t		ref_count;
116 	u32			qpc_flags;
117 	/* saved qp params before VST enforcement in order to restore on VGT */
118 	u8			sched_queue;
119 	__be32			param3;
120 	u8			vlan_control;
121 	u8			fvl_rx;
122 	u8			pri_path_fl;
123 	u8			vlan_index;
124 	u8			feup;
125 };
126 
127 enum res_mtt_states {
128 	RES_MTT_BUSY = RES_ANY_BUSY,
129 	RES_MTT_ALLOCATED,
130 };
131 
132 static inline const char *mtt_states_str(enum res_mtt_states state)
133 {
134 	switch (state) {
135 	case RES_MTT_BUSY: return "RES_MTT_BUSY";
136 	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
137 	default: return "Unknown";
138 	}
139 }
140 
141 struct res_mtt {
142 	struct res_common	com;
143 	int			order;
144 	atomic_t		ref_count;
145 };
146 
147 enum res_mpt_states {
148 	RES_MPT_BUSY = RES_ANY_BUSY,
149 	RES_MPT_RESERVED,
150 	RES_MPT_MAPPED,
151 	RES_MPT_HW,
152 };
153 
154 struct res_mpt {
155 	struct res_common	com;
156 	struct res_mtt	       *mtt;
157 	int			key;
158 };
159 
160 enum res_eq_states {
161 	RES_EQ_BUSY = RES_ANY_BUSY,
162 	RES_EQ_RESERVED,
163 	RES_EQ_HW,
164 };
165 
166 struct res_eq {
167 	struct res_common	com;
168 	struct res_mtt	       *mtt;
169 };
170 
171 enum res_cq_states {
172 	RES_CQ_BUSY = RES_ANY_BUSY,
173 	RES_CQ_ALLOCATED,
174 	RES_CQ_HW,
175 };
176 
177 struct res_cq {
178 	struct res_common	com;
179 	struct res_mtt	       *mtt;
180 	atomic_t		ref_count;
181 };
182 
183 enum res_srq_states {
184 	RES_SRQ_BUSY = RES_ANY_BUSY,
185 	RES_SRQ_ALLOCATED,
186 	RES_SRQ_HW,
187 };
188 
189 struct res_srq {
190 	struct res_common	com;
191 	struct res_mtt	       *mtt;
192 	struct res_cq	       *cq;
193 	atomic_t		ref_count;
194 };
195 
196 enum res_counter_states {
197 	RES_COUNTER_BUSY = RES_ANY_BUSY,
198 	RES_COUNTER_ALLOCATED,
199 };
200 
201 struct res_counter {
202 	struct res_common	com;
203 	int			port;
204 };
205 
206 enum res_xrcdn_states {
207 	RES_XRCD_BUSY = RES_ANY_BUSY,
208 	RES_XRCD_ALLOCATED,
209 };
210 
211 struct res_xrcdn {
212 	struct res_common	com;
213 	int			port;
214 };
215 
216 enum res_fs_rule_states {
217 	RES_FS_RULE_BUSY = RES_ANY_BUSY,
218 	RES_FS_RULE_ALLOCATED,
219 };
220 
221 struct res_fs_rule {
222 	struct res_common	com;
223 	int			qpn;
224 	/* VF DMFS mbox with port flipped */
225 	void			*mirr_mbox;
226 	/* > 0 --> apply mirror when getting into HA mode      */
227 	/* = 0 --> un-apply mirror when getting out of HA mode */
228 	u32			mirr_mbox_size;
229 	struct list_head	mirr_list;
230 	u64			mirr_rule_id;
231 };
232 
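/*
 * Tracked resources of each type live in a per-type red-black tree keyed
 * by res_id.  The two helpers below are the standard rb-tree lookup and
 * insert walks over the embedded struct res_common node.
 */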
233 static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
234 {
235 	struct rb_node *node = root->rb_node;
236 
237 	while (node) {
238 		struct res_common *res = container_of(node, struct res_common,
239 						      node);
240 
241 		if (res_id < res->res_id)
242 			node = node->rb_left;
243 		else if (res_id > res->res_id)
244 			node = node->rb_right;
245 		else
246 			return res;
247 	}
248 	return NULL;
249 }
250 
251 static int res_tracker_insert(struct rb_root *root, struct res_common *res)
252 {
253 	struct rb_node **new = &(root->rb_node), *parent = NULL;
254 
255 	/* Figure out where to put new node */
256 	while (*new) {
257 		struct res_common *this = container_of(*new, struct res_common,
258 						       node);
259 
260 		parent = *new;
261 		if (res->res_id < this->res_id)
262 			new = &((*new)->rb_left);
263 		else if (res->res_id > this->res_id)
264 			new = &((*new)->rb_right);
265 		else
266 			return -EEXIST;
267 	}
268 
269 	/* Add new node and rebalance tree. */
270 	rb_link_node(&res->node, parent, new);
271 	rb_insert_color(&res->node, root);
272 
273 	return 0;
274 }
275 
276 enum qp_transition {
277 	QP_TRANS_INIT2RTR,
278 	QP_TRANS_RTR2RTS,
279 	QP_TRANS_RTS2RTS,
280 	QP_TRANS_SQERR2RTS,
281 	QP_TRANS_SQD2SQD,
282 	QP_TRANS_SQD2RTS
283 };
284 
285 /* For debug use */
286 static const char *resource_str(enum mlx4_resource rt)
287 {
288 	switch (rt) {
289 	case RES_QP: return "RES_QP";
290 	case RES_CQ: return "RES_CQ";
291 	case RES_SRQ: return "RES_SRQ";
292 	case RES_MPT: return "RES_MPT";
293 	case RES_MTT: return "RES_MTT";
294 	case RES_MAC: return  "RES_MAC";
295 	case RES_VLAN: return  "RES_VLAN";
296 	case RES_EQ: return "RES_EQ";
297 	case RES_COUNTER: return "RES_COUNTER";
298 	case RES_FS_RULE: return "RES_FS_RULE";
299 	case RES_XRCD: return "RES_XRCD";
300 	default: return "Unknown resource type !!!";
301 	}
302 }
303 
304 static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
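/*
 * Charge 'count' instances of a resource type to a slave (PF or VF).
 * The request fails if it would push the slave past its quota, or if
 * serving it from the shared free pool would eat into the space backing
 * the guaranteed minimums of the other functions.  A port value > 0
 * selects the per-port accounting used for MACs and VLANs.
 */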
305 static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
306 				      enum mlx4_resource res_type, int count,
307 				      int port)
308 {
309 	struct mlx4_priv *priv = mlx4_priv(dev);
310 	struct resource_allocator *res_alloc =
311 		&priv->mfunc.master.res_tracker.res_alloc[res_type];
312 	int err = -EINVAL;
313 	int allocated, free, reserved, guaranteed, from_free;
314 	int from_rsvd;
315 
316 	if (slave > dev->persist->num_vfs)
317 		return -EINVAL;
318 
319 	spin_lock(&res_alloc->alloc_lock);
320 	allocated = (port > 0) ?
321 		res_alloc->allocated[(port - 1) *
322 		(dev->persist->num_vfs + 1) + slave] :
323 		res_alloc->allocated[slave];
324 	free = (port > 0) ? res_alloc->res_port_free[port - 1] :
325 		res_alloc->res_free;
326 	reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
327 		res_alloc->res_reserved;
328 	guaranteed = res_alloc->guaranteed[slave];
329 
330 	if (allocated + count > res_alloc->quota[slave]) {
331 		mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
332 			  slave, port, resource_str(res_type), count,
333 			  allocated, res_alloc->quota[slave]);
334 		goto out;
335 	}
336 
337 	if (allocated + count <= guaranteed) {
338 		err = 0;
339 		from_rsvd = count;
340 	} else {
341 		/* portion may need to be obtained from free area */
342 		if (guaranteed - allocated > 0)
343 			from_free = count - (guaranteed - allocated);
344 		else
345 			from_free = count;
346 
347 		from_rsvd = count - from_free;
348 
349 		if (free - from_free >= reserved)
350 			err = 0;
351 		else
352 			mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
353 				  slave, port, resource_str(res_type), free,
354 				  from_free, reserved);
355 	}
356 
357 	if (!err) {
358 		/* grant the request */
359 		if (port > 0) {
360 			res_alloc->allocated[(port - 1) *
361 			(dev->persist->num_vfs + 1) + slave] += count;
362 			res_alloc->res_port_free[port - 1] -= count;
363 			res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
364 		} else {
365 			res_alloc->allocated[slave] += count;
366 			res_alloc->res_free -= count;
367 			res_alloc->res_reserved -= from_rsvd;
368 		}
369 	}
370 
371 out:
372 	spin_unlock(&res_alloc->alloc_lock);
373 	return err;
374 }
375 
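/*
 * Give back 'count' instances previously granted to a slave: whatever is
 * needed to refill the slave's guaranteed minimum returns to the reserved
 * area, the remainder to the shared free pool.
 */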
376 static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
377 				    enum mlx4_resource res_type, int count,
378 				    int port)
379 {
380 	struct mlx4_priv *priv = mlx4_priv(dev);
381 	struct resource_allocator *res_alloc =
382 		&priv->mfunc.master.res_tracker.res_alloc[res_type];
383 	int allocated, guaranteed, from_rsvd;
384 
385 	if (slave > dev->persist->num_vfs)
386 		return;
387 
388 	spin_lock(&res_alloc->alloc_lock);
389 
390 	allocated = (port > 0) ?
391 		res_alloc->allocated[(port - 1) *
392 		(dev->persist->num_vfs + 1) + slave] :
393 		res_alloc->allocated[slave];
394 	guaranteed = res_alloc->guaranteed[slave];
395 
396 	if (allocated - count >= guaranteed) {
397 		from_rsvd = 0;
398 	} else {
399 		/* portion may need to be returned to reserved area */
400 		if (allocated - guaranteed > 0)
401 			from_rsvd = count - (allocated - guaranteed);
402 		else
403 			from_rsvd = count;
404 	}
405 
406 	if (port > 0) {
407 		res_alloc->allocated[(port - 1) *
408 		(dev->persist->num_vfs + 1) + slave] -= count;
409 		res_alloc->res_port_free[port - 1] += count;
410 		res_alloc->res_port_rsvd[port - 1] += from_rsvd;
411 	} else {
412 		res_alloc->allocated[slave] -= count;
413 		res_alloc->res_free += count;
414 		res_alloc->res_reserved += from_rsvd;
415 	}
416 
417 	spin_unlock(&res_alloc->alloc_lock);
418 	return;
419 }
420 
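/*
 * Default split of a resource type among functions: each function is
 * guaranteed num_instances / (2 * (num_vfs + 1)) instances and its quota
 * is half of the total on top of that guarantee.  The PF additionally
 * carries the firmware-reserved MTTs.
 */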
421 static inline void initialize_res_quotas(struct mlx4_dev *dev,
422 					 struct resource_allocator *res_alloc,
423 					 enum mlx4_resource res_type,
424 					 int vf, int num_instances)
425 {
426 	res_alloc->guaranteed[vf] = num_instances /
427 				    (2 * (dev->persist->num_vfs + 1));
428 	res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
429 	if (vf == mlx4_master_func_num(dev)) {
430 		res_alloc->res_free = num_instances;
431 		if (res_type == RES_MTT) {
432 			/* reserved mtts will be taken out of the PF allocation */
433 			res_alloc->res_free += dev->caps.reserved_mtts;
434 			res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
435 			res_alloc->quota[vf] += dev->caps.reserved_mtts;
436 		}
437 	}
438 }
439 
440 void mlx4_init_quotas(struct mlx4_dev *dev)
441 {
442 	struct mlx4_priv *priv = mlx4_priv(dev);
443 	int pf;
444 
445 	/* quotas for VFs are initialized in mlx4_slave_cap */
446 	if (mlx4_is_slave(dev))
447 		return;
448 
449 	if (!mlx4_is_mfunc(dev)) {
450 		dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
451 			mlx4_num_reserved_sqps(dev);
452 		dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
453 		dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
454 		dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
455 		dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
456 		return;
457 	}
458 
459 	pf = mlx4_master_func_num(dev);
460 	dev->quotas.qp =
461 		priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
462 	dev->quotas.cq =
463 		priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
464 	dev->quotas.srq =
465 		priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
466 	dev->quotas.mtt =
467 		priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
468 	dev->quotas.mpt =
469 		priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
470 }
471 
472 static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev)
473 {
474 	/* reduce the sink counter */
475 	return (dev->caps.max_counters - 1 -
476 		(MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
477 		/ MLX4_MAX_PORTS;
478 }
479 
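/*
 * Set up the master's resource tracker: per-slave resource lists, the
 * per-type rb-trees, and the quota/guaranteed/allocated accounting for
 * every resource type and function.
 */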
480 int mlx4_init_resource_tracker(struct mlx4_dev *dev)
481 {
482 	struct mlx4_priv *priv = mlx4_priv(dev);
483 	int i, j;
484 	int t;
485 	int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev);
486 
487 	priv->mfunc.master.res_tracker.slave_list =
488 		kzalloc(dev->num_slaves * sizeof(struct slave_list),
489 			GFP_KERNEL);
490 	if (!priv->mfunc.master.res_tracker.slave_list)
491 		return -ENOMEM;
492 
493 	for (i = 0 ; i < dev->num_slaves; i++) {
494 		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
495 			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
496 				       slave_list[i].res_list[t]);
497 		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
498 	}
499 
500 	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
501 		 dev->num_slaves);
502 	for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
503 		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
504 
505 	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
506 		struct resource_allocator *res_alloc =
507 			&priv->mfunc.master.res_tracker.res_alloc[i];
508 		res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
509 					   sizeof(int), GFP_KERNEL);
510 		res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
511 						sizeof(int), GFP_KERNEL);
512 		if (i == RES_MAC || i == RES_VLAN)
513 			res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
514 						       (dev->persist->num_vfs
515 						       + 1) *
516 						       sizeof(int), GFP_KERNEL);
517 		else
518 			res_alloc->allocated = kzalloc((dev->persist->
519 							num_vfs + 1) *
520 						       sizeof(int), GFP_KERNEL);
521 		/* Reduce the sink counter */
522 		if (i == RES_COUNTER)
523 			res_alloc->res_free = dev->caps.max_counters - 1;
524 
525 		if (!res_alloc->quota || !res_alloc->guaranteed ||
526 		    !res_alloc->allocated)
527 			goto no_mem_err;
528 
529 		spin_lock_init(&res_alloc->alloc_lock);
530 		for (t = 0; t < dev->persist->num_vfs + 1; t++) {
531 			struct mlx4_active_ports actv_ports =
532 				mlx4_get_active_ports(dev, t);
533 			switch (i) {
534 			case RES_QP:
535 				initialize_res_quotas(dev, res_alloc, RES_QP,
536 						      t, dev->caps.num_qps -
537 						      dev->caps.reserved_qps -
538 						      mlx4_num_reserved_sqps(dev));
539 				break;
540 			case RES_CQ:
541 				initialize_res_quotas(dev, res_alloc, RES_CQ,
542 						      t, dev->caps.num_cqs -
543 						      dev->caps.reserved_cqs);
544 				break;
545 			case RES_SRQ:
546 				initialize_res_quotas(dev, res_alloc, RES_SRQ,
547 						      t, dev->caps.num_srqs -
548 						      dev->caps.reserved_srqs);
549 				break;
550 			case RES_MPT:
551 				initialize_res_quotas(dev, res_alloc, RES_MPT,
552 						      t, dev->caps.num_mpts -
553 						      dev->caps.reserved_mrws);
554 				break;
555 			case RES_MTT:
556 				initialize_res_quotas(dev, res_alloc, RES_MTT,
557 						      t, dev->caps.num_mtts -
558 						      dev->caps.reserved_mtts);
559 				break;
560 			case RES_MAC:
561 				if (t == mlx4_master_func_num(dev)) {
562 					int max_vfs_pport = 0;
563 					/* Calculate the max vfs per port for */
564 					/* both ports.			      */
565 					for (j = 0; j < dev->caps.num_ports;
566 					     j++) {
567 						struct mlx4_slaves_pport slaves_pport =
568 							mlx4_phys_to_slaves_pport(dev, j + 1);
569 						unsigned current_slaves =
570 							bitmap_weight(slaves_pport.slaves,
571 								      dev->caps.num_ports) - 1;
572 						if (max_vfs_pport < current_slaves)
573 							max_vfs_pport =
574 								current_slaves;
575 					}
576 					res_alloc->quota[t] =
577 						MLX4_MAX_MAC_NUM -
578 						2 * max_vfs_pport;
579 					res_alloc->guaranteed[t] = 2;
580 					for (j = 0; j < MLX4_MAX_PORTS; j++)
581 						res_alloc->res_port_free[j] =
582 							MLX4_MAX_MAC_NUM;
583 				} else {
584 					res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
585 					res_alloc->guaranteed[t] = 2;
586 				}
587 				break;
588 			case RES_VLAN:
589 				if (t == mlx4_master_func_num(dev)) {
590 					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
591 					res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
592 					for (j = 0; j < MLX4_MAX_PORTS; j++)
593 						res_alloc->res_port_free[j] =
594 							res_alloc->quota[t];
595 				} else {
596 					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
597 					res_alloc->guaranteed[t] = 0;
598 				}
599 				break;
600 			case RES_COUNTER:
601 				res_alloc->quota[t] = dev->caps.max_counters;
602 				if (t == mlx4_master_func_num(dev))
603 					res_alloc->guaranteed[t] =
604 						MLX4_PF_COUNTERS_PER_PORT *
605 						MLX4_MAX_PORTS;
606 				else if (t <= max_vfs_guarantee_counter)
607 					res_alloc->guaranteed[t] =
608 						MLX4_VF_COUNTERS_PER_PORT *
609 						MLX4_MAX_PORTS;
610 				else
611 					res_alloc->guaranteed[t] = 0;
612 				res_alloc->res_free -= res_alloc->guaranteed[t];
613 				break;
614 			default:
615 				break;
616 			}
617 			if (i == RES_MAC || i == RES_VLAN) {
618 				for (j = 0; j < dev->caps.num_ports; j++)
619 					if (test_bit(j, actv_ports.ports))
620 						res_alloc->res_port_rsvd[j] +=
621 							res_alloc->guaranteed[t];
622 			} else {
623 				res_alloc->res_reserved += res_alloc->guaranteed[t];
624 			}
625 		}
626 	}
627 	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
628 	return 0;
629 
630 no_mem_err:
631 	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
632 		kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
633 		priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
634 		kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
635 		priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
636 		kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
637 		priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
638 	}
639 	return -ENOMEM;
640 }
641 
642 void mlx4_free_resource_tracker(struct mlx4_dev *dev,
643 				enum mlx4_res_tracker_free_type type)
644 {
645 	struct mlx4_priv *priv = mlx4_priv(dev);
646 	int i;
647 
648 	if (priv->mfunc.master.res_tracker.slave_list) {
649 		if (type != RES_TR_FREE_STRUCTS_ONLY) {
650 			for (i = 0; i < dev->num_slaves; i++) {
651 				if (type == RES_TR_FREE_ALL ||
652 				    dev->caps.function != i)
653 					mlx4_delete_all_resources_for_slave(dev, i);
654 			}
655 			/* free master's vlans */
656 			i = dev->caps.function;
657 			mlx4_reset_roce_gids(dev, i);
658 			mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
659 			rem_slave_vlans(dev, i);
660 			mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
661 		}
662 
663 		if (type != RES_TR_FREE_SLAVES_ONLY) {
664 			for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
665 				kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
666 				priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
667 				kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
668 				priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
669 				kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
670 				priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
671 			}
672 			kfree(priv->mfunc.master.res_tracker.slave_list);
673 			priv->mfunc.master.res_tracker.slave_list = NULL;
674 		}
675 	}
676 }
677 
678 static void update_pkey_index(struct mlx4_dev *dev, int slave,
679 			      struct mlx4_cmd_mailbox *inbox)
680 {
681 	u8 sched = *(u8 *)(inbox->buf + 64);
682 	u8 orig_index = *(u8 *)(inbox->buf + 35);
683 	u8 new_index;
684 	struct mlx4_priv *priv = mlx4_priv(dev);
685 	int port;
686 
687 	port = (sched >> 6 & 1) + 1;
688 
689 	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
690 	*(u8 *)(inbox->buf + 35) = new_index;
691 }
692 
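/*
 * Paravirtualize the GID index in a slave's QP context: UD QPs get the
 * slave's base GID index (RoCE) or the slave number (IB), while RC/UC/XRC
 * primary and alternate paths have their indices offset into the slave's
 * GID range.
 */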
693 static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
694 		       u8 slave)
695 {
696 	struct mlx4_qp_context	*qp_ctx = inbox->buf + 8;
697 	enum mlx4_qp_optpar	optpar = be32_to_cpu(*(__be32 *) inbox->buf);
698 	u32			ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
699 	int port;
700 
701 	if (MLX4_QP_ST_UD == ts) {
702 		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
703 		if (mlx4_is_eth(dev, port))
704 			qp_ctx->pri_path.mgid_index =
705 				mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
706 		else
707 			qp_ctx->pri_path.mgid_index = slave | 0x80;
708 
709 	} else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
710 		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
711 			port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
712 			if (mlx4_is_eth(dev, port)) {
713 				qp_ctx->pri_path.mgid_index +=
714 					mlx4_get_base_gid_ix(dev, slave, port);
715 				qp_ctx->pri_path.mgid_index &= 0x7f;
716 			} else {
717 				qp_ctx->pri_path.mgid_index = slave & 0x7F;
718 			}
719 		}
720 		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
721 			port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
722 			if (mlx4_is_eth(dev, port)) {
723 				qp_ctx->alt_path.mgid_index +=
724 					mlx4_get_base_gid_ix(dev, slave, port);
725 				qp_ctx->alt_path.mgid_index &= 0x7f;
726 			} else {
727 				qp_ctx->alt_path.mgid_index = slave & 0x7F;
728 			}
729 		}
730 	}
731 }
732 
733 static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
734 			  u8 slave, int port);
735 
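/*
 * Adjust a QP context submitted by a slave so that the administrator's
 * VST (default VLAN), QoS and spoof-check settings for the VF are
 * enforced, and bind the QP to a counter on its port.
 */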
736 static int update_vport_qp_param(struct mlx4_dev *dev,
737 				 struct mlx4_cmd_mailbox *inbox,
738 				 u8 slave, u32 qpn)
739 {
740 	struct mlx4_qp_context	*qpc = inbox->buf + 8;
741 	struct mlx4_vport_oper_state *vp_oper;
742 	struct mlx4_priv *priv;
743 	u32 qp_type;
744 	int port, err = 0;
745 
746 	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
747 	priv = mlx4_priv(dev);
748 	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
749 	qp_type	= (be32_to_cpu(qpc->flags) >> 16) & 0xff;
750 
751 	err = handle_counter(dev, qpc, slave, port);
752 	if (err)
753 		goto out;
754 
755 	if (MLX4_VGT != vp_oper->state.default_vlan) {
756 		/* the reserved QPs (special, proxy, tunnel)
757 		 * do not operate over vlans
758 		 */
759 		if (mlx4_is_qp_reserved(dev, qpn))
760 			return 0;
761 
762 		/* force vlan stripping by clearing vsd, MLX QP refers to Raw Ethernet */
763 		if (qp_type == MLX4_QP_ST_UD ||
764 		    (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
765 			if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
766 				*(__be32 *)inbox->buf =
767 					cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
768 					MLX4_QP_OPTPAR_VLAN_STRIPPING);
769 				qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
770 			} else {
771 				struct mlx4_update_qp_params params = {.flags = 0};
772 
773 				err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
774 				if (err)
775 					goto out;
776 			}
777 		}
778 
779 		/* preserve IF_COUNTER flag */
780 		qpc->pri_path.vlan_control &=
781 			MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
782 		if (1 /*vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE*/ &&
783 		    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
784 			qpc->pri_path.vlan_control |=
785 				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
786 				MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
787 				MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
788 				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
789 				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
790 				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
791 		} else if (0 != vp_oper->state.default_vlan) {
792 			if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD)) {
793 				/* vst QinQ should block untagged on TX,
794 				 * but cvlan is in payload and phv is set so
795 				 * hw sees it as untagged. Block tagged instead.
796 				 */
797 				qpc->pri_path.vlan_control |=
798 					MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
799 					MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
800 					MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
801 					MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
802 			} else { /* vst 802.1Q */
803 				qpc->pri_path.vlan_control |=
804 					MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
805 					MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
806 					MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
807 			}
808 		} else { /* priority tagged */
809 			qpc->pri_path.vlan_control |=
810 				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
811 				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
812 		}
813 
814 		qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
815 		qpc->pri_path.vlan_index = vp_oper->vlan_idx;
816 		qpc->pri_path.fl |= MLX4_FL_ETH_HIDE_CQE_VLAN;
817 		if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
818 			qpc->pri_path.fl |= MLX4_FL_SV;
819 		else
820 			qpc->pri_path.fl |= MLX4_FL_CV;
821 		qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
822 		qpc->pri_path.sched_queue &= 0xC7;
823 		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
824 		qpc->qos_vport = vp_oper->state.qos_vport;
825 	}
826 	if (vp_oper->state.spoofchk) {
827 		qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
828 		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
829 	}
830 out:
831 	return err;
832 }
833 
834 static int mpt_mask(struct mlx4_dev *dev)
835 {
836 	return dev->caps.num_mpts - 1;
837 }
838 
839 static void *find_res(struct mlx4_dev *dev, u64 res_id,
840 		      enum mlx4_resource type)
841 {
842 	struct mlx4_priv *priv = mlx4_priv(dev);
843 
844 	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
845 				  res_id);
846 }
847 
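/*
 * get_res()/put_res() give the caller temporary exclusive use of a
 * tracked resource: get_res() checks ownership and marks the entry
 * RES_ANY_BUSY, put_res() restores the previous state.
 */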
848 static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
849 		   enum mlx4_resource type,
850 		   void *res)
851 {
852 	struct res_common *r;
853 	int err = 0;
854 
855 	spin_lock_irq(mlx4_tlock(dev));
856 	r = find_res(dev, res_id, type);
857 	if (!r) {
858 		err = -ENONET;
859 		goto exit;
860 	}
861 
862 	if (r->state == RES_ANY_BUSY) {
863 		err = -EBUSY;
864 		goto exit;
865 	}
866 
867 	if (r->owner != slave) {
868 		err = -EPERM;
869 		goto exit;
870 	}
871 
872 	r->from_state = r->state;
873 	r->state = RES_ANY_BUSY;
874 
875 	if (res)
876 		*((struct res_common **)res) = r;
877 
878 exit:
879 	spin_unlock_irq(mlx4_tlock(dev));
880 	return err;
881 }
882 
883 int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
884 				    enum mlx4_resource type,
885 				    u64 res_id, int *slave)
886 {
887 
888 	struct res_common *r;
889 	int err = -ENOENT;
890 	int id = res_id;
891 
892 	if (type == RES_QP)
893 		id &= 0x7fffff;
894 	spin_lock(mlx4_tlock(dev));
895 
896 	r = find_res(dev, id, type);
897 	if (r) {
898 		*slave = r->owner;
899 		err = 0;
900 	}
901 	spin_unlock(mlx4_tlock(dev));
902 
903 	return err;
904 }
905 
906 static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
907 		    enum mlx4_resource type)
908 {
909 	struct res_common *r;
910 
911 	spin_lock_irq(mlx4_tlock(dev));
912 	r = find_res(dev, res_id, type);
913 	if (r)
914 		r->state = r->from_state;
915 	spin_unlock_irq(mlx4_tlock(dev));
916 }
917 
918 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
919 			     u64 in_param, u64 *out_param, int port);
920 
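/*
 * Counter assignment for a slave's QP: a counter index supplied by the
 * slave must be owned by it and gets bound to the QP's port; otherwise an
 * existing per-port counter of that slave is reused or a new one is
 * allocated on its behalf.
 */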
921 static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
922 				   int counter_index)
923 {
924 	struct res_common *r;
925 	struct res_counter *counter;
926 	int ret = 0;
927 
928 	if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
929 		return ret;
930 
931 	spin_lock_irq(mlx4_tlock(dev));
932 	r = find_res(dev, counter_index, RES_COUNTER);
933 	if (!r || r->owner != slave) {
934 		ret = -EINVAL;
935 	} else {
936 		counter = container_of(r, struct res_counter, com);
937 		if (!counter->port)
938 			counter->port = port;
939 	}
940 
941 	spin_unlock_irq(mlx4_tlock(dev));
942 	return ret;
943 }
944 
945 static int handle_unexisting_counter(struct mlx4_dev *dev,
946 				     struct mlx4_qp_context *qpc, u8 slave,
947 				     int port)
948 {
949 	struct mlx4_priv *priv = mlx4_priv(dev);
950 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
951 	struct res_common *tmp;
952 	struct res_counter *counter;
953 	u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
954 	int err = 0;
955 
956 	spin_lock_irq(mlx4_tlock(dev));
957 	list_for_each_entry(tmp,
958 			    &tracker->slave_list[slave].res_list[RES_COUNTER],
959 			    list) {
960 		counter = container_of(tmp, struct res_counter, com);
961 		if (port == counter->port) {
962 			qpc->pri_path.counter_index  = counter->com.res_id;
963 			spin_unlock_irq(mlx4_tlock(dev));
964 			return 0;
965 		}
966 	}
967 	spin_unlock_irq(mlx4_tlock(dev));
968 
969 	/* No existing counter, need to allocate a new counter */
970 	err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
971 				port);
972 	if (err == -ENOENT) {
973 		err = 0;
974 	} else if (err && err != -ENOSPC) {
975 		mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
976 			 __func__, slave, err);
977 	} else {
978 		qpc->pri_path.counter_index = counter_idx;
979 		mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
980 			 __func__, slave, qpc->pri_path.counter_index);
981 		err = 0;
982 	}
983 
984 	return err;
985 }
986 
987 static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
988 			  u8 slave, int port)
989 {
990 	if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
991 		return handle_existing_counter(dev, slave, port,
992 					       qpc->pri_path.counter_index);
993 
994 	return handle_unexisting_counter(dev, qpc, slave, port);
995 }
996 
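/*
 * Constructors for the per-type tracker entries.  Each returns the
 * embedded res_common so that alloc_tr() can treat all resource types
 * uniformly.
 */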
997 static struct res_common *alloc_qp_tr(int id)
998 {
999 	struct res_qp *ret;
1000 
1001 	ret = kzalloc(sizeof *ret, GFP_KERNEL);
1002 	if (!ret)
1003 		return NULL;
1004 
1005 	ret->com.res_id = id;
1006 	ret->com.state = RES_QP_RESERVED;
1007 	ret->local_qpn = id;
1008 	INIT_LIST_HEAD(&ret->mcg_list);
1009 	spin_lock_init(&ret->mcg_spl);
1010 	atomic_set(&ret->ref_count, 0);
1011 
1012 	return &ret->com;
1013 }
1014 
1015 static struct res_common *alloc_mtt_tr(int id, int order)
1016 {
1017 	struct res_mtt *ret;
1018 
1019 	ret = kzalloc(sizeof *ret, GFP_KERNEL);
1020 	if (!ret)
1021 		return NULL;
1022 
1023 	ret->com.res_id = id;
1024 	ret->order = order;
1025 	ret->com.state = RES_MTT_ALLOCATED;
1026 	atomic_set(&ret->ref_count, 0);
1027 
1028 	return &ret->com;
1029 }
1030 
1031 static struct res_common *alloc_mpt_tr(int id, int key)
1032 {
1033 	struct res_mpt *ret;
1034 
1035 	ret = kzalloc(sizeof *ret, GFP_KERNEL);
1036 	if (!ret)
1037 		return NULL;
1038 
1039 	ret->com.res_id = id;
1040 	ret->com.state = RES_MPT_RESERVED;
1041 	ret->key = key;
1042 
1043 	return &ret->com;
1044 }
1045 
1046 static struct res_common *alloc_eq_tr(int id)
1047 {
1048 	struct res_eq *ret;
1049 
1050 	ret = kzalloc(sizeof *ret, GFP_KERNEL);
1051 	if (!ret)
1052 		return NULL;
1053 
1054 	ret->com.res_id = id;
1055 	ret->com.state = RES_EQ_RESERVED;
1056 
1057 	return &ret->com;
1058 }
1059 
1060 static struct res_common *alloc_cq_tr(int id)
1061 {
1062 	struct res_cq *ret;
1063 
1064 	ret = kzalloc(sizeof *ret, GFP_KERNEL);
1065 	if (!ret)
1066 		return NULL;
1067 
1068 	ret->com.res_id = id;
1069 	ret->com.state = RES_CQ_ALLOCATED;
1070 	atomic_set(&ret->ref_count, 0);
1071 
1072 	return &ret->com;
1073 }
1074 
1075 static struct res_common *alloc_srq_tr(int id)
1076 {
1077 	struct res_srq *ret;
1078 
1079 	ret = kzalloc(sizeof *ret, GFP_KERNEL);
1080 	if (!ret)
1081 		return NULL;
1082 
1083 	ret->com.res_id = id;
1084 	ret->com.state = RES_SRQ_ALLOCATED;
1085 	atomic_set(&ret->ref_count, 0);
1086 
1087 	return &ret->com;
1088 }
1089 
1090 static struct res_common *alloc_counter_tr(int id, int port)
1091 {
1092 	struct res_counter *ret;
1093 
1094 	ret = kzalloc(sizeof *ret, GFP_KERNEL);
1095 	if (!ret)
1096 		return NULL;
1097 
1098 	ret->com.res_id = id;
1099 	ret->com.state = RES_COUNTER_ALLOCATED;
1100 	ret->port = port;
1101 
1102 	return &ret->com;
1103 }
1104 
1105 static struct res_common *alloc_xrcdn_tr(int id)
1106 {
1107 	struct res_xrcdn *ret;
1108 
1109 	ret = kzalloc(sizeof *ret, GFP_KERNEL);
1110 	if (!ret)
1111 		return NULL;
1112 
1113 	ret->com.res_id = id;
1114 	ret->com.state = RES_XRCD_ALLOCATED;
1115 
1116 	return &ret->com;
1117 }
1118 
1119 static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
1120 {
1121 	struct res_fs_rule *ret;
1122 
1123 	ret = kzalloc(sizeof *ret, GFP_KERNEL);
1124 	if (!ret)
1125 		return NULL;
1126 
1127 	ret->com.res_id = id;
1128 	ret->com.state = RES_FS_RULE_ALLOCATED;
1129 	ret->qpn = qpn;
1130 	return &ret->com;
1131 }
1132 
1133 static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
1134 				   int extra)
1135 {
1136 	struct res_common *ret;
1137 
1138 	switch (type) {
1139 	case RES_QP:
1140 		ret = alloc_qp_tr(id);
1141 		break;
1142 	case RES_MPT:
1143 		ret = alloc_mpt_tr(id, extra);
1144 		break;
1145 	case RES_MTT:
1146 		ret = alloc_mtt_tr(id, extra);
1147 		break;
1148 	case RES_EQ:
1149 		ret = alloc_eq_tr(id);
1150 		break;
1151 	case RES_CQ:
1152 		ret = alloc_cq_tr(id);
1153 		break;
1154 	case RES_SRQ:
1155 		ret = alloc_srq_tr(id);
1156 		break;
1157 	case RES_MAC:
1158 		pr_err("implementation missing\n");
1159 		return NULL;
1160 	case RES_COUNTER:
1161 		ret = alloc_counter_tr(id, extra);
1162 		break;
1163 	case RES_XRCD:
1164 		ret = alloc_xrcdn_tr(id);
1165 		break;
1166 	case RES_FS_RULE:
1167 		ret = alloc_fs_rule_tr(id, extra);
1168 		break;
1169 	default:
1170 		return NULL;
1171 	}
1172 	if (ret)
1173 		ret->owner = slave;
1174 
1175 	return ret;
1176 }
1177 
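/*
 * Gather the counters owned by 'slave' on 'port' and query their
 * statistics into 'data'.  The counter ids are collected under the
 * tracker lock so the firmware queries run without holding it.
 */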
1178 int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
1179 			  struct mlx4_counter *data)
1180 {
1181 	struct mlx4_priv *priv = mlx4_priv(dev);
1182 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1183 	struct res_common *tmp;
1184 	struct res_counter *counter;
1185 	int *counters_arr;
1186 	int i = 0, err = 0;
1187 
1188 	memset(data, 0, sizeof(*data));
1189 
1190 	counters_arr = kmalloc_array(dev->caps.max_counters,
1191 				     sizeof(*counters_arr), GFP_KERNEL);
1192 	if (!counters_arr)
1193 		return -ENOMEM;
1194 
1195 	spin_lock_irq(mlx4_tlock(dev));
1196 	list_for_each_entry(tmp,
1197 			    &tracker->slave_list[slave].res_list[RES_COUNTER],
1198 			    list) {
1199 		counter = container_of(tmp, struct res_counter, com);
1200 		if (counter->port == port) {
1201 			counters_arr[i] = (int)tmp->res_id;
1202 			i++;
1203 		}
1204 	}
1205 	spin_unlock_irq(mlx4_tlock(dev));
1206 	counters_arr[i] = -1;
1207 
1208 	i = 0;
1209 
1210 	while (counters_arr[i] != -1) {
1211 		err = mlx4_get_counter_stats(dev, counters_arr[i], data,
1212 					     0);
1213 		if (err) {
1214 			memset(data, 0, sizeof(*data));
1215 			goto table_changed;
1216 		}
1217 		i++;
1218 	}
1219 
1220 table_changed:
1221 	kfree(counters_arr);
1222 	return 0;
1223 }
1224 
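/*
 * Register the range [base, base + count) of a resource type as owned by
 * 'slave': each id is inserted into the type's rb-tree and linked on the
 * slave's resource list under the tracker lock, with full rollback on
 * failure.
 */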
1225 static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1226 			 enum mlx4_resource type, int extra)
1227 {
1228 	int i;
1229 	int err;
1230 	struct mlx4_priv *priv = mlx4_priv(dev);
1231 	struct res_common **res_arr;
1232 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1233 	struct rb_root *root = &tracker->res_tree[type];
1234 
1235 	res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
1236 	if (!res_arr)
1237 		return -ENOMEM;
1238 
1239 	for (i = 0; i < count; ++i) {
1240 		res_arr[i] = alloc_tr(base + i, type, slave, extra);
1241 		if (!res_arr[i]) {
1242 			for (--i; i >= 0; --i)
1243 				kfree(res_arr[i]);
1244 
1245 			kfree(res_arr);
1246 			return -ENOMEM;
1247 		}
1248 	}
1249 
1250 	spin_lock_irq(mlx4_tlock(dev));
1251 	for (i = 0; i < count; ++i) {
1252 		if (find_res(dev, base + i, type)) {
1253 			err = -EEXIST;
1254 			goto undo;
1255 		}
1256 		err = res_tracker_insert(root, res_arr[i]);
1257 		if (err)
1258 			goto undo;
1259 		list_add_tail(&res_arr[i]->list,
1260 			      &tracker->slave_list[slave].res_list[type]);
1261 	}
1262 	spin_unlock_irq(mlx4_tlock(dev));
1263 	kfree(res_arr);
1264 
1265 	return 0;
1266 
1267 undo:
1268 	for (--i; i >= 0; --i) {
1269 		rb_erase(&res_arr[i]->node, root);
1270 		list_del_init(&res_arr[i]->list);
1271 	}
1272 
1273 	spin_unlock_irq(mlx4_tlock(dev));
1274 
1275 	for (i = 0; i < count; ++i)
1276 		kfree(res_arr[i]);
1277 
1278 	kfree(res_arr);
1279 
1280 	return err;
1281 }
1282 
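/*
 * The remove_*_ok() helpers check that a tracked resource may be dropped
 * from the tracker: not busy, not referenced, and back in its initial
 * reserved/allocated state.
 */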
1283 static int remove_qp_ok(struct res_qp *res)
1284 {
1285 	if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
1286 	    !list_empty(&res->mcg_list)) {
1287 		pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
1288 		       res->com.state, atomic_read(&res->ref_count));
1289 		return -EBUSY;
1290 	} else if (res->com.state != RES_QP_RESERVED) {
1291 		return -EPERM;
1292 	}
1293 
1294 	return 0;
1295 }
1296 
1297 static int remove_mtt_ok(struct res_mtt *res, int order)
1298 {
1299 	if (res->com.state == RES_MTT_BUSY ||
1300 	    atomic_read(&res->ref_count)) {
1301 		pr_devel("%s-%d: state %s, ref_count %d\n",
1302 			 __func__, __LINE__,
1303 			 mtt_states_str(res->com.state),
1304 			 atomic_read(&res->ref_count));
1305 		return -EBUSY;
1306 	} else if (res->com.state != RES_MTT_ALLOCATED)
1307 		return -EPERM;
1308 	else if (res->order != order)
1309 		return -EINVAL;
1310 
1311 	return 0;
1312 }
1313 
1314 static int remove_mpt_ok(struct res_mpt *res)
1315 {
1316 	if (res->com.state == RES_MPT_BUSY)
1317 		return -EBUSY;
1318 	else if (res->com.state != RES_MPT_RESERVED)
1319 		return -EPERM;
1320 
1321 	return 0;
1322 }
1323 
1324 static int remove_eq_ok(struct res_eq *res)
1325 {
1326 	if (res->com.state == RES_EQ_BUSY)
1327 		return -EBUSY;
1328 	else if (res->com.state != RES_EQ_RESERVED)
1329 		return -EPERM;
1330 
1331 	return 0;
1332 }
1333 
1334 static int remove_counter_ok(struct res_counter *res)
1335 {
1336 	if (res->com.state == RES_COUNTER_BUSY)
1337 		return -EBUSY;
1338 	else if (res->com.state != RES_COUNTER_ALLOCATED)
1339 		return -EPERM;
1340 
1341 	return 0;
1342 }
1343 
1344 static int remove_xrcdn_ok(struct res_xrcdn *res)
1345 {
1346 	if (res->com.state == RES_XRCD_BUSY)
1347 		return -EBUSY;
1348 	else if (res->com.state != RES_XRCD_ALLOCATED)
1349 		return -EPERM;
1350 
1351 	return 0;
1352 }
1353 
1354 static int remove_fs_rule_ok(struct res_fs_rule *res)
1355 {
1356 	if (res->com.state == RES_FS_RULE_BUSY)
1357 		return -EBUSY;
1358 	else if (res->com.state != RES_FS_RULE_ALLOCATED)
1359 		return -EPERM;
1360 
1361 	return 0;
1362 }
1363 
1364 static int remove_cq_ok(struct res_cq *res)
1365 {
1366 	if (res->com.state == RES_CQ_BUSY)
1367 		return -EBUSY;
1368 	else if (res->com.state != RES_CQ_ALLOCATED)
1369 		return -EPERM;
1370 
1371 	return 0;
1372 }
1373 
1374 static int remove_srq_ok(struct res_srq *res)
1375 {
1376 	if (res->com.state == RES_SRQ_BUSY)
1377 		return -EBUSY;
1378 	else if (res->com.state != RES_SRQ_ALLOCATED)
1379 		return -EPERM;
1380 
1381 	return 0;
1382 }
1383 
1384 static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
1385 {
1386 	switch (type) {
1387 	case RES_QP:
1388 		return remove_qp_ok((struct res_qp *)res);
1389 	case RES_CQ:
1390 		return remove_cq_ok((struct res_cq *)res);
1391 	case RES_SRQ:
1392 		return remove_srq_ok((struct res_srq *)res);
1393 	case RES_MPT:
1394 		return remove_mpt_ok((struct res_mpt *)res);
1395 	case RES_MTT:
1396 		return remove_mtt_ok((struct res_mtt *)res, extra);
1397 	case RES_MAC:
1398 		return -ENOSYS;
1399 	case RES_EQ:
1400 		return remove_eq_ok((struct res_eq *)res);
1401 	case RES_COUNTER:
1402 		return remove_counter_ok((struct res_counter *)res);
1403 	case RES_XRCD:
1404 		return remove_xrcdn_ok((struct res_xrcdn *)res);
1405 	case RES_FS_RULE:
1406 		return remove_fs_rule_ok((struct res_fs_rule *)res);
1407 	default:
1408 		return -EINVAL;
1409 	}
1410 }
1411 
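/*
 * Undo add_res_range(): verify that every id in the range exists, is
 * owned by 'slave' and may be removed, then erase the entries from the
 * rb-tree and the slave's list.
 */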
1412 static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1413 			 enum mlx4_resource type, int extra)
1414 {
1415 	u64 i;
1416 	int err;
1417 	struct mlx4_priv *priv = mlx4_priv(dev);
1418 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1419 	struct res_common *r;
1420 
1421 	spin_lock_irq(mlx4_tlock(dev));
1422 	for (i = base; i < base + count; ++i) {
1423 		r = res_tracker_lookup(&tracker->res_tree[type], i);
1424 		if (!r) {
1425 			err = -ENOENT;
1426 			goto out;
1427 		}
1428 		if (r->owner != slave) {
1429 			err = -EPERM;
1430 			goto out;
1431 		}
1432 		err = remove_ok(r, type, extra);
1433 		if (err)
1434 			goto out;
1435 	}
1436 
1437 	for (i = base; i < base + count; ++i) {
1438 		r = res_tracker_lookup(&tracker->res_tree[type], i);
1439 		rb_erase(&r->node, &tracker->res_tree[type]);
1440 		list_del(&r->list);
1441 		kfree(r);
1442 	}
1443 	err = 0;
1444 
1445 out:
1446 	spin_unlock_irq(mlx4_tlock(dev));
1447 
1448 	return err;
1449 }
1450 
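/*
 * The *_res_start_move_to() helpers drive the per-resource state
 * machines: they validate the requested transition, mark the resource
 * busy and record the from/to states; the caller later commits with
 * res_end_move() or rolls back with res_abort_move().
 */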
1451 static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
1452 				enum res_qp_states state, struct res_qp **qp,
1453 				int alloc)
1454 {
1455 	struct mlx4_priv *priv = mlx4_priv(dev);
1456 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1457 	struct res_qp *r;
1458 	int err = 0;
1459 
1460 	spin_lock_irq(mlx4_tlock(dev));
1461 	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
1462 	if (!r)
1463 		err = -ENOENT;
1464 	else if (r->com.owner != slave)
1465 		err = -EPERM;
1466 	else {
1467 		switch (state) {
1468 		case RES_QP_BUSY:
1469 			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
1470 				 __func__, (unsigned long long)r->com.res_id);
1471 			err = -EBUSY;
1472 			break;
1473 
1474 		case RES_QP_RESERVED:
1475 			if (r->com.state == RES_QP_MAPPED && !alloc)
1476 				break;
1477 
1478 			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", (unsigned long long)r->com.res_id);
1479 			err = -EINVAL;
1480 			break;
1481 
1482 		case RES_QP_MAPPED:
1483 			if ((r->com.state == RES_QP_RESERVED && alloc) ||
1484 			    r->com.state == RES_QP_HW)
1485 				break;
1486 			else {
1487 				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
1488 					  (unsigned long long)r->com.res_id);
1489 				err = -EINVAL;
1490 			}
1491 
1492 			break;
1493 
1494 		case RES_QP_HW:
1495 			if (r->com.state != RES_QP_MAPPED)
1496 				err = -EINVAL;
1497 			break;
1498 		default:
1499 			err = -EINVAL;
1500 		}
1501 
1502 		if (!err) {
1503 			r->com.from_state = r->com.state;
1504 			r->com.to_state = state;
1505 			r->com.state = RES_QP_BUSY;
1506 			if (qp)
1507 				*qp = r;
1508 		}
1509 	}
1510 
1511 	spin_unlock_irq(mlx4_tlock(dev));
1512 
1513 	return err;
1514 }
1515 
1516 static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1517 				enum res_mpt_states state, struct res_mpt **mpt)
1518 {
1519 	struct mlx4_priv *priv = mlx4_priv(dev);
1520 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1521 	struct res_mpt *r;
1522 	int err = 0;
1523 
1524 	spin_lock_irq(mlx4_tlock(dev));
1525 	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
1526 	if (!r)
1527 		err = -ENOENT;
1528 	else if (r->com.owner != slave)
1529 		err = -EPERM;
1530 	else {
1531 		switch (state) {
1532 		case RES_MPT_BUSY:
1533 			err = -EINVAL;
1534 			break;
1535 
1536 		case RES_MPT_RESERVED:
1537 			if (r->com.state != RES_MPT_MAPPED)
1538 				err = -EINVAL;
1539 			break;
1540 
1541 		case RES_MPT_MAPPED:
1542 			if (r->com.state != RES_MPT_RESERVED &&
1543 			    r->com.state != RES_MPT_HW)
1544 				err = -EINVAL;
1545 			break;
1546 
1547 		case RES_MPT_HW:
1548 			if (r->com.state != RES_MPT_MAPPED)
1549 				err = -EINVAL;
1550 			break;
1551 		default:
1552 			err = -EINVAL;
1553 		}
1554 
1555 		if (!err) {
1556 			r->com.from_state = r->com.state;
1557 			r->com.to_state = state;
1558 			r->com.state = RES_MPT_BUSY;
1559 			if (mpt)
1560 				*mpt = r;
1561 		}
1562 	}
1563 
1564 	spin_unlock_irq(mlx4_tlock(dev));
1565 
1566 	return err;
1567 }
1568 
1569 static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1570 				enum res_eq_states state, struct res_eq **eq)
1571 {
1572 	struct mlx4_priv *priv = mlx4_priv(dev);
1573 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1574 	struct res_eq *r;
1575 	int err = 0;
1576 
1577 	spin_lock_irq(mlx4_tlock(dev));
1578 	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
1579 	if (!r)
1580 		err = -ENOENT;
1581 	else if (r->com.owner != slave)
1582 		err = -EPERM;
1583 	else {
1584 		switch (state) {
1585 		case RES_EQ_BUSY:
1586 			err = -EINVAL;
1587 			break;
1588 
1589 		case RES_EQ_RESERVED:
1590 			if (r->com.state != RES_EQ_HW)
1591 				err = -EINVAL;
1592 			break;
1593 
1594 		case RES_EQ_HW:
1595 			if (r->com.state != RES_EQ_RESERVED)
1596 				err = -EINVAL;
1597 			break;
1598 
1599 		default:
1600 			err = -EINVAL;
1601 		}
1602 
1603 		if (!err) {
1604 			r->com.from_state = r->com.state;
1605 			r->com.to_state = state;
1606 			r->com.state = RES_EQ_BUSY;
1607 		}
1608 	}
1609 
1610 	spin_unlock_irq(mlx4_tlock(dev));
1611 
1612 	if (!err && eq)
1613 		*eq = r;
1614 
1615 	return err;
1616 }
1617 
1618 static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
1619 				enum res_cq_states state, struct res_cq **cq)
1620 {
1621 	struct mlx4_priv *priv = mlx4_priv(dev);
1622 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1623 	struct res_cq *r;
1624 	int err;
1625 
1626 	spin_lock_irq(mlx4_tlock(dev));
1627 	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
1628 	if (!r) {
1629 		err = -ENOENT;
1630 	} else if (r->com.owner != slave) {
1631 		err = -EPERM;
1632 	} else if (state == RES_CQ_ALLOCATED) {
1633 		if (r->com.state != RES_CQ_HW)
1634 			err = -EINVAL;
1635 		else if (atomic_read(&r->ref_count))
1636 			err = -EBUSY;
1637 		else
1638 			err = 0;
1639 	} else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
1640 		err = -EINVAL;
1641 	} else {
1642 		err = 0;
1643 	}
1644 
1645 	if (!err) {
1646 		r->com.from_state = r->com.state;
1647 		r->com.to_state = state;
1648 		r->com.state = RES_CQ_BUSY;
1649 		if (cq)
1650 			*cq = r;
1651 	}
1652 
1653 	spin_unlock_irq(mlx4_tlock(dev));
1654 
1655 	return err;
1656 }
1657 
1658 static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
1659 				 enum res_srq_states state, struct res_srq **srq)
1660 {
1661 	struct mlx4_priv *priv = mlx4_priv(dev);
1662 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1663 	struct res_srq *r;
1664 	int err = 0;
1665 
1666 	spin_lock_irq(mlx4_tlock(dev));
1667 	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
1668 	if (!r) {
1669 		err = -ENOENT;
1670 	} else if (r->com.owner != slave) {
1671 		err = -EPERM;
1672 	} else if (state == RES_SRQ_ALLOCATED) {
1673 		if (r->com.state != RES_SRQ_HW)
1674 			err = -EINVAL;
1675 		else if (atomic_read(&r->ref_count))
1676 			err = -EBUSY;
1677 	} else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
1678 		err = -EINVAL;
1679 	}
1680 
1681 	if (!err) {
1682 		r->com.from_state = r->com.state;
1683 		r->com.to_state = state;
1684 		r->com.state = RES_SRQ_BUSY;
1685 		if (srq)
1686 			*srq = r;
1687 	}
1688 
1689 	spin_unlock_irq(mlx4_tlock(dev));
1690 
1691 	return err;
1692 }
1693 
1694 static void res_abort_move(struct mlx4_dev *dev, int slave,
1695 			   enum mlx4_resource type, int id)
1696 {
1697 	struct mlx4_priv *priv = mlx4_priv(dev);
1698 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1699 	struct res_common *r;
1700 
1701 	spin_lock_irq(mlx4_tlock(dev));
1702 	r = res_tracker_lookup(&tracker->res_tree[type], id);
1703 	if (r && (r->owner == slave))
1704 		r->state = r->from_state;
1705 	spin_unlock_irq(mlx4_tlock(dev));
1706 }
1707 
1708 static void res_end_move(struct mlx4_dev *dev, int slave,
1709 			 enum mlx4_resource type, int id)
1710 {
1711 	struct mlx4_priv *priv = mlx4_priv(dev);
1712 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1713 	struct res_common *r;
1714 
1715 	spin_lock_irq(mlx4_tlock(dev));
1716 	r = res_tracker_lookup(&tracker->res_tree[type], id);
1717 	if (r && (r->owner == slave))
1718 		r->state = r->to_state;
1719 	spin_unlock_irq(mlx4_tlock(dev));
1720 }
1721 
1722 static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1723 {
1724 	return mlx4_is_qp_reserved(dev, qpn) &&
1725 		(mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
1726 }
1727 
1728 static int fw_reserved(struct mlx4_dev *dev, int qpn)
1729 {
1730 	return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1731 }
1732 
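/*
 * ALLOC_RES wrapper for QPs: RES_OP_RESERVE charges the slave's quota and
 * reserves a QP number range, RES_OP_MAP_ICM maps the ICM backing for a
 * single QP and moves it to RES_QP_MAPPED.
 */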
1733 static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1734 			u64 in_param, u64 *out_param)
1735 {
1736 	int err;
1737 	int count;
1738 	int align;
1739 	int base;
1740 	int qpn;
1741 	u8 flags;
1742 
1743 	switch (op) {
1744 	case RES_OP_RESERVE:
1745 		count = get_param_l(&in_param) & 0xffffff;
1746 		/* Turn off all unsupported QP allocation flags that the
1747 		 * slave tries to set.
1748 		 */
1749 		flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
1750 		align = get_param_h(&in_param);
1751 		err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
1752 		if (err)
1753 			return err;
1754 
1755 		err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
1756 		if (err) {
1757 			mlx4_release_resource(dev, slave, RES_QP, count, 0);
1758 			return err;
1759 		}
1760 
1761 		err = add_res_range(dev, slave, base, count, RES_QP, 0);
1762 		if (err) {
1763 			mlx4_release_resource(dev, slave, RES_QP, count, 0);
1764 			__mlx4_qp_release_range(dev, base, count);
1765 			return err;
1766 		}
1767 		set_param_l(out_param, base);
1768 		break;
1769 	case RES_OP_MAP_ICM:
1770 		qpn = get_param_l(&in_param) & 0x7fffff;
1771 		if (valid_reserved(dev, slave, qpn)) {
1772 			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1773 			if (err)
1774 				return err;
1775 		}
1776 
1777 		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1778 					   NULL, 1);
1779 		if (err)
1780 			return err;
1781 
1782 		if (!fw_reserved(dev, qpn)) {
1783 			err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
1784 			if (err) {
1785 				res_abort_move(dev, slave, RES_QP, qpn);
1786 				return err;
1787 			}
1788 		}
1789 
1790 		res_end_move(dev, slave, RES_QP, qpn);
1791 		break;
1792 
1793 	default:
1794 		err = -EINVAL;
1795 		break;
1796 	}
1797 	return err;
1798 }
1799 
1800 static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1801 			 u64 in_param, u64 *out_param)
1802 {
1803 	int err = -EINVAL;
1804 	int base;
1805 	int order;
1806 
1807 	if (op != RES_OP_RESERVE_AND_MAP)
1808 		return err;
1809 
1810 	order = get_param_l(&in_param);
1811 
1812 	err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1813 	if (err)
1814 		return err;
1815 
1816 	base = __mlx4_alloc_mtt_range(dev, order);
1817 	if (base == -1) {
1818 		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1819 		return -ENOMEM;
1820 	}
1821 
1822 	err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1823 	if (err) {
1824 		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1825 		__mlx4_free_mtt_range(dev, base, order);
1826 	} else {
1827 		set_param_l(out_param, base);
1828 	}
1829 
1830 	return err;
1831 }
1832 
1833 static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1834 			 u64 in_param, u64 *out_param)
1835 {
1836 	int err = -EINVAL;
1837 	int index;
1838 	int id;
1839 	struct res_mpt *mpt;
1840 
1841 	switch (op) {
1842 	case RES_OP_RESERVE:
1843 		err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1844 		if (err)
1845 			break;
1846 
1847 		index = __mlx4_mpt_reserve(dev);
1848 		if (index == -1) {
1849 			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1850 			break;
1851 		}
1852 		id = index & mpt_mask(dev);
1853 
1854 		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1855 		if (err) {
1856 			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1857 			__mlx4_mpt_release(dev, index);
1858 			break;
1859 		}
1860 		set_param_l(out_param, index);
1861 		break;
1862 	case RES_OP_MAP_ICM:
1863 		index = get_param_l(&in_param);
1864 		id = index & mpt_mask(dev);
1865 		err = mr_res_start_move_to(dev, slave, id,
1866 					   RES_MPT_MAPPED, &mpt);
1867 		if (err)
1868 			return err;
1869 
1870 		err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
1871 		if (err) {
1872 			res_abort_move(dev, slave, RES_MPT, id);
1873 			return err;
1874 		}
1875 
1876 		res_end_move(dev, slave, RES_MPT, id);
1877 		break;
1878 	}
1879 	return err;
1880 }
1881 
1882 static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1883 			u64 in_param, u64 *out_param)
1884 {
1885 	int cqn;
1886 	int err;
1887 
1888 	switch (op) {
1889 	case RES_OP_RESERVE_AND_MAP:
1890 		err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
1891 		if (err)
1892 			break;
1893 
1894 		err = __mlx4_cq_alloc_icm(dev, &cqn);
1895 		if (err) {
1896 			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1897 			break;
1898 		}
1899 
1900 		err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1901 		if (err) {
1902 			mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1903 			__mlx4_cq_free_icm(dev, cqn);
1904 			break;
1905 		}
1906 
1907 		set_param_l(out_param, cqn);
1908 		break;
1909 
1910 	default:
1911 		err = -EINVAL;
1912 	}
1913 
1914 	return err;
1915 }
1916 
1917 static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1918 			 u64 in_param, u64 *out_param)
1919 {
1920 	int srqn;
1921 	int err;
1922 
1923 	switch (op) {
1924 	case RES_OP_RESERVE_AND_MAP:
1925 		err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
1926 		if (err)
1927 			break;
1928 
1929 		err = __mlx4_srq_alloc_icm(dev, &srqn);
1930 		if (err) {
1931 			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1932 			break;
1933 		}
1934 
1935 		err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1936 		if (err) {
1937 			mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1938 			__mlx4_srq_free_icm(dev, srqn);
1939 			break;
1940 		}
1941 
1942 		set_param_l(out_param, srqn);
1943 		break;
1944 
1945 	default:
1946 		err = -EINVAL;
1947 	}
1948 
1949 	return err;
1950 }
1951 
1952 static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1953 				     u8 smac_index, u64 *mac)
1954 {
1955 	struct mlx4_priv *priv = mlx4_priv(dev);
1956 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1957 	struct list_head *mac_list =
1958 		&tracker->slave_list[slave].res_list[RES_MAC];
1959 	struct mac_res *res, *tmp;
1960 
1961 	list_for_each_entry_safe(res, tmp, mac_list, list) {
1962 		if (res->smac_index == smac_index && res->port == (u8) port) {
1963 			*mac = res->mac;
1964 			return 0;
1965 		}
1966 	}
1967 	return -ENOENT;
1968 }
1969 
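/*
 * Track a MAC registration in the slave's resource list.  If the slave has
 * already registered this MAC on the port, only the reference count is
 * bumped; otherwise one quota unit is charged and a new tracking entry is
 * appended to the slave's RES_MAC list.
 */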
1970 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
1971 {
1972 	struct mlx4_priv *priv = mlx4_priv(dev);
1973 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1974 	struct list_head *mac_list =
1975 		&tracker->slave_list[slave].res_list[RES_MAC];
1976 	struct mac_res *res, *tmp;
1977 
1978 	list_for_each_entry_safe(res, tmp, mac_list, list) {
1979 		if (res->mac == mac && res->port == (u8) port) {
1980 			/* mac found. update ref count */
1981 			++res->ref_count;
1982 			return 0;
1983 		}
1984 	}
1985 
1986 	if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
1987 		return -EINVAL;
1988 	res = kzalloc(sizeof(*res), GFP_KERNEL);
1989 	if (!res) {
1990 		mlx4_release_resource(dev, slave, RES_MAC, 1, port);
1991 		return -ENOMEM;
1992 	}
1993 	res->mac = mac;
1994 	res->port = (u8) port;
1995 	res->smac_index = smac_index;
1996 	res->ref_count = 1;
1997 	list_add_tail(&res->list,
1998 		      &tracker->slave_list[slave].res_list[RES_MAC]);
1999 	return 0;
2000 }
2001 
2002 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
2003 			       int port)
2004 {
2005 	struct mlx4_priv *priv = mlx4_priv(dev);
2006 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2007 	struct list_head *mac_list =
2008 		&tracker->slave_list[slave].res_list[RES_MAC];
2009 	struct mac_res *res, *tmp;
2010 
2011 	list_for_each_entry_safe(res, tmp, mac_list, list) {
2012 		if (res->mac == mac && res->port == (u8) port) {
2013 			if (!--res->ref_count) {
2014 				list_del(&res->list);
2015 				mlx4_release_resource(dev, slave, RES_MAC, 1, port);
2016 				kfree(res);
2017 			}
2018 			break;
2019 		}
2020 	}
2021 }
2022 
2023 static void rem_slave_macs(struct mlx4_dev *dev, int slave)
2024 {
2025 	struct mlx4_priv *priv = mlx4_priv(dev);
2026 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2027 	struct list_head *mac_list =
2028 		&tracker->slave_list[slave].res_list[RES_MAC];
2029 	struct mac_res *res, *tmp;
2030 	int i;
2031 
2032 	list_for_each_entry_safe(res, tmp, mac_list, list) {
2033 		list_del(&res->list);
2034 		/* dereference the MAC as many times as the slave referenced it */
2035 		for (i = 0; i < res->ref_count; i++)
2036 			__mlx4_unregister_mac(dev, res->port, res->mac);
2037 		mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
2038 		kfree(res);
2039 	}
2040 }
2041 
2042 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2043 			 u64 in_param, u64 *out_param, int in_port)
2044 {
2045 	int err = -EINVAL;
2046 	int port;
2047 	u64 mac;
2048 	u8 smac_index = 0;
2049 
2050 	if (op != RES_OP_RESERVE_AND_MAP)
2051 		return err;
2052 
2053 	port = !in_port ? get_param_l(out_param) : in_port;
2054 	port = mlx4_slave_convert_port(
2055 			dev, slave, port);
2056 
2057 	if (port < 0)
2058 		return -EINVAL;
2059 	mac = in_param;
2060 
2061 	err = __mlx4_register_mac(dev, port, mac);
2062 	if (err >= 0) {
2063 		smac_index = err;
2064 		set_param_l(out_param, err);
2065 		err = 0;
2066 	}
2067 
2068 	if (!err) {
2069 		err = mac_add_to_slave(dev, slave, mac, port, smac_index);
2070 		if (err)
2071 			__mlx4_unregister_mac(dev, port, mac);
2072 	}
2073 	return err;
2074 }
2075 
2076 static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2077 			     int port, int vlan_index)
2078 {
2079 	struct mlx4_priv *priv = mlx4_priv(dev);
2080 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2081 	struct list_head *vlan_list =
2082 		&tracker->slave_list[slave].res_list[RES_VLAN];
2083 	struct vlan_res *res, *tmp;
2084 
2085 	list_for_each_entry_safe(res, tmp, vlan_list, list) {
2086 		if (res->vlan == vlan && res->port == (u8) port) {
2087 			/* vlan found. update ref count */
2088 			++res->ref_count;
2089 			return 0;
2090 		}
2091 	}
2092 
2093 	if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
2094 		return -EINVAL;
2095 	res = kzalloc(sizeof(*res), GFP_KERNEL);
2096 	if (!res) {
2097 		mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
2098 		return -ENOMEM;
2099 	}
2100 	res->vlan = vlan;
2101 	res->port = (u8) port;
2102 	res->vlan_index = vlan_index;
2103 	res->ref_count = 1;
2104 	list_add_tail(&res->list,
2105 		      &tracker->slave_list[slave].res_list[RES_VLAN]);
2106 	return 0;
2107 }
2108 
2109 
2110 static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2111 				int port)
2112 {
2113 	struct mlx4_priv *priv = mlx4_priv(dev);
2114 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2115 	struct list_head *vlan_list =
2116 		&tracker->slave_list[slave].res_list[RES_VLAN];
2117 	struct vlan_res *res, *tmp;
2118 
2119 	list_for_each_entry_safe(res, tmp, vlan_list, list) {
2120 		if (res->vlan == vlan && res->port == (u8) port) {
2121 			if (!--res->ref_count) {
2122 				list_del(&res->list);
2123 				mlx4_release_resource(dev, slave, RES_VLAN,
2124 						      1, port);
2125 				kfree(res);
2126 			}
2127 			break;
2128 		}
2129 	}
2130 }
2131 
2132 static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
2133 {
2134 	struct mlx4_priv *priv = mlx4_priv(dev);
2135 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2136 	struct list_head *vlan_list =
2137 		&tracker->slave_list[slave].res_list[RES_VLAN];
2138 	struct vlan_res *res, *tmp;
2139 	int i;
2140 
2141 	list_for_each_entry_safe(res, tmp, vlan_list, list) {
2142 		list_del(&res->list);
2143 		/* dereference the VLAN as many times as the slave referenced it */
2144 		for (i = 0; i < res->ref_count; i++)
2145 			__mlx4_unregister_vlan(dev, res->port, res->vlan);
2146 		mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
2147 		kfree(res);
2148 	}
2149 }
2150 
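/*
 * Register a VLAN on behalf of a slave.  When no port is encoded in the
 * command modifier (the legacy interface), the request is treated as a NOP
 * and old_vlan_api is recorded so that the matching free is skipped too.
 */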
2151 static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2152 			  u64 in_param, u64 *out_param, int in_port)
2153 {
2154 	struct mlx4_priv *priv = mlx4_priv(dev);
2155 	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2156 	int err;
2157 	u16 vlan;
2158 	int vlan_index;
2159 	int port;
2160 
2161 	port = !in_port ? get_param_l(out_param) : in_port;
2162 
2163 	if (!port || op != RES_OP_RESERVE_AND_MAP)
2164 		return -EINVAL;
2165 
2166 	port = mlx4_slave_convert_port(
2167 			dev, slave, port);
2168 
2169 	if (port < 0)
2170 		return -EINVAL;
2171 	/* upstream kernels had a NOP for reg/unreg vlan; keep that behavior */
2172 	if (!in_port && port > 0 && port <= dev->caps.num_ports) {
2173 		slave_state[slave].old_vlan_api = true;
2174 		return 0;
2175 	}
2176 
2177 	vlan = (u16) in_param;
2178 
2179 	err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
2180 	if (!err) {
2181 		set_param_l(out_param, (u32) vlan_index);
2182 		err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
2183 		if (err)
2184 			__mlx4_unregister_vlan(dev, port, vlan);
2185 	}
2186 	return err;
2187 }
2188 
2189 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2190 			     u64 in_param, u64 *out_param, int port)
2191 {
2192 	u32 index;
2193 	int err;
2194 
2195 	if (op != RES_OP_RESERVE)
2196 		return -EINVAL;
2197 
2198 	err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
2199 	if (err)
2200 		return err;
2201 
2202 	err = __mlx4_counter_alloc(dev, &index);
2203 	if (err) {
2204 		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2205 		return err;
2206 	}
2207 
2208 	err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
2209 	if (err) {
2210 		__mlx4_counter_free(dev, index);
2211 		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2212 	} else {
2213 		set_param_l(out_param, index);
2214 	}
2215 
2216 	return err;
2217 }
2218 
2219 static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2220 			   u64 in_param, u64 *out_param)
2221 {
2222 	u32 xrcdn;
2223 	int err;
2224 
2225 	if (op != RES_OP_RESERVE)
2226 		return -EINVAL;
2227 
2228 	err = __mlx4_xrcd_alloc(dev, &xrcdn);
2229 	if (err)
2230 		return err;
2231 
2232 	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2233 	if (err)
2234 		__mlx4_xrcd_free(dev, xrcdn);
2235 	else
2236 		set_param_l(out_param, xrcdn);
2237 
2238 	return err;
2239 }
2240 
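/*
 * Dispatcher for the virtual ALLOC_RES command.  The resource type is taken
 * from the low byte of in_modifier, the allocation method from op_modifier,
 * and for MAC/VLAN resources the port number from bits 8-15 of in_modifier.
 */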
2241 int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
2242 			   struct mlx4_vhcr *vhcr,
2243 			   struct mlx4_cmd_mailbox *inbox,
2244 			   struct mlx4_cmd_mailbox *outbox,
2245 			   struct mlx4_cmd_info *cmd)
2246 {
2247 	int err;
2248 	int alop = vhcr->op_modifier;
2249 
2250 	switch (vhcr->in_modifier & 0xFF) {
2251 	case RES_QP:
2252 		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
2253 				   vhcr->in_param, &vhcr->out_param);
2254 		break;
2255 
2256 	case RES_MTT:
2257 		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2258 				    vhcr->in_param, &vhcr->out_param);
2259 		break;
2260 
2261 	case RES_MPT:
2262 		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2263 				    vhcr->in_param, &vhcr->out_param);
2264 		break;
2265 
2266 	case RES_CQ:
2267 		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2268 				   vhcr->in_param, &vhcr->out_param);
2269 		break;
2270 
2271 	case RES_SRQ:
2272 		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2273 				    vhcr->in_param, &vhcr->out_param);
2274 		break;
2275 
2276 	case RES_MAC:
2277 		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
2278 				    vhcr->in_param, &vhcr->out_param,
2279 				    (vhcr->in_modifier >> 8) & 0xFF);
2280 		break;
2281 
2282 	case RES_VLAN:
2283 		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
2284 				     vhcr->in_param, &vhcr->out_param,
2285 				     (vhcr->in_modifier >> 8) & 0xFF);
2286 		break;
2287 
2288 	case RES_COUNTER:
2289 		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
2290 					vhcr->in_param, &vhcr->out_param, 0);
2291 		break;
2292 
2293 	case RES_XRCD:
2294 		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
2295 				      vhcr->in_param, &vhcr->out_param);
2296 		break;
2297 
2298 	default:
2299 		err = -EINVAL;
2300 		break;
2301 	}
2302 
2303 	return err;
2304 }
2305 
2306 static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2307 		       u64 in_param)
2308 {
2309 	int err;
2310 	int count;
2311 	int base;
2312 	int qpn;
2313 
2314 	switch (op) {
2315 	case RES_OP_RESERVE:
2316 		base = get_param_l(&in_param) & 0x7fffff;
2317 		count = get_param_h(&in_param);
2318 		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2319 		if (err)
2320 			break;
2321 		mlx4_release_resource(dev, slave, RES_QP, count, 0);
2322 		__mlx4_qp_release_range(dev, base, count);
2323 		break;
2324 	case RES_OP_MAP_ICM:
2325 		qpn = get_param_l(&in_param) & 0x7fffff;
2326 		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2327 					   NULL, 0);
2328 		if (err)
2329 			return err;
2330 
2331 		if (!fw_reserved(dev, qpn))
2332 			__mlx4_qp_free_icm(dev, qpn);
2333 
2334 		res_end_move(dev, slave, RES_QP, qpn);
2335 
2336 		if (valid_reserved(dev, slave, qpn))
2337 			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2338 		break;
2339 	default:
2340 		err = -EINVAL;
2341 		break;
2342 	}
2343 	return err;
2344 }
2345 
2346 static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2347 			u64 in_param, u64 *out_param)
2348 {
2349 	int err = -EINVAL;
2350 	int base;
2351 	int order;
2352 
2353 	if (op != RES_OP_RESERVE_AND_MAP)
2354 		return err;
2355 
2356 	base = get_param_l(&in_param);
2357 	order = get_param_h(&in_param);
2358 	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
2359 	if (!err) {
2360 		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
2361 		__mlx4_free_mtt_range(dev, base, order);
2362 	}
2363 	return err;
2364 }
2365 
2366 static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2367 			u64 in_param)
2368 {
2369 	int err = -EINVAL;
2370 	int index;
2371 	int id;
2372 	struct res_mpt *mpt;
2373 
2374 	switch (op) {
2375 	case RES_OP_RESERVE:
2376 		index = get_param_l(&in_param);
2377 		id = index & mpt_mask(dev);
2378 		err = get_res(dev, slave, id, RES_MPT, &mpt);
2379 		if (err)
2380 			break;
2381 		index = mpt->key;
2382 		put_res(dev, slave, id, RES_MPT);
2383 
2384 		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2385 		if (err)
2386 			break;
2387 		mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
2388 		__mlx4_mpt_release(dev, index);
2389 		break;
2390 	case RES_OP_MAP_ICM:
2391 		index = get_param_l(&in_param);
2392 		id = index & mpt_mask(dev);
2393 		err = mr_res_start_move_to(dev, slave, id,
2394 					   RES_MPT_RESERVED, &mpt);
2395 		if (err)
2396 			return err;
2397 
2398 		__mlx4_mpt_free_icm(dev, mpt->key);
2399 		res_end_move(dev, slave, RES_MPT, id);
2400 		break;
2401 	default:
2402 		err = -EINVAL;
2403 		break;
2404 	}
2405 	return err;
2406 }
2407 
2408 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2409 		       u64 in_param, u64 *out_param)
2410 {
2411 	int cqn;
2412 	int err;
2413 
2414 	switch (op) {
2415 	case RES_OP_RESERVE_AND_MAP:
2416 		cqn = get_param_l(&in_param);
2417 		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2418 		if (err)
2419 			break;
2420 
2421 		mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
2422 		__mlx4_cq_free_icm(dev, cqn);
2423 		break;
2424 
2425 	default:
2426 		err = -EINVAL;
2427 		break;
2428 	}
2429 
2430 	return err;
2431 }
2432 
2433 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2434 			u64 in_param, u64 *out_param)
2435 {
2436 	int srqn;
2437 	int err;
2438 
2439 	switch (op) {
2440 	case RES_OP_RESERVE_AND_MAP:
2441 		srqn = get_param_l(&in_param);
2442 		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2443 		if (err)
2444 			break;
2445 
2446 		mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
2447 		__mlx4_srq_free_icm(dev, srqn);
2448 		break;
2449 
2450 	default:
2451 		err = -EINVAL;
2452 		break;
2453 	}
2454 
2455 	return err;
2456 }
2457 
2458 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2459 			    u64 in_param, u64 *out_param, int in_port)
2460 {
2461 	int port;
2462 	int err = 0;
2463 
2464 	switch (op) {
2465 	case RES_OP_RESERVE_AND_MAP:
2466 		port = !in_port ? get_param_l(out_param) : in_port;
2467 		port = mlx4_slave_convert_port(
2468 				dev, slave, port);
2469 
2470 		if (port < 0)
2471 			return -EINVAL;
2472 		mac_del_from_slave(dev, slave, in_param, port);
2473 		__mlx4_unregister_mac(dev, port, in_param);
2474 		break;
2475 	default:
2476 		err = -EINVAL;
2477 		break;
2478 	}
2479 
2480 	return err;
2481 
2482 }
2483 
2484 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2485 			    u64 in_param, u64 *out_param, int port)
2486 {
2487 	struct mlx4_priv *priv = mlx4_priv(dev);
2488 	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2489 	int err = 0;
2490 
2491 	port = mlx4_slave_convert_port(
2492 			dev, slave, port);
2493 
2494 	if (port < 0)
2495 		return -EINVAL;
2496 	switch (op) {
2497 	case RES_OP_RESERVE_AND_MAP:
2498 		if (slave_state[slave].old_vlan_api)
2499 			return 0;
2500 		if (!port)
2501 			return -EINVAL;
2502 		vlan_del_from_slave(dev, slave, in_param, port);
2503 		__mlx4_unregister_vlan(dev, port, in_param);
2504 		break;
2505 	default:
2506 		err = -EINVAL;
2507 		break;
2508 	}
2509 
2510 	return err;
2511 }
2512 
2513 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2514 			    u64 in_param, u64 *out_param)
2515 {
2516 	int index;
2517 	int err;
2518 
2519 	if (op != RES_OP_RESERVE)
2520 		return -EINVAL;
2521 
2522 	index = get_param_l(&in_param);
2523 	if (index == MLX4_SINK_COUNTER_INDEX(dev))
2524 		return 0;
2525 
2526 	err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2527 	if (err)
2528 		return err;
2529 
2530 	__mlx4_counter_free(dev, index);
2531 	mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2532 
2533 	return err;
2534 }
2535 
2536 static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2537 			  u64 in_param, u64 *out_param)
2538 {
2539 	int xrcdn;
2540 	int err;
2541 
2542 	if (op != RES_OP_RESERVE)
2543 		return -EINVAL;
2544 
2545 	xrcdn = get_param_l(&in_param);
2546 	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2547 	if (err)
2548 		return err;
2549 
2550 	__mlx4_xrcd_free(dev, xrcdn);
2551 
2552 	return err;
2553 }
2554 
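/*
 * Dispatcher for the virtual FREE_RES command; mirrors
 * mlx4_ALLOC_RES_wrapper() and releases both the tracker entry and the
 * underlying hardware resource for each resource type.
 */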
2555 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2556 			  struct mlx4_vhcr *vhcr,
2557 			  struct mlx4_cmd_mailbox *inbox,
2558 			  struct mlx4_cmd_mailbox *outbox,
2559 			  struct mlx4_cmd_info *cmd)
2560 {
2561 	int err = -EINVAL;
2562 	int alop = vhcr->op_modifier;
2563 
2564 	switch (vhcr->in_modifier & 0xFF) {
2565 	case RES_QP:
2566 		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2567 				  vhcr->in_param);
2568 		break;
2569 
2570 	case RES_MTT:
2571 		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2572 				   vhcr->in_param, &vhcr->out_param);
2573 		break;
2574 
2575 	case RES_MPT:
2576 		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2577 				   vhcr->in_param);
2578 		break;
2579 
2580 	case RES_CQ:
2581 		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2582 				  vhcr->in_param, &vhcr->out_param);
2583 		break;
2584 
2585 	case RES_SRQ:
2586 		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2587 				   vhcr->in_param, &vhcr->out_param);
2588 		break;
2589 
2590 	case RES_MAC:
2591 		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
2592 				   vhcr->in_param, &vhcr->out_param,
2593 				   (vhcr->in_modifier >> 8) & 0xFF);
2594 		break;
2595 
2596 	case RES_VLAN:
2597 		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
2598 				    vhcr->in_param, &vhcr->out_param,
2599 				    (vhcr->in_modifier >> 8) & 0xFF);
2600 		break;
2601 
2602 	case RES_COUNTER:
2603 		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2604 				       vhcr->in_param, &vhcr->out_param);
2605 		break;
2606 
2607 	case RES_XRCD:
2608 		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2609 				     vhcr->in_param, &vhcr->out_param);
2610 		break;
2611 	default:
2612 		break;
2613 	}
2614 	return err;
2615 }
2616 
2617 /* ugly but other choices are uglier */
2618 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2619 {
2620 	return (be32_to_cpu(mpt->flags) >> 9) & 1;
2621 }
2622 
2623 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
2624 {
2625 	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2626 }
2627 
2628 static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2629 {
2630 	return be32_to_cpu(mpt->mtt_sz);
2631 }
2632 
2633 static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2634 {
2635 	return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2636 }
2637 
2638 static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2639 {
2640 	return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2641 }
2642 
2643 static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2644 {
2645 	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2646 }
2647 
2648 static int mr_is_region(struct mlx4_mpt_entry *mpt)
2649 {
2650 	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2651 }
2652 
2653 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2654 {
2655 	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2656 }
2657 
2658 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2659 {
2660 	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2661 }
2662 
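/*
 * Compute how many MTT pages a QP context needs.  Work-queue sizes are
 * encoded as log2 values with 16-byte granularity (hence the "+ 4"), and
 * the receive queue contributes nothing when the QP uses an SRQ, RSS or
 * XRC.  As an illustrative example, log_sq_size = 6 with a stride code of
 * 2 gives an SQ of 1 << (6 + 2 + 4) = 4KB, i.e. one page when page_shift
 * is 12 and the page offset is zero.
 */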
2663 static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2664 {
2665 	int page_shift = (qpc->log_page_size & 0x3f) + 12;
2666 	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2667 	int log_sq_stride = qpc->sq_size_stride & 7;
2668 	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2669 	int log_rq_stride = qpc->rq_size_stride & 7;
2670 	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2671 	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
2672 	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2673 	int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
2674 	int sq_size;
2675 	int rq_size;
2676 	int total_pages;
2677 	int total_mem;
2678 	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2679 
2680 	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
2681 	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2682 	total_mem = sq_size + rq_size;
2683 	total_pages =
2684 		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
2685 				   page_shift);
2686 
2687 	return total_pages;
2688 }
2689 
2690 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2691 			   int size, struct res_mtt *mtt)
2692 {
2693 	int res_start = mtt->com.res_id;
2694 	int res_size = (1 << mtt->order);
2695 
2696 	if (start < res_start || start + size > res_start + res_size)
2697 		return -EPERM;
2698 	return 0;
2699 }
2700 
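/*
 * SW2HW_MPT on behalf of a slave: move the MPT to HW ownership, reject
 * memory windows, bind-enabled FMRs and MPTs whose PD field encodes a
 * different function, verify that the referenced MTT range belongs to the
 * slave, and only then forward the command to firmware.
 */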
2701 int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2702 			   struct mlx4_vhcr *vhcr,
2703 			   struct mlx4_cmd_mailbox *inbox,
2704 			   struct mlx4_cmd_mailbox *outbox,
2705 			   struct mlx4_cmd_info *cmd)
2706 {
2707 	int err;
2708 	int index = vhcr->in_modifier;
2709 	struct res_mtt *mtt;
2710 	struct res_mpt *mpt;
2711 	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
2712 	int phys;
2713 	int id;
2714 	u32 pd;
2715 	int pd_slave;
2716 
2717 	id = index & mpt_mask(dev);
2718 	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2719 	if (err)
2720 		return err;
2721 
2722 	/* Disable memory windows for VFs. */
2723 	if (!mr_is_region(inbox->buf)) {
2724 		err = -EPERM;
2725 		goto ex_abort;
2726 	}
2727 
2728 	/* Make sure that the PD bits related to the slave id are zeros. */
2729 	pd = mr_get_pd(inbox->buf);
2730 	pd_slave = (pd >> 17) & 0x7f;
2731 	if (pd_slave != 0 && --pd_slave != slave) {
2732 		err = -EPERM;
2733 		goto ex_abort;
2734 	}
2735 
2736 	if (mr_is_fmr(inbox->buf)) {
2737 		/* FMR and Bind Enable are forbidden in slave devices. */
2738 		if (mr_is_bind_enabled(inbox->buf)) {
2739 			err = -EPERM;
2740 			goto ex_abort;
2741 		}
2742 		/* FMR and Memory Windows are also forbidden. */
2743 		if (!mr_is_region(inbox->buf)) {
2744 			err = -EPERM;
2745 			goto ex_abort;
2746 		}
2747 	}
2748 
2749 	phys = mr_phys_mpt(inbox->buf);
2750 	if (!phys) {
2751 		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2752 		if (err)
2753 			goto ex_abort;
2754 
2755 		err = check_mtt_range(dev, slave, mtt_base,
2756 				      mr_get_mtt_size(inbox->buf), mtt);
2757 		if (err)
2758 			goto ex_put;
2759 
2760 		mpt->mtt = mtt;
2761 	}
2762 
2763 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2764 	if (err)
2765 		goto ex_put;
2766 
2767 	if (!phys) {
2768 		atomic_inc(&mtt->ref_count);
2769 		put_res(dev, slave, mtt->com.res_id, RES_MTT);
2770 	}
2771 
2772 	res_end_move(dev, slave, RES_MPT, id);
2773 	return 0;
2774 
2775 ex_put:
2776 	if (!phys)
2777 		put_res(dev, slave, mtt->com.res_id, RES_MTT);
2778 ex_abort:
2779 	res_abort_move(dev, slave, RES_MPT, id);
2780 
2781 	return err;
2782 }
2783 
2784 int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2785 			   struct mlx4_vhcr *vhcr,
2786 			   struct mlx4_cmd_mailbox *inbox,
2787 			   struct mlx4_cmd_mailbox *outbox,
2788 			   struct mlx4_cmd_info *cmd)
2789 {
2790 	int err;
2791 	int index = vhcr->in_modifier;
2792 	struct res_mpt *mpt;
2793 	int id;
2794 
2795 	id = index & mpt_mask(dev);
2796 	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2797 	if (err)
2798 		return err;
2799 
2800 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2801 	if (err)
2802 		goto ex_abort;
2803 
2804 	if (mpt->mtt)
2805 		atomic_dec(&mpt->mtt->ref_count);
2806 
2807 	res_end_move(dev, slave, RES_MPT, id);
2808 	return 0;
2809 
2810 ex_abort:
2811 	res_abort_move(dev, slave, RES_MPT, id);
2812 
2813 	return err;
2814 }
2815 
2816 int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2817 			   struct mlx4_vhcr *vhcr,
2818 			   struct mlx4_cmd_mailbox *inbox,
2819 			   struct mlx4_cmd_mailbox *outbox,
2820 			   struct mlx4_cmd_info *cmd)
2821 {
2822 	int err;
2823 	int index = vhcr->in_modifier;
2824 	struct res_mpt *mpt;
2825 	int id;
2826 
2827 	id = index & mpt_mask(dev);
2828 	err = get_res(dev, slave, id, RES_MPT, &mpt);
2829 	if (err)
2830 		return err;
2831 
2832 	if (mpt->com.from_state == RES_MPT_MAPPED) {
2833 		/* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
2834 		 * that, the VF must read the MPT. But since the MPT entry memory is not
2835 		 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
2836 		 * entry contents. To guarantee that the MPT cannot be changed, the driver
2837 		 * must perform HW2SW_MPT before this query and return the MPT entry to HW
2838 		 * ownership following the change. The change here allows the VF to
2839 		 * perform QUERY_MPT also when the entry is in SW ownership.
2840 		 */
2841 		struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
2842 					&mlx4_priv(dev)->mr_table.dmpt_table,
2843 					mpt->key, NULL);
2844 
2845 		if (NULL == mpt_entry || NULL == outbox->buf) {
2846 			err = -EINVAL;
2847 			goto out;
2848 		}
2849 
2850 		memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
2851 
2852 		err = 0;
2853 	} else if (mpt->com.from_state == RES_MPT_HW) {
2854 		err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2855 	} else {
2856 		err = -EBUSY;
2857 		goto out;
2858 	}
2859 
2860 
2861 out:
2862 	put_res(dev, slave, id, RES_MPT);
2863 	return err;
2864 }
2865 
2866 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2867 {
2868 	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2869 }
2870 
2871 static int qp_get_scqn(struct mlx4_qp_context *qpc)
2872 {
2873 	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2874 }
2875 
2876 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2877 {
2878 	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2879 }
2880 
2881 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2882 				  struct mlx4_qp_context *context)
2883 {
2884 	u32 qpn = vhcr->in_modifier & 0xffffff;
2885 	u32 qkey = 0;
2886 
2887 	if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2888 		return;
2889 
2890 	/* adjust qkey in qp context */
2891 	context->qkey = cpu_to_be32(qkey);
2892 }
2893 
2894 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
2895 				 struct mlx4_qp_context *qpc,
2896 				 struct mlx4_cmd_mailbox *inbox);
2897 
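/*
 * RST2INIT: move the QP into HW ownership and take a reference on every
 * object it points to (MTT range, receive and send CQs and, if used, the
 * SRQ) before forwarding the command, so the tracker can prevent those
 * objects from being destroyed while the QP exists.
 */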
2898 int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2899 			     struct mlx4_vhcr *vhcr,
2900 			     struct mlx4_cmd_mailbox *inbox,
2901 			     struct mlx4_cmd_mailbox *outbox,
2902 			     struct mlx4_cmd_info *cmd)
2903 {
2904 	int err;
2905 	int qpn = vhcr->in_modifier & 0x7fffff;
2906 	struct res_mtt *mtt;
2907 	struct res_qp *qp;
2908 	struct mlx4_qp_context *qpc = inbox->buf + 8;
2909 	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
2910 	int mtt_size = qp_get_mtt_size(qpc);
2911 	struct res_cq *rcq;
2912 	struct res_cq *scq;
2913 	int rcqn = qp_get_rcqn(qpc);
2914 	int scqn = qp_get_scqn(qpc);
2915 	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2916 	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2917 	struct res_srq *srq;
2918 	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2919 
2920 	err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
2921 	if (err)
2922 		return err;
2923 
2924 	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2925 	if (err)
2926 		return err;
2927 	qp->local_qpn = local_qpn;
2928 	qp->sched_queue = 0;
2929 	qp->param3 = 0;
2930 	qp->vlan_control = 0;
2931 	qp->fvl_rx = 0;
2932 	qp->pri_path_fl = 0;
2933 	qp->vlan_index = 0;
2934 	qp->feup = 0;
2935 	qp->qpc_flags = be32_to_cpu(qpc->flags);
2936 
2937 	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
2938 	if (err)
2939 		goto ex_abort;
2940 
2941 	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2942 	if (err)
2943 		goto ex_put_mtt;
2944 
2945 	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2946 	if (err)
2947 		goto ex_put_mtt;
2948 
2949 	if (scqn != rcqn) {
2950 		err = get_res(dev, slave, scqn, RES_CQ, &scq);
2951 		if (err)
2952 			goto ex_put_rcq;
2953 	} else
2954 		scq = rcq;
2955 
2956 	if (use_srq) {
2957 		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2958 		if (err)
2959 			goto ex_put_scq;
2960 	}
2961 
2962 	adjust_proxy_tun_qkey(dev, vhcr, qpc);
2963 	update_pkey_index(dev, slave, inbox);
2964 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2965 	if (err)
2966 		goto ex_put_srq;
2967 	atomic_inc(&mtt->ref_count);
2968 	qp->mtt = mtt;
2969 	atomic_inc(&rcq->ref_count);
2970 	qp->rcq = rcq;
2971 	atomic_inc(&scq->ref_count);
2972 	qp->scq = scq;
2973 
2974 	if (scqn != rcqn)
2975 		put_res(dev, slave, scqn, RES_CQ);
2976 
2977 	if (use_srq) {
2978 		atomic_inc(&srq->ref_count);
2979 		put_res(dev, slave, srqn, RES_SRQ);
2980 		qp->srq = srq;
2981 	}
2982 	put_res(dev, slave, rcqn, RES_CQ);
2983 	put_res(dev, slave, mtt_base, RES_MTT);
2984 	res_end_move(dev, slave, RES_QP, qpn);
2985 
2986 	return 0;
2987 
2988 ex_put_srq:
2989 	if (use_srq)
2990 		put_res(dev, slave, srqn, RES_SRQ);
2991 ex_put_scq:
2992 	if (scqn != rcqn)
2993 		put_res(dev, slave, scqn, RES_CQ);
2994 ex_put_rcq:
2995 	put_res(dev, slave, rcqn, RES_CQ);
2996 ex_put_mtt:
2997 	put_res(dev, slave, mtt_base, RES_MTT);
2998 ex_abort:
2999 	res_abort_move(dev, slave, RES_QP, qpn);
3000 
3001 	return err;
3002 }
3003 
3004 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
3005 {
3006 	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
3007 }
3008 
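/*
 * EQ and CQ entries are 32 bytes, so the number of MTT pages required is
 * 1 << (log_size + 5 - page_shift), with a minimum of one page.
 */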
3009 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
3010 {
3011 	int log_eq_size = eqc->log_eq_size & 0x1f;
3012 	int page_shift = (eqc->log_page_size & 0x3f) + 12;
3013 
3014 	if (log_eq_size + 5 < page_shift)
3015 		return 1;
3016 
3017 	return 1 << (log_eq_size + 5 - page_shift);
3018 }
3019 
3020 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
3021 {
3022 	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
3023 }
3024 
3025 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
3026 {
3027 	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
3028 	int page_shift = (cqc->log_page_size & 0x3f) + 12;
3029 
3030 	if (log_cq_size + 5 < page_shift)
3031 		return 1;
3032 
3033 	return 1 << (log_cq_size + 5 - page_shift);
3034 }
3035 
3036 int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3037 			  struct mlx4_vhcr *vhcr,
3038 			  struct mlx4_cmd_mailbox *inbox,
3039 			  struct mlx4_cmd_mailbox *outbox,
3040 			  struct mlx4_cmd_info *cmd)
3041 {
3042 	int err;
3043 	int eqn = vhcr->in_modifier;
3044 	int res_id = (slave << 10) | eqn;
3045 	struct mlx4_eq_context *eqc = inbox->buf;
3046 	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
3047 	int mtt_size = eq_get_mtt_size(eqc);
3048 	struct res_eq *eq;
3049 	struct res_mtt *mtt;
3050 
3051 	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3052 	if (err)
3053 		return err;
3054 	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
3055 	if (err)
3056 		goto out_add;
3057 
3058 	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3059 	if (err)
3060 		goto out_move;
3061 
3062 	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
3063 	if (err)
3064 		goto out_put;
3065 
3066 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3067 	if (err)
3068 		goto out_put;
3069 
3070 	atomic_inc(&mtt->ref_count);
3071 	eq->mtt = mtt;
3072 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3073 	res_end_move(dev, slave, RES_EQ, res_id);
3074 	return 0;
3075 
3076 out_put:
3077 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3078 out_move:
3079 	res_abort_move(dev, slave, RES_EQ, res_id);
3080 out_add:
3081 	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3082 	return err;
3083 }
3084 
3085 int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
3086 			    struct mlx4_vhcr *vhcr,
3087 			    struct mlx4_cmd_mailbox *inbox,
3088 			    struct mlx4_cmd_mailbox *outbox,
3089 			    struct mlx4_cmd_info *cmd)
3090 {
3091 	int err;
3092 	u8 get = vhcr->op_modifier;
3093 
3094 	if (get != 1)
3095 		return -EPERM;
3096 
3097 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3098 
3099 	return err;
3100 }
3101 
3102 static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
3103 			      int len, struct res_mtt **res)
3104 {
3105 	struct mlx4_priv *priv = mlx4_priv(dev);
3106 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3107 	struct res_mtt *mtt;
3108 	int err = -EINVAL;
3109 
3110 	spin_lock_irq(mlx4_tlock(dev));
3111 	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
3112 			    com.list) {
3113 		if (!check_mtt_range(dev, slave, start, len, mtt)) {
3114 			*res = mtt;
3115 			mtt->com.from_state = mtt->com.state;
3116 			mtt->com.state = RES_MTT_BUSY;
3117 			err = 0;
3118 			break;
3119 		}
3120 	}
3121 	spin_unlock_irq(mlx4_tlock(dev));
3122 
3123 	return err;
3124 }
3125 
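/*
 * Sanity-check a QP state-transition request coming from a slave: the FPP
 * bit is cleared and rate-limit settings are rejected for VFs, the MGID
 * index of each address path must fall within the slave's GID quota, and
 * MLX (proxy special) QPs may only be set up by VFs with SMI enabled.
 */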
3126 static int verify_qp_parameters(struct mlx4_dev *dev,
3127 				struct mlx4_vhcr *vhcr,
3128 				struct mlx4_cmd_mailbox *inbox,
3129 				enum qp_transition transition, u8 slave)
3130 {
3131 	u32			qp_type;
3132 	u32			qpn;
3133 	struct mlx4_qp_context	*qp_ctx;
3134 	enum mlx4_qp_optpar	optpar;
3135 	int port;
3136 	int num_gids;
3137 
3138 	qp_ctx  = inbox->buf + 8;
3139 	qp_type	= (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
3140 	optpar	= be32_to_cpu(*(__be32 *) inbox->buf);
3141 
3142 	if (slave != mlx4_master_func_num(dev)) {
3143 		qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
3144 		/* setting QP rate-limit is disallowed for VFs */
3145 		if (qp_ctx->rate_limit_params)
3146 			return -EPERM;
3147 	}
3148 
3149 	switch (qp_type) {
3150 	case MLX4_QP_ST_RC:
3151 	case MLX4_QP_ST_XRC:
3152 	case MLX4_QP_ST_UC:
3153 		switch (transition) {
3154 		case QP_TRANS_INIT2RTR:
3155 		case QP_TRANS_RTR2RTS:
3156 		case QP_TRANS_RTS2RTS:
3157 		case QP_TRANS_SQD2SQD:
3158 		case QP_TRANS_SQD2RTS:
3159 			if (slave != mlx4_master_func_num(dev)) {
3160 				if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
3161 					port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3162 					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3163 						num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3164 					else
3165 						num_gids = 1;
3166 					if (qp_ctx->pri_path.mgid_index >= num_gids)
3167 						return -EINVAL;
3168 				}
3169 				if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3170 					port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
3171 					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
3172 						num_gids = mlx4_get_slave_num_gids(dev, slave, port);
3173 					else
3174 						num_gids = 1;
3175 					if (qp_ctx->alt_path.mgid_index >= num_gids)
3176 						return -EINVAL;
3177 				}
3178 			}
3179 			break;
3180 		default:
3181 			break;
3182 		}
3183 		break;
3184 
3185 	case MLX4_QP_ST_MLX:
3186 		qpn = vhcr->in_modifier & 0x7fffff;
3187 		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3188 		if (transition == QP_TRANS_INIT2RTR &&
3189 		    slave != mlx4_master_func_num(dev) &&
3190 		    mlx4_is_qp_reserved(dev, qpn) &&
3191 		    !mlx4_vf_smi_enabled(dev, slave, port)) {
3192 			/* only enabled VFs may create MLX proxy QPs */
3193 			mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
3194 				 __func__, slave, port);
3195 			return -EPERM;
3196 		}
3197 		break;
3198 
3199 	default:
3200 		break;
3201 	}
3202 
3203 	return 0;
3204 }
3205 
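/*
 * WRITE_MTT for a slave is executed in software: find the slave-owned MTT
 * object containing the target range, convert the inbox entries to
 * host-endian addresses with the low bit cleared, and call
 * __mlx4_write_mtt() directly rather than forwarding the command.
 */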
3206 int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
3207 			   struct mlx4_vhcr *vhcr,
3208 			   struct mlx4_cmd_mailbox *inbox,
3209 			   struct mlx4_cmd_mailbox *outbox,
3210 			   struct mlx4_cmd_info *cmd)
3211 {
3212 	struct mlx4_mtt mtt;
3213 	__be64 *page_list = inbox->buf;
3214 	u64 *pg_list = (u64 *)page_list;
3215 	int i;
3216 	struct res_mtt *rmtt = NULL;
3217 	int start = be64_to_cpu(page_list[0]);
3218 	int npages = vhcr->in_modifier;
3219 	int err;
3220 
3221 	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
3222 	if (err)
3223 		return err;
3224 
3225 	/* Call the SW implementation of write_mtt:
3226 	 * - Prepare a dummy mtt struct
3227 	 * - Translate inbox contents to simple addresses in host endianness */
3228 	mtt.offset = 0;  /* TBD this is broken but I don't handle it since
3229 			    we don't really use it */
3230 	mtt.order = 0;
3231 	mtt.page_shift = 0;
3232 	for (i = 0; i < npages; ++i)
3233 		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
3234 
3235 	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
3236 			       ((u64 *)page_list + 2));
3237 
3238 	if (rmtt)
3239 		put_res(dev, slave, rmtt->com.res_id, RES_MTT);
3240 
3241 	return err;
3242 }
3243 
3244 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3245 			  struct mlx4_vhcr *vhcr,
3246 			  struct mlx4_cmd_mailbox *inbox,
3247 			  struct mlx4_cmd_mailbox *outbox,
3248 			  struct mlx4_cmd_info *cmd)
3249 {
3250 	int eqn = vhcr->in_modifier;
3251 	int res_id = eqn | (slave << 10);
3252 	struct res_eq *eq;
3253 	int err;
3254 
3255 	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
3256 	if (err)
3257 		return err;
3258 
3259 	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
3260 	if (err)
3261 		goto ex_abort;
3262 
3263 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3264 	if (err)
3265 		goto ex_put;
3266 
3267 	atomic_dec(&eq->mtt->ref_count);
3268 	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3269 	res_end_move(dev, slave, RES_EQ, res_id);
3270 	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3271 
3272 	return 0;
3273 
3274 ex_put:
3275 	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3276 ex_abort:
3277 	res_abort_move(dev, slave, RES_EQ, res_id);
3278 
3279 	return err;
3280 }
3281 
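/*
 * Generate an event on a slave's event EQ.  An EQE is produced only if the
 * slave is valid, active and has registered an EQ for this event type; the
 * EQE payload is copied into a mailbox and posted with the GEN_EQE command,
 * with the slave number and EQN encoded in the in_modifier.
 */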
3282 int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3283 {
3284 	struct mlx4_priv *priv = mlx4_priv(dev);
3285 	struct mlx4_slave_event_eq_info *event_eq;
3286 	struct mlx4_cmd_mailbox *mailbox;
3287 	u32 in_modifier = 0;
3288 	int err;
3289 	int res_id;
3290 	struct res_eq *req;
3291 
3292 	if (!priv->mfunc.master.slave_state)
3293 		return -EINVAL;
3294 
3295 	/* check for slave valid, slave not PF, and slave active */
3296 	if (slave < 0 || slave > dev->persist->num_vfs ||
3297 	    slave == dev->caps.function ||
3298 	    !priv->mfunc.master.slave_state[slave].active)
3299 		return 0;
3300 
3301 	event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
3302 
3303 	/* Create the event only if the slave is registered */
3304 	if (event_eq->eqn < 0)
3305 		return 0;
3306 
3307 	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3308 	res_id = (slave << 10) | event_eq->eqn;
3309 	err = get_res(dev, slave, res_id, RES_EQ, &req);
3310 	if (err)
3311 		goto unlock;
3312 
3313 	if (req->com.from_state != RES_EQ_HW) {
3314 		err = -EINVAL;
3315 		goto put;
3316 	}
3317 
3318 	mailbox = mlx4_alloc_cmd_mailbox(dev);
3319 	if (IS_ERR(mailbox)) {
3320 		err = PTR_ERR(mailbox);
3321 		goto put;
3322 	}
3323 
3324 	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3325 		++event_eq->token;
3326 		eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3327 	}
3328 
3329 	memcpy(mailbox->buf, (u8 *) eqe, 28);
3330 
3331 	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);
3332 
3333 	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3334 		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3335 		       MLX4_CMD_NATIVE);
3336 
3337 	put_res(dev, slave, res_id, RES_EQ);
3338 	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3339 	mlx4_free_cmd_mailbox(dev, mailbox);
3340 	return err;
3341 
3342 put:
3343 	put_res(dev, slave, res_id, RES_EQ);
3344 
3345 unlock:
3346 	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3347 	return err;
3348 }
3349 
3350 int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3351 			  struct mlx4_vhcr *vhcr,
3352 			  struct mlx4_cmd_mailbox *inbox,
3353 			  struct mlx4_cmd_mailbox *outbox,
3354 			  struct mlx4_cmd_info *cmd)
3355 {
3356 	int eqn = vhcr->in_modifier;
3357 	int res_id = eqn | (slave << 10);
3358 	struct res_eq *eq;
3359 	int err;
3360 
3361 	err = get_res(dev, slave, res_id, RES_EQ, &eq);
3362 	if (err)
3363 		return err;
3364 
3365 	if (eq->com.from_state != RES_EQ_HW) {
3366 		err = -EINVAL;
3367 		goto ex_put;
3368 	}
3369 
3370 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3371 
3372 ex_put:
3373 	put_res(dev, slave, res_id, RES_EQ);
3374 	return err;
3375 }
3376 
3377 int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3378 			  struct mlx4_vhcr *vhcr,
3379 			  struct mlx4_cmd_mailbox *inbox,
3380 			  struct mlx4_cmd_mailbox *outbox,
3381 			  struct mlx4_cmd_info *cmd)
3382 {
3383 	int err;
3384 	int cqn = vhcr->in_modifier;
3385 	struct mlx4_cq_context *cqc = inbox->buf;
3386 	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3387 	struct res_cq *cq = NULL;
3388 	struct res_mtt *mtt;
3389 
3390 	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3391 	if (err)
3392 		return err;
3393 	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3394 	if (err)
3395 		goto out_move;
3396 	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3397 	if (err)
3398 		goto out_put;
3399 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3400 	if (err)
3401 		goto out_put;
3402 	atomic_inc(&mtt->ref_count);
3403 	cq->mtt = mtt;
3404 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3405 	res_end_move(dev, slave, RES_CQ, cqn);
3406 	return 0;
3407 
3408 out_put:
3409 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3410 out_move:
3411 	res_abort_move(dev, slave, RES_CQ, cqn);
3412 	return err;
3413 }
3414 
3415 int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3416 			  struct mlx4_vhcr *vhcr,
3417 			  struct mlx4_cmd_mailbox *inbox,
3418 			  struct mlx4_cmd_mailbox *outbox,
3419 			  struct mlx4_cmd_info *cmd)
3420 {
3421 	int err;
3422 	int cqn = vhcr->in_modifier;
3423 	struct res_cq *cq = NULL;
3424 
3425 	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3426 	if (err)
3427 		return err;
3428 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3429 	if (err)
3430 		goto out_move;
3431 	atomic_dec(&cq->mtt->ref_count);
3432 	res_end_move(dev, slave, RES_CQ, cqn);
3433 	return 0;
3434 
3435 out_move:
3436 	res_abort_move(dev, slave, RES_CQ, cqn);
3437 	return err;
3438 }
3439 
3440 int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3441 			  struct mlx4_vhcr *vhcr,
3442 			  struct mlx4_cmd_mailbox *inbox,
3443 			  struct mlx4_cmd_mailbox *outbox,
3444 			  struct mlx4_cmd_info *cmd)
3445 {
3446 	int cqn = vhcr->in_modifier;
3447 	struct res_cq *cq;
3448 	int err;
3449 
3450 	err = get_res(dev, slave, cqn, RES_CQ, &cq);
3451 	if (err)
3452 		return err;
3453 
3454 	if (cq->com.from_state != RES_CQ_HW)
3455 		goto ex_put;
3456 
3457 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3458 ex_put:
3459 	put_res(dev, slave, cqn, RES_CQ);
3460 
3461 	return err;
3462 }
3463 
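/*
 * CQ resize (MODIFY_CQ with op_modifier 0): verify that the currently
 * attached MTT matches the tracker's view, validate the new MTT range and,
 * on success, move the CQ's MTT reference from the old range to the new
 * one.
 */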
3464 static int handle_resize(struct mlx4_dev *dev, int slave,
3465 			 struct mlx4_vhcr *vhcr,
3466 			 struct mlx4_cmd_mailbox *inbox,
3467 			 struct mlx4_cmd_mailbox *outbox,
3468 			 struct mlx4_cmd_info *cmd,
3469 			 struct res_cq *cq)
3470 {
3471 	int err;
3472 	struct res_mtt *orig_mtt;
3473 	struct res_mtt *mtt;
3474 	struct mlx4_cq_context *cqc = inbox->buf;
3475 	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
3476 
3477 	err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3478 	if (err)
3479 		return err;
3480 
3481 	if (orig_mtt != cq->mtt) {
3482 		err = -EINVAL;
3483 		goto ex_put;
3484 	}
3485 
3486 	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3487 	if (err)
3488 		goto ex_put;
3489 
3490 	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3491 	if (err)
3492 		goto ex_put1;
3493 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3494 	if (err)
3495 		goto ex_put1;
3496 	atomic_dec(&orig_mtt->ref_count);
3497 	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3498 	atomic_inc(&mtt->ref_count);
3499 	cq->mtt = mtt;
3500 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3501 	return 0;
3502 
3503 ex_put1:
3504 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3505 ex_put:
3506 	put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3507 
3508 	return err;
3509 
3510 }
3511 
3512 int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3513 			   struct mlx4_vhcr *vhcr,
3514 			   struct mlx4_cmd_mailbox *inbox,
3515 			   struct mlx4_cmd_mailbox *outbox,
3516 			   struct mlx4_cmd_info *cmd)
3517 {
3518 	int cqn = vhcr->in_modifier;
3519 	struct res_cq *cq;
3520 	int err;
3521 
3522 	err = get_res(dev, slave, cqn, RES_CQ, &cq);
3523 	if (err)
3524 		return err;
3525 
3526 	if (cq->com.from_state != RES_CQ_HW)
3527 		goto ex_put;
3528 
3529 	if (vhcr->op_modifier == 0) {
3530 		err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
3531 		goto ex_put;
3532 	}
3533 
3534 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3535 ex_put:
3536 	put_res(dev, slave, cqn, RES_CQ);
3537 
3538 	return err;
3539 }
3540 
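/*
 * SRQ WQEs also have 16-byte granularity, so the MTT page count is
 * 1 << (log_srq_size + log_rq_stride + 4 - page_shift), with a minimum of
 * one page.
 */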
3541 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3542 {
3543 	int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3544 	int log_rq_stride = srqc->logstride & 7;
3545 	int page_shift = (srqc->log_page_size & 0x3f) + 12;
3546 
3547 	if (log_srq_size + log_rq_stride + 4 < page_shift)
3548 		return 1;
3549 
3550 	return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3551 }
3552 
3553 int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3554 			   struct mlx4_vhcr *vhcr,
3555 			   struct mlx4_cmd_mailbox *inbox,
3556 			   struct mlx4_cmd_mailbox *outbox,
3557 			   struct mlx4_cmd_info *cmd)
3558 {
3559 	int err;
3560 	int srqn = vhcr->in_modifier;
3561 	struct res_mtt *mtt;
3562 	struct res_srq *srq = NULL;
3563 	struct mlx4_srq_context *srqc = inbox->buf;
3564 	int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
3565 
3566 	if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3567 		return -EINVAL;
3568 
3569 	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3570 	if (err)
3571 		return err;
3572 	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
3573 	if (err)
3574 		goto ex_abort;
3575 	err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3576 			      mtt);
3577 	if (err)
3578 		goto ex_put_mtt;
3579 
3580 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3581 	if (err)
3582 		goto ex_put_mtt;
3583 
3584 	atomic_inc(&mtt->ref_count);
3585 	srq->mtt = mtt;
3586 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3587 	res_end_move(dev, slave, RES_SRQ, srqn);
3588 	return 0;
3589 
3590 ex_put_mtt:
3591 	put_res(dev, slave, mtt->com.res_id, RES_MTT);
3592 ex_abort:
3593 	res_abort_move(dev, slave, RES_SRQ, srqn);
3594 
3595 	return err;
3596 }
3597 
3598 int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3599 			   struct mlx4_vhcr *vhcr,
3600 			   struct mlx4_cmd_mailbox *inbox,
3601 			   struct mlx4_cmd_mailbox *outbox,
3602 			   struct mlx4_cmd_info *cmd)
3603 {
3604 	int err;
3605 	int srqn = vhcr->in_modifier;
3606 	struct res_srq *srq = NULL;
3607 
3608 	err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3609 	if (err)
3610 		return err;
3611 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3612 	if (err)
3613 		goto ex_abort;
3614 	atomic_dec(&srq->mtt->ref_count);
3615 	if (srq->cq)
3616 		atomic_dec(&srq->cq->ref_count);
3617 	res_end_move(dev, slave, RES_SRQ, srqn);
3618 
3619 	return 0;
3620 
3621 ex_abort:
3622 	res_abort_move(dev, slave, RES_SRQ, srqn);
3623 
3624 	return err;
3625 }
3626 
3627 int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3628 			   struct mlx4_vhcr *vhcr,
3629 			   struct mlx4_cmd_mailbox *inbox,
3630 			   struct mlx4_cmd_mailbox *outbox,
3631 			   struct mlx4_cmd_info *cmd)
3632 {
3633 	int err;
3634 	int srqn = vhcr->in_modifier;
3635 	struct res_srq *srq;
3636 
3637 	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3638 	if (err)
3639 		return err;
3640 	if (srq->com.from_state != RES_SRQ_HW) {
3641 		err = -EBUSY;
3642 		goto out;
3643 	}
3644 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3645 out:
3646 	put_res(dev, slave, srqn, RES_SRQ);
3647 	return err;
3648 }
3649 
3650 int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3651 			 struct mlx4_vhcr *vhcr,
3652 			 struct mlx4_cmd_mailbox *inbox,
3653 			 struct mlx4_cmd_mailbox *outbox,
3654 			 struct mlx4_cmd_info *cmd)
3655 {
3656 	int err;
3657 	int srqn = vhcr->in_modifier;
3658 	struct res_srq *srq;
3659 
3660 	err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3661 	if (err)
3662 		return err;
3663 
3664 	if (srq->com.from_state != RES_SRQ_HW) {
3665 		err = -EBUSY;
3666 		goto out;
3667 	}
3668 
3669 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3670 out:
3671 	put_res(dev, slave, srqn, RES_SRQ);
3672 	return err;
3673 }
3674 
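/*
 * Generic QP command wrapper: the command is passed through to firmware only
 * while the slave's QP is in hardware ownership (RES_QP_HW); otherwise the
 * command is rejected without touching the QP.
 */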
3675 int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3676 			struct mlx4_vhcr *vhcr,
3677 			struct mlx4_cmd_mailbox *inbox,
3678 			struct mlx4_cmd_mailbox *outbox,
3679 			struct mlx4_cmd_info *cmd)
3680 {
3681 	int err;
3682 	int qpn = vhcr->in_modifier & 0x7fffff;
3683 	struct res_qp *qp;
3684 
3685 	err = get_res(dev, slave, qpn, RES_QP, &qp);
3686 	if (err)
3687 		return err;
3688 	if (qp->com.from_state != RES_QP_HW) {
3689 		err = -EBUSY;
3690 		goto out;
3691 	}
3692 
3693 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3694 out:
3695 	put_res(dev, slave, qpn, RES_QP);
3696 	return err;
3697 }
3698 
3699 int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3700 			      struct mlx4_vhcr *vhcr,
3701 			      struct mlx4_cmd_mailbox *inbox,
3702 			      struct mlx4_cmd_mailbox *outbox,
3703 			      struct mlx4_cmd_info *cmd)
3704 {
3705 	struct mlx4_qp_context *context = inbox->buf + 8;
3706 	adjust_proxy_tun_qkey(dev, vhcr, context);
3707 	update_pkey_index(dev, slave, inbox);
3708 	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3709 }
3710 
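/*
 * Bit 6 of pri_path.sched_queue encodes the port (0 => port 1, 1 => port 2).
 * Remap that bit from the port number the VF used to the physical port it is
 * actually assigned, and do the same for the alternate path when the
 * ALT_ADDR_PATH optional parameter is being modified.
 */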
3711 static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3712 				  struct mlx4_qp_context *qpc,
3713 				  struct mlx4_cmd_mailbox *inbox)
3714 {
3715 	enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3716 	u8 pri_sched_queue;
3717 	int port = mlx4_slave_convert_port(
3718 		   dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3719 
3720 	if (port < 0)
3721 		return -EINVAL;
3722 
3723 	pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3724 			  ((port & 1) << 6);
3725 
3726 	if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
3727 	    qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
3728 		qpc->pri_path.sched_queue = pri_sched_queue;
3729 	}
3730 
3731 	if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3732 		port = mlx4_slave_convert_port(
3733 				dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3734 				+ 1) - 1;
3735 		if (port < 0)
3736 			return -EINVAL;
3737 		qpc->alt_path.sched_queue =
3738 			(qpc->alt_path.sched_queue & ~(1 << 6)) |
3739 			(port & 1) << 6;
3740 	}
3741 	return 0;
3742 }
3743 
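/*
 * For Ethernet QPs (any service type except MLX), make sure the source MAC
 * index programmed in the QP context resolves to a MAC address that is
 * actually registered to this slave on that port.
 */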
3744 static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3745 				struct mlx4_qp_context *qpc,
3746 				struct mlx4_cmd_mailbox *inbox)
3747 {
3748 	u64 mac;
3749 	int port;
3750 	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3751 	u8 sched = *(u8 *)(inbox->buf + 64);
3752 	u8 smac_ix;
3753 
3754 	port = (sched >> 6 & 1) + 1;
3755 	if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3756 		smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3757 		if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3758 			return -ENOENT;
3759 	}
3760 	return 0;
3761 }
3762 
3763 int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3764 			     struct mlx4_vhcr *vhcr,
3765 			     struct mlx4_cmd_mailbox *inbox,
3766 			     struct mlx4_cmd_mailbox *outbox,
3767 			     struct mlx4_cmd_info *cmd)
3768 {
3769 	int err;
3770 	struct mlx4_qp_context *qpc = inbox->buf + 8;
3771 	int qpn = vhcr->in_modifier & 0x7fffff;
3772 	struct res_qp *qp;
3773 	u8 orig_sched_queue;
3774 	__be32	orig_param3 = qpc->param3;
3775 	u8 orig_vlan_control = qpc->pri_path.vlan_control;
3776 	u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3777 	u8 orig_pri_path_fl = qpc->pri_path.fl;
3778 	u8 orig_vlan_index = qpc->pri_path.vlan_index;
3779 	u8 orig_feup = qpc->pri_path.feup;
3780 
3781 	err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3782 	if (err)
3783 		return err;
3784 	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
3785 	if (err)
3786 		return err;
3787 
3788 	if (roce_verify_mac(dev, slave, qpc, inbox))
3789 		return -EINVAL;
3790 
3791 	update_pkey_index(dev, slave, inbox);
3792 	update_gid(dev, inbox, (u8)slave);
3793 	adjust_proxy_tun_qkey(dev, vhcr, qpc);
3794 	orig_sched_queue = qpc->pri_path.sched_queue;
3795 
3796 	err = get_res(dev, slave, qpn, RES_QP, &qp);
3797 	if (err)
3798 		return err;
3799 	if (qp->com.from_state != RES_QP_HW) {
3800 		err = -EBUSY;
3801 		goto out;
3802 	}
3803 
3804 	err = update_vport_qp_param(dev, inbox, slave, qpn);
3805 	if (err)
3806 		goto out;
3807 
3808 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3809 out:
3810 	/* if no error, save sched queue value passed in by VF. This is
3811 	 * essentially the QOS value provided by the VF. This will be useful
3812 	 * if we allow dynamic changes from VST back to VGT
3813 	 */
3814 	if (!err) {
3815 		qp->sched_queue = orig_sched_queue;
3816 		qp->param3	= orig_param3;
3817 		qp->vlan_control = orig_vlan_control;
3818 		qp->fvl_rx	=  orig_fvl_rx;
3819 		qp->pri_path_fl = orig_pri_path_fl;
3820 		qp->vlan_index  = orig_vlan_index;
3821 		qp->feup	= orig_feup;
3822 	}
3823 	put_res(dev, slave, qpn, RES_QP);
3824 	return err;
3825 }
3826 
3827 int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3828 			    struct mlx4_vhcr *vhcr,
3829 			    struct mlx4_cmd_mailbox *inbox,
3830 			    struct mlx4_cmd_mailbox *outbox,
3831 			    struct mlx4_cmd_info *cmd)
3832 {
3833 	int err;
3834 	struct mlx4_qp_context *context = inbox->buf + 8;
3835 
3836 	err = adjust_qp_sched_queue(dev, slave, context, inbox);
3837 	if (err)
3838 		return err;
3839 	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
3840 	if (err)
3841 		return err;
3842 
3843 	update_pkey_index(dev, slave, inbox);
3844 	update_gid(dev, inbox, (u8)slave);
3845 	adjust_proxy_tun_qkey(dev, vhcr, context);
3846 	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3847 }
3848 
3849 int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3850 			    struct mlx4_vhcr *vhcr,
3851 			    struct mlx4_cmd_mailbox *inbox,
3852 			    struct mlx4_cmd_mailbox *outbox,
3853 			    struct mlx4_cmd_info *cmd)
3854 {
3855 	int err;
3856 	struct mlx4_qp_context *context = inbox->buf + 8;
3857 
3858 	err = adjust_qp_sched_queue(dev, slave, context, inbox);
3859 	if (err)
3860 		return err;
3861 	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
3862 	if (err)
3863 		return err;
3864 
3865 	update_pkey_index(dev, slave, inbox);
3866 	update_gid(dev, inbox, (u8)slave);
3867 	adjust_proxy_tun_qkey(dev, vhcr, context);
3868 	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3869 }
3870 
3871 
3872 int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3873 			      struct mlx4_vhcr *vhcr,
3874 			      struct mlx4_cmd_mailbox *inbox,
3875 			      struct mlx4_cmd_mailbox *outbox,
3876 			      struct mlx4_cmd_info *cmd)
3877 {
3878 	struct mlx4_qp_context *context = inbox->buf + 8;
3879 	int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3880 	if (err)
3881 		return err;
3882 	adjust_proxy_tun_qkey(dev, vhcr, context);
3883 	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3884 }
3885 
3886 int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3887 			    struct mlx4_vhcr *vhcr,
3888 			    struct mlx4_cmd_mailbox *inbox,
3889 			    struct mlx4_cmd_mailbox *outbox,
3890 			    struct mlx4_cmd_info *cmd)
3891 {
3892 	int err;
3893 	struct mlx4_qp_context *context = inbox->buf + 8;
3894 
3895 	err = adjust_qp_sched_queue(dev, slave, context, inbox);
3896 	if (err)
3897 		return err;
3898 	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
3899 	if (err)
3900 		return err;
3901 
3902 	adjust_proxy_tun_qkey(dev, vhcr, context);
3903 	update_gid(dev, inbox, (u8)slave);
3904 	update_pkey_index(dev, slave, inbox);
3905 	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3906 }
3907 
3908 int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3909 			    struct mlx4_vhcr *vhcr,
3910 			    struct mlx4_cmd_mailbox *inbox,
3911 			    struct mlx4_cmd_mailbox *outbox,
3912 			    struct mlx4_cmd_info *cmd)
3913 {
3914 	int err;
3915 	struct mlx4_qp_context *context = inbox->buf + 8;
3916 
3917 	err = adjust_qp_sched_queue(dev, slave, context, inbox);
3918 	if (err)
3919 		return err;
3920 	err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
3921 	if (err)
3922 		return err;
3923 
3924 	adjust_proxy_tun_qkey(dev, vhcr, context);
3925 	update_gid(dev, inbox, (u8)slave);
3926 	update_pkey_index(dev, slave, inbox);
3927 	return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3928 }
3929 
3930 int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3931 			 struct mlx4_vhcr *vhcr,
3932 			 struct mlx4_cmd_mailbox *inbox,
3933 			 struct mlx4_cmd_mailbox *outbox,
3934 			 struct mlx4_cmd_info *cmd)
3935 {
3936 	int err;
3937 	int qpn = vhcr->in_modifier & 0x7fffff;
3938 	struct res_qp *qp;
3939 
3940 	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3941 	if (err)
3942 		return err;
3943 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3944 	if (err)
3945 		goto ex_abort;
3946 
3947 	atomic_dec(&qp->mtt->ref_count);
3948 	atomic_dec(&qp->rcq->ref_count);
3949 	atomic_dec(&qp->scq->ref_count);
3950 	if (qp->srq)
3951 		atomic_dec(&qp->srq->ref_count);
3952 	res_end_move(dev, slave, RES_QP, qpn);
3953 	return 0;
3954 
3955 ex_abort:
3956 	res_abort_move(dev, slave, RES_QP, qpn);
3957 
3958 	return err;
3959 }
3960 
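/*
 * Per-QP multicast group bookkeeping.  find_gid() expects rqp->mcg_spl to be
 * held by the caller; add_mcg_res() and rem_mcg_res() take the lock
 * themselves.
 */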
3961 static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3962 				struct res_qp *rqp, u8 *gid)
3963 {
3964 	struct res_gid *res;
3965 
3966 	list_for_each_entry(res, &rqp->mcg_list, list) {
3967 		if (!memcmp(res->gid, gid, 16))
3968 			return res;
3969 	}
3970 	return NULL;
3971 }
3972 
3973 static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
3974 		       u8 *gid, enum mlx4_protocol prot,
3975 		       enum mlx4_steer_type steer, u64 reg_id)
3976 {
3977 	struct res_gid *res;
3978 	int err;
3979 
3980 	res = kzalloc(sizeof *res, GFP_KERNEL);
3981 	if (!res)
3982 		return -ENOMEM;
3983 
3984 	spin_lock_irq(&rqp->mcg_spl);
3985 	if (find_gid(dev, slave, rqp, gid)) {
3986 		kfree(res);
3987 		err = -EEXIST;
3988 	} else {
3989 		memcpy(res->gid, gid, 16);
3990 		res->prot = prot;
3991 		res->steer = steer;
3992 		res->reg_id = reg_id;
3993 		list_add_tail(&res->list, &rqp->mcg_list);
3994 		err = 0;
3995 	}
3996 	spin_unlock_irq(&rqp->mcg_spl);
3997 
3998 	return err;
3999 }
4000 
4001 static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
4002 		       u8 *gid, enum mlx4_protocol prot,
4003 		       enum mlx4_steer_type steer, u64 *reg_id)
4004 {
4005 	struct res_gid *res;
4006 	int err;
4007 
4008 	spin_lock_irq(&rqp->mcg_spl);
4009 	res = find_gid(dev, slave, rqp, gid);
4010 	if (!res || res->prot != prot || res->steer != steer)
4011 		err = -EINVAL;
4012 	else {
4013 		*reg_id = res->reg_id;
4014 		list_del(&res->list);
4015 		kfree(res);
4016 		err = 0;
4017 	}
4018 	spin_unlock_irq(&rqp->mcg_spl);
4019 
4020 	return err;
4021 }
4022 
4023 static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
4024 		     u8 gid[16], int block_loopback, enum mlx4_protocol prot,
4025 		     enum mlx4_steer_type type, u64 *reg_id)
4026 {
4027 	switch (dev->caps.steering_mode) {
4028 	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
4029 		int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4030 		if (port < 0)
4031 			return port;
4032 		return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
4033 						block_loopback, prot,
4034 						reg_id);
4035 	}
4036 	case MLX4_STEERING_MODE_B0:
4037 		if (prot == MLX4_PROT_ETH) {
4038 			int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4039 			if (port < 0)
4040 				return port;
4041 			gid[5] = port;
4042 		}
4043 		return mlx4_qp_attach_common(dev, qp, gid,
4044 					    block_loopback, prot, type);
4045 	default:
4046 		return -EINVAL;
4047 	}
4048 }
4049 
4050 static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
4051 		     u8 gid[16], enum mlx4_protocol prot,
4052 		     enum mlx4_steer_type type, u64 reg_id)
4053 {
4054 	switch (dev->caps.steering_mode) {
4055 	case MLX4_STEERING_MODE_DEVICE_MANAGED:
4056 		return mlx4_flow_detach(dev, reg_id);
4057 	case MLX4_STEERING_MODE_B0:
4058 		return mlx4_qp_detach_common(dev, qp, gid, prot, type);
4059 	default:
4060 		return -EINVAL;
4061 	}
4062 }
4063 
4064 static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
4065 			    u8 *gid, enum mlx4_protocol prot)
4066 {
4067 	int real_port;
4068 
4069 	if (prot != MLX4_PROT_ETH)
4070 		return 0;
4071 
4072 	if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
4073 	    dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
4074 		real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
4075 		if (real_port < 0)
4076 			return -EINVAL;
4077 		gid[5] = real_port;
4078 	}
4079 
4080 	return 0;
4081 }
4082 
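/*
 * Multicast attach/detach wrapper.  The in_modifier packs the QP number in
 * its low 24 bits, the protocol in bits 28-30 and the block-loopback flag in
 * bit 31, while op_modifier selects attach (non-zero) or detach.  Attached
 * GIDs are recorded on the QP's mcg_list so they can be released when the
 * slave's resources are torn down.
 */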
4083 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4084 			       struct mlx4_vhcr *vhcr,
4085 			       struct mlx4_cmd_mailbox *inbox,
4086 			       struct mlx4_cmd_mailbox *outbox,
4087 			       struct mlx4_cmd_info *cmd)
4088 {
4089 	struct mlx4_qp qp; /* dummy for calling attach/detach */
4090 	u8 *gid = inbox->buf;
4091 	enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
4092 	int err;
4093 	int qpn;
4094 	struct res_qp *rqp;
4095 	u64 reg_id = 0;
4096 	int attach = vhcr->op_modifier;
4097 	int block_loopback = vhcr->in_modifier >> 31;
4098 	u8 steer_type_mask = 2;
4099 	enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
4100 
4101 	qpn = vhcr->in_modifier & 0xffffff;
4102 	err = get_res(dev, slave, qpn, RES_QP, &rqp);
4103 	if (err)
4104 		return err;
4105 
4106 	qp.qpn = qpn;
4107 	if (attach) {
4108 		err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
4109 				type, &reg_id);
4110 		if (err) {
4111 			pr_err("Fail to attach rule to qp 0x%x\n", qpn);
4112 			goto ex_put;
4113 		}
4114 		err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
4115 		if (err)
4116 			goto ex_detach;
4117 	} else {
4118 		err = mlx4_adjust_port(dev, slave, gid, prot);
4119 		if (err)
4120 			goto ex_put;
4121 
4122 		err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
4123 		if (err)
4124 			goto ex_put;
4125 
4126 		err = qp_detach(dev, &qp, gid, prot, type, reg_id);
4127 		if (err)
4128 			pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
4129 			       qpn, (unsigned long long)reg_id);
4130 	}
4131 	put_res(dev, slave, qpn, RES_QP);
4132 	return err;
4133 
4134 ex_detach:
4135 	qp_detach(dev, &qp, gid, prot, type, reg_id);
4136 ex_put:
4137 	put_res(dev, slave, qpn, RES_QP);
4138 	return err;
4139 }
4140 
4141 /*
4142  * MAC validation for Flow Steering rules.
4143  * VF can attach rules only with a mac address which is assigned to it.
4144  */
4145 static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
4146 				   struct list_head *rlist)
4147 {
4148 	struct mac_res *res, *tmp;
4149 	__be64 be_mac;
4150 
4151 	/* make sure it isn't multicast or broadcast mac */
4152 	if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
4153 	    !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4154 		list_for_each_entry_safe(res, tmp, rlist, list) {
4155 			be_mac = cpu_to_be64(res->mac << 16);
4156 			if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
4157 				return 0;
4158 		}
4159 		pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
4160 		       eth_header->eth.dst_mac, slave);
4161 		return -EINVAL;
4162 	}
4163 	return 0;
4164 }
4165 
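/*
 * If the rule consists solely of an Ethernet header matching a multicast or
 * broadcast destination MAC, override its priority with MLX4_DOMAIN_NIC.
 */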
4166 static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
4167 					 struct _rule_hw *eth_header)
4168 {
4169 	if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
4170 	    is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4171 		struct mlx4_net_trans_rule_hw_eth *eth =
4172 			(struct mlx4_net_trans_rule_hw_eth *)eth_header;
4173 		struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
4174 		bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
4175 			next_rule->rsvd == 0;
4176 
4177 		if (last_rule)
4178 			ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
4179 	}
4180 }
4181 
4182 /*
4183  * In case of missing eth header, append eth header with a MAC address
4184  * assigned to the VF.
4185  */
4186 static int add_eth_header(struct mlx4_dev *dev, int slave,
4187 			  struct mlx4_cmd_mailbox *inbox,
4188 			  struct list_head *rlist, int header_id)
4189 {
4190 	struct mac_res *res, *tmp;
4191 	u8 port;
4192 	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4193 	struct mlx4_net_trans_rule_hw_eth *eth_header;
4194 	struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
4195 	struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
4196 	__be64 be_mac = 0;
4197 	__be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
4198 
4199 	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4200 	port = ctrl->port;
4201 	eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
4202 
4203 	/* Clear a space in the inbox for eth header */
4204 	switch (header_id) {
4205 	case MLX4_NET_TRANS_RULE_ID_IPV4:
4206 		ip_header =
4207 			(struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
4208 		memmove(ip_header, eth_header,
4209 			sizeof(*ip_header) + sizeof(*l4_header));
4210 		break;
4211 	case MLX4_NET_TRANS_RULE_ID_TCP:
4212 	case MLX4_NET_TRANS_RULE_ID_UDP:
4213 		l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
4214 			    (eth_header + 1);
4215 		memmove(l4_header, eth_header, sizeof(*l4_header));
4216 		break;
4217 	default:
4218 		return -EINVAL;
4219 	}
4220 	list_for_each_entry_safe(res, tmp, rlist, list) {
4221 		if (port == res->port) {
4222 			be_mac = cpu_to_be64(res->mac << 16);
4223 			break;
4224 		}
4225 	}
4226 	if (!be_mac) {
4227 		pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
4228 		       port);
4229 		return -EINVAL;
4230 	}
4231 
4232 	memset(eth_header, 0, sizeof(*eth_header));
4233 	eth_header->size = sizeof(*eth_header) >> 2;
4234 	eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
4235 	memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
4236 	memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
4237 
4238 	return 0;
4239 }
4240 
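/*
 * UPDATE_QP issued by a VF is restricted: qp_mask and the secondary address
 * path mask must be clear, and only the MAC index and the source-check
 * multicast-loopback bits of the primary address path may be modified.
 */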
4241 #define MLX4_UPD_QP_PATH_MASK_SUPPORTED      (                                \
4242 	1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX                     |\
4243 	1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)
4244 int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
4245 			   struct mlx4_vhcr *vhcr,
4246 			   struct mlx4_cmd_mailbox *inbox,
4247 			   struct mlx4_cmd_mailbox *outbox,
4248 			   struct mlx4_cmd_info *cmd_info)
4249 {
4250 	int err;
4251 	u32 qpn = vhcr->in_modifier & 0xffffff;
4252 	struct res_qp *rqp;
4253 	u64 mac;
4254 	unsigned port;
4255 	u64 pri_addr_path_mask;
4256 	struct mlx4_update_qp_context *cmd;
4257 	int smac_index;
4258 
4259 	cmd = (struct mlx4_update_qp_context *)inbox->buf;
4260 
4261 	pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
4262 	if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
4263 	    (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
4264 		return -EPERM;
4265 
4266 	if ((pri_addr_path_mask &
4267 	     (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)) &&
4268 		!(dev->caps.flags2 &
4269 		  MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
4270 		mlx4_warn(dev, "Src check LB for slave %d isn't supported\n",
4271 			  slave);
4272 		return -ENOTSUPP;
4273 	}
4274 
4275 	/* Just change the smac for the QP */
4276 	err = get_res(dev, slave, qpn, RES_QP, &rqp);
4277 	if (err) {
4278 		mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
4279 		return err;
4280 	}
4281 
4282 	port = (rqp->sched_queue >> 6 & 1) + 1;
4283 
4284 	if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
4285 		smac_index = cmd->qp_context.pri_path.grh_mylmc;
4286 		err = mac_find_smac_ix_in_slave(dev, slave, port,
4287 						smac_index, &mac);
4288 
4289 		if (err) {
4290 			mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4291 				 qpn, smac_index);
4292 			goto err_mac;
4293 		}
4294 	}
4295 
4296 	err = mlx4_cmd(dev, inbox->dma,
4297 		       vhcr->in_modifier, 0,
4298 		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
4299 		       MLX4_CMD_NATIVE);
4300 	if (err) {
4301 		mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
4302 		goto err_mac;
4303 	}
4304 
4305 err_mac:
4306 	put_res(dev, slave, qpn, RES_QP);
4307 	return err;
4308 }
4309 
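/*
 * Size in bytes of a flow-steering attach mailbox: the control segment plus
 * every chained rule header, whose hardware size field counts 32-bit words.
 */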
4310 static u32 qp_attach_mbox_size(void *mbox)
4311 {
4312 	u32 size = sizeof(struct mlx4_net_trans_rule_hw_ctrl);
4313 	struct _rule_hw  *rule_header;
4314 
4315 	rule_header = (struct _rule_hw *)(mbox + size);
4316 
4317 	while (rule_header->size) {
4318 		size += rule_header->size * sizeof(u32);
4319 		rule_header += 1;
4320 	}
4321 	return size;
4322 }
4323 
4324 static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule);
4325 
4326 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4327 					 struct mlx4_vhcr *vhcr,
4328 					 struct mlx4_cmd_mailbox *inbox,
4329 					 struct mlx4_cmd_mailbox *outbox,
4330 					 struct mlx4_cmd_info *cmd)
4331 {
4332 
4333 	struct mlx4_priv *priv = mlx4_priv(dev);
4334 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4335 	struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
4336 	int err;
4337 	int qpn;
4338 	struct res_qp *rqp;
4339 	struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4340 	struct _rule_hw  *rule_header;
4341 	int header_id;
4342 	struct res_fs_rule *rrule;
4343 	u32 mbox_size;
4344 
4345 	if (dev->caps.steering_mode !=
4346 	    MLX4_STEERING_MODE_DEVICE_MANAGED)
4347 		return -EOPNOTSUPP;
4348 
4349 	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
4350 	err = mlx4_slave_convert_port(dev, slave, ctrl->port);
4351 	if (err <= 0)
4352 		return -EINVAL;
4353 	ctrl->port = err;
4354 	qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
4355 	err = get_res(dev, slave, qpn, RES_QP, &rqp);
4356 	if (err) {
4357 		pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
4358 		return err;
4359 	}
4360 	rule_header = (struct _rule_hw *)(ctrl + 1);
4361 	header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4362 
4363 	if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
4364 		handle_eth_header_mcast_prio(ctrl, rule_header);
4365 
4366 	if (slave == dev->caps.function)
4367 		goto execute;
4368 
4369 	switch (header_id) {
4370 	case MLX4_NET_TRANS_RULE_ID_ETH:
4371 		if (validate_eth_header_mac(slave, rule_header, rlist)) {
4372 			err = -EINVAL;
4373 			goto err_put_qp;
4374 		}
4375 		break;
4376 	case MLX4_NET_TRANS_RULE_ID_IB:
4377 		break;
4378 	case MLX4_NET_TRANS_RULE_ID_IPV4:
4379 	case MLX4_NET_TRANS_RULE_ID_TCP:
4380 	case MLX4_NET_TRANS_RULE_ID_UDP:
4381 		pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
4382 		if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4383 			err = -EINVAL;
4384 			goto err_put_qp;
4385 		}
4386 		vhcr->in_modifier +=
4387 			sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4388 		break;
4389 	default:
4390 		pr_err("Corrupted mailbox\n");
4391 		err = -EINVAL;
4392 		goto err_put_qp;
4393 	}
4394 
4395 execute:
4396 	err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4397 			   vhcr->in_modifier, 0,
4398 			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4399 			   MLX4_CMD_NATIVE);
4400 	if (err)
4401 		goto err_put_qp;
4402 
4403 
4404 	err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
4405 	if (err) {
4406 		mlx4_err(dev, "Fail to add flow steering resources\n");
4407 		goto err_detach;
4408 	}
4409 
4410 	err = get_res(dev, slave, vhcr->out_param, RES_FS_RULE, &rrule);
4411 	if (err)
4412 		goto err_detach;
4413 
4414 	mbox_size = qp_attach_mbox_size(inbox->buf);
4415 	rrule->mirr_mbox = kmalloc(mbox_size, GFP_KERNEL);
4416 	if (!rrule->mirr_mbox) {
4417 		err = -ENOMEM;
4418 		goto err_put_rule;
4419 	}
4420 	rrule->mirr_mbox_size = mbox_size;
4421 	rrule->mirr_rule_id = 0;
4422 	memcpy(rrule->mirr_mbox, inbox->buf, mbox_size);
4423 
4424 	/* set different port */
4425 	ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)rrule->mirr_mbox;
4426 	if (ctrl->port == 1)
4427 		ctrl->port = 2;
4428 	else
4429 		ctrl->port = 1;
4430 
4431 	if (mlx4_is_bonded(dev))
4432 		mlx4_do_mirror_rule(dev, rrule);
4433 
4434 	atomic_inc(&rqp->ref_count);
4435 
4436 err_put_rule:
4437 	put_res(dev, slave, vhcr->out_param, RES_FS_RULE);
4438 err_detach:
4439 	/* detach rule on error */
4440 	if (err)
4441 		mlx4_cmd(dev, vhcr->out_param, 0, 0,
4442 			 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4443 			 MLX4_CMD_NATIVE);
4444 err_put_qp:
4445 	put_res(dev, slave, qpn, RES_QP);
4446 	return err;
4447 }
4448 
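/*
 * Tear down a rule that was mirrored to the other port: drop its entry from
 * the resource tracker and issue the firmware detach command.
 */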
4449 static int mlx4_undo_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
4450 {
4451 	int err;
4452 
4453 	err = rem_res_range(dev, fs_rule->com.owner, fs_rule->com.res_id, 1, RES_FS_RULE, 0);
4454 	if (err) {
4455 		mlx4_err(dev, "Fail to remove flow steering resources\n");
4456 		return err;
4457 	}
4458 
4459 	mlx4_cmd(dev, fs_rule->com.res_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
4460 		 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
4461 	return 0;
4462 }
4463 
4464 int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4465 					 struct mlx4_vhcr *vhcr,
4466 					 struct mlx4_cmd_mailbox *inbox,
4467 					 struct mlx4_cmd_mailbox *outbox,
4468 					 struct mlx4_cmd_info *cmd)
4469 {
4470 	int err;
4471 	struct res_qp *rqp;
4472 	struct res_fs_rule *rrule;
4473 	u64 mirr_reg_id;
4474 	int qpn;
4475 
4476 	if (dev->caps.steering_mode !=
4477 	    MLX4_STEERING_MODE_DEVICE_MANAGED)
4478 		return -EOPNOTSUPP;
4479 
4480 	err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4481 	if (err)
4482 		return err;
4483 
4484 	if (!rrule->mirr_mbox) {
4485 		mlx4_err(dev, "Mirror rules cannot be removed explicitly\n");
4486 		put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4487 		return -EINVAL;
4488 	}
4489 	mirr_reg_id = rrule->mirr_rule_id;
4490 	kfree(rrule->mirr_mbox);
4491 	qpn = rrule->qpn;
4492 
4493 	/* Release the rule from busy state before removal */
4494 	put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4495 	err = get_res(dev, slave, qpn, RES_QP, &rqp);
4496 	if (err)
4497 		return err;
4498 
4499 	if (mirr_reg_id && mlx4_is_bonded(dev)) {
4500 		err = get_res(dev, slave, mirr_reg_id, RES_FS_RULE, &rrule);
4501 		if (err) {
4502 			mlx4_err(dev, "Fail to get resource of mirror rule\n");
4503 		} else {
4504 			put_res(dev, slave, mirr_reg_id, RES_FS_RULE);
4505 			mlx4_undo_mirror_rule(dev, rrule);
4506 		}
4507 	}
4508 	err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4509 	if (err) {
4510 		mlx4_err(dev, "Fail to remove flow steering resources\n");
4511 		goto out;
4512 	}
4513 
4514 	err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4515 		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4516 		       MLX4_CMD_NATIVE);
4517 	if (!err)
4518 		atomic_dec(&rqp->ref_count);
4519 out:
4520 	put_res(dev, slave, qpn, RES_QP);
4521 	return err;
4522 }
4523 
4524 enum {
4525 	BUSY_MAX_RETRIES = 10
4526 };
4527 
4528 int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4529 			       struct mlx4_vhcr *vhcr,
4530 			       struct mlx4_cmd_mailbox *inbox,
4531 			       struct mlx4_cmd_mailbox *outbox,
4532 			       struct mlx4_cmd_info *cmd)
4533 {
4534 	int err;
4535 	int index = vhcr->in_modifier & 0xffff;
4536 
4537 	err = get_res(dev, slave, index, RES_COUNTER, NULL);
4538 	if (err)
4539 		return err;
4540 
4541 	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4542 	put_res(dev, slave, index, RES_COUNTER);
4543 	return err;
4544 }
4545 
4546 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4547 {
4548 	struct res_gid *rgid;
4549 	struct res_gid *tmp;
4550 	struct mlx4_qp qp; /* dummy for calling attach/detach */
4551 
4552 	list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
4553 		switch (dev->caps.steering_mode) {
4554 		case MLX4_STEERING_MODE_DEVICE_MANAGED:
4555 			mlx4_flow_detach(dev, rgid->reg_id);
4556 			break;
4557 		case MLX4_STEERING_MODE_B0:
4558 			qp.qpn = rqp->local_qpn;
4559 			(void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4560 						     rgid->prot, rgid->steer);
4561 			break;
4562 		}
4563 		list_del(&rgid->list);
4564 		kfree(rgid);
4565 	}
4566 }
4567 
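/*
 * Mark every resource of the given type owned by @slave as busy/removing so
 * no further command can claim it during cleanup.  Returns the number of
 * entries that could not be claimed because they are busy elsewhere;
 * move_all_busy() below keeps retrying this sweep for up to five seconds.
 */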
4568 static int _move_all_busy(struct mlx4_dev *dev, int slave,
4569 			  enum mlx4_resource type, int print)
4570 {
4571 	struct mlx4_priv *priv = mlx4_priv(dev);
4572 	struct mlx4_resource_tracker *tracker =
4573 		&priv->mfunc.master.res_tracker;
4574 	struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4575 	struct res_common *r;
4576 	struct res_common *tmp;
4577 	int busy;
4578 
4579 	busy = 0;
4580 	spin_lock_irq(mlx4_tlock(dev));
4581 	list_for_each_entry_safe(r, tmp, rlist, list) {
4582 		if (r->owner == slave) {
4583 			if (!r->removing) {
4584 				if (r->state == RES_ANY_BUSY) {
4585 					if (print)
4586 						mlx4_dbg(dev,
4587 							 "%s id 0x%llx is busy\n",
4588 							  resource_str(type),
4589 							  (long long)r->res_id);
4590 					++busy;
4591 				} else {
4592 					r->from_state = r->state;
4593 					r->state = RES_ANY_BUSY;
4594 					r->removing = 1;
4595 				}
4596 			}
4597 		}
4598 	}
4599 	spin_unlock_irq(mlx4_tlock(dev));
4600 
4601 	return busy;
4602 }
4603 
4604 static int move_all_busy(struct mlx4_dev *dev, int slave,
4605 			 enum mlx4_resource type)
4606 {
4607 	unsigned long begin;
4608 	int busy;
4609 
4610 	begin = jiffies;
4611 	do {
4612 		busy = _move_all_busy(dev, slave, type, 0);
4613 		if (time_after(jiffies, begin + 5 * HZ))
4614 			break;
4615 		if (busy)
4616 			cond_resched();
4617 	} while (busy);
4618 
4619 	if (busy)
4620 		busy = _move_all_busy(dev, slave, type, 1);
4621 
4622 	return busy;
4623 }
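
/*
 * Per-type cleanup helpers invoked when a slave's resources are deleted.
 * Each one walks the slave's resource list and unwinds whatever state the
 * object is in; e.g. a QP in RES_QP_HW is first moved back to reset with
 * MLX4_CMD_2RST_QP and its CQ/SCQ/MTT/SRQ reference counts are dropped, then
 * its ICM is freed, and finally the QP range is released to the allocator.
 */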
4624 static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4625 {
4626 	struct mlx4_priv *priv = mlx4_priv(dev);
4627 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4628 	struct list_head *qp_list =
4629 		&tracker->slave_list[slave].res_list[RES_QP];
4630 	struct res_qp *qp;
4631 	struct res_qp *tmp;
4632 	int state;
4633 	u64 in_param;
4634 	int qpn;
4635 	int err;
4636 
4637 	err = move_all_busy(dev, slave, RES_QP);
4638 	if (err)
4639 		mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4640 			  slave);
4641 
4642 	spin_lock_irq(mlx4_tlock(dev));
4643 	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4644 		spin_unlock_irq(mlx4_tlock(dev));
4645 		if (qp->com.owner == slave) {
4646 			qpn = qp->com.res_id;
4647 			detach_qp(dev, slave, qp);
4648 			state = qp->com.from_state;
4649 			while (state != 0) {
4650 				switch (state) {
4651 				case RES_QP_RESERVED:
4652 					spin_lock_irq(mlx4_tlock(dev));
4653 					rb_erase(&qp->com.node,
4654 						 &tracker->res_tree[RES_QP]);
4655 					list_del(&qp->com.list);
4656 					spin_unlock_irq(mlx4_tlock(dev));
4657 					if (!valid_reserved(dev, slave, qpn)) {
4658 						__mlx4_qp_release_range(dev, qpn, 1);
4659 						mlx4_release_resource(dev, slave,
4660 								      RES_QP, 1, 0);
4661 					}
4662 					kfree(qp);
4663 					state = 0;
4664 					break;
4665 				case RES_QP_MAPPED:
4666 					if (!valid_reserved(dev, slave, qpn))
4667 						__mlx4_qp_free_icm(dev, qpn);
4668 					state = RES_QP_RESERVED;
4669 					break;
4670 				case RES_QP_HW:
4671 					in_param = slave;
4672 					err = mlx4_cmd(dev, in_param,
4673 						       qp->local_qpn, 2,
4674 						       MLX4_CMD_2RST_QP,
4675 						       MLX4_CMD_TIME_CLASS_A,
4676 						       MLX4_CMD_NATIVE);
4677 					if (err)
4678 						mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4679 							 slave, qp->local_qpn);
4680 					atomic_dec(&qp->rcq->ref_count);
4681 					atomic_dec(&qp->scq->ref_count);
4682 					atomic_dec(&qp->mtt->ref_count);
4683 					if (qp->srq)
4684 						atomic_dec(&qp->srq->ref_count);
4685 					state = RES_QP_MAPPED;
4686 					break;
4687 				default:
4688 					state = 0;
4689 				}
4690 			}
4691 		}
4692 		spin_lock_irq(mlx4_tlock(dev));
4693 	}
4694 	spin_unlock_irq(mlx4_tlock(dev));
4695 }
4696 
4697 static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4698 {
4699 	struct mlx4_priv *priv = mlx4_priv(dev);
4700 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4701 	struct list_head *srq_list =
4702 		&tracker->slave_list[slave].res_list[RES_SRQ];
4703 	struct res_srq *srq;
4704 	struct res_srq *tmp;
4705 	int state;
4706 	u64 in_param;
4707 	LIST_HEAD(tlist);
4708 	int srqn;
4709 	int err;
4710 
4711 	err = move_all_busy(dev, slave, RES_SRQ);
4712 	if (err)
4713 		mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4714 			  slave);
4715 
4716 	spin_lock_irq(mlx4_tlock(dev));
4717 	list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4718 		spin_unlock_irq(mlx4_tlock(dev));
4719 		if (srq->com.owner == slave) {
4720 			srqn = srq->com.res_id;
4721 			state = srq->com.from_state;
4722 			while (state != 0) {
4723 				switch (state) {
4724 				case RES_SRQ_ALLOCATED:
4725 					__mlx4_srq_free_icm(dev, srqn);
4726 					spin_lock_irq(mlx4_tlock(dev));
4727 					rb_erase(&srq->com.node,
4728 						 &tracker->res_tree[RES_SRQ]);
4729 					list_del(&srq->com.list);
4730 					spin_unlock_irq(mlx4_tlock(dev));
4731 					mlx4_release_resource(dev, slave,
4732 							      RES_SRQ, 1, 0);
4733 					kfree(srq);
4734 					state = 0;
4735 					break;
4736 
4737 				case RES_SRQ_HW:
4738 					in_param = slave;
4739 					err = mlx4_cmd(dev, in_param, srqn, 1,
4740 						       MLX4_CMD_HW2SW_SRQ,
4741 						       MLX4_CMD_TIME_CLASS_A,
4742 						       MLX4_CMD_NATIVE);
4743 					if (err)
4744 						mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
4745 							 slave, srqn);
4746 
4747 					atomic_dec(&srq->mtt->ref_count);
4748 					if (srq->cq)
4749 						atomic_dec(&srq->cq->ref_count);
4750 					state = RES_SRQ_ALLOCATED;
4751 					break;
4752 
4753 				default:
4754 					state = 0;
4755 				}
4756 			}
4757 		}
4758 		spin_lock_irq(mlx4_tlock(dev));
4759 	}
4760 	spin_unlock_irq(mlx4_tlock(dev));
4761 }
4762 
4763 static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4764 {
4765 	struct mlx4_priv *priv = mlx4_priv(dev);
4766 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4767 	struct list_head *cq_list =
4768 		&tracker->slave_list[slave].res_list[RES_CQ];
4769 	struct res_cq *cq;
4770 	struct res_cq *tmp;
4771 	int state;
4772 	u64 in_param;
4773 	LIST_HEAD(tlist);
4774 	int cqn;
4775 	int err;
4776 
4777 	err = move_all_busy(dev, slave, RES_CQ);
4778 	if (err)
4779 		mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4780 			  slave);
4781 
4782 	spin_lock_irq(mlx4_tlock(dev));
4783 	list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4784 		spin_unlock_irq(mlx4_tlock(dev));
4785 		if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4786 			cqn = cq->com.res_id;
4787 			state = cq->com.from_state;
4788 			while (state != 0) {
4789 				switch (state) {
4790 				case RES_CQ_ALLOCATED:
4791 					__mlx4_cq_free_icm(dev, cqn);
4792 					spin_lock_irq(mlx4_tlock(dev));
4793 					rb_erase(&cq->com.node,
4794 						 &tracker->res_tree[RES_CQ]);
4795 					list_del(&cq->com.list);
4796 					spin_unlock_irq(mlx4_tlock(dev));
4797 					mlx4_release_resource(dev, slave,
4798 							      RES_CQ, 1, 0);
4799 					kfree(cq);
4800 					state = 0;
4801 					break;
4802 
4803 				case RES_CQ_HW:
4804 					in_param = slave;
4805 					err = mlx4_cmd(dev, in_param, cqn, 1,
4806 						       MLX4_CMD_HW2SW_CQ,
4807 						       MLX4_CMD_TIME_CLASS_A,
4808 						       MLX4_CMD_NATIVE);
4809 					if (err)
4810 						mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
4811 							 slave, cqn);
4812 					atomic_dec(&cq->mtt->ref_count);
4813 					state = RES_CQ_ALLOCATED;
4814 					break;
4815 
4816 				default:
4817 					state = 0;
4818 				}
4819 			}
4820 		}
4821 		spin_lock_irq(mlx4_tlock(dev));
4822 	}
4823 	spin_unlock_irq(mlx4_tlock(dev));
4824 }
4825 
4826 static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4827 {
4828 	struct mlx4_priv *priv = mlx4_priv(dev);
4829 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4830 	struct list_head *mpt_list =
4831 		&tracker->slave_list[slave].res_list[RES_MPT];
4832 	struct res_mpt *mpt;
4833 	struct res_mpt *tmp;
4834 	int state;
4835 	u64 in_param;
4836 	LIST_HEAD(tlist);
4837 	int mptn;
4838 	int err;
4839 
4840 	err = move_all_busy(dev, slave, RES_MPT);
4841 	if (err)
4842 		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4843 			  slave);
4844 
4845 	spin_lock_irq(mlx4_tlock(dev));
4846 	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4847 		spin_unlock_irq(mlx4_tlock(dev));
4848 		if (mpt->com.owner == slave) {
4849 			mptn = mpt->com.res_id;
4850 			state = mpt->com.from_state;
4851 			while (state != 0) {
4852 				switch (state) {
4853 				case RES_MPT_RESERVED:
4854 					__mlx4_mpt_release(dev, mpt->key);
4855 					spin_lock_irq(mlx4_tlock(dev));
4856 					rb_erase(&mpt->com.node,
4857 						 &tracker->res_tree[RES_MPT]);
4858 					list_del(&mpt->com.list);
4859 					spin_unlock_irq(mlx4_tlock(dev));
4860 					mlx4_release_resource(dev, slave,
4861 							      RES_MPT, 1, 0);
4862 					kfree(mpt);
4863 					state = 0;
4864 					break;
4865 
4866 				case RES_MPT_MAPPED:
4867 					__mlx4_mpt_free_icm(dev, mpt->key);
4868 					state = RES_MPT_RESERVED;
4869 					break;
4870 
4871 				case RES_MPT_HW:
4872 					in_param = slave;
4873 					err = mlx4_cmd(dev, in_param, mptn, 0,
4874 						     MLX4_CMD_HW2SW_MPT,
4875 						     MLX4_CMD_TIME_CLASS_A,
4876 						     MLX4_CMD_NATIVE);
4877 					if (err)
4878 						mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
4879 							 slave, mptn);
4880 					if (mpt->mtt)
4881 						atomic_dec(&mpt->mtt->ref_count);
4882 					state = RES_MPT_MAPPED;
4883 					break;
4884 				default:
4885 					state = 0;
4886 				}
4887 			}
4888 		}
4889 		spin_lock_irq(mlx4_tlock(dev));
4890 	}
4891 	spin_unlock_irq(mlx4_tlock(dev));
4892 }
4893 
4894 static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4895 {
4896 	struct mlx4_priv *priv = mlx4_priv(dev);
4897 	struct mlx4_resource_tracker *tracker =
4898 		&priv->mfunc.master.res_tracker;
4899 	struct list_head *mtt_list =
4900 		&tracker->slave_list[slave].res_list[RES_MTT];
4901 	struct res_mtt *mtt;
4902 	struct res_mtt *tmp;
4903 	int state;
4904 	LIST_HEAD(tlist);
4905 	int base;
4906 	int err;
4907 
4908 	err = move_all_busy(dev, slave, RES_MTT);
4909 	if (err)
4910 		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts  - too busy for slave %d\n",
4911 			  slave);
4912 
4913 	spin_lock_irq(mlx4_tlock(dev));
4914 	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4915 		spin_unlock_irq(mlx4_tlock(dev));
4916 		if (mtt->com.owner == slave) {
4917 			base = mtt->com.res_id;
4918 			state = mtt->com.from_state;
4919 			while (state != 0) {
4920 				switch (state) {
4921 				case RES_MTT_ALLOCATED:
4922 					__mlx4_free_mtt_range(dev, base,
4923 							      mtt->order);
4924 					spin_lock_irq(mlx4_tlock(dev));
4925 					rb_erase(&mtt->com.node,
4926 						 &tracker->res_tree[RES_MTT]);
4927 					list_del(&mtt->com.list);
4928 					spin_unlock_irq(mlx4_tlock(dev));
4929 					mlx4_release_resource(dev, slave, RES_MTT,
4930 							      1 << mtt->order, 0);
4931 					kfree(mtt);
4932 					state = 0;
4933 					break;
4934 
4935 				default:
4936 					state = 0;
4937 				}
4938 			}
4939 		}
4940 		spin_lock_irq(mlx4_tlock(dev));
4941 	}
4942 	spin_unlock_irq(mlx4_tlock(dev));
4943 }
4944 
4945 static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
4946 {
4947 	struct mlx4_cmd_mailbox *mailbox;
4948 	int err;
4949 	struct res_fs_rule *mirr_rule;
4950 	u64 reg_id;
4951 
4952 	mailbox = mlx4_alloc_cmd_mailbox(dev);
4953 	if (IS_ERR(mailbox))
4954 		return PTR_ERR(mailbox);
4955 
4956 	if (!fs_rule->mirr_mbox) {
4957 		mlx4_err(dev, "rule mirroring mailbox is null\n");
		/* don't leak the command mailbox allocated above */
		mlx4_free_cmd_mailbox(dev, mailbox);
4958 		return -EINVAL;
4959 	}
4960 	memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size);
4961 	err = mlx4_cmd_imm(dev, mailbox->dma, &reg_id, fs_rule->mirr_mbox_size >> 2, 0,
4962 			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4963 			   MLX4_CMD_NATIVE);
4964 	mlx4_free_cmd_mailbox(dev, mailbox);
4965 
4966 	if (err)
4967 		goto err;
4968 
4969 	err = add_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, fs_rule->qpn);
4970 	if (err)
4971 		goto err_detach;
4972 
4973 	err = get_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE, &mirr_rule);
4974 	if (err)
4975 		goto err_rem;
4976 
4977 	fs_rule->mirr_rule_id = reg_id;
4978 	mirr_rule->mirr_rule_id = 0;
4979 	mirr_rule->mirr_mbox_size = 0;
4980 	mirr_rule->mirr_mbox = NULL;
4981 	put_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE);
4982 
4983 	return 0;
4984 err_rem:
4985 	rem_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, 0);
4986 err_detach:
4987 	mlx4_cmd(dev, reg_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
4988 		 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
4989 err:
4990 	return err;
4991 }
4992 
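/*
 * Mirror (or un-mirror) flow-steering rules when the two ports are bonded:
 * with bond == true, every rule that still carries its saved attach mailbox
 * is duplicated on the other port via mlx4_do_mirror_rule(); with
 * bond == false, the mirror copies (which carry no saved mailbox) are
 * removed again.
 */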
4993 static int mlx4_mirror_fs_rules(struct mlx4_dev *dev, bool bond)
4994 {
4995 	struct mlx4_priv *priv = mlx4_priv(dev);
4996 	struct mlx4_resource_tracker *tracker =
4997 		&priv->mfunc.master.res_tracker;
4998 	struct rb_root *root = &tracker->res_tree[RES_FS_RULE];
4999 	struct rb_node *p;
5000 	struct res_fs_rule *fs_rule;
5001 	int err = 0;
5002 	LIST_HEAD(mirr_list);
5003 
5004 	for (p = rb_first(root); p; p = rb_next(p)) {
5005 		fs_rule = rb_entry(p, struct res_fs_rule, com.node);
5006 		if ((bond && fs_rule->mirr_mbox_size) ||
5007 		    (!bond && !fs_rule->mirr_mbox_size))
5008 			list_add_tail(&fs_rule->mirr_list, &mirr_list);
5009 	}
5010 
5011 	list_for_each_entry(fs_rule, &mirr_list, mirr_list) {
5012 		if (bond)
5013 			err += mlx4_do_mirror_rule(dev, fs_rule);
5014 		else
5015 			err += mlx4_undo_mirror_rule(dev, fs_rule);
5016 	}
5017 	return err;
5018 }
5019 
5020 int mlx4_bond_fs_rules(struct mlx4_dev *dev)
5021 {
5022 	return mlx4_mirror_fs_rules(dev, true);
5023 }
5024 
5025 int mlx4_unbond_fs_rules(struct mlx4_dev *dev)
5026 {
5027 	return mlx4_mirror_fs_rules(dev, false);
5028 }
5029 
5030 static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
5031 {
5032 	struct mlx4_priv *priv = mlx4_priv(dev);
5033 	struct mlx4_resource_tracker *tracker =
5034 		&priv->mfunc.master.res_tracker;
5035 	struct list_head *fs_rule_list =
5036 		&tracker->slave_list[slave].res_list[RES_FS_RULE];
5037 	struct res_fs_rule *fs_rule;
5038 	struct res_fs_rule *tmp;
5039 	int state;
5040 	u64 base;
5041 	int err;
5042 
5043 	err = move_all_busy(dev, slave, RES_FS_RULE);
5044 	if (err)
5045 		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all mtts to busy for slave %d\n",
5046 			  slave);
5047 
5048 	spin_lock_irq(mlx4_tlock(dev));
5049 	list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
5050 		spin_unlock_irq(mlx4_tlock(dev));
5051 		if (fs_rule->com.owner == slave) {
5052 			base = fs_rule->com.res_id;
5053 			state = fs_rule->com.from_state;
5054 			while (state != 0) {
5055 				switch (state) {
5056 				case RES_FS_RULE_ALLOCATED:
5057 					/* detach rule */
5058 					err = mlx4_cmd(dev, base, 0, 0,
5059 						       MLX4_QP_FLOW_STEERING_DETACH,
5060 						       MLX4_CMD_TIME_CLASS_A,
5061 						       MLX4_CMD_NATIVE);
5062 
5063 					spin_lock_irq(mlx4_tlock(dev));
5064 					rb_erase(&fs_rule->com.node,
5065 						 &tracker->res_tree[RES_FS_RULE]);
5066 					list_del(&fs_rule->com.list);
5067 					spin_unlock_irq(mlx4_tlock(dev));
5068 					kfree(fs_rule->mirr_mbox);
5069 					kfree(fs_rule);
5070 					state = 0;
5071 					break;
5072 
5073 				default:
5074 					state = 0;
5075 				}
5076 			}
5077 		}
5078 		spin_lock_irq(mlx4_tlock(dev));
5079 	}
5080 	spin_unlock_irq(mlx4_tlock(dev));
5081 }
5082 
5083 static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
5084 {
5085 	struct mlx4_priv *priv = mlx4_priv(dev);
5086 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5087 	struct list_head *eq_list =
5088 		&tracker->slave_list[slave].res_list[RES_EQ];
5089 	struct res_eq *eq;
5090 	struct res_eq *tmp;
5091 	int err;
5092 	int state;
5093 	LIST_HEAD(tlist);
5094 	int eqn;
5095 
5096 	err = move_all_busy(dev, slave, RES_EQ);
5097 	if (err)
5098 		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
5099 			  slave);
5100 
5101 	spin_lock_irq(mlx4_tlock(dev));
5102 	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
5103 		spin_unlock_irq(mlx4_tlock(dev));
5104 		if (eq->com.owner == slave) {
5105 			eqn = eq->com.res_id;
5106 			state = eq->com.from_state;
5107 			while (state != 0) {
5108 				switch (state) {
5109 				case RES_EQ_RESERVED:
5110 					spin_lock_irq(mlx4_tlock(dev));
5111 					rb_erase(&eq->com.node,
5112 						 &tracker->res_tree[RES_EQ]);
5113 					list_del(&eq->com.list);
5114 					spin_unlock_irq(mlx4_tlock(dev));
5115 					kfree(eq);
5116 					state = 0;
5117 					break;
5118 
5119 				case RES_EQ_HW:
5120 					err = mlx4_cmd(dev, slave, eqn & 0x3ff,
5121 						       1, MLX4_CMD_HW2SW_EQ,
5122 						       MLX4_CMD_TIME_CLASS_A,
5123 						       MLX4_CMD_NATIVE);
5124 					if (err)
5125 						mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
5126 							 slave, eqn & 0x3ff);
5127 					atomic_dec(&eq->mtt->ref_count);
5128 					state = RES_EQ_RESERVED;
5129 					break;
5130 
5131 				default:
5132 					state = 0;
5133 				}
5134 			}
5135 		}
5136 		spin_lock_irq(mlx4_tlock(dev));
5137 	}
5138 	spin_unlock_irq(mlx4_tlock(dev));
5139 }
5140 
5141 static void rem_slave_counters(struct mlx4_dev *dev, int slave)
5142 {
5143 	struct mlx4_priv *priv = mlx4_priv(dev);
5144 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5145 	struct list_head *counter_list =
5146 		&tracker->slave_list[slave].res_list[RES_COUNTER];
5147 	struct res_counter *counter;
5148 	struct res_counter *tmp;
5149 	int err;
5150 	int *counters_arr = NULL;
5151 	int i, j;
5152 
5153 	err = move_all_busy(dev, slave, RES_COUNTER);
5154 	if (err)
5155 		mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
5156 			  slave);
5157 
5158 	counters_arr = kmalloc_array(dev->caps.max_counters,
5159 				     sizeof(*counters_arr), GFP_KERNEL);
5160 	if (!counters_arr)
5161 		return;
5162 
5163 	do {
5164 		i = 0;
5165 		j = 0;
5166 		spin_lock_irq(mlx4_tlock(dev));
5167 		list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
5168 			if (counter->com.owner == slave) {
5169 				counters_arr[i++] = counter->com.res_id;
5170 				rb_erase(&counter->com.node,
5171 					 &tracker->res_tree[RES_COUNTER]);
5172 				list_del(&counter->com.list);
5173 				kfree(counter);
5174 			}
5175 		}
5176 		spin_unlock_irq(mlx4_tlock(dev));
5177 
5178 		while (j < i) {
5179 			__mlx4_counter_free(dev, counters_arr[j++]);
5180 			mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
5181 		}
5182 	} while (i);
5183 
5184 	kfree(counters_arr);
5185 }
5186 
5187 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
5188 {
5189 	struct mlx4_priv *priv = mlx4_priv(dev);
5190 	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
5191 	struct list_head *xrcdn_list =
5192 		&tracker->slave_list[slave].res_list[RES_XRCD];
5193 	struct res_xrcdn *xrcd;
5194 	struct res_xrcdn *tmp;
5195 	int err;
5196 	int xrcdn;
5197 
5198 	err = move_all_busy(dev, slave, RES_XRCD);
5199 	if (err)
5200 		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
5201 			  slave);
5202 
5203 	spin_lock_irq(mlx4_tlock(dev));
5204 	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
5205 		if (xrcd->com.owner == slave) {
5206 			xrcdn = xrcd->com.res_id;
5207 			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
5208 			list_del(&xrcd->com.list);
5209 			kfree(xrcd);
5210 			__mlx4_xrcd_free(dev, xrcdn);
5211 		}
5212 	}
5213 	spin_unlock_irq(mlx4_tlock(dev));
5214 }
5215 
5216 void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
5217 {
5218 	struct mlx4_priv *priv = mlx4_priv(dev);
5219 	mlx4_reset_roce_gids(dev, slave);
5220 	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5221 	rem_slave_vlans(dev, slave);
5222 	rem_slave_macs(dev, slave);
5223 	rem_slave_fs_rule(dev, slave);
5224 	rem_slave_qps(dev, slave);
5225 	rem_slave_srqs(dev, slave);
5226 	rem_slave_cqs(dev, slave);
5227 	rem_slave_mrs(dev, slave);
5228 	rem_slave_eqs(dev, slave);
5229 	rem_slave_mtts(dev, slave);
5230 	rem_slave_counters(dev, slave);
5231 	rem_slave_xrcdns(dev, slave);
5232 	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
5233 }
5234 
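/*
 * Deferred work used by the PF to push an immediate VLAN/QoS (VST) change to
 * a VF's Ethernet QPs via UPDATE_QP.  When the VF is moved back to VGT
 * (work->vlan_id == MLX4_VGT), the QP parameters saved at INIT2RTR time are
 * restored instead.
 */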
5235 void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
5236 {
5237 	struct mlx4_vf_immed_vlan_work *work =
5238 		container_of(_work, struct mlx4_vf_immed_vlan_work, work);
5239 	struct mlx4_cmd_mailbox *mailbox;
5240 	struct mlx4_update_qp_context *upd_context;
5241 	struct mlx4_dev *dev = &work->priv->dev;
5242 	struct mlx4_resource_tracker *tracker =
5243 		&work->priv->mfunc.master.res_tracker;
5244 	struct list_head *qp_list =
5245 		&tracker->slave_list[work->slave].res_list[RES_QP];
5246 	struct res_qp *qp;
5247 	struct res_qp *tmp;
	u64 qp_path_mask_vlan_ctrl =
		       ((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));

	u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_SV) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
		       (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));

	int err;
	int port, errors = 0;
	u8 vlan_control;

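	/* Only the PF may issue UPDATE_QP on behalf of its VFs; this work
	 * should never run on a slave function.
	 */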
	if (mlx4_is_slave(dev)) {
		mlx4_warn(dev, "Trying to update-qp in slave %d\n",
			  work->slave);
		goto out;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto out;
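	/* Select the VLAN blocking policy applied to the VF's QPs: block all
	 * traffic when the link is administratively disabled, block tagged
	 * frames when no VLAN is configured, and apply the VST (802.1ad or
	 * 802.1Q) blocking rules otherwise.
	 */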
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else if (!work->vlan_id)
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else if (work->vlan_proto == htons(ETH_P_8021AD))
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
	else  /* vst 802.1Q */
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;

	upd_context = mailbox->buf;
	upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);

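	/* Walk the slave's tracked QPs and push the new path parameters to
	 * those in HW ownership on the affected port; the tracker lock is
	 * dropped around the firmware command.
	 */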
	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == work->slave) {
			if (qp->com.from_state != RES_QP_HW ||
			    !qp->sched_queue ||  /* no INIT2RTR trans yet */
			    mlx4_is_qp_reserved(dev, qp->local_qpn) ||
			    qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
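			/* Bit 6 of sched_queue encodes the physical port
			 * (1-based); only QPs on the port being reconfigured
			 * are updated.
			 */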
			port = (qp->sched_queue >> 6 & 1) + 1;
			if (port != work->port) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
				upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
			else
				upd_context->primary_addr_path_mask =
					cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
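			/* VGT: restore the parameters saved before VST was
			 * enforced.  VST: force the admin VLAN, QoS and
			 * scheduling queue onto the QP.
			 */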
			if (work->vlan_id == MLX4_VGT) {
				upd_context->qp_context.param3 = qp->param3;
				upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
				upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
				upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
				upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
				upd_context->qp_context.pri_path.feup = qp->feup;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue;
			} else {
				upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
				upd_context->qp_context.pri_path.vlan_control = vlan_control;
				upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
				upd_context->qp_context.pri_path.fvl_rx =
					qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.fl =
					qp->pri_path_fl | MLX4_FL_ETH_HIDE_CQE_VLAN;
				if (work->vlan_proto == htons(ETH_P_8021AD))
					upd_context->qp_context.pri_path.fl |= MLX4_FL_SV;
				else
					upd_context->qp_context.pri_path.fl |= MLX4_FL_CV;
				upd_context->qp_context.pri_path.feup =
					qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue & 0xC7;
				upd_context->qp_context.pri_path.sched_queue |=
					((work->qos & 0x7) << 3);
				upd_context->qp_mask |=
					cpu_to_be64(1ULL <<
						    MLX4_UPD_QP_MASK_QOS_VPP);
				upd_context->qp_context.qos_vport =
					work->qos_vport;
			}

			err = mlx4_cmd(dev, mailbox->dma,
				       qp->local_qpn & 0xffffff,
				       0, MLX4_CMD_UPDATE_QP,
				       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
			if (err) {
				mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
					  work->slave, port, qp->local_qpn, err);
				errors++;
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
	mlx4_free_cmd_mailbox(dev, mailbox);

	if (errors)
		mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
			 errors, work->slave, work->port);

	/* Unregister the previous vlan_id, if needed, as long as there were
	 * no errors while updating the QPs.
	 */
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
	    NO_INDX != work->orig_vlan_ix)
		__mlx4_unregister_vlan(&work->priv->dev, work->port,
				       work->orig_vlan_id);
out:
	kfree(work);
	return;
}
