/* xref: /linux/drivers/infiniband/hw/mlx5/cq.c (revision f14c1a14) */
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kref.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "mlx5_ib.h"
#include "srq.h"
#include "qp.h"

static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;

	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type)
{
	struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct ib_cq *ibcq = &cq->ibcq;
	struct ib_event event;

	if (type != MLX5_EVENT_TYPE_CQ_ERROR) {
		mlx5_ib_warn(dev, "Unexpected event type %d on CQ %06x\n",
			     type, mcq->cqn);
		return;
	}

	if (ibcq->event_handler) {
		event.device     = &dev->ib_dev;
		event.event      = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}

static void *get_cqe(struct mlx5_ib_cq *cq, int n)
{
	return mlx5_frag_buf_get_wqe(&cq->buf.fbc, n);
}

static u8 sw_ownership_bit(int n, int nent)
{
	return (n & nent) ? 1 : 0;
}

static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
{
	void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
	struct mlx5_cqe64 *cqe64;

	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

	if (likely(get_cqe_opcode(cqe64) != MLX5_CQE_INVALID) &&
	    !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
		return cqe;
	} else {
		return NULL;
	}
}

static void *next_cqe_sw(struct mlx5_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}

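/*
 * Note on CQE ownership: get_sw_cqe() above decides whether the CQE at
 * index n belongs to software by comparing the owner bit in op_own with
 * the parity of the index's wrap bit (n & (ibcq.cqe + 1)).  The hardware
 * is expected to flip the ownership sense on each pass around the ring,
 * so an entry left over from the previous lap fails the check and NULL
 * is returned instead of a stale completion.
 */
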
static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx)
{
	switch (wq->wr_data[idx]) {
	case MLX5_IB_WR_UMR:
		return 0;

	case IB_WR_LOCAL_INV:
		return IB_WC_LOCAL_INV;

	case IB_WR_REG_MR:
		return IB_WC_REG_MR;

	default:
		pr_warn("unknown completion status\n");
		return 0;
	}
}

static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
			    struct mlx5_ib_wq *wq, int idx)
{
	wc->wc_flags = 0;
	switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
	case MLX5_OPCODE_RDMA_WRITE_IMM:
		wc->wc_flags |= IB_WC_WITH_IMM;
		fallthrough;
	case MLX5_OPCODE_RDMA_WRITE:
		wc->opcode    = IB_WC_RDMA_WRITE;
		break;
	case MLX5_OPCODE_SEND_IMM:
		wc->wc_flags |= IB_WC_WITH_IMM;
		fallthrough;
	case MLX5_OPCODE_SEND:
	case MLX5_OPCODE_SEND_INVAL:
		wc->opcode    = IB_WC_SEND;
		break;
	case MLX5_OPCODE_RDMA_READ:
		wc->opcode    = IB_WC_RDMA_READ;
		wc->byte_len  = be32_to_cpu(cqe->byte_cnt);
		break;
	case MLX5_OPCODE_ATOMIC_CS:
		wc->opcode    = IB_WC_COMP_SWAP;
		wc->byte_len  = 8;
		break;
	case MLX5_OPCODE_ATOMIC_FA:
		wc->opcode    = IB_WC_FETCH_ADD;
		wc->byte_len  = 8;
		break;
	case MLX5_OPCODE_ATOMIC_MASKED_CS:
		wc->opcode    = IB_WC_MASKED_COMP_SWAP;
		wc->byte_len  = 8;
		break;
	case MLX5_OPCODE_ATOMIC_MASKED_FA:
		wc->opcode    = IB_WC_MASKED_FETCH_ADD;
		wc->byte_len  = 8;
		break;
	case MLX5_OPCODE_UMR:
		wc->opcode = get_umr_comp(wq, idx);
		break;
	}
}

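/*
 * Note: byte_len is hard-coded to 8 for the atomic opcodes above since
 * verbs-level atomics (compare-and-swap, fetch-and-add and their masked
 * variants) operate on a 64-bit remote value, so the completion always
 * reports 8 bytes of returned data.
 */
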
enum {
	MLX5_GRH_IN_BUFFER = 1,
	MLX5_GRH_IN_CQE	   = 2,
};

static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
			     struct mlx5_ib_qp *qp)
{
	enum rdma_link_layer ll = rdma_port_get_link_layer(qp->ibqp.device, 1);
	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
	struct mlx5_ib_srq *srq = NULL;
	struct mlx5_ib_wq *wq;
	u16 wqe_ctr;
	u8  roce_packet_type;
	bool vlan_present;
	u8 g;

	if (qp->ibqp.srq || qp->ibqp.xrcd) {
		struct mlx5_core_srq *msrq = NULL;

		if (qp->ibqp.xrcd) {
			msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn));
			if (msrq)
				srq = to_mibsrq(msrq);
		} else {
			srq = to_msrq(qp->ibqp.srq);
		}
		if (srq) {
			wqe_ctr = be16_to_cpu(cqe->wqe_counter);
			wc->wr_id = srq->wrid[wqe_ctr];
			mlx5_ib_free_srq_wqe(srq, wqe_ctr);
			if (msrq)
				mlx5_core_res_put(&msrq->common);
		}
	} else {
		wq	  = &qp->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	}
	wc->byte_len = be32_to_cpu(cqe->byte_cnt);

	switch (get_cqe_opcode(cqe)) {
	case MLX5_CQE_RESP_WR_IMM:
		wc->opcode	= IB_WC_RECV_RDMA_WITH_IMM;
		wc->wc_flags	= IB_WC_WITH_IMM;
		wc->ex.imm_data = cqe->immediate;
		break;
	case MLX5_CQE_RESP_SEND:
		wc->opcode   = IB_WC_RECV;
		wc->wc_flags = IB_WC_IP_CSUM_OK;
		if (unlikely(!((cqe->hds_ip_ext & CQE_L3_OK) &&
			       (cqe->hds_ip_ext & CQE_L4_OK))))
			wc->wc_flags = 0;
		break;
	case MLX5_CQE_RESP_SEND_IMM:
		wc->opcode	= IB_WC_RECV;
		wc->wc_flags	= IB_WC_WITH_IMM;
		wc->ex.imm_data = cqe->immediate;
		break;
	case MLX5_CQE_RESP_SEND_INV:
		wc->opcode	= IB_WC_RECV;
		wc->wc_flags	= IB_WC_WITH_INVALIDATE;
		wc->ex.invalidate_rkey = be32_to_cpu(cqe->inval_rkey);
		break;
	}
	wc->src_qp	   = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
	wc->dlid_path_bits = cqe->ml_path;
	g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
	wc->wc_flags |= g ? IB_WC_GRH : 0;
	if (is_qp1(qp->type)) {
		u16 pkey = be32_to_cpu(cqe->pkey) & 0xffff;

		ib_find_cached_pkey(&dev->ib_dev, qp->port, pkey,
				    &wc->pkey_index);
	} else {
		wc->pkey_index = 0;
	}

	if (ll != IB_LINK_LAYER_ETHERNET) {
		wc->slid = be16_to_cpu(cqe->slid);
		wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
		return;
	}

	wc->slid = 0;
	vlan_present = cqe->l4_l3_hdr_type & 0x1;
	roce_packet_type   = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
	if (vlan_present) {
		wc->vlan_id = (be16_to_cpu(cqe->vlan_info)) & 0xfff;
		wc->sl = (be16_to_cpu(cqe->vlan_info) >> 13) & 0x7;
		wc->wc_flags |= IB_WC_WITH_VLAN;
	} else {
		wc->sl = 0;
	}

	switch (roce_packet_type) {
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH:
		wc->network_hdr_type = RDMA_NETWORK_ROCE_V1;
		break;
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6:
		wc->network_hdr_type = RDMA_NETWORK_IPV6;
		break;
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4:
		wc->network_hdr_type = RDMA_NETWORK_IPV4;
		break;
	}
	wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
}

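/*
 * handle_responder() fills the work completion differently depending on
 * the port's link layer: on InfiniBand the source LID and SL come
 * straight from the CQE, while on Ethernet (RoCE) the slid field is
 * meaningless and the CQE instead carries the VLAN tag, the priority
 * (reported as SL) and the network header type (RoCE v1 GRH vs. v2
 * IPv4/IPv6).
 */
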
static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe,
		     struct ib_wc *wc, const char *level)
{
	mlx5_ib_log(level, dev, "WC error: %d, Message: %s\n", wc->status,
		    ib_wc_status_msg(wc->status));
	print_hex_dump(level, "cqe_dump: ", DUMP_PREFIX_OFFSET, 16, 1,
		       cqe, sizeof(*cqe), false);
}

static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
				  struct mlx5_err_cqe *cqe,
				  struct ib_wc *wc)
{
	const char *dump = KERN_WARNING;

	switch (cqe->syndrome) {
	case MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_PROT_ERR:
		dump = KERN_DEBUG;
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX5_CQE_SYNDROME_WR_FLUSH_ERR:
		dump = NULL;
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX5_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX5_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		dump = KERN_DEBUG;
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_OP_ERR:
		dump = KERN_DEBUG;
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		dump = NULL;
		wc->status = IB_WC_RETRY_EXC_ERR;
		break;
	case MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		dump = NULL;
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_synd;
	if (dump)
		dump_cqe(dev, cqe, wc, dump);
}

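/*
 * The dump level above is chosen per syndrome: expected flush-time
 * errors (WR flush, transport/RNR retry exceeded) are not dumped at
 * all, errors commonly caused by the remote peer are dumped at
 * KERN_DEBUG, and everything else is dumped at KERN_WARNING.
 */
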
static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
			   u16 tail, u16 head)
{
	u16 idx;

	do {
		idx = tail & (qp->sq.wqe_cnt - 1);
		if (idx == head)
			break;

		tail = qp->sq.w_list[idx].next;
	} while (1);
	tail = qp->sq.w_list[idx].next;
	qp->sq.last_poll = tail;
}

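/*
 * handle_atomics() advances sq.last_poll by walking the send work-list
 * links from the last polled WQE up to the one this completion reports
 * (head), so later completions resume the walk from the right place.
 * Despite the name, in this version nothing atomic-specific is decoded
 * here (cqe64 is unused); only the list position is maintained.
 */
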
static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
{
	mlx5_frag_buf_free(dev->mdev, &buf->frag_buf);
}

static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
			     struct ib_sig_err *item)
{
	u16 syndrome = be16_to_cpu(cqe->syndrome);

#define GUARD_ERR   (1 << 13)
#define APPTAG_ERR  (1 << 12)
#define REFTAG_ERR  (1 << 11)

	if (syndrome & GUARD_ERR) {
		item->err_type = IB_SIG_BAD_GUARD;
		item->expected = be32_to_cpu(cqe->expected_trans_sig) >> 16;
		item->actual = be32_to_cpu(cqe->actual_trans_sig) >> 16;
	} else if (syndrome & REFTAG_ERR) {
		item->err_type = IB_SIG_BAD_REFTAG;
		item->expected = be32_to_cpu(cqe->expected_reftag);
		item->actual = be32_to_cpu(cqe->actual_reftag);
	} else if (syndrome & APPTAG_ERR) {
		item->err_type = IB_SIG_BAD_APPTAG;
		item->expected = be32_to_cpu(cqe->expected_trans_sig) & 0xffff;
		item->actual = be32_to_cpu(cqe->actual_trans_sig) & 0xffff;
	} else {
		pr_err("Got signature completion error with bad syndrome %04x\n",
		       syndrome);
	}

	item->sig_err_offset = be64_to_cpu(cqe->err_offset);
	item->key = be32_to_cpu(cqe->mkey);
}

static void sw_comp(struct mlx5_ib_qp *qp, int num_entries, struct ib_wc *wc,
		    int *npolled, bool is_send)
{
	struct mlx5_ib_wq *wq;
	unsigned int cur;
	int np;
	int i;

	wq = (is_send) ? &qp->sq : &qp->rq;
	cur = wq->head - wq->tail;
	np = *npolled;

	if (cur == 0)
		return;

	for (i = 0; i < cur && np < num_entries; i++) {
		unsigned int idx;

		idx = (is_send) ? wq->last_poll : wq->tail;
		idx &= (wq->wqe_cnt - 1);
		wc->wr_id = wq->wrid[idx];
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
		wq->tail++;
		if (is_send)
			wq->last_poll = wq->w_list[idx].next;
		np++;
		wc->qp = &qp->ibqp;
		wc++;
	}
	*npolled = np;
}

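/*
 * sw_comp() generates completions in software for WQEs that were posted
 * but will never be completed by hardware because the device is in
 * internal error state.  Each outstanding WQE is reported with
 * IB_WC_WR_FLUSH_ERR so that consumers can reclaim their resources,
 * mirroring what the hardware would produce when flushing a queue.
 */
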
static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries,
				 struct ib_wc *wc, int *npolled)
{
	struct mlx5_ib_qp *qp;

	*npolled = 0;
	/* Find uncompleted WQEs belonging to that cq and return simulated
	 * (flush) completions for them.
	 */
	list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
		sw_comp(qp, num_entries, wc + *npolled, npolled, true);
		if (*npolled >= num_entries)
			return;
	}

	list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) {
		sw_comp(qp, num_entries, wc + *npolled, npolled, false);
		if (*npolled >= num_entries)
			return;
	}
}

static int mlx5_poll_one(struct mlx5_ib_cq *cq,
			 struct mlx5_ib_qp **cur_qp,
			 struct ib_wc *wc)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_err_cqe *err_cqe;
	struct mlx5_cqe64 *cqe64;
	struct mlx5_core_qp *mqp;
	struct mlx5_ib_wq *wq;
	uint8_t opcode;
	uint32_t qpn;
	u16 wqe_ctr;
	void *cqe;
	int idx;

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

	++cq->mcq.cons_index;

	/* Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	opcode = get_cqe_opcode(cqe64);
	if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) {
		if (likely(cq->resize_buf)) {
			free_cq_buf(dev, &cq->buf);
			cq->buf = *cq->resize_buf;
			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
			goto repoll;
		} else {
			mlx5_ib_warn(dev, "unexpected resize cqe\n");
		}
	}

	qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
	if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {
		/* We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = radix_tree_lookup(&dev->qp_table.tree, qpn);
		*cur_qp = to_mibqp(mqp);
	}

	wc->qp  = &(*cur_qp)->ibqp;
	switch (opcode) {
	case MLX5_CQE_REQ:
		wq = &(*cur_qp)->sq;
		wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
		idx = wqe_ctr & (wq->wqe_cnt - 1);
		handle_good_req(wc, cqe64, wq, idx);
		handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
		wc->wr_id = wq->wrid[idx];
		wq->tail = wq->wqe_head[idx] + 1;
		wc->status = IB_WC_SUCCESS;
		break;
	case MLX5_CQE_RESP_WR_IMM:
	case MLX5_CQE_RESP_SEND:
	case MLX5_CQE_RESP_SEND_IMM:
	case MLX5_CQE_RESP_SEND_INV:
		handle_responder(wc, cqe64, *cur_qp);
		wc->status = IB_WC_SUCCESS;
		break;
	case MLX5_CQE_RESIZE_CQ:
		break;
	case MLX5_CQE_REQ_ERR:
	case MLX5_CQE_RESP_ERR:
		err_cqe = (struct mlx5_err_cqe *)cqe64;
		mlx5_handle_error_cqe(dev, err_cqe, wc);
		mlx5_ib_dbg(dev, "%s error cqe on cqn 0x%x:\n",
			    opcode == MLX5_CQE_REQ_ERR ?
			    "Requestor" : "Responder", cq->mcq.cqn);
		mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n",
			    err_cqe->syndrome, err_cqe->vendor_err_synd);
		if (wc->status != IB_WC_WR_FLUSH_ERR &&
		    (*cur_qp)->type == MLX5_IB_QPT_REG_UMR)
			dev->umrc.state = MLX5_UMR_STATE_RECOVER;

		if (opcode == MLX5_CQE_REQ_ERR) {
			wq = &(*cur_qp)->sq;
			wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
			idx = wqe_ctr & (wq->wqe_cnt - 1);
			wc->wr_id = wq->wrid[idx];
			wq->tail = wq->wqe_head[idx] + 1;
		} else {
			struct mlx5_ib_srq *srq;

			if ((*cur_qp)->ibqp.srq) {
				srq = to_msrq((*cur_qp)->ibqp.srq);
				wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
				wc->wr_id = srq->wrid[wqe_ctr];
				mlx5_ib_free_srq_wqe(srq, wqe_ctr);
			} else {
				wq = &(*cur_qp)->rq;
				wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
				++wq->tail;
			}
		}
		break;
	case MLX5_CQE_SIG_ERR: {
		struct mlx5_sig_err_cqe *sig_err_cqe =
			(struct mlx5_sig_err_cqe *)cqe64;
		struct mlx5_core_sig_ctx *sig;

		xa_lock(&dev->sig_mrs);
		sig = xa_load(&dev->sig_mrs,
			      mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
		get_sig_err_item(sig_err_cqe, &sig->err_item);
		sig->sig_err_exists = true;
		sig->sigerr_count++;

		mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR on key: 0x%x err_type %x err_offset %llx expected %x actual %x\n",
			     cq->mcq.cqn, sig->err_item.key,
			     sig->err_item.err_type,
			     sig->err_item.sig_err_offset,
			     sig->err_item.expected,
			     sig->err_item.actual);

		xa_unlock(&dev->sig_mrs);
		goto repoll;
	}
	}

	return 0;
}

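/*
 * mlx5_poll_one() is the heart of the poll path: it claims the next
 * software-owned CQE, issues a read barrier so the CQE payload is not
 * read before the ownership check, resolves the QP from the 24-bit qpn
 * in the CQE (cached in *cur_qp across calls), and then dispatches on
 * the opcode.  Resize CQEs are normally consumed internally by swapping
 * in the resize buffer and repolling, and signature-error CQEs are
 * recorded on the signature context and likewise repolled, so neither
 * ordinarily surfaces to the caller.
 */
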
static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
			struct ib_wc *wc, bool is_fatal_err)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_ib_wc *soft_wc, *next;
	int npolled = 0;

	list_for_each_entry_safe(soft_wc, next, &cq->wc_list, list) {
		if (npolled >= num_entries)
			break;

		mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n",
			    cq->mcq.cqn);

		if (unlikely(is_fatal_err)) {
			soft_wc->wc.status = IB_WC_WR_FLUSH_ERR;
			soft_wc->wc.vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
		}
		wc[npolled++] = soft_wc->wc;
		list_del(&soft_wc->list);
		kfree(soft_wc);
	}

	return npolled;
}

int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	struct mlx5_ib_qp *cur_qp = NULL;
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_core_dev *mdev = dev->mdev;
	unsigned long flags;
	int soft_polled = 0;
	int npolled;

	spin_lock_irqsave(&cq->lock, flags);
	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		/* make sure no soft WQEs are waiting */
		if (unlikely(!list_empty(&cq->wc_list)))
			soft_polled = poll_soft_wc(cq, num_entries, wc, true);

		mlx5_ib_poll_sw_comp(cq, num_entries - soft_polled,
				     wc + soft_polled, &npolled);
		goto out;
	}

	if (unlikely(!list_empty(&cq->wc_list)))
		soft_polled = poll_soft_wc(cq, num_entries, wc, false);

	for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
		if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
			break;
	}

	if (npolled)
		mlx5_cq_set_ci(&cq->mcq);
out:
	spin_unlock_irqrestore(&cq->lock, flags);

	return soft_polled + npolled;
}

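/*
 * mlx5_ib_poll_cq() serialises pollers with the CQ spinlock, drains any
 * software-generated completions first (wc_list), then hardware CQEs.
 * If the device hit an internal error, only software flush completions
 * are reported, since no further hardware CQEs can be expected.  The
 * consumer index is published to hardware (set_ci) only when at least
 * one hardware CQE was consumed.
 */
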
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	void __iomem *uar_page = mdev->priv.uar->map;
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->lock, irq_flags);
	if (cq->notify_flags != IB_CQ_NEXT_COMP)
		cq->notify_flags = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !list_empty(&cq->wc_list))
		ret = 1;
	spin_unlock_irqrestore(&cq->lock, irq_flags);

	mlx5_cq_arm(&cq->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
		    uar_page, to_mcq(ibcq)->mcq.cons_index);

	return ret;
}

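/*
 * Arming writes the consumer index and the solicited/unsolicited
 * request into the CQ doorbell through the UAR page, asking the device
 * to raise a completion event on the next matching CQE.  Returning 1
 * under IB_CQ_REPORT_MISSED_EVENTS tells the caller a software
 * completion is already pending, so it should poll again rather than
 * wait for an event.
 */
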
static int alloc_cq_frag_buf(struct mlx5_ib_dev *dev,
			     struct mlx5_ib_cq_buf *buf,
			     int nent,
			     int cqe_size)
{
	struct mlx5_frag_buf *frag_buf = &buf->frag_buf;
	u8 log_wq_stride = 6 + (cqe_size == 128 ? 1 : 0);
	u8 log_wq_sz     = ilog2(cqe_size);
	int err;

	err = mlx5_frag_buf_alloc_node(dev->mdev,
				       nent * cqe_size,
				       frag_buf,
				       dev->mdev->priv.numa_node);
	if (err)
		return err;

	mlx5_init_fbc(frag_buf->frags, log_wq_stride, log_wq_sz, &buf->fbc);

	buf->cqe_size = cqe_size;
	buf->nent = nent;

	return 0;
}

enum {
	MLX5_CQE_RES_FORMAT_HASH = 0,
	MLX5_CQE_RES_FORMAT_CSUM = 1,
	MLX5_CQE_RES_FORMAT_CSUM_STRIDX = 3,
};

static int mini_cqe_res_format_to_hw(struct mlx5_ib_dev *dev, u8 format)
{
	switch (format) {
	case MLX5_IB_CQE_RES_FORMAT_HASH:
		return MLX5_CQE_RES_FORMAT_HASH;
	case MLX5_IB_CQE_RES_FORMAT_CSUM:
		return MLX5_CQE_RES_FORMAT_CSUM;
	case MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX:
		if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
			return MLX5_CQE_RES_FORMAT_CSUM_STRIDX;
		return -EOPNOTSUPP;
	default:
		return -EINVAL;
	}
}

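/*
 * Mini CQEs are the compressed-CQE payload format: with compression
 * enabled, the device packs several abbreviated completions into one
 * CQE slot, and the "res format" selects which per-packet result field
 * survives compression (hash result, checksum, or checksum plus stride
 * index).  The stride-index variant is only usable when the
 * corresponding firmware capability bit is set, hence the check above.
 */
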
static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
			  struct mlx5_ib_cq *cq, int entries, u32 **cqb,
			  int *cqe_size, int *index, int *inlen)
{
	struct mlx5_ib_create_cq ucmd = {};
	unsigned long page_size;
	unsigned int page_offset_quantized;
	size_t ucmdlen;
	__be64 *pas;
	int ncont;
	void *cqc;
	int err;
	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);

	ucmdlen = min(udata->inlen, sizeof(ucmd));
	if (ucmdlen < offsetof(struct mlx5_ib_create_cq, flags))
		return -EINVAL;

	if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
		return -EFAULT;

	if ((ucmd.flags & ~(MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD |
			    MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX |
			    MLX5_IB_CREATE_CQ_FLAGS_REAL_TIME_TS)))
		return -EINVAL;

	if ((ucmd.cqe_size != 64 && ucmd.cqe_size != 128) ||
	    ucmd.reserved0 || ucmd.reserved1)
		return -EINVAL;

	*cqe_size = ucmd.cqe_size;

	cq->buf.umem =
		ib_umem_get(&dev->ib_dev, ucmd.buf_addr,
			    entries * ucmd.cqe_size, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(cq->buf.umem)) {
		err = PTR_ERR(cq->buf.umem);
		return err;
	}

	page_size = mlx5_umem_find_best_cq_quantized_pgoff(
		cq->buf.umem, cqc, log_page_size, MLX5_ADAPTER_PAGE_SHIFT,
		page_offset, 64, &page_offset_quantized);
	if (!page_size) {
		err = -EINVAL;
		goto err_umem;
	}

	err = mlx5_ib_db_map_user(context, ucmd.db_addr, &cq->db);
	if (err)
		goto err_umem;

	ncont = ib_umem_num_dma_blocks(cq->buf.umem, page_size);
	mlx5_ib_dbg(
		dev,
		"addr 0x%llx, size %u, npages %zu, page_size %lu, ncont %d\n",
		ucmd.buf_addr, entries * ucmd.cqe_size,
		ib_umem_num_pages(cq->buf.umem), page_size, ncont);

	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont;
	*cqb = kvzalloc(*inlen, GFP_KERNEL);
	if (!*cqb) {
		err = -ENOMEM;
		goto err_db;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
	mlx5_ib_populate_pas(cq->buf.umem, page_size, pas, 0);

	cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
	MLX5_SET(cqc, cqc, log_page_size,
		 order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(cqc, cqc, page_offset, page_offset_quantized);

	if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX) {
		*index = ucmd.uar_page_index;
	} else if (context->bfregi.lib_uar_dyn) {
		err = -EINVAL;
		goto err_cqb;
	} else {
		*index = context->bfregi.sys_pages[0];
	}

	if (ucmd.cqe_comp_en == 1) {
		int mini_cqe_format;

		if (!((*cqe_size == 128 &&
		       MLX5_CAP_GEN(dev->mdev, cqe_compression_128)) ||
		      (*cqe_size == 64 &&
		       MLX5_CAP_GEN(dev->mdev, cqe_compression)))) {
			err = -EOPNOTSUPP;
			mlx5_ib_warn(dev, "CQE compression is not supported for size %d!\n",
				     *cqe_size);
			goto err_cqb;
		}

		mini_cqe_format =
			mini_cqe_res_format_to_hw(dev,
						  ucmd.cqe_comp_res_format);
		if (mini_cqe_format < 0) {
			err = mini_cqe_format;
			mlx5_ib_dbg(dev, "CQE compression res format %d error: %d\n",
				    ucmd.cqe_comp_res_format, err);
			goto err_cqb;
		}

		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
		MLX5_SET(cqc, cqc, mini_cqe_res_format, mini_cqe_format);
	}

	if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD) {
		if (*cqe_size != 128 ||
		    !MLX5_CAP_GEN(dev->mdev, cqe_128_always)) {
			err = -EOPNOTSUPP;
			mlx5_ib_warn(dev,
				     "CQE padding is not supported for CQE size of %dB!\n",
				     *cqe_size);
			goto err_cqb;
		}

		cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD;
	}

	if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_REAL_TIME_TS)
		cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_REAL_TIME_TS;

	MLX5_SET(create_cq_in, *cqb, uid, context->devx_uid);
	return 0;

err_cqb:
	kvfree(*cqb);

err_db:
	mlx5_ib_db_unmap_user(context, &cq->db);

err_umem:
	ib_umem_release(cq->buf.umem);
	return err;
}

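/*
 * create_cq_user() maps everything the kernel needs from a userspace
 * CQ: the CQE buffer (pinned via ib_umem_get() and described to the
 * device as a page list at the best quantized page size), the doorbell
 * record, and the UAR index.  It then translates the ucmd flags (CQE
 * compression, 128B padding, real-time timestamps) into create_cq_in
 * fields after validating each against the device capabilities.
 */
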
static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);

	mlx5_ib_db_unmap_user(context, &cq->db);
	ib_umem_release(cq->buf.umem);
}

static void init_cq_frag_buf(struct mlx5_ib_cq_buf *buf)
{
	int i;
	void *cqe;
	struct mlx5_cqe64 *cqe64;

	for (i = 0; i < buf->nent; i++) {
		cqe = mlx5_frag_buf_get_wqe(&buf->fbc, i);
		cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
		cqe64->op_own = MLX5_CQE_INVALID << 4;
	}
}

static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
			    int entries, int cqe_size,
			    u32 **cqb, int *index, int *inlen)
{
	__be64 *pas;
	void *cqc;
	int err;

	err = mlx5_db_alloc(dev->mdev, &cq->db);
	if (err)
		return err;

	cq->mcq.set_ci_db  = cq->db.db;
	cq->mcq.arm_db     = cq->db.db + 1;
	cq->mcq.cqe_sz = cqe_size;

	err = alloc_cq_frag_buf(dev, &cq->buf, entries, cqe_size);
	if (err)
		goto err_db;

	init_cq_frag_buf(&cq->buf);

	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) *
		 cq->buf.frag_buf.npages;
	*cqb = kvzalloc(*inlen, GFP_KERNEL);
	if (!*cqb) {
		err = -ENOMEM;
		goto err_buf;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
	mlx5_fill_page_frag_array(&cq->buf.frag_buf, pas);

	cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
	MLX5_SET(cqc, cqc, log_page_size,
		 cq->buf.frag_buf.page_shift -
		 MLX5_ADAPTER_PAGE_SHIFT);

	*index = dev->mdev->priv.uar->index;

	return 0;

err_buf:
	free_cq_buf(dev, &cq->buf);

err_db:
	mlx5_db_free(dev->mdev, &cq->db);
	return err;
}

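/*
 * The kernel-CQ variant differs from the user path mainly in ownership:
 * the buffer is a driver-allocated frag buffer rather than pinned user
 * memory, the doorbell record comes from the driver's doorbell
 * allocator, the driver's own UAR index is used, and every CQE is
 * pre-initialised to MLX5_CQE_INVALID (init_cq_frag_buf) so the first
 * poll pass sees no stale entries.
 */
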
static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
{
	free_cq_buf(dev, &cq->buf);
	mlx5_db_free(dev->mdev, &cq->db);
}

static void notify_soft_wc_handler(struct work_struct *work)
{
	struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq,
					     notify_work);

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct ib_udata *udata)
{
	struct ib_device *ibdev = ibcq->device;
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
	int index;
	int inlen;
	u32 *cqb = NULL;
	void *cqc;
	int cqe_size;
	int eqn;
	int err;

	if (entries < 0 ||
	    (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
		return -EINVAL;

	if (check_cq_create_flags(attr->flags))
		return -EOPNOTSUPP;

	entries = roundup_pow_of_two(entries + 1);
	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
		return -EINVAL;

	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;
	cq->create_flags = attr->flags;
	INIT_LIST_HEAD(&cq->list_send_qp);
	INIT_LIST_HEAD(&cq->list_recv_qp);

	if (udata) {
		err = create_cq_user(dev, udata, cq, entries, &cqb, &cqe_size,
				     &index, &inlen);
		if (err)
			return err;
	} else {
		cqe_size = cache_line_size() == 128 ? 128 : 64;
		err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
				       &index, &inlen);
		if (err)
			return err;

		INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
	}

	err = mlx5_comp_eqn_get(dev->mdev, vector, &eqn);
	if (err)
		goto err_cqb;

	cq->cqe_size = cqe_size;

	cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context);
	MLX5_SET(cqc, cqc, cqe_sz,
		 cqe_sz_to_mlx_sz(cqe_size,
				  cq->private_flags &
				  MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
	MLX5_SET(cqc, cqc, uar_page, index);
	MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
	if (cq->create_flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN)
		MLX5_SET(cqc, cqc, oi, 1);

	err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen, out, sizeof(out));
	if (err)
		goto err_cqb;

	mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
	if (udata)
		cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
	else
		cq->mcq.comp  = mlx5_ib_cq_comp;
	cq->mcq.event = mlx5_ib_cq_event;

	INIT_LIST_HEAD(&cq->wc_list);

	if (udata)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
			err = -EFAULT;
			goto err_cmd;
		}

	kvfree(cqb);
	return 0;

err_cmd:
	mlx5_core_destroy_cq(dev->mdev, &cq->mcq);

err_cqb:
	kvfree(cqb);
	if (udata)
		destroy_cq_user(cq, udata);
	else
		destroy_cq_kernel(dev, cq);
	return err;
}

int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->device);
	struct mlx5_ib_cq *mcq = to_mcq(cq);
	int ret;

	ret = mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
	if (ret)
		return ret;

	if (udata)
		destroy_cq_user(mcq, udata);
	else
		destroy_cq_kernel(dev, mcq);
	return 0;
}

static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn)
{
	return rsn == (ntohl(cqe64->sop_drop_qpn) & 0xffffff);
}

__mlx5_ib_cq_clean(struct mlx5_ib_cq * cq,u32 rsn,struct mlx5_ib_srq * srq)1071e126ba97SEli Cohen void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq)
1072e126ba97SEli Cohen {
1073e126ba97SEli Cohen 	struct mlx5_cqe64 *cqe64, *dest64;
1074e126ba97SEli Cohen 	void *cqe, *dest;
1075e126ba97SEli Cohen 	u32 prod_index;
1076e126ba97SEli Cohen 	int nfreed = 0;
1077e126ba97SEli Cohen 	u8 owner_bit;
1078e126ba97SEli Cohen 
1079e126ba97SEli Cohen 	if (!cq)
1080e126ba97SEli Cohen 		return;
1081e126ba97SEli Cohen 
1082e126ba97SEli Cohen 	/* First we need to find the current producer index, so we
1083e126ba97SEli Cohen 	 * know where to start cleaning from.  It doesn't matter if HW
1084e126ba97SEli Cohen 	 * adds new entries after this loop -- the QP we're worried
1085e126ba97SEli Cohen 	 * about is already in RESET, so the new entries won't come
1086e126ba97SEli Cohen 	 * from our QP and therefore don't need to be checked.
1087e126ba97SEli Cohen 	 */
1088e126ba97SEli Cohen 	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++)
1089e126ba97SEli Cohen 		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
1090e126ba97SEli Cohen 			break;
1091e126ba97SEli Cohen 
1092e126ba97SEli Cohen 	/* Now sweep backwards through the CQ, removing CQ entries
1093e126ba97SEli Cohen 	 * that match our QP by copying older entries on top of them.
1094e126ba97SEli Cohen 	 */
1095e126ba97SEli Cohen 	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
1096e126ba97SEli Cohen 		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
1097e126ba97SEli Cohen 		cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
1098cfd8f1d4SMoshe Lazer 		if (is_equal_rsn(cqe64, rsn)) {
1099cfd8f1d4SMoshe Lazer 			if (srq && (ntohl(cqe64->srqn) & 0xffffff))
1100e126ba97SEli Cohen 				mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter));
1101e126ba97SEli Cohen 			++nfreed;
1102e126ba97SEli Cohen 		} else if (nfreed) {
1103e126ba97SEli Cohen 			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
1104e126ba97SEli Cohen 			dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64;
1105e126ba97SEli Cohen 			owner_bit = dest64->op_own & MLX5_CQE_OWNER_MASK;
1106e126ba97SEli Cohen 			memcpy(dest, cqe, cq->mcq.cqe_sz);
1107e126ba97SEli Cohen 			dest64->op_own = owner_bit |
1108e126ba97SEli Cohen 				(dest64->op_own & ~MLX5_CQE_OWNER_MASK);
1109e126ba97SEli Cohen 		}
1110e126ba97SEli Cohen 	}
1111e126ba97SEli Cohen 
1112e126ba97SEli Cohen 	if (nfreed) {
1113e126ba97SEli Cohen 		cq->mcq.cons_index += nfreed;
1114e126ba97SEli Cohen 		/* Make sure update of buffer contents is done before
1115e126ba97SEli Cohen 		 * updating consumer index.
1116e126ba97SEli Cohen 		 */
1117e126ba97SEli Cohen 		wmb();
1118e126ba97SEli Cohen 		mlx5_cq_set_ci(&cq->mcq);
1119e126ba97SEli Cohen 	}
1120e126ba97SEli Cohen }
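/*
 * Toy restatement of the sweep above, on a flat array rather than the
 * wrapped CQ ring and without the hardware ownership bit: walking
 * backwards from the producer index, each CQE belonging to the reset
 * QP is dropped and every survivor slides forward by the number of
 * entries dropped so far.  The real loop additionally preserves the
 * ownership bit of each destination slot.
 */
static void toy_cq_compact(u32 *ring, int cons, int prod, u32 dead_rsn)
{
	int nfreed = 0;
	int i;

	for (i = prod - 1; i >= cons; i--) {
		if (ring[i] == dead_rsn)
			nfreed++;			/* drop this entry */
		else if (nfreed)
			ring[i + nfreed] = ring[i];	/* slide survivor up */
	}
	/* the caller would then advance the consumer index by nfreed */
}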
1121e126ba97SEli Cohen 
1122e126ba97SEli Cohen void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
1123e126ba97SEli Cohen {
1124e126ba97SEli Cohen 	if (!cq)
1125e126ba97SEli Cohen 		return;
1126e126ba97SEli Cohen 
1127e126ba97SEli Cohen 	spin_lock_irq(&cq->lock);
1128e126ba97SEli Cohen 	__mlx5_ib_cq_clean(cq, qpn, srq);
1129e126ba97SEli Cohen 	spin_unlock_irq(&cq->lock);
1130e126ba97SEli Cohen }
1131e126ba97SEli Cohen 
1132e126ba97SEli Cohen int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
1133e126ba97SEli Cohen {
11343bdb31f6SEli Cohen 	struct mlx5_ib_dev *dev = to_mdev(cq->device);
11353bdb31f6SEli Cohen 	struct mlx5_ib_cq *mcq = to_mcq(cq);
11363bdb31f6SEli Cohen 	int err;
11373bdb31f6SEli Cohen 
1138938fe83cSSaeed Mahameed 	if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
113926e551c5SKamal Heib 		return -EOPNOTSUPP;
11403bdb31f6SEli Cohen 
1141b0e9df6dSYonatan Cohen 	if (cq_period > MLX5_MAX_CQ_PERIOD)
1142b0e9df6dSYonatan Cohen 		return -EINVAL;
1143b0e9df6dSYonatan Cohen 
114427827786SSaeed Mahameed 	err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq,
114527827786SSaeed Mahameed 					     cq_period, cq_count);
11463bdb31f6SEli Cohen 	if (err)
11473bdb31f6SEli Cohen 		mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);
11483bdb31f6SEli Cohen 
11493bdb31f6SEli Cohen 	return err;
1150e126ba97SEli Cohen }
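/*
 * Hedged usage sketch: CQ moderation makes the HCA coalesce completion
 * events, raising one only after cq_count CQEs have accumulated or
 * cq_period time units (microseconds, per the usual reading of the
 * mlx5 interface) have elapsed, whichever comes first.  A ULP reaches
 * the entry point above through ib_modify_cq(); the values here are
 * purely illustrative:
 */
static inline int example_enable_cq_moderation(struct ib_cq *cq)
{
	/* one event per 16 completions, or after 64 periods at most */
	return ib_modify_cq(cq, 16, 64);
}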
1151e126ba97SEli Cohen 
1152bde51583SEli Cohen static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
11537db0eea9SJason Gunthorpe 		       int entries, struct ib_udata *udata,
1154c08fbdc5SJason Gunthorpe 		       int *cqe_size)
1155bde51583SEli Cohen {
1156bde51583SEli Cohen 	struct mlx5_ib_resize_cq ucmd;
1157bde51583SEli Cohen 	struct ib_umem *umem;
1158bde51583SEli Cohen 	int err;
1159bde51583SEli Cohen 
116057761d8dSEli Cohen 	err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
116157761d8dSEli Cohen 	if (err)
116257761d8dSEli Cohen 		return err;
116357761d8dSEli Cohen 
116457761d8dSEli Cohen 	if (ucmd.reserved0 || ucmd.reserved1)
116557761d8dSEli Cohen 		return -EINVAL;
1166bde51583SEli Cohen 
116728e9091eSLeon Romanovsky 	/* check multiplication overflow */
116828e9091eSLeon Romanovsky 	if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
116928e9091eSLeon Romanovsky 		return -EINVAL;
117028e9091eSLeon Romanovsky 
1171c320e527SMoni Shoua 	umem = ib_umem_get(&dev->ib_dev, ucmd.buf_addr,
117228e9091eSLeon Romanovsky 			   (size_t)ucmd.cqe_size * entries,
117372b894b0SChristoph Hellwig 			   IB_ACCESS_LOCAL_WRITE);
1174bde51583SEli Cohen 	if (IS_ERR(umem)) {
1175bde51583SEli Cohen 		err = PTR_ERR(umem);
1176bde51583SEli Cohen 		return err;
1177bde51583SEli Cohen 	}
1178bde51583SEli Cohen 
1179bde51583SEli Cohen 	cq->resize_umem = umem;
1180bde51583SEli Cohen 	*cqe_size = ucmd.cqe_size;
1181bde51583SEli Cohen 
1182bde51583SEli Cohen 	return 0;
1183bde51583SEli Cohen }
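/*
 * Note on the multiplication-overflow guard above: for unsigned a and
 * nonzero b, a * b wraps a size_t exactly when a > SIZE_MAX / b, so
 * rejecting entries - 1 >= SIZE_MAX / cqe_size guarantees that
 * cqe_size * entries is representable before it reaches ib_umem_get().
 * The generic idiom, restated as a hypothetical helper (the kernel's
 * <linux/overflow.h> also provides check_mul_overflow() for this):
 */
static inline bool example_mul_overflows(size_t a, size_t b)
{
	return b && a > SIZE_MAX / b;
}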
1184bde51583SEli Cohen 
1185bde51583SEli Cohen static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
1186bde51583SEli Cohen 			 int entries, int cqe_size)
1187bde51583SEli Cohen {
1188bde51583SEli Cohen 	int err;
1189bde51583SEli Cohen 
1190bde51583SEli Cohen 	cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL);
1191bde51583SEli Cohen 	if (!cq->resize_buf)
1192bde51583SEli Cohen 		return -ENOMEM;
1193bde51583SEli Cohen 
1194388ca8beSYonatan Cohen 	err = alloc_cq_frag_buf(dev, cq->resize_buf, entries, cqe_size);
1195bde51583SEli Cohen 	if (err)
1196bde51583SEli Cohen 		goto ex;
1197bde51583SEli Cohen 
11982ba0aa2fSAlaa Hleihel 	init_cq_frag_buf(cq->resize_buf);
1199bde51583SEli Cohen 
1200bde51583SEli Cohen 	return 0;
1201bde51583SEli Cohen 
1202bde51583SEli Cohen ex:
1203bde51583SEli Cohen 	kfree(cq->resize_buf);
1204bde51583SEli Cohen 	return err;
1205bde51583SEli Cohen }
1206bde51583SEli Cohen 
1207bde51583SEli Cohen static int copy_resize_cqes(struct mlx5_ib_cq *cq)
1208bde51583SEli Cohen {
1209bde51583SEli Cohen 	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
1210bde51583SEli Cohen 	struct mlx5_cqe64 *scqe64;
1211bde51583SEli Cohen 	struct mlx5_cqe64 *dcqe64;
1212bde51583SEli Cohen 	void *start_cqe;
1213bde51583SEli Cohen 	void *scqe;
1214bde51583SEli Cohen 	void *dcqe;
1215bde51583SEli Cohen 	int ssize;
1216bde51583SEli Cohen 	int dsize;
1217bde51583SEli Cohen 	int i;
1218bde51583SEli Cohen 	u8 sw_own;
1219bde51583SEli Cohen 
1220bde51583SEli Cohen 	ssize = cq->buf.cqe_size;
1221bde51583SEli Cohen 	dsize = cq->resize_buf->cqe_size;
1222bde51583SEli Cohen 	if (ssize != dsize) {
1223bde51583SEli Cohen 		mlx5_ib_warn(dev, "resize from different cqe size is not supported\n");
1224bde51583SEli Cohen 		return -EINVAL;
1225bde51583SEli Cohen 	}
1226bde51583SEli Cohen 
1227bde51583SEli Cohen 	i = cq->mcq.cons_index;
1228bde51583SEli Cohen 	scqe = get_sw_cqe(cq, i);
1229bde51583SEli Cohen 	scqe64 = ssize == 64 ? scqe : scqe + 64;
1230bde51583SEli Cohen 	start_cqe = scqe;
1231bde51583SEli Cohen 	if (!scqe) {
1232bde51583SEli Cohen 		mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
1233bde51583SEli Cohen 		return -EINVAL;
1234bde51583SEli Cohen 	}
1235bde51583SEli Cohen 
1236bdefffd1STariq Toukan 	while (get_cqe_opcode(scqe64) != MLX5_CQE_RESIZE_CQ) {
1237388ca8beSYonatan Cohen 		dcqe = mlx5_frag_buf_get_wqe(&cq->resize_buf->fbc,
1238388ca8beSYonatan Cohen 					     (i + 1) & cq->resize_buf->nent);
1239bde51583SEli Cohen 		dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
1240bde51583SEli Cohen 		sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);
1241bde51583SEli Cohen 		memcpy(dcqe, scqe, dsize);
1242bde51583SEli Cohen 		dcqe64->op_own = (dcqe64->op_own & ~MLX5_CQE_OWNER_MASK) | sw_own;
1243bde51583SEli Cohen 
1244bde51583SEli Cohen 		++i;
1245bde51583SEli Cohen 		scqe = get_sw_cqe(cq, i);
1246bde51583SEli Cohen 		scqe64 = ssize == 64 ? scqe : scqe + 64;
1247bde51583SEli Cohen 		if (!scqe) {
1248bde51583SEli Cohen 			mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
1249bde51583SEli Cohen 			return -EINVAL;
1250bde51583SEli Cohen 		}
1251bde51583SEli Cohen 
1252bde51583SEli Cohen 		if (scqe == start_cqe) {
1253bde51583SEli Cohen 			pr_warn("resize CQ failed to get resize CQE, CQN 0x%x\n",
1254bde51583SEli Cohen 				cq->mcq.cqn);
1255bde51583SEli Cohen 			return -ENOMEM;
1256bde51583SEli Cohen 		}
1257bde51583SEli Cohen 	}
1258bde51583SEli Cohen 	++cq->mcq.cons_index;
1259bde51583SEli Cohen 	return 0;
1260bde51583SEli Cohen }
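/*
 * Sketch of the ownership convention used above: in a power-of-two CQ
 * ring the software ownership bit alternates on every lap, which is
 * how stale CQEs are told apart from fresh ones after wrapping.  For
 * absolute index n in a ring of nent entries the lap parity falls out
 * of a single AND -- a toy restatement of the idea behind
 * sw_ownership_bit(), not necessarily the driver's exact helper:
 */
static inline u8 toy_sw_ownership_bit(int n, int nent)
{
	/* nent is a power of two, so n & nent isolates the lap parity */
	return (n & nent) ? 1 : 0;
}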
1261bde51583SEli Cohen 
1262e126ba97SEli Cohen int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
1263e126ba97SEli Cohen {
1264bde51583SEli Cohen 	struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
1265bde51583SEli Cohen 	struct mlx5_ib_cq *cq = to_mcq(ibcq);
126627827786SSaeed Mahameed 	void *cqc;
126727827786SSaeed Mahameed 	u32 *in;
1268bde51583SEli Cohen 	int err;
1269bde51583SEli Cohen 	int npas;
127027827786SSaeed Mahameed 	__be64 *pas;
1271c08fbdc5SJason Gunthorpe 	unsigned int page_offset_quantized = 0;
1272c08fbdc5SJason Gunthorpe 	unsigned int page_shift;
1273bde51583SEli Cohen 	int inlen;
12743f649ab7SKees Cook 	int cqe_size;
1275bde51583SEli Cohen 	unsigned long flags;
1276bde51583SEli Cohen 
1277938fe83cSSaeed Mahameed 	if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) {
1278bde51583SEli Cohen 		pr_info("Firmware does not support resize CQ\n");
1279e126ba97SEli Cohen 		return -ENOSYS;
1280e126ba97SEli Cohen 	}
1281e126ba97SEli Cohen 
12823c4c3774SNoa Osherovich 	if (entries < 1 ||
12833c4c3774SNoa Osherovich 	    entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
12843c4c3774SNoa Osherovich 		mlx5_ib_warn(dev, "wrong entries number %d, max %d\n",
12853c4c3774SNoa Osherovich 			     entries,
12863c4c3774SNoa Osherovich 			     1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
1287bde51583SEli Cohen 		return -EINVAL;
12883c4c3774SNoa Osherovich 	}
1289bde51583SEli Cohen 
1290bde51583SEli Cohen 	entries = roundup_pow_of_two(entries + 1);
1291938fe83cSSaeed Mahameed 	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
1292bde51583SEli Cohen 		return -EINVAL;
1293bde51583SEli Cohen 
1294bde51583SEli Cohen 	if (entries == ibcq->cqe + 1)
1295bde51583SEli Cohen 		return 0;
1296bde51583SEli Cohen 
1297bde51583SEli Cohen 	mutex_lock(&cq->resize_mutex);
1298bde51583SEli Cohen 	if (udata) {
1299c08fbdc5SJason Gunthorpe 		unsigned long page_size;
1300c08fbdc5SJason Gunthorpe 
1301c08fbdc5SJason Gunthorpe 		err = resize_user(dev, cq, entries, udata, &cqe_size);
13027db0eea9SJason Gunthorpe 		if (err)
13037db0eea9SJason Gunthorpe 			goto ex;
1304c08fbdc5SJason Gunthorpe 
1305c08fbdc5SJason Gunthorpe 		page_size = mlx5_umem_find_best_cq_quantized_pgoff(
1306c08fbdc5SJason Gunthorpe 			cq->resize_umem, cqc, log_page_size,
1307c08fbdc5SJason Gunthorpe 			MLX5_ADAPTER_PAGE_SHIFT, page_offset, 64,
1308c08fbdc5SJason Gunthorpe 			&page_offset_quantized);
1309c08fbdc5SJason Gunthorpe 		if (!page_size) {
1310c08fbdc5SJason Gunthorpe 			err = -EINVAL;
1311c08fbdc5SJason Gunthorpe 			goto ex_resize;
1312c08fbdc5SJason Gunthorpe 		}
1313c08fbdc5SJason Gunthorpe 		npas = ib_umem_num_dma_blocks(cq->resize_umem, page_size);
1314c08fbdc5SJason Gunthorpe 		page_shift = order_base_2(page_size);
1315bde51583SEli Cohen 	} else {
13167db0eea9SJason Gunthorpe 		struct mlx5_frag_buf *frag_buf;
13177db0eea9SJason Gunthorpe 
1318bde51583SEli Cohen 		cqe_size = 64;
1319bde51583SEli Cohen 		err = resize_kernel(dev, cq, entries, cqe_size);
13207db0eea9SJason Gunthorpe 		if (err)
13217db0eea9SJason Gunthorpe 			goto ex;
13227db0eea9SJason Gunthorpe 		frag_buf = &cq->resize_buf->frag_buf;
13234972e6faSTariq Toukan 		npas = frag_buf->npages;
13244972e6faSTariq Toukan 		page_shift = frag_buf->page_shift;
1325bde51583SEli Cohen 	}
1326bde51583SEli Cohen 
132727827786SSaeed Mahameed 	inlen = MLX5_ST_SZ_BYTES(modify_cq_in) +
132827827786SSaeed Mahameed 		MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas;
132927827786SSaeed Mahameed 
13301b9a07eeSLeon Romanovsky 	in = kvzalloc(inlen, GFP_KERNEL);
1331bde51583SEli Cohen 	if (!in) {
1332bde51583SEli Cohen 		err = -ENOMEM;
1333bde51583SEli Cohen 		goto ex_resize;
1334bde51583SEli Cohen 	}
1335bde51583SEli Cohen 
133627827786SSaeed Mahameed 	pas = (__be64 *)MLX5_ADDR_OF(modify_cq_in, in, pas);
1337bde51583SEli Cohen 	if (udata)
1338aab8d396SJason Gunthorpe 		mlx5_ib_populate_pas(cq->resize_umem, 1UL << page_shift, pas,
1339aab8d396SJason Gunthorpe 				     0);
1340bde51583SEli Cohen 	else
13414972e6faSTariq Toukan 		mlx5_fill_page_frag_array(&cq->resize_buf->frag_buf, pas);
1342bde51583SEli Cohen 
134327827786SSaeed Mahameed 	MLX5_SET(modify_cq_in, in,
134427827786SSaeed Mahameed 		 modify_field_select_resize_field_select.resize_field_select.resize_field_select,
134527827786SSaeed Mahameed 		 MLX5_MODIFY_CQ_MASK_LOG_SIZE  |
1346bde51583SEli Cohen 		 MLX5_MODIFY_CQ_MASK_PG_OFFSET |
1347bde51583SEli Cohen 		 MLX5_MODIFY_CQ_MASK_PG_SIZE);
134827827786SSaeed Mahameed 
134927827786SSaeed Mahameed 	cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
135027827786SSaeed Mahameed 
135127827786SSaeed Mahameed 	MLX5_SET(cqc, cqc, log_page_size,
135227827786SSaeed Mahameed 		 page_shift - MLX5_ADAPTER_PAGE_SHIFT);
1353c08fbdc5SJason Gunthorpe 	MLX5_SET(cqc, cqc, page_offset, page_offset_quantized);
13547a0c8f42SGuy Levi 	MLX5_SET(cqc, cqc, cqe_sz,
13557a0c8f42SGuy Levi 		 cqe_sz_to_mlx_sz(cqe_size,
13567a0c8f42SGuy Levi 				  cq->private_flags &
13577a0c8f42SGuy Levi 				  MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
135827827786SSaeed Mahameed 	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
135927827786SSaeed Mahameed 
136027827786SSaeed Mahameed 	MLX5_SET(modify_cq_in, in, op_mod, MLX5_CQ_OPMOD_RESIZE);
136127827786SSaeed Mahameed 	MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn);
1362bde51583SEli Cohen 
13639603b61dSJack Morgenstein 	err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
1364bde51583SEli Cohen 	if (err)
1365bde51583SEli Cohen 		goto ex_alloc;
1366bde51583SEli Cohen 
1367bde51583SEli Cohen 	if (udata) {
1368bde51583SEli Cohen 		cq->ibcq.cqe = entries - 1;
1369bde51583SEli Cohen 		ib_umem_release(cq->buf.umem);
1370bde51583SEli Cohen 		cq->buf.umem = cq->resize_umem;
1371bde51583SEli Cohen 		cq->resize_umem = NULL;
1372bde51583SEli Cohen 	} else {
1373bde51583SEli Cohen 		struct mlx5_ib_cq_buf tbuf;
1374bde51583SEli Cohen 		int resized = 0;
1375bde51583SEli Cohen 
1376bde51583SEli Cohen 		spin_lock_irqsave(&cq->lock, flags);
1377bde51583SEli Cohen 		if (cq->resize_buf) {
1378bde51583SEli Cohen 			err = copy_resize_cqes(cq);
1379bde51583SEli Cohen 			if (!err) {
1380bde51583SEli Cohen 				tbuf = cq->buf;
1381bde51583SEli Cohen 				cq->buf = *cq->resize_buf;
1382bde51583SEli Cohen 				kfree(cq->resize_buf);
1383bde51583SEli Cohen 				cq->resize_buf = NULL;
1384bde51583SEli Cohen 				resized = 1;
1385bde51583SEli Cohen 			}
1386bde51583SEli Cohen 		}
1387bde51583SEli Cohen 		cq->ibcq.cqe = entries - 1;
1388bde51583SEli Cohen 		spin_unlock_irqrestore(&cq->lock, flags);
1389bde51583SEli Cohen 		if (resized)
1390bde51583SEli Cohen 			free_cq_buf(dev, &tbuf);
1391bde51583SEli Cohen 	}
1392bde51583SEli Cohen 	mutex_unlock(&cq->resize_mutex);
1393bde51583SEli Cohen 
1394479163f4SAl Viro 	kvfree(in);
1395bde51583SEli Cohen 	return 0;
1396bde51583SEli Cohen 
1397bde51583SEli Cohen ex_alloc:
1398479163f4SAl Viro 	kvfree(in);
1399bde51583SEli Cohen 
1400bde51583SEli Cohen ex_resize:
1401836a0fbbSLeon Romanovsky 	ib_umem_release(cq->resize_umem);
1402836a0fbbSLeon Romanovsky 	if (!udata) {
1403836a0fbbSLeon Romanovsky 		free_cq_buf(dev, cq->resize_buf);
1404836a0fbbSLeon Romanovsky 		cq->resize_buf = NULL;
1405836a0fbbSLeon Romanovsky 	}
1406bde51583SEli Cohen ex:
1407bde51583SEli Cohen 	mutex_unlock(&cq->resize_mutex);
1408bde51583SEli Cohen 	return err;
1409bde51583SEli Cohen }
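/*
 * Worked example of the sizing logic above: one slot is reserved, the
 * total is rounded up to a power of two, and the caller sees
 * entries - 1 usable CQEs.  A request to resize to 100 entries thus
 * becomes roundup_pow_of_two(101) = 128 ring slots, reported back as
 * ibcq->cqe = 127.
 */
static inline unsigned long example_cq_ring_slots(int requested)
{
	return roundup_pow_of_two(requested + 1);	/* 100 -> 128 */
}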
1410bde51583SEli Cohen 
14115d6ff1baSYonatan Cohen int mlx5_ib_get_cqe_size(struct ib_cq *ibcq)
1412e126ba97SEli Cohen {
1413e126ba97SEli Cohen 	struct mlx5_ib_cq *cq;
1414e126ba97SEli Cohen 
1415e126ba97SEli Cohen 	if (!ibcq)
1416e126ba97SEli Cohen 		return 128;
1417e126ba97SEli Cohen 
1418e126ba97SEli Cohen 	cq = to_mcq(ibcq);
1419e126ba97SEli Cohen 	return cq->cqe_size;
1420e126ba97SEli Cohen }
142125361e02SHaggai Eran 
142225361e02SHaggai Eran /* Called from atomic context */
142325361e02SHaggai Eran int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc)
142425361e02SHaggai Eran {
142525361e02SHaggai Eran 	struct mlx5_ib_wc *soft_wc;
142625361e02SHaggai Eran 	struct mlx5_ib_cq *cq = to_mcq(ibcq);
142725361e02SHaggai Eran 	unsigned long flags;
142825361e02SHaggai Eran 
142925361e02SHaggai Eran 	soft_wc = kmalloc(sizeof(*soft_wc), GFP_ATOMIC);
143025361e02SHaggai Eran 	if (!soft_wc)
143125361e02SHaggai Eran 		return -ENOMEM;
143225361e02SHaggai Eran 
143325361e02SHaggai Eran 	soft_wc->wc = *wc;
143425361e02SHaggai Eran 	spin_lock_irqsave(&cq->lock, flags);
143525361e02SHaggai Eran 	list_add_tail(&soft_wc->list, &cq->wc_list);
143625361e02SHaggai Eran 	if (cq->notify_flags == IB_CQ_NEXT_COMP ||
143725361e02SHaggai Eran 	    wc->status != IB_WC_SUCCESS) {
143825361e02SHaggai Eran 		cq->notify_flags = 0;
143925361e02SHaggai Eran 		schedule_work(&cq->notify_work);
144025361e02SHaggai Eran 	}
144125361e02SHaggai Eran 	spin_unlock_irqrestore(&cq->lock, flags);
144225361e02SHaggai Eran 
144325361e02SHaggai Eran 	return 0;
144425361e02SHaggai Eran }
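/*
 * Sketch of the consumer side: soft WCs queued here are drained by the
 * poll path ahead of hardware CQEs.  A minimal drain loop, run under
 * cq->lock, might look as follows (hypothetical helper; the driver's
 * real equivalent lives in the poll_cq path earlier in this file):
 */
static int example_drain_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
				 struct ib_wc *wc)
{
	struct mlx5_ib_wc *soft_wc, *next;
	int npolled = 0;

	list_for_each_entry_safe(soft_wc, next, &cq->wc_list, list) {
		if (npolled >= num_entries)
			break;
		wc[npolled++] = soft_wc->wc;
		list_del(&soft_wc->list);
		kfree(soft_wc);
	}
	return npolled;
}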