1 /*
2  * Copyright (c) 2016-2017 Hisilicon Limited.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/acpi.h>
34 #include <linux/etherdevice.h>
35 #include <linux/interrupt.h>
36 #include <linux/kernel.h>
37 #include <linux/types.h>
38 #include <net/addrconf.h>
39 #include <rdma/ib_addr.h>
40 #include <rdma/ib_cache.h>
41 #include <rdma/ib_umem.h>
42 #include <rdma/uverbs_ioctl.h>
43 
44 #include "hnae3.h"
45 #include "hns_roce_common.h"
46 #include "hns_roce_device.h"
47 #include "hns_roce_cmd.h"
48 #include "hns_roce_hem.h"
49 #include "hns_roce_hw_v2.h"
50 
51 static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
52 			    struct ib_sge *sg)
53 {
54 	dseg->lkey = cpu_to_le32(sg->lkey);
55 	dseg->addr = cpu_to_le64(sg->addr);
56 	dseg->len  = cpu_to_le32(sg->length);
57 }
58 
59 static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
60 			 struct hns_roce_wqe_frmr_seg *fseg,
61 			 const struct ib_reg_wr *wr)
62 {
63 	struct hns_roce_mr *mr = to_hr_mr(wr->mr);
64 
65 	/* use ib_access_flags */
66 	roce_set_bit(rc_sq_wqe->byte_4,
67 		     V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S,
68 		     wr->access & IB_ACCESS_MW_BIND ? 1 : 0);
69 	roce_set_bit(rc_sq_wqe->byte_4,
70 		     V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S,
71 		     wr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
72 	roce_set_bit(rc_sq_wqe->byte_4,
73 		     V2_RC_FRMR_WQE_BYTE_4_RR_S,
74 		     wr->access & IB_ACCESS_REMOTE_READ ? 1 : 0);
75 	roce_set_bit(rc_sq_wqe->byte_4,
76 		     V2_RC_FRMR_WQE_BYTE_4_RW_S,
77 		     wr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
78 	roce_set_bit(rc_sq_wqe->byte_4,
79 		     V2_RC_FRMR_WQE_BYTE_4_LW_S,
80 		     wr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
81 
82 	/* Data structure reuse may lead to confusion */
83 	rc_sq_wqe->msg_len = cpu_to_le32(mr->pbl_ba & 0xffffffff);
84 	rc_sq_wqe->inv_key = cpu_to_le32(mr->pbl_ba >> 32);
85 
86 	rc_sq_wqe->byte_16 = cpu_to_le32(wr->mr->length & 0xffffffff);
87 	rc_sq_wqe->byte_20 = cpu_to_le32(wr->mr->length >> 32);
88 	rc_sq_wqe->rkey = cpu_to_le32(wr->key);
89 	rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);
90 
91 	fseg->pbl_size = cpu_to_le32(mr->pbl_size);
92 	roce_set_field(fseg->mode_buf_pg_sz,
93 		       V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M,
94 		       V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S,
95 		       mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
96 	roce_set_bit(fseg->mode_buf_pg_sz,
97 		     V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S, 0);
98 }
99 
100 static void set_atomic_seg(struct hns_roce_wqe_atomic_seg *aseg,
101 			   const struct ib_atomic_wr *wr)
102 {
103 	if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
104 		aseg->fetchadd_swap_data = cpu_to_le64(wr->swap);
105 		aseg->cmp_data  = cpu_to_le64(wr->compare_add);
106 	} else {
107 		aseg->fetchadd_swap_data = cpu_to_le64(wr->compare_add);
108 		aseg->cmp_data  = 0;
109 	}
110 }
111 
112 static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
113 			   unsigned int *sge_ind)
114 {
115 	struct hns_roce_v2_wqe_data_seg *dseg;
116 	struct ib_sge *sg;
117 	int num_in_wqe = 0;
118 	int extend_sge_num;
119 	int fi_sge_num;
120 	int se_sge_num;
121 	int shift;
122 	int i;
123 
124 	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
125 		num_in_wqe = HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE;
126 	extend_sge_num = wr->num_sge - num_in_wqe;
127 	sg = wr->sg_list + num_in_wqe;
128 	shift = qp->hr_buf.page_shift;
129 
130 	/*
131 	 * Check whether wr->num_sge sges are in the same page. If not, we
132 	 * should calculate how many sges in the first page and the second
133 	 * page.
134 	 */
135 	dseg = get_send_extend_sge(qp, (*sge_ind) & (qp->sge.sge_cnt - 1));
136 	fi_sge_num = (round_up((uintptr_t)dseg, 1 << shift) -
137 		      (uintptr_t)dseg) /
138 		      sizeof(struct hns_roce_v2_wqe_data_seg);
139 	if (extend_sge_num > fi_sge_num) {
140 		se_sge_num = extend_sge_num - fi_sge_num;
141 		for (i = 0; i < fi_sge_num; i++) {
142 			set_data_seg_v2(dseg++, sg + i);
143 			(*sge_ind)++;
144 		}
145 		dseg = get_send_extend_sge(qp,
146 					   (*sge_ind) & (qp->sge.sge_cnt - 1));
147 		for (i = 0; i < se_sge_num; i++) {
148 			set_data_seg_v2(dseg++, sg + fi_sge_num + i);
149 			(*sge_ind)++;
150 		}
151 	} else {
152 		for (i = 0; i < extend_sge_num; i++) {
153 			set_data_seg_v2(dseg++, sg + i);
154 			(*sge_ind)++;
155 		}
156 	}
157 }
158 
159 static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
160 			     struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
161 			     void *wqe, unsigned int *sge_ind,
162 			     const struct ib_send_wr **bad_wr)
163 {
164 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
165 	struct hns_roce_v2_wqe_data_seg *dseg = wqe;
166 	struct hns_roce_qp *qp = to_hr_qp(ibqp);
167 	int i;
168 
169 	if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
170 		if (le32_to_cpu(rc_sq_wqe->msg_len) >
171 		    hr_dev->caps.max_sq_inline) {
172 			*bad_wr = wr;
173 			dev_err(hr_dev->dev, "inline len(1-%d)=%d, illegal",
174 				rc_sq_wqe->msg_len, hr_dev->caps.max_sq_inline);
175 			return -EINVAL;
176 		}
177 
178 		if (wr->opcode == IB_WR_RDMA_READ) {
179 			*bad_wr =  wr;
180 			dev_err(hr_dev->dev, "Not support inline data!\n");
181 			return -EINVAL;
182 		}
183 
184 		for (i = 0; i < wr->num_sge; i++) {
185 			memcpy(wqe, ((void *)wr->sg_list[i].addr),
186 			       wr->sg_list[i].length);
187 			wqe += wr->sg_list[i].length;
188 		}
189 
190 		roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
191 			     1);
192 	} else {
193 		if (wr->num_sge <= HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) {
194 			for (i = 0; i < wr->num_sge; i++) {
195 				if (likely(wr->sg_list[i].length)) {
196 					set_data_seg_v2(dseg, wr->sg_list + i);
197 					dseg++;
198 				}
199 			}
200 		} else {
201 			roce_set_field(rc_sq_wqe->byte_20,
202 				     V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
203 				     V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
204 				     (*sge_ind) & (qp->sge.sge_cnt - 1));
205 
206 			for (i = 0; i < HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE; i++) {
207 				if (likely(wr->sg_list[i].length)) {
208 					set_data_seg_v2(dseg, wr->sg_list + i);
209 					dseg++;
210 				}
211 			}
212 
213 			set_extend_sge(qp, wr, sge_ind);
214 		}
215 
216 		roce_set_field(rc_sq_wqe->byte_16,
217 			       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
218 			       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, wr->num_sge);
219 	}
220 
221 	return 0;
222 }
223 
224 static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
225 				 const struct ib_qp_attr *attr,
226 				 int attr_mask, enum ib_qp_state cur_state,
227 				 enum ib_qp_state new_state);
228 
229 static int hns_roce_v2_post_send(struct ib_qp *ibqp,
230 				 const struct ib_send_wr *wr,
231 				 const struct ib_send_wr **bad_wr)
232 {
233 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
234 	struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
235 	struct hns_roce_v2_ud_send_wqe *ud_sq_wqe;
236 	struct hns_roce_v2_rc_send_wqe *rc_sq_wqe;
237 	struct hns_roce_qp *qp = to_hr_qp(ibqp);
238 	struct hns_roce_wqe_frmr_seg *fseg;
239 	struct device *dev = hr_dev->dev;
240 	struct hns_roce_v2_db sq_db;
241 	struct ib_qp_attr attr;
242 	unsigned int sge_ind;
243 	unsigned int owner_bit;
244 	unsigned long flags;
245 	unsigned int ind;
246 	void *wqe = NULL;
247 	bool loopback;
248 	int attr_mask;
249 	u32 tmp_len;
250 	int ret = 0;
251 	u32 hr_op;
252 	u8 *smac;
253 	int nreq;
254 	int i;
255 
256 	if (unlikely(ibqp->qp_type != IB_QPT_RC &&
257 		     ibqp->qp_type != IB_QPT_GSI &&
258 		     ibqp->qp_type != IB_QPT_UD)) {
259 		dev_err(dev, "Not supported QP(0x%x)type!\n", ibqp->qp_type);
260 		*bad_wr = wr;
261 		return -EOPNOTSUPP;
262 	}
263 
264 	if (unlikely(qp->state == IB_QPS_RESET || qp->state == IB_QPS_INIT ||
265 		     qp->state == IB_QPS_RTR)) {
266 		dev_err(dev, "Post WQE fail, QP state %d err!\n", qp->state);
267 		*bad_wr = wr;
268 		return -EINVAL;
269 	}
270 
271 	spin_lock_irqsave(&qp->sq.lock, flags);
272 	ind = qp->sq_next_wqe;
273 	sge_ind = qp->next_sge;
274 
275 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
276 		if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
277 			ret = -ENOMEM;
278 			*bad_wr = wr;
279 			goto out;
280 		}
281 
282 		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
283 			dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
284 				wr->num_sge, qp->sq.max_gs);
285 			ret = -EINVAL;
286 			*bad_wr = wr;
287 			goto out;
288 		}
289 
290 		wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
291 		qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
292 								      wr->wr_id;
293 
294 		owner_bit =
295 		       ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
296 		tmp_len = 0;
297 
298 		/* Corresponding to the QP type, wqe process separately */
299 		if (ibqp->qp_type == IB_QPT_GSI) {
300 			ud_sq_wqe = wqe;
301 			memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe));
302 
303 			roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_0_M,
304 				       V2_UD_SEND_WQE_DMAC_0_S, ah->av.mac[0]);
305 			roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_1_M,
306 				       V2_UD_SEND_WQE_DMAC_1_S, ah->av.mac[1]);
307 			roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_2_M,
308 				       V2_UD_SEND_WQE_DMAC_2_S, ah->av.mac[2]);
309 			roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_3_M,
310 				       V2_UD_SEND_WQE_DMAC_3_S, ah->av.mac[3]);
311 			roce_set_field(ud_sq_wqe->byte_48,
312 				       V2_UD_SEND_WQE_BYTE_48_DMAC_4_M,
313 				       V2_UD_SEND_WQE_BYTE_48_DMAC_4_S,
314 				       ah->av.mac[4]);
315 			roce_set_field(ud_sq_wqe->byte_48,
316 				       V2_UD_SEND_WQE_BYTE_48_DMAC_5_M,
317 				       V2_UD_SEND_WQE_BYTE_48_DMAC_5_S,
318 				       ah->av.mac[5]);
319 
320 			/* MAC loopback */
321 			smac = (u8 *)hr_dev->dev_addr[qp->port];
322 			loopback = ether_addr_equal_unaligned(ah->av.mac,
323 							      smac) ? 1 : 0;
324 
325 			roce_set_bit(ud_sq_wqe->byte_40,
326 				     V2_UD_SEND_WQE_BYTE_40_LBI_S, loopback);
327 
328 			roce_set_field(ud_sq_wqe->byte_4,
329 				       V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
330 				       V2_UD_SEND_WQE_BYTE_4_OPCODE_S,
331 				       HNS_ROCE_V2_WQE_OP_SEND);
332 
333 			for (i = 0; i < wr->num_sge; i++)
334 				tmp_len += wr->sg_list[i].length;
335 
336 			ud_sq_wqe->msg_len =
337 			 cpu_to_le32(le32_to_cpu(ud_sq_wqe->msg_len) + tmp_len);
338 
339 			switch (wr->opcode) {
340 			case IB_WR_SEND_WITH_IMM:
341 			case IB_WR_RDMA_WRITE_WITH_IMM:
342 				ud_sq_wqe->immtdata =
343 				      cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
344 				break;
345 			default:
346 				ud_sq_wqe->immtdata = 0;
347 				break;
348 			}
349 
350 			/* Set sig attr */
351 			roce_set_bit(ud_sq_wqe->byte_4,
352 				   V2_UD_SEND_WQE_BYTE_4_CQE_S,
353 				   (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
354 
355 			/* Set se attr */
356 			roce_set_bit(ud_sq_wqe->byte_4,
357 				  V2_UD_SEND_WQE_BYTE_4_SE_S,
358 				  (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
359 
360 			roce_set_bit(ud_sq_wqe->byte_4,
361 				     V2_UD_SEND_WQE_BYTE_4_OWNER_S, owner_bit);
362 
363 			roce_set_field(ud_sq_wqe->byte_16,
364 				       V2_UD_SEND_WQE_BYTE_16_PD_M,
365 				       V2_UD_SEND_WQE_BYTE_16_PD_S,
366 				       to_hr_pd(ibqp->pd)->pdn);
367 
368 			roce_set_field(ud_sq_wqe->byte_16,
369 				       V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
370 				       V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S,
371 				       wr->num_sge);
372 
373 			roce_set_field(ud_sq_wqe->byte_20,
374 				     V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
375 				     V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
376 				     sge_ind & (qp->sge.sge_cnt - 1));
377 
378 			roce_set_field(ud_sq_wqe->byte_24,
379 				       V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
380 				       V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, 0);
381 			ud_sq_wqe->qkey =
382 			     cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
383 			     qp->qkey : ud_wr(wr)->remote_qkey);
384 			roce_set_field(ud_sq_wqe->byte_32,
385 				       V2_UD_SEND_WQE_BYTE_32_DQPN_M,
386 				       V2_UD_SEND_WQE_BYTE_32_DQPN_S,
387 				       ud_wr(wr)->remote_qpn);
388 
389 			roce_set_field(ud_sq_wqe->byte_36,
390 				       V2_UD_SEND_WQE_BYTE_36_VLAN_M,
391 				       V2_UD_SEND_WQE_BYTE_36_VLAN_S,
392 				       le16_to_cpu(ah->av.vlan));
393 			roce_set_field(ud_sq_wqe->byte_36,
394 				       V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
395 				       V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S,
396 				       ah->av.hop_limit);
397 			roce_set_field(ud_sq_wqe->byte_36,
398 				       V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
399 				       V2_UD_SEND_WQE_BYTE_36_TCLASS_S,
400 				       ah->av.tclass);
401 			roce_set_field(ud_sq_wqe->byte_40,
402 				       V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
403 				       V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S,
404 				       ah->av.flowlabel);
405 			roce_set_field(ud_sq_wqe->byte_40,
406 				       V2_UD_SEND_WQE_BYTE_40_SL_M,
407 				       V2_UD_SEND_WQE_BYTE_40_SL_S,
408 				       ah->av.sl);
409 			roce_set_field(ud_sq_wqe->byte_40,
410 				       V2_UD_SEND_WQE_BYTE_40_PORTN_M,
411 				       V2_UD_SEND_WQE_BYTE_40_PORTN_S,
412 				       qp->port);
413 
414 			roce_set_bit(ud_sq_wqe->byte_40,
415 				     V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S,
416 				     ah->av.vlan_en ? 1 : 0);
417 			roce_set_field(ud_sq_wqe->byte_48,
418 				       V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M,
419 				       V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S,
420 				       hns_get_gid_index(hr_dev, qp->phy_port,
421 							 ah->av.gid_index));
422 
423 			memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0],
424 			       GID_LEN_V2);
425 
426 			set_extend_sge(qp, wr, &sge_ind);
427 			ind++;
428 		} else if (ibqp->qp_type == IB_QPT_RC) {
429 			rc_sq_wqe = wqe;
430 			memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
431 			for (i = 0; i < wr->num_sge; i++)
432 				tmp_len += wr->sg_list[i].length;
433 
434 			rc_sq_wqe->msg_len =
435 			 cpu_to_le32(le32_to_cpu(rc_sq_wqe->msg_len) + tmp_len);
436 
437 			switch (wr->opcode) {
438 			case IB_WR_SEND_WITH_IMM:
439 			case IB_WR_RDMA_WRITE_WITH_IMM:
440 				rc_sq_wqe->immtdata =
441 				      cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
442 				break;
443 			case IB_WR_SEND_WITH_INV:
444 				rc_sq_wqe->inv_key =
445 					cpu_to_le32(wr->ex.invalidate_rkey);
446 				break;
447 			default:
448 				rc_sq_wqe->immtdata = 0;
449 				break;
450 			}
451 
452 			roce_set_bit(rc_sq_wqe->byte_4,
453 				     V2_RC_SEND_WQE_BYTE_4_FENCE_S,
454 				     (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);
455 
456 			roce_set_bit(rc_sq_wqe->byte_4,
457 				  V2_RC_SEND_WQE_BYTE_4_SE_S,
458 				  (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);
459 
460 			roce_set_bit(rc_sq_wqe->byte_4,
461 				   V2_RC_SEND_WQE_BYTE_4_CQE_S,
462 				   (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);
463 
464 			roce_set_bit(rc_sq_wqe->byte_4,
465 				     V2_RC_SEND_WQE_BYTE_4_OWNER_S, owner_bit);
466 
467 			wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
468 			switch (wr->opcode) {
469 			case IB_WR_RDMA_READ:
470 				hr_op = HNS_ROCE_V2_WQE_OP_RDMA_READ;
471 				rc_sq_wqe->rkey =
472 					cpu_to_le32(rdma_wr(wr)->rkey);
473 				rc_sq_wqe->va =
474 					cpu_to_le64(rdma_wr(wr)->remote_addr);
475 				break;
476 			case IB_WR_RDMA_WRITE:
477 				hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE;
478 				rc_sq_wqe->rkey =
479 					cpu_to_le32(rdma_wr(wr)->rkey);
480 				rc_sq_wqe->va =
481 					cpu_to_le64(rdma_wr(wr)->remote_addr);
482 				break;
483 			case IB_WR_RDMA_WRITE_WITH_IMM:
484 				hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM;
485 				rc_sq_wqe->rkey =
486 					cpu_to_le32(rdma_wr(wr)->rkey);
487 				rc_sq_wqe->va =
488 					cpu_to_le64(rdma_wr(wr)->remote_addr);
489 				break;
490 			case IB_WR_SEND:
491 				hr_op = HNS_ROCE_V2_WQE_OP_SEND;
492 				break;
493 			case IB_WR_SEND_WITH_INV:
494 				hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_INV;
495 				break;
496 			case IB_WR_SEND_WITH_IMM:
497 				hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM;
498 				break;
499 			case IB_WR_LOCAL_INV:
500 				hr_op = HNS_ROCE_V2_WQE_OP_LOCAL_INV;
501 				roce_set_bit(rc_sq_wqe->byte_4,
502 					       V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
503 				rc_sq_wqe->inv_key =
504 					    cpu_to_le32(wr->ex.invalidate_rkey);
505 				break;
506 			case IB_WR_REG_MR:
507 				hr_op = HNS_ROCE_V2_WQE_OP_FAST_REG_PMR;
508 				fseg = wqe;
509 				set_frmr_seg(rc_sq_wqe, fseg, reg_wr(wr));
510 				break;
511 			case IB_WR_ATOMIC_CMP_AND_SWP:
512 				hr_op = HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP;
513 				rc_sq_wqe->rkey =
514 					cpu_to_le32(atomic_wr(wr)->rkey);
515 				rc_sq_wqe->va =
516 					cpu_to_le64(atomic_wr(wr)->remote_addr);
517 				break;
518 			case IB_WR_ATOMIC_FETCH_AND_ADD:
519 				hr_op = HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD;
520 				rc_sq_wqe->rkey =
521 					cpu_to_le32(atomic_wr(wr)->rkey);
522 				rc_sq_wqe->va =
523 					cpu_to_le64(atomic_wr(wr)->remote_addr);
524 				break;
525 			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
526 				hr_op =
527 				       HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP;
528 				break;
529 			case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
530 				hr_op =
531 				      HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD;
532 				break;
533 			default:
534 				hr_op = HNS_ROCE_V2_WQE_OP_MASK;
535 				break;
536 			}
537 
538 			roce_set_field(rc_sq_wqe->byte_4,
539 				       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
540 				       V2_RC_SEND_WQE_BYTE_4_OPCODE_S, hr_op);
541 
542 			if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
543 			    wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
544 				struct hns_roce_v2_wqe_data_seg *dseg;
545 
546 				dseg = wqe;
547 				set_data_seg_v2(dseg, wr->sg_list);
548 				wqe += sizeof(struct hns_roce_v2_wqe_data_seg);
549 				set_atomic_seg(wqe, atomic_wr(wr));
550 				roce_set_field(rc_sq_wqe->byte_16,
551 					       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
552 					       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
553 					       wr->num_sge);
554 			} else if (wr->opcode != IB_WR_REG_MR) {
555 				ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe,
556 							wqe, &sge_ind, bad_wr);
557 				if (ret)
558 					goto out;
559 			}
560 
561 			ind++;
562 		} else {
563 			dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type);
564 			spin_unlock_irqrestore(&qp->sq.lock, flags);
565 			*bad_wr = wr;
566 			return -EOPNOTSUPP;
567 		}
568 	}
569 
570 out:
571 	if (likely(nreq)) {
572 		qp->sq.head += nreq;
573 		/* Memory barrier */
574 		wmb();
575 
576 		sq_db.byte_4 = 0;
577 		sq_db.parameter = 0;
578 
579 		roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_TAG_M,
580 			       V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn);
581 		roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
582 			       V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
583 		roce_set_field(sq_db.parameter, V2_DB_PARAMETER_IDX_M,
584 			       V2_DB_PARAMETER_IDX_S,
585 			       qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1));
586 		roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
587 			       V2_DB_PARAMETER_SL_S, qp->sl);
588 
589 		hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg_l);
590 
591 		qp->sq_next_wqe = ind;
592 		qp->next_sge = sge_ind;
593 
594 		if (qp->state == IB_QPS_ERR) {
595 			attr_mask = IB_QP_STATE;
596 			attr.qp_state = IB_QPS_ERR;
597 
598 			ret = hns_roce_v2_modify_qp(&qp->ibqp, &attr, attr_mask,
599 						    qp->state, IB_QPS_ERR);
600 			if (ret) {
601 				spin_unlock_irqrestore(&qp->sq.lock, flags);
602 				*bad_wr = wr;
603 				return ret;
604 			}
605 		}
606 	}
607 
608 	spin_unlock_irqrestore(&qp->sq.lock, flags);
609 
610 	return ret;
611 }
612 
613 static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
614 				 const struct ib_recv_wr *wr,
615 				 const struct ib_recv_wr **bad_wr)
616 {
617 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
618 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
619 	struct hns_roce_v2_wqe_data_seg *dseg;
620 	struct hns_roce_rinl_sge *sge_list;
621 	struct device *dev = hr_dev->dev;
622 	struct ib_qp_attr attr;
623 	unsigned long flags;
624 	void *wqe = NULL;
625 	int attr_mask;
626 	int ret = 0;
627 	int nreq;
628 	int ind;
629 	int i;
630 
631 	spin_lock_irqsave(&hr_qp->rq.lock, flags);
632 	ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);
633 
634 	if (hr_qp->state == IB_QPS_RESET) {
635 		spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
636 		*bad_wr = wr;
637 		return -EINVAL;
638 	}
639 
640 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
641 		if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
642 			hr_qp->ibqp.recv_cq)) {
643 			ret = -ENOMEM;
644 			*bad_wr = wr;
645 			goto out;
646 		}
647 
648 		if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
649 			dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n",
650 				wr->num_sge, hr_qp->rq.max_gs);
651 			ret = -EINVAL;
652 			*bad_wr = wr;
653 			goto out;
654 		}
655 
656 		wqe = get_recv_wqe(hr_qp, ind);
657 		dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
658 		for (i = 0; i < wr->num_sge; i++) {
659 			if (!wr->sg_list[i].length)
660 				continue;
661 			set_data_seg_v2(dseg, wr->sg_list + i);
662 			dseg++;
663 		}
664 
665 		if (i < hr_qp->rq.max_gs) {
666 			dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
667 			dseg->addr = 0;
668 		}
669 
670 		/* rq support inline data */
671 		if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
672 			sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
673 			hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt =
674 							       (u32)wr->num_sge;
675 			for (i = 0; i < wr->num_sge; i++) {
676 				sge_list[i].addr =
677 					       (void *)(u64)wr->sg_list[i].addr;
678 				sge_list[i].len = wr->sg_list[i].length;
679 			}
680 		}
681 
682 		hr_qp->rq.wrid[ind] = wr->wr_id;
683 
684 		ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
685 	}
686 
687 out:
688 	if (likely(nreq)) {
689 		hr_qp->rq.head += nreq;
690 		/* Memory barrier */
691 		wmb();
692 
693 		*hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff;
694 
695 		if (hr_qp->state == IB_QPS_ERR) {
696 			attr_mask = IB_QP_STATE;
697 			attr.qp_state = IB_QPS_ERR;
698 
699 			ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, &attr,
700 						    attr_mask, hr_qp->state,
701 						    IB_QPS_ERR);
702 			if (ret) {
703 				spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
704 				*bad_wr = wr;
705 				return ret;
706 			}
707 		}
708 	}
709 	spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
710 
711 	return ret;
712 }
713 
714 static int hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
715 				      unsigned long instance_stage,
716 				      unsigned long reset_stage)
717 {
718 	/* When hardware reset has been completed once or more, we should stop
719 	 * sending mailbox&cmq&doorbell to hardware. If now in .init_instance()
720 	 * function, we should exit with error. If now at HNAE3_INIT_CLIENT
721 	 * stage of soft reset process, we should exit with error, and then
722 	 * HNAE3_INIT_CLIENT related process can rollback the operation like
723 	 * notifing hardware to free resources, HNAE3_INIT_CLIENT related
724 	 * process will exit with error to notify NIC driver to reschedule soft
725 	 * reset process once again.
726 	 */
727 	hr_dev->is_reset = true;
728 	hr_dev->dis_db = true;
729 
730 	if (reset_stage == HNS_ROCE_STATE_RST_INIT ||
731 	    instance_stage == HNS_ROCE_STATE_INIT)
732 		return CMD_RST_PRC_EBUSY;
733 
734 	return CMD_RST_PRC_SUCCESS;
735 }
736 
737 static int hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
738 					unsigned long instance_stage,
739 					unsigned long reset_stage)
740 {
741 	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
742 	struct hnae3_handle *handle = priv->handle;
743 	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
744 
745 	/* When hardware reset is detected, we should stop sending mailbox&cmq&
746 	 * doorbell to hardware. If now in .init_instance() function, we should
747 	 * exit with error. If now at HNAE3_INIT_CLIENT stage of soft reset
748 	 * process, we should exit with error, and then HNAE3_INIT_CLIENT
749 	 * related process can rollback the operation like notifing hardware to
750 	 * free resources, HNAE3_INIT_CLIENT related process will exit with
751 	 * error to notify NIC driver to reschedule soft reset process once
752 	 * again.
753 	 */
754 	hr_dev->dis_db = true;
755 	if (!ops->get_hw_reset_stat(handle))
756 		hr_dev->is_reset = true;
757 
758 	if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
759 	    instance_stage == HNS_ROCE_STATE_INIT)
760 		return CMD_RST_PRC_EBUSY;
761 
762 	return CMD_RST_PRC_SUCCESS;
763 }
764 
765 static int hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
766 {
767 	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
768 	struct hnae3_handle *handle = priv->handle;
769 	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
770 
771 	/* When software reset is detected at .init_instance() function, we
772 	 * should stop sending mailbox&cmq&doorbell to hardware, and exit
773 	 * with error.
774 	 */
775 	hr_dev->dis_db = true;
776 	if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt)
777 		hr_dev->is_reset = true;
778 
779 	return CMD_RST_PRC_EBUSY;
780 }
781 
782 static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
783 {
784 	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
785 	struct hnae3_handle *handle = priv->handle;
786 	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
787 	unsigned long instance_stage;	/* the current instance stage */
788 	unsigned long reset_stage;	/* the current reset stage */
789 	unsigned long reset_cnt;
790 	bool sw_resetting;
791 	bool hw_resetting;
792 
793 	if (hr_dev->is_reset)
794 		return CMD_RST_PRC_SUCCESS;
795 
796 	/* Get information about reset from NIC driver or RoCE driver itself,
797 	 * the meaning of the following variables from NIC driver are described
798 	 * as below:
799 	 * reset_cnt -- The count value of completed hardware reset.
800 	 * hw_resetting -- Whether hardware device is resetting now.
801 	 * sw_resetting -- Whether NIC's software reset process is running now.
802 	 */
803 	instance_stage = handle->rinfo.instance_state;
804 	reset_stage = handle->rinfo.reset_state;
805 	reset_cnt = ops->ae_dev_reset_cnt(handle);
806 	hw_resetting = ops->get_hw_reset_stat(handle);
807 	sw_resetting = ops->ae_dev_resetting(handle);
808 
809 	if (reset_cnt != hr_dev->reset_cnt)
810 		return hns_roce_v2_cmd_hw_reseted(hr_dev, instance_stage,
811 						  reset_stage);
812 	else if (hw_resetting)
813 		return hns_roce_v2_cmd_hw_resetting(hr_dev, instance_stage,
814 						    reset_stage);
815 	else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
816 		return hns_roce_v2_cmd_sw_resetting(hr_dev);
817 
818 	return 0;
819 }
820 
821 static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring)
822 {
823 	int ntu = ring->next_to_use;
824 	int ntc = ring->next_to_clean;
825 	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;
826 
827 	return ring->desc_num - used - 1;
828 }
829 
830 static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
831 				   struct hns_roce_v2_cmq_ring *ring)
832 {
833 	int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);
834 
835 	ring->desc = kzalloc(size, GFP_KERNEL);
836 	if (!ring->desc)
837 		return -ENOMEM;
838 
839 	ring->desc_dma_addr = dma_map_single(hr_dev->dev, ring->desc, size,
840 					     DMA_BIDIRECTIONAL);
841 	if (dma_mapping_error(hr_dev->dev, ring->desc_dma_addr)) {
842 		ring->desc_dma_addr = 0;
843 		kfree(ring->desc);
844 		ring->desc = NULL;
845 		return -ENOMEM;
846 	}
847 
848 	return 0;
849 }
850 
851 static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
852 				   struct hns_roce_v2_cmq_ring *ring)
853 {
854 	dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
855 			 ring->desc_num * sizeof(struct hns_roce_cmq_desc),
856 			 DMA_BIDIRECTIONAL);
857 
858 	ring->desc_dma_addr = 0;
859 	kfree(ring->desc);
860 }
861 
862 static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
863 {
864 	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
865 	struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
866 					    &priv->cmq.csq : &priv->cmq.crq;
867 
868 	ring->flag = ring_type;
869 	ring->next_to_clean = 0;
870 	ring->next_to_use = 0;
871 
872 	return hns_roce_alloc_cmq_desc(hr_dev, ring);
873 }
874 
875 static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
876 {
877 	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
878 	struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
879 					    &priv->cmq.csq : &priv->cmq.crq;
880 	dma_addr_t dma = ring->desc_dma_addr;
881 
882 	if (ring_type == TYPE_CSQ) {
883 		roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, (u32)dma);
884 		roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG,
885 			   upper_32_bits(dma));
886 		roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
887 			   ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
888 		roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
889 		roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
890 	} else {
891 		roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma);
892 		roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
893 			   upper_32_bits(dma));
894 		roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG,
895 			   ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
896 		roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0);
897 		roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0);
898 	}
899 }
900 
901 static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
902 {
903 	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
904 	int ret;
905 
906 	/* Setup the queue entries for command queue */
907 	priv->cmq.csq.desc_num = CMD_CSQ_DESC_NUM;
908 	priv->cmq.crq.desc_num = CMD_CRQ_DESC_NUM;
909 
910 	/* Setup the lock for command queue */
911 	spin_lock_init(&priv->cmq.csq.lock);
912 	spin_lock_init(&priv->cmq.crq.lock);
913 
914 	/* Setup Tx write back timeout */
915 	priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;
916 
917 	/* Init CSQ */
918 	ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CSQ);
919 	if (ret) {
920 		dev_err(hr_dev->dev, "Init CSQ error, ret = %d.\n", ret);
921 		return ret;
922 	}
923 
924 	/* Init CRQ */
925 	ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CRQ);
926 	if (ret) {
927 		dev_err(hr_dev->dev, "Init CRQ error, ret = %d.\n", ret);
928 		goto err_crq;
929 	}
930 
931 	/* Init CSQ REG */
932 	hns_roce_cmq_init_regs(hr_dev, TYPE_CSQ);
933 
934 	/* Init CRQ REG */
935 	hns_roce_cmq_init_regs(hr_dev, TYPE_CRQ);
936 
937 	return 0;
938 
939 err_crq:
940 	hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
941 
942 	return ret;
943 }
944 
945 static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
946 {
947 	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
948 
949 	hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
950 	hns_roce_free_cmq_desc(hr_dev, &priv->cmq.crq);
951 }
952 
953 static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
954 					  enum hns_roce_opcode_type opcode,
955 					  bool is_read)
956 {
957 	memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
958 	desc->opcode = cpu_to_le16(opcode);
959 	desc->flag =
960 		cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
961 	if (is_read)
962 		desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
963 	else
964 		desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
965 }
966 
967 static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
968 {
969 	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
970 	u32 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
971 
972 	return head == priv->cmq.csq.next_to_use;
973 }
974 
975 static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
976 {
977 	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
978 	struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
979 	struct hns_roce_cmq_desc *desc;
980 	u16 ntc = csq->next_to_clean;
981 	u32 head;
982 	int clean = 0;
983 
984 	desc = &csq->desc[ntc];
985 	head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
986 	while (head != ntc) {
987 		memset(desc, 0, sizeof(*desc));
988 		ntc++;
989 		if (ntc == csq->desc_num)
990 			ntc = 0;
991 		desc = &csq->desc[ntc];
992 		clean++;
993 	}
994 	csq->next_to_clean = ntc;
995 
996 	return clean;
997 }
998 
999 static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
1000 			       struct hns_roce_cmq_desc *desc, int num)
1001 {
1002 	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
1003 	struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
1004 	struct hns_roce_cmq_desc *desc_to_use;
1005 	bool complete = false;
1006 	u32 timeout = 0;
1007 	int handle = 0;
1008 	u16 desc_ret;
1009 	int ret = 0;
1010 	int ntc;
1011 
1012 	spin_lock_bh(&csq->lock);
1013 
1014 	if (num > hns_roce_cmq_space(csq)) {
1015 		spin_unlock_bh(&csq->lock);
1016 		return -EBUSY;
1017 	}
1018 
1019 	/*
1020 	 * Record the location of desc in the cmq for this time
1021 	 * which will be use for hardware to write back
1022 	 */
1023 	ntc = csq->next_to_use;
1024 
1025 	while (handle < num) {
1026 		desc_to_use = &csq->desc[csq->next_to_use];
1027 		*desc_to_use = desc[handle];
1028 		dev_dbg(hr_dev->dev, "set cmq desc:\n");
1029 		csq->next_to_use++;
1030 		if (csq->next_to_use == csq->desc_num)
1031 			csq->next_to_use = 0;
1032 		handle++;
1033 	}
1034 
1035 	/* Write to hardware */
1036 	roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, csq->next_to_use);
1037 
1038 	/*
1039 	 * If the command is sync, wait for the firmware to write back,
1040 	 * if multi descriptors to be sent, use the first one to check
1041 	 */
1042 	if (le16_to_cpu(desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) {
1043 		do {
1044 			if (hns_roce_cmq_csq_done(hr_dev))
1045 				break;
1046 			udelay(1);
1047 			timeout++;
1048 		} while (timeout < priv->cmq.tx_timeout);
1049 	}
1050 
1051 	if (hns_roce_cmq_csq_done(hr_dev)) {
1052 		complete = true;
1053 		handle = 0;
1054 		while (handle < num) {
1055 			/* get the result of hardware write back */
1056 			desc_to_use = &csq->desc[ntc];
1057 			desc[handle] = *desc_to_use;
1058 			dev_dbg(hr_dev->dev, "Get cmq desc:\n");
1059 			desc_ret = le16_to_cpu(desc[handle].retval);
1060 			if (desc_ret == CMD_EXEC_SUCCESS)
1061 				ret = 0;
1062 			else
1063 				ret = -EIO;
1064 			priv->cmq.last_status = desc_ret;
1065 			ntc++;
1066 			handle++;
1067 			if (ntc == csq->desc_num)
1068 				ntc = 0;
1069 		}
1070 	}
1071 
1072 	if (!complete)
1073 		ret = -EAGAIN;
1074 
1075 	/* clean the command send queue */
1076 	handle = hns_roce_cmq_csq_clean(hr_dev);
1077 	if (handle != num)
1078 		dev_warn(hr_dev->dev, "Cleaned %d, need to clean %d\n",
1079 			 handle, num);
1080 
1081 	spin_unlock_bh(&csq->lock);
1082 
1083 	return ret;
1084 }
1085 
1086 static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
1087 			     struct hns_roce_cmq_desc *desc, int num)
1088 {
1089 	int retval;
1090 	int ret;
1091 
1092 	ret = hns_roce_v2_rst_process_cmd(hr_dev);
1093 	if (ret == CMD_RST_PRC_SUCCESS)
1094 		return 0;
1095 	if (ret == CMD_RST_PRC_EBUSY)
1096 		return -EBUSY;
1097 
1098 	ret = __hns_roce_cmq_send(hr_dev, desc, num);
1099 	if (ret) {
1100 		retval = hns_roce_v2_rst_process_cmd(hr_dev);
1101 		if (retval == CMD_RST_PRC_SUCCESS)
1102 			return 0;
1103 		else if (retval == CMD_RST_PRC_EBUSY)
1104 			return -EBUSY;
1105 	}
1106 
1107 	return ret;
1108 }
1109 
1110 static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
1111 {
1112 	struct hns_roce_query_version *resp;
1113 	struct hns_roce_cmq_desc desc;
1114 	int ret;
1115 
1116 	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
1117 	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1118 	if (ret)
1119 		return ret;
1120 
1121 	resp = (struct hns_roce_query_version *)desc.data;
1122 	hr_dev->hw_rev = le16_to_cpu(resp->rocee_hw_version);
1123 	hr_dev->vendor_id = hr_dev->pci_dev->vendor;
1124 
1125 	return 0;
1126 }
1127 
1128 static bool hns_roce_func_clr_chk_rst(struct hns_roce_dev *hr_dev)
1129 {
1130 	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
1131 	struct hnae3_handle *handle = priv->handle;
1132 	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1133 	unsigned long reset_cnt;
1134 	bool sw_resetting;
1135 	bool hw_resetting;
1136 
1137 	reset_cnt = ops->ae_dev_reset_cnt(handle);
1138 	hw_resetting = ops->get_hw_reset_stat(handle);
1139 	sw_resetting = ops->ae_dev_resetting(handle);
1140 
1141 	if (reset_cnt != hr_dev->reset_cnt || hw_resetting || sw_resetting)
1142 		return true;
1143 
1144 	return false;
1145 }
1146 
1147 static void hns_roce_func_clr_rst_prc(struct hns_roce_dev *hr_dev, int retval,
1148 				      int flag)
1149 {
1150 	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
1151 	struct hnae3_handle *handle = priv->handle;
1152 	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
1153 	unsigned long instance_stage;
1154 	unsigned long reset_cnt;
1155 	unsigned long end;
1156 	bool sw_resetting;
1157 	bool hw_resetting;
1158 
1159 	instance_stage = handle->rinfo.instance_state;
1160 	reset_cnt = ops->ae_dev_reset_cnt(handle);
1161 	hw_resetting = ops->get_hw_reset_stat(handle);
1162 	sw_resetting = ops->ae_dev_resetting(handle);
1163 
1164 	if (reset_cnt != hr_dev->reset_cnt) {
1165 		hr_dev->dis_db = true;
1166 		hr_dev->is_reset = true;
1167 		dev_info(hr_dev->dev, "Func clear success after reset.\n");
1168 	} else if (hw_resetting) {
1169 		hr_dev->dis_db = true;
1170 
1171 		dev_warn(hr_dev->dev,
1172 			 "Func clear is pending, device in resetting state.\n");
1173 		end = HNS_ROCE_V2_HW_RST_TIMEOUT;
1174 		while (end) {
1175 			if (!ops->get_hw_reset_stat(handle)) {
1176 				hr_dev->is_reset = true;
1177 				dev_info(hr_dev->dev,
1178 					 "Func clear success after reset.\n");
1179 				return;
1180 			}
1181 			msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
1182 			end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
1183 		}
1184 
1185 		dev_warn(hr_dev->dev, "Func clear failed.\n");
1186 	} else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT) {
1187 		hr_dev->dis_db = true;
1188 
1189 		dev_warn(hr_dev->dev,
1190 			 "Func clear is pending, device in resetting state.\n");
1191 		end = HNS_ROCE_V2_HW_RST_TIMEOUT;
1192 		while (end) {
1193 			if (ops->ae_dev_reset_cnt(handle) !=
1194 			    hr_dev->reset_cnt) {
1195 				hr_dev->is_reset = true;
1196 				dev_info(hr_dev->dev,
1197 					 "Func clear success after sw reset\n");
1198 				return;
1199 			}
1200 			msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
1201 			end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
1202 		}
1203 
1204 		dev_warn(hr_dev->dev, "Func clear failed because of unfinished sw reset\n");
1205 	} else {
1206 		if (retval && !flag)
1207 			dev_warn(hr_dev->dev,
1208 				 "Func clear read failed, ret = %d.\n", retval);
1209 
1210 		dev_warn(hr_dev->dev, "Func clear failed.\n");
1211 	}
1212 }
1213 static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
1214 {
1215 	bool fclr_write_fail_flag = false;
1216 	struct hns_roce_func_clear *resp;
1217 	struct hns_roce_cmq_desc desc;
1218 	unsigned long end;
1219 	int ret = 0;
1220 
1221 	if (hns_roce_func_clr_chk_rst(hr_dev))
1222 		goto out;
1223 
1224 	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR, false);
1225 	resp = (struct hns_roce_func_clear *)desc.data;
1226 
1227 	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1228 	if (ret) {
1229 		fclr_write_fail_flag = true;
1230 		dev_err(hr_dev->dev, "Func clear write failed, ret = %d.\n",
1231 			 ret);
1232 		goto out;
1233 	}
1234 
1235 	msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL);
1236 	end = HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS;
1237 	while (end) {
1238 		if (hns_roce_func_clr_chk_rst(hr_dev))
1239 			goto out;
1240 		msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT);
1241 		end -= HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT;
1242 
1243 		hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR,
1244 					      true);
1245 
1246 		ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1247 		if (ret)
1248 			continue;
1249 
1250 		if (roce_get_bit(resp->func_done, FUNC_CLEAR_RST_FUN_DONE_S)) {
1251 			hr_dev->is_reset = true;
1252 			return;
1253 		}
1254 	}
1255 
1256 out:
1257 	dev_err(hr_dev->dev, "Func clear fail.\n");
1258 	hns_roce_func_clr_rst_prc(hr_dev, ret, fclr_write_fail_flag);
1259 }
1260 
1261 static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
1262 {
1263 	struct hns_roce_query_fw_info *resp;
1264 	struct hns_roce_cmq_desc desc;
1265 	int ret;
1266 
1267 	hns_roce_cmq_setup_basic_desc(&desc, HNS_QUERY_FW_VER, true);
1268 	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1269 	if (ret)
1270 		return ret;
1271 
1272 	resp = (struct hns_roce_query_fw_info *)desc.data;
1273 	hr_dev->caps.fw_ver = (u64)(le32_to_cpu(resp->fw_ver));
1274 
1275 	return 0;
1276 }
1277 
1278 static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
1279 {
1280 	struct hns_roce_cfg_global_param *req;
1281 	struct hns_roce_cmq_desc desc;
1282 
1283 	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
1284 				      false);
1285 
1286 	req = (struct hns_roce_cfg_global_param *)desc.data;
1287 	memset(req, 0, sizeof(*req));
1288 	roce_set_field(req->time_cfg_udp_port,
1289 		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M,
1290 		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S, 0x3e8);
1291 	roce_set_field(req->time_cfg_udp_port,
1292 		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M,
1293 		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S, 0x12b7);
1294 
1295 	return hns_roce_cmq_send(hr_dev, &desc, 1);
1296 }
1297 
1298 static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
1299 {
1300 	struct hns_roce_cmq_desc desc[2];
1301 	struct hns_roce_pf_res_a *req_a;
1302 	struct hns_roce_pf_res_b *req_b;
1303 	int ret;
1304 	int i;
1305 
1306 	for (i = 0; i < 2; i++) {
1307 		hns_roce_cmq_setup_basic_desc(&desc[i],
1308 					      HNS_ROCE_OPC_QUERY_PF_RES, true);
1309 
1310 		if (i == 0)
1311 			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1312 		else
1313 			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1314 	}
1315 
1316 	ret = hns_roce_cmq_send(hr_dev, desc, 2);
1317 	if (ret)
1318 		return ret;
1319 
1320 	req_a = (struct hns_roce_pf_res_a *)desc[0].data;
1321 	req_b = (struct hns_roce_pf_res_b *)desc[1].data;
1322 
1323 	hr_dev->caps.qpc_bt_num = roce_get_field(req_a->qpc_bt_idx_num,
1324 						 PF_RES_DATA_1_PF_QPC_BT_NUM_M,
1325 						 PF_RES_DATA_1_PF_QPC_BT_NUM_S);
1326 	hr_dev->caps.srqc_bt_num = roce_get_field(req_a->srqc_bt_idx_num,
1327 						PF_RES_DATA_2_PF_SRQC_BT_NUM_M,
1328 						PF_RES_DATA_2_PF_SRQC_BT_NUM_S);
1329 	hr_dev->caps.cqc_bt_num = roce_get_field(req_a->cqc_bt_idx_num,
1330 						 PF_RES_DATA_3_PF_CQC_BT_NUM_M,
1331 						 PF_RES_DATA_3_PF_CQC_BT_NUM_S);
1332 	hr_dev->caps.mpt_bt_num = roce_get_field(req_a->mpt_bt_idx_num,
1333 						 PF_RES_DATA_4_PF_MPT_BT_NUM_M,
1334 						 PF_RES_DATA_4_PF_MPT_BT_NUM_S);
1335 
1336 	hr_dev->caps.sl_num = roce_get_field(req_b->qid_idx_sl_num,
1337 					     PF_RES_DATA_3_PF_SL_NUM_M,
1338 					     PF_RES_DATA_3_PF_SL_NUM_S);
1339 	hr_dev->caps.sccc_bt_num = roce_get_field(req_b->sccc_bt_idx_num,
1340 					     PF_RES_DATA_4_PF_SCCC_BT_NUM_M,
1341 					     PF_RES_DATA_4_PF_SCCC_BT_NUM_S);
1342 
1343 	return 0;
1344 }
1345 
1346 static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev)
1347 {
1348 	struct hns_roce_pf_timer_res_a *req_a;
1349 	struct hns_roce_cmq_desc desc[2];
1350 	int ret, i;
1351 
1352 	for (i = 0; i < 2; i++) {
1353 		hns_roce_cmq_setup_basic_desc(&desc[i],
1354 					      HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
1355 					      true);
1356 
1357 		if (i == 0)
1358 			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1359 		else
1360 			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1361 	}
1362 
1363 	ret = hns_roce_cmq_send(hr_dev, desc, 2);
1364 	if (ret)
1365 		return ret;
1366 
1367 	req_a = (struct hns_roce_pf_timer_res_a *)desc[0].data;
1368 
1369 	hr_dev->caps.qpc_timer_bt_num =
1370 				roce_get_field(req_a->qpc_timer_bt_idx_num,
1371 					PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M,
1372 					PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S);
1373 	hr_dev->caps.cqc_timer_bt_num =
1374 				roce_get_field(req_a->cqc_timer_bt_idx_num,
1375 					PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M,
1376 					PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S);
1377 
1378 	return 0;
1379 }
1380 
1381 static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
1382 						  int vf_id)
1383 {
1384 	struct hns_roce_cmq_desc desc;
1385 	struct hns_roce_vf_switch *swt;
1386 	int ret;
1387 
1388 	swt = (struct hns_roce_vf_switch *)desc.data;
1389 	hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true);
1390 	swt->rocee_sel |= cpu_to_le32(HNS_ICL_SWITCH_CMD_ROCEE_SEL);
1391 	roce_set_field(swt->fun_id,
1392 			VF_SWITCH_DATA_FUN_ID_VF_ID_M,
1393 			VF_SWITCH_DATA_FUN_ID_VF_ID_S,
1394 			vf_id);
1395 	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
1396 	if (ret)
1397 		return ret;
1398 	desc.flag =
1399 		cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
1400 	desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
1401 	roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LPBK_S, 1);
1402 	roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S, 0);
1403 	roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S, 1);
1404 
1405 	return hns_roce_cmq_send(hr_dev, &desc, 1);
1406 }
1407 
1408 static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
1409 {
1410 	struct hns_roce_cmq_desc desc[2];
1411 	struct hns_roce_vf_res_a *req_a;
1412 	struct hns_roce_vf_res_b *req_b;
1413 	int i;
1414 
1415 	req_a = (struct hns_roce_vf_res_a *)desc[0].data;
1416 	req_b = (struct hns_roce_vf_res_b *)desc[1].data;
1417 	memset(req_a, 0, sizeof(*req_a));
1418 	memset(req_b, 0, sizeof(*req_b));
1419 	for (i = 0; i < 2; i++) {
1420 		hns_roce_cmq_setup_basic_desc(&desc[i],
1421 					      HNS_ROCE_OPC_ALLOC_VF_RES, false);
1422 
1423 		if (i == 0)
1424 			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1425 		else
1426 			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1427 
1428 		if (i == 0) {
1429 			roce_set_field(req_a->vf_qpc_bt_idx_num,
1430 				       VF_RES_A_DATA_1_VF_QPC_BT_IDX_M,
1431 				       VF_RES_A_DATA_1_VF_QPC_BT_IDX_S, 0);
1432 			roce_set_field(req_a->vf_qpc_bt_idx_num,
1433 				       VF_RES_A_DATA_1_VF_QPC_BT_NUM_M,
1434 				       VF_RES_A_DATA_1_VF_QPC_BT_NUM_S,
1435 				       HNS_ROCE_VF_QPC_BT_NUM);
1436 
1437 			roce_set_field(req_a->vf_srqc_bt_idx_num,
1438 				       VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M,
1439 				       VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S, 0);
1440 			roce_set_field(req_a->vf_srqc_bt_idx_num,
1441 				       VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M,
1442 				       VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S,
1443 				       HNS_ROCE_VF_SRQC_BT_NUM);
1444 
1445 			roce_set_field(req_a->vf_cqc_bt_idx_num,
1446 				       VF_RES_A_DATA_3_VF_CQC_BT_IDX_M,
1447 				       VF_RES_A_DATA_3_VF_CQC_BT_IDX_S, 0);
1448 			roce_set_field(req_a->vf_cqc_bt_idx_num,
1449 				       VF_RES_A_DATA_3_VF_CQC_BT_NUM_M,
1450 				       VF_RES_A_DATA_3_VF_CQC_BT_NUM_S,
1451 				       HNS_ROCE_VF_CQC_BT_NUM);
1452 
1453 			roce_set_field(req_a->vf_mpt_bt_idx_num,
1454 				       VF_RES_A_DATA_4_VF_MPT_BT_IDX_M,
1455 				       VF_RES_A_DATA_4_VF_MPT_BT_IDX_S, 0);
1456 			roce_set_field(req_a->vf_mpt_bt_idx_num,
1457 				       VF_RES_A_DATA_4_VF_MPT_BT_NUM_M,
1458 				       VF_RES_A_DATA_4_VF_MPT_BT_NUM_S,
1459 				       HNS_ROCE_VF_MPT_BT_NUM);
1460 
1461 			roce_set_field(req_a->vf_eqc_bt_idx_num,
1462 				       VF_RES_A_DATA_5_VF_EQC_IDX_M,
1463 				       VF_RES_A_DATA_5_VF_EQC_IDX_S, 0);
1464 			roce_set_field(req_a->vf_eqc_bt_idx_num,
1465 				       VF_RES_A_DATA_5_VF_EQC_NUM_M,
1466 				       VF_RES_A_DATA_5_VF_EQC_NUM_S,
1467 				       HNS_ROCE_VF_EQC_NUM);
1468 		} else {
1469 			roce_set_field(req_b->vf_smac_idx_num,
1470 				       VF_RES_B_DATA_1_VF_SMAC_IDX_M,
1471 				       VF_RES_B_DATA_1_VF_SMAC_IDX_S, 0);
1472 			roce_set_field(req_b->vf_smac_idx_num,
1473 				       VF_RES_B_DATA_1_VF_SMAC_NUM_M,
1474 				       VF_RES_B_DATA_1_VF_SMAC_NUM_S,
1475 				       HNS_ROCE_VF_SMAC_NUM);
1476 
1477 			roce_set_field(req_b->vf_sgid_idx_num,
1478 				       VF_RES_B_DATA_2_VF_SGID_IDX_M,
1479 				       VF_RES_B_DATA_2_VF_SGID_IDX_S, 0);
1480 			roce_set_field(req_b->vf_sgid_idx_num,
1481 				       VF_RES_B_DATA_2_VF_SGID_NUM_M,
1482 				       VF_RES_B_DATA_2_VF_SGID_NUM_S,
1483 				       HNS_ROCE_VF_SGID_NUM);
1484 
1485 			roce_set_field(req_b->vf_qid_idx_sl_num,
1486 				       VF_RES_B_DATA_3_VF_QID_IDX_M,
1487 				       VF_RES_B_DATA_3_VF_QID_IDX_S, 0);
1488 			roce_set_field(req_b->vf_qid_idx_sl_num,
1489 				       VF_RES_B_DATA_3_VF_SL_NUM_M,
1490 				       VF_RES_B_DATA_3_VF_SL_NUM_S,
1491 				       HNS_ROCE_VF_SL_NUM);
1492 
1493 			roce_set_field(req_b->vf_sccc_idx_num,
1494 				       VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M,
1495 				       VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S, 0);
1496 			roce_set_field(req_b->vf_sccc_idx_num,
1497 				       VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M,
1498 				       VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S,
1499 				       HNS_ROCE_VF_SCCC_BT_NUM);
1500 		}
1501 	}
1502 
1503 	return hns_roce_cmq_send(hr_dev, desc, 2);
1504 }
1505 
1506 static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
1507 {
1508 	u8 srqc_hop_num = hr_dev->caps.srqc_hop_num;
1509 	u8 qpc_hop_num = hr_dev->caps.qpc_hop_num;
1510 	u8 cqc_hop_num = hr_dev->caps.cqc_hop_num;
1511 	u8 mpt_hop_num = hr_dev->caps.mpt_hop_num;
1512 	u8 sccc_hop_num = hr_dev->caps.sccc_hop_num;
1513 	struct hns_roce_cfg_bt_attr *req;
1514 	struct hns_roce_cmq_desc desc;
1515 
1516 	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);
1517 	req = (struct hns_roce_cfg_bt_attr *)desc.data;
1518 	memset(req, 0, sizeof(*req));
1519 
1520 	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M,
1521 		       CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S,
1522 		       hr_dev->caps.qpc_ba_pg_sz + PG_SHIFT_OFFSET);
1523 	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M,
1524 		       CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S,
1525 		       hr_dev->caps.qpc_buf_pg_sz + PG_SHIFT_OFFSET);
1526 	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M,
1527 		       CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S,
1528 		       qpc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : qpc_hop_num);
1529 
1530 	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M,
1531 		       CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S,
1532 		       hr_dev->caps.srqc_ba_pg_sz + PG_SHIFT_OFFSET);
1533 	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M,
1534 		       CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S,
1535 		       hr_dev->caps.srqc_buf_pg_sz + PG_SHIFT_OFFSET);
1536 	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M,
1537 		       CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S,
1538 		       srqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : srqc_hop_num);
1539 
1540 	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M,
1541 		       CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S,
1542 		       hr_dev->caps.cqc_ba_pg_sz + PG_SHIFT_OFFSET);
1543 	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M,
1544 		       CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S,
1545 		       hr_dev->caps.cqc_buf_pg_sz + PG_SHIFT_OFFSET);
1546 	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M,
1547 		       CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S,
1548 		       cqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : cqc_hop_num);
1549 
1550 	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M,
1551 		       CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S,
1552 		       hr_dev->caps.mpt_ba_pg_sz + PG_SHIFT_OFFSET);
1553 	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M,
1554 		       CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S,
1555 		       hr_dev->caps.mpt_buf_pg_sz + PG_SHIFT_OFFSET);
1556 	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M,
1557 		       CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S,
1558 		       mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num);
1559 
1560 	roce_set_field(req->vf_sccc_cfg,
1561 		       CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_M,
1562 		       CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_S,
1563 		       hr_dev->caps.sccc_ba_pg_sz + PG_SHIFT_OFFSET);
1564 	roce_set_field(req->vf_sccc_cfg,
1565 		       CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_M,
1566 		       CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_S,
1567 		       hr_dev->caps.sccc_buf_pg_sz + PG_SHIFT_OFFSET);
1568 	roce_set_field(req->vf_sccc_cfg,
1569 		       CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_M,
1570 		       CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_S,
1571 		       sccc_hop_num ==
1572 			      HNS_ROCE_HOP_NUM_0 ? 0 : sccc_hop_num);
1573 
1574 	return hns_roce_cmq_send(hr_dev, &desc, 1);
1575 }
1576 
1577 static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
1578 {
1579 	struct hns_roce_caps *caps = &hr_dev->caps;
1580 	int ret;
1581 
1582 	ret = hns_roce_cmq_query_hw_info(hr_dev);
1583 	if (ret) {
1584 		dev_err(hr_dev->dev, "Query hardware version fail, ret = %d.\n",
1585 			ret);
1586 		return ret;
1587 	}
1588 
1589 	ret = hns_roce_query_fw_ver(hr_dev);
1590 	if (ret) {
1591 		dev_err(hr_dev->dev, "Query firmware version fail, ret = %d.\n",
1592 			ret);
1593 		return ret;
1594 	}
1595 
1596 	ret = hns_roce_config_global_param(hr_dev);
1597 	if (ret) {
1598 		dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n",
1599 			ret);
1600 		return ret;
1601 	}
1602 
1603 	/* Get pf resource owned by every pf */
1604 	ret = hns_roce_query_pf_resource(hr_dev);
1605 	if (ret) {
1606 		dev_err(hr_dev->dev, "Query pf resource fail, ret = %d.\n",
1607 			ret);
1608 		return ret;
1609 	}
1610 
1611 	if (hr_dev->pci_dev->revision == 0x21) {
1612 		ret = hns_roce_query_pf_timer_resource(hr_dev);
1613 		if (ret) {
1614 			dev_err(hr_dev->dev,
1615 				"Query pf timer resource fail, ret = %d.\n",
1616 				ret);
1617 			return ret;
1618 		}
1619 	}
1620 
1621 	ret = hns_roce_alloc_vf_resource(hr_dev);
1622 	if (ret) {
1623 		dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n",
1624 			ret);
1625 		return ret;
1626 	}
1627 
1628 	if (hr_dev->pci_dev->revision == 0x21) {
1629 		ret = hns_roce_set_vf_switch_param(hr_dev, 0);
1630 		if (ret) {
1631 			dev_err(hr_dev->dev,
1632 				"Set function switch param fail, ret = %d.\n",
1633 				ret);
1634 			return ret;
1635 		}
1636 	}
1637 
1638 	hr_dev->vendor_part_id = hr_dev->pci_dev->device;
1639 	hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
1640 
1641 	caps->num_qps		= HNS_ROCE_V2_MAX_QP_NUM;
1642 	caps->max_wqes		= HNS_ROCE_V2_MAX_WQE_NUM;
1643 	caps->num_cqs		= HNS_ROCE_V2_MAX_CQ_NUM;
1644 	caps->num_srqs		= HNS_ROCE_V2_MAX_SRQ_NUM;
1645 	caps->min_cqes		= HNS_ROCE_MIN_CQE_NUM;
1646 	caps->max_cqes		= HNS_ROCE_V2_MAX_CQE_NUM;
1647 	caps->max_srqwqes	= HNS_ROCE_V2_MAX_SRQWQE_NUM;
1648 	caps->max_sq_sg		= HNS_ROCE_V2_MAX_SQ_SGE_NUM;
1649 	caps->max_extend_sg	= HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
1650 	caps->max_rq_sg		= HNS_ROCE_V2_MAX_RQ_SGE_NUM;
1651 	caps->max_sq_inline	= HNS_ROCE_V2_MAX_SQ_INLINE;
1652 	caps->max_srq_sg	= HNS_ROCE_V2_MAX_SRQ_SGE_NUM;
1653 	caps->num_uars		= HNS_ROCE_V2_UAR_NUM;
1654 	caps->phy_num_uars	= HNS_ROCE_V2_PHY_UAR_NUM;
1655 	caps->num_aeq_vectors	= HNS_ROCE_V2_AEQE_VEC_NUM;
1656 	caps->num_comp_vectors	= HNS_ROCE_V2_COMP_VEC_NUM;
1657 	caps->num_other_vectors	= HNS_ROCE_V2_ABNORMAL_VEC_NUM;
1658 	caps->num_mtpts		= HNS_ROCE_V2_MAX_MTPT_NUM;
1659 	caps->num_mtt_segs	= HNS_ROCE_V2_MAX_MTT_SEGS;
1660 	caps->num_cqe_segs	= HNS_ROCE_V2_MAX_CQE_SEGS;
1661 	caps->num_srqwqe_segs	= HNS_ROCE_V2_MAX_SRQWQE_SEGS;
1662 	caps->num_idx_segs	= HNS_ROCE_V2_MAX_IDX_SEGS;
1663 	caps->num_pds		= HNS_ROCE_V2_MAX_PD_NUM;
1664 	caps->max_qp_init_rdma	= HNS_ROCE_V2_MAX_QP_INIT_RDMA;
1665 	caps->max_qp_dest_rdma	= HNS_ROCE_V2_MAX_QP_DEST_RDMA;
1666 	caps->max_sq_desc_sz	= HNS_ROCE_V2_MAX_SQ_DESC_SZ;
1667 	caps->max_rq_desc_sz	= HNS_ROCE_V2_MAX_RQ_DESC_SZ;
1668 	caps->max_srq_desc_sz	= HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
1669 	caps->qpc_entry_sz	= HNS_ROCE_V2_QPC_ENTRY_SZ;
1670 	caps->irrl_entry_sz	= HNS_ROCE_V2_IRRL_ENTRY_SZ;
1671 	caps->trrl_entry_sz	= HNS_ROCE_V2_TRRL_ENTRY_SZ;
1672 	caps->cqc_entry_sz	= HNS_ROCE_V2_CQC_ENTRY_SZ;
1673 	caps->srqc_entry_sz	= HNS_ROCE_V2_SRQC_ENTRY_SZ;
1674 	caps->mtpt_entry_sz	= HNS_ROCE_V2_MTPT_ENTRY_SZ;
1675 	caps->mtt_entry_sz	= HNS_ROCE_V2_MTT_ENTRY_SZ;
1676 	caps->idx_entry_sz	= 4;
1677 	caps->cq_entry_sz	= HNS_ROCE_V2_CQE_ENTRY_SIZE;
1678 	caps->page_size_cap	= HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
1679 	caps->reserved_lkey	= 0;
1680 	caps->reserved_pds	= 0;
1681 	caps->reserved_mrws	= 1;
1682 	caps->reserved_uars	= 0;
1683 	caps->reserved_cqs	= 0;
1684 	caps->reserved_srqs	= 0;
1685 	caps->reserved_qps	= HNS_ROCE_V2_RSV_QPS;
1686 
1687 	caps->qpc_ba_pg_sz	= 0;
1688 	caps->qpc_buf_pg_sz	= 0;
1689 	caps->qpc_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
1690 	caps->srqc_ba_pg_sz	= 0;
1691 	caps->srqc_buf_pg_sz	= 0;
1692 	caps->srqc_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
1693 	caps->cqc_ba_pg_sz	= 0;
1694 	caps->cqc_buf_pg_sz	= 0;
1695 	caps->cqc_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
1696 	caps->mpt_ba_pg_sz	= 0;
1697 	caps->mpt_buf_pg_sz	= 0;
1698 	caps->mpt_hop_num	= HNS_ROCE_CONTEXT_HOP_NUM;
1699 	caps->pbl_ba_pg_sz	= 2;
1700 	caps->pbl_buf_pg_sz	= 0;
1701 	caps->pbl_hop_num	= HNS_ROCE_PBL_HOP_NUM;
1702 	caps->mtt_ba_pg_sz	= 0;
1703 	caps->mtt_buf_pg_sz	= 0;
1704 	caps->mtt_hop_num	= HNS_ROCE_MTT_HOP_NUM;
1705 	caps->wqe_sq_hop_num	= 2;
1706 	caps->wqe_sge_hop_num	= 1;
1707 	caps->wqe_rq_hop_num	= 2;
1708 	caps->cqe_ba_pg_sz	= 6;
1709 	caps->cqe_buf_pg_sz	= 0;
1710 	caps->cqe_hop_num	= HNS_ROCE_CQE_HOP_NUM;
1711 	caps->srqwqe_ba_pg_sz	= 0;
1712 	caps->srqwqe_buf_pg_sz	= 0;
1713 	caps->srqwqe_hop_num	= HNS_ROCE_SRQWQE_HOP_NUM;
1714 	caps->idx_ba_pg_sz	= 0;
1715 	caps->idx_buf_pg_sz	= 0;
1716 	caps->idx_hop_num	= HNS_ROCE_IDX_HOP_NUM;
1717 	caps->eqe_ba_pg_sz	= 0;
1718 	caps->eqe_buf_pg_sz	= 0;
1719 	caps->eqe_hop_num	= HNS_ROCE_EQE_HOP_NUM;
1720 	caps->tsq_buf_pg_sz	= 0;
1721 	caps->chunk_sz		= HNS_ROCE_V2_TABLE_CHUNK_SIZE;
1722 
1723 	caps->flags		= HNS_ROCE_CAP_FLAG_REREG_MR |
1724 				  HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
1725 				  HNS_ROCE_CAP_FLAG_RQ_INLINE |
1726 				  HNS_ROCE_CAP_FLAG_RECORD_DB |
1727 				  HNS_ROCE_CAP_FLAG_SQ_RECORD_DB;
1728 
1729 	if (hr_dev->pci_dev->revision == 0x21)
1730 		caps->flags |= HNS_ROCE_CAP_FLAG_MW |
1731 			       HNS_ROCE_CAP_FLAG_FRMR;
1732 
1733 	caps->pkey_table_len[0] = 1;
1734 	caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
1735 	caps->ceqe_depth	= HNS_ROCE_V2_COMP_EQE_NUM;
1736 	caps->aeqe_depth	= HNS_ROCE_V2_ASYNC_EQE_NUM;
1737 	caps->local_ca_ack_delay = 0;
1738 	caps->max_mtu = IB_MTU_4096;
1739 
1740 	caps->max_srqs		= HNS_ROCE_V2_MAX_SRQ;
1741 	caps->max_srq_wrs	= HNS_ROCE_V2_MAX_SRQ_WR;
1742 	caps->max_srq_sges	= HNS_ROCE_V2_MAX_SRQ_SGE;
1743 
1744 	if (hr_dev->pci_dev->revision == 0x21) {
1745 		caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC |
1746 			       HNS_ROCE_CAP_FLAG_SRQ |
1747 			       HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL;
1748 
1749 		caps->num_qpc_timer	  = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
1750 		caps->qpc_timer_entry_sz  = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
1751 		caps->qpc_timer_ba_pg_sz  = 0;
1752 		caps->qpc_timer_buf_pg_sz = 0;
1753 		caps->qpc_timer_hop_num   = HNS_ROCE_HOP_NUM_0;
1754 		caps->num_cqc_timer	  = HNS_ROCE_V2_MAX_CQC_TIMER_NUM;
1755 		caps->cqc_timer_entry_sz  = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
1756 		caps->cqc_timer_ba_pg_sz  = 0;
1757 		caps->cqc_timer_buf_pg_sz = 0;
1758 		caps->cqc_timer_hop_num   = HNS_ROCE_HOP_NUM_0;
1759 
1760 		caps->sccc_entry_sz	= HNS_ROCE_V2_SCCC_ENTRY_SZ;
1761 		caps->sccc_ba_pg_sz	= 0;
1762 		caps->sccc_buf_pg_sz    = 0;
1763 		caps->sccc_hop_num	= HNS_ROCE_SCCC_HOP_NUM;
1764 	}
1765 
1766 	ret = hns_roce_v2_set_bt(hr_dev);
1767 	if (ret)
1768 		dev_err(hr_dev->dev, "Configure bt attribute fail, ret = %d.\n",
1769 			ret);
1770 
1771 	return ret;
1772 }
1773 
1774 static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
1775 				      enum hns_roce_link_table_type type)
1776 {
1777 	struct hns_roce_cmq_desc desc[2];
1778 	struct hns_roce_cfg_llm_a *req_a =
1779 				(struct hns_roce_cfg_llm_a *)desc[0].data;
1780 	struct hns_roce_cfg_llm_b *req_b =
1781 				(struct hns_roce_cfg_llm_b *)desc[1].data;
1782 	struct hns_roce_v2_priv *priv = hr_dev->priv;
1783 	struct hns_roce_link_table *link_tbl;
1784 	struct hns_roce_link_table_entry *entry;
1785 	enum hns_roce_opcode_type opcode;
1786 	u32 page_num;
1787 	int i;
1788 
1789 	switch (type) {
1790 	case TSQ_LINK_TABLE:
1791 		link_tbl = &priv->tsq;
1792 		opcode = HNS_ROCE_OPC_CFG_EXT_LLM;
1793 		break;
1794 	case TPQ_LINK_TABLE:
1795 		link_tbl = &priv->tpq;
1796 		opcode = HNS_ROCE_OPC_CFG_TMOUT_LLM;
1797 		break;
1798 	default:
1799 		return -EINVAL;
1800 	}
1801 
1802 	page_num = link_tbl->npages;
1803 	entry = link_tbl->table.buf;
1804 	memset(req_a, 0, sizeof(*req_a));
1805 	memset(req_b, 0, sizeof(*req_b));
1806 
1807 	for (i = 0; i < 2; i++) {
1808 		hns_roce_cmq_setup_basic_desc(&desc[i], opcode, false);
1809 
1810 		if (i == 0)
1811 			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1812 		else
1813 			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
1814 
1815 		if (i == 0) {
1816 			req_a->base_addr_l =
1817 				cpu_to_le32(link_tbl->table.map & 0xffffffff);
1818 			req_a->base_addr_h =
1819 				cpu_to_le32(link_tbl->table.map >> 32);
1820 			roce_set_field(req_a->depth_pgsz_init_en,
1821 				       CFG_LLM_QUE_DEPTH_M,
1822 				       CFG_LLM_QUE_DEPTH_S,
1823 				       link_tbl->npages);
1824 			roce_set_field(req_a->depth_pgsz_init_en,
1825 				       CFG_LLM_QUE_PGSZ_M,
1826 				       CFG_LLM_QUE_PGSZ_S,
1827 				       link_tbl->pg_sz);
1828 			req_a->head_ba_l = cpu_to_le32(entry[0].blk_ba0);
1829 			req_a->head_ba_h_nxtptr =
1830 				cpu_to_le32(entry[0].blk_ba1_nxt_ptr);
1831 			roce_set_field(req_a->head_ptr,
1832 				       CFG_LLM_HEAD_PTR_M,
1833 				       CFG_LLM_HEAD_PTR_S, 0);
1834 		} else {
1835 			req_b->tail_ba_l =
1836 				cpu_to_le32(entry[page_num - 1].blk_ba0);
1837 			roce_set_field(req_b->tail_ba_h,
1838 				       CFG_LLM_TAIL_BA_H_M,
1839 				       CFG_LLM_TAIL_BA_H_S,
1840 				       entry[page_num - 1].blk_ba1_nxt_ptr &
1841 				       HNS_ROCE_LINK_TABLE_BA1_M);
1842 			roce_set_field(req_b->tail_ptr,
1843 				       CFG_LLM_TAIL_PTR_M,
1844 				       CFG_LLM_TAIL_PTR_S,
1845 				       (entry[page_num - 2].blk_ba1_nxt_ptr &
1846 				       HNS_ROCE_LINK_TABLE_NXT_PTR_M) >>
1847 				       HNS_ROCE_LINK_TABLE_NXT_PTR_S);
1848 		}
1849 	}
1850 	roce_set_field(req_a->depth_pgsz_init_en,
1851 		       CFG_LLM_INIT_EN_M, CFG_LLM_INIT_EN_S, 1);
1852 
1853 	return hns_roce_cmq_send(hr_dev, desc, 2);
1854 }
1855 
1856 static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev,
1857 				    enum hns_roce_link_table_type type)
1858 {
1859 	struct hns_roce_v2_priv *priv = hr_dev->priv;
1860 	struct hns_roce_link_table *link_tbl;
1861 	struct hns_roce_link_table_entry *entry;
1862 	struct device *dev = hr_dev->dev;
1863 	u32 buf_chk_sz;
1864 	dma_addr_t t;
1865 	int func_num = 1;
1866 	int pg_num_a;
1867 	int pg_num_b;
1868 	int pg_num;
1869 	int size;
1870 	int i;
1871 
1872 	switch (type) {
1873 	case TSQ_LINK_TABLE:
1874 		link_tbl = &priv->tsq;
1875 		buf_chk_sz = 1 << (hr_dev->caps.tsq_buf_pg_sz + PAGE_SHIFT);
1876 		pg_num_a = hr_dev->caps.num_qps * 8 / buf_chk_sz;
1877 		pg_num_b = hr_dev->caps.sl_num * 4 + 2;
1878 		break;
1879 	case TPQ_LINK_TABLE:
1880 		link_tbl = &priv->tpq;
1881 		buf_chk_sz = 1 << (hr_dev->caps.tpq_buf_pg_sz +	PAGE_SHIFT);
1882 		pg_num_a = hr_dev->caps.num_cqs * 4 / buf_chk_sz;
1883 		pg_num_b = 2 * 4 * func_num + 2;
1884 		break;
1885 	default:
1886 		return -EINVAL;
1887 	}
1888 
1889 	pg_num = max(pg_num_a, pg_num_b);
1890 	size = pg_num * sizeof(struct hns_roce_link_table_entry);
1891 
1892 	link_tbl->table.buf = dma_alloc_coherent(dev, size,
1893 						 &link_tbl->table.map,
1894 						 GFP_KERNEL);
1895 	if (!link_tbl->table.buf)
1896 		goto out;
1897 
1898 	link_tbl->pg_list = kcalloc(pg_num, sizeof(*link_tbl->pg_list),
1899 				    GFP_KERNEL);
1900 	if (!link_tbl->pg_list)
1901 		goto err_kcalloc_failed;
1902 
1903 	entry = link_tbl->table.buf;
1904 	for (i = 0; i < pg_num; ++i) {
1905 		link_tbl->pg_list[i].buf = dma_alloc_coherent(dev, buf_chk_sz,
1906 							      &t, GFP_KERNEL);
1907 		if (!link_tbl->pg_list[i].buf)
1908 			goto err_alloc_buf_failed;
1909 
1910 		link_tbl->pg_list[i].map = t;
1911 
1912 		entry[i].blk_ba0 = (u32)(t >> 12);
1913 		entry[i].blk_ba1_nxt_ptr = (u32)(t >> 44);
1914 
1915 		if (i < (pg_num - 1))
1916 			entry[i].blk_ba1_nxt_ptr |=
1917 				(i + 1) << HNS_ROCE_LINK_TABLE_NXT_PTR_S;
1918 
1919 	}
1920 	link_tbl->npages = pg_num;
1921 	link_tbl->pg_sz = buf_chk_sz;
1922 
1923 	return hns_roce_config_link_table(hr_dev, type);
1924 
1925 err_alloc_buf_failed:
1926 	for (i -= 1; i >= 0; i--)
1927 		dma_free_coherent(dev, buf_chk_sz,
1928 				  link_tbl->pg_list[i].buf,
1929 				  link_tbl->pg_list[i].map);
1930 	kfree(link_tbl->pg_list);
1931 
1932 err_kcalloc_failed:
1933 	dma_free_coherent(dev, size, link_tbl->table.buf,
1934 			  link_tbl->table.map);
1935 
1936 out:
1937 	return -ENOMEM;
1938 }
1939 
1940 static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev,
1941 				     struct hns_roce_link_table *link_tbl)
1942 {
1943 	struct device *dev = hr_dev->dev;
1944 	int size;
1945 	int i;
1946 
1947 	size = link_tbl->npages * sizeof(struct hns_roce_link_table_entry);
1948 
1949 	for (i = 0; i < link_tbl->npages; ++i)
1950 		if (link_tbl->pg_list[i].buf)
1951 			dma_free_coherent(dev, link_tbl->pg_sz,
1952 					  link_tbl->pg_list[i].buf,
1953 					  link_tbl->pg_list[i].map);
1954 	kfree(link_tbl->pg_list);
1955 
1956 	dma_free_coherent(dev, size, link_tbl->table.buf,
1957 			  link_tbl->table.map);
1958 }
1959 
1960 static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
1961 {
1962 	struct hns_roce_v2_priv *priv = hr_dev->priv;
1963 	int qpc_count, cqc_count;
1964 	int ret, i;
1965 
1966 	/* TSQ includes SQ doorbell and ack doorbell */
1967 	ret = hns_roce_init_link_table(hr_dev, TSQ_LINK_TABLE);
1968 	if (ret) {
1969 		dev_err(hr_dev->dev, "TSQ init failed, ret = %d.\n", ret);
1970 		return ret;
1971 	}
1972 
1973 	ret = hns_roce_init_link_table(hr_dev, TPQ_LINK_TABLE);
1974 	if (ret) {
1975 		dev_err(hr_dev->dev, "TPQ init failed, ret = %d.\n", ret);
1976 		goto err_tpq_init_failed;
1977 	}
1978 
1979 	/* Alloc memory for QPC Timer buffer space chunk */
1980 	for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num;
1981 	     qpc_count++) {
1982 		ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table,
1983 					 qpc_count);
1984 		if (ret) {
1985 			dev_err(hr_dev->dev, "QPC Timer get failed\n");
1986 			goto err_qpc_timer_failed;
1987 		}
1988 	}
1989 
1990 	/* Alloc memory for CQC Timer buffer space chunk */
1991 	for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num;
1992 	     cqc_count++) {
1993 		ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table,
1994 					 cqc_count);
1995 		if (ret) {
1996 			dev_err(hr_dev->dev, "CQC Timer get failed\n");
1997 			goto err_cqc_timer_failed;
1998 		}
1999 	}
2000 
2001 	return 0;
2002 
2003 err_cqc_timer_failed:
2004 	for (i = 0; i < cqc_count; i++)
2005 		hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);
2006 
2007 err_qpc_timer_failed:
2008 	for (i = 0; i < qpc_count; i++)
2009 		hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);
2010 
2011 	hns_roce_free_link_table(hr_dev, &priv->tpq);
2012 
2013 err_tpq_init_failed:
2014 	hns_roce_free_link_table(hr_dev, &priv->tsq);
2015 
2016 	return ret;
2017 }
2018 
2019 static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
2020 {
2021 	struct hns_roce_v2_priv *priv = hr_dev->priv;
2022 
2023 	if (hr_dev->pci_dev->revision == 0x21)
2024 		hns_roce_function_clear(hr_dev);
2025 
2026 	hns_roce_free_link_table(hr_dev, &priv->tpq);
2027 	hns_roce_free_link_table(hr_dev, &priv->tsq);
2028 }
2029 
2030 static int hns_roce_query_mbox_status(struct hns_roce_dev *hr_dev)
2031 {
2032 	struct hns_roce_cmq_desc desc;
2033 	struct hns_roce_mbox_status *mb_st =
2034 				       (struct hns_roce_mbox_status *)desc.data;
2035 	enum hns_roce_cmd_return_status status;
2036 
2037 	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST, true);
2038 
2039 	status = hns_roce_cmq_send(hr_dev, &desc, 1);
2040 	if (status)
2041 		return status;
2042 
2043 	return le32_to_cpu(mb_st->mb_status_hw_run);
2044 }
2045 
2046 static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev)
2047 {
2048 	u32 status = hns_roce_query_mbox_status(hr_dev);
2049 
2050 	return status >> HNS_ROCE_HW_RUN_BIT_SHIFT;
2051 }
2052 
2053 static int hns_roce_v2_cmd_complete(struct hns_roce_dev *hr_dev)
2054 {
2055 	u32 status = hns_roce_query_mbox_status(hr_dev);
2056 
2057 	return status & HNS_ROCE_HW_MB_STATUS_MASK;
2058 }
2059 
2060 static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param,
2061 			      u64 out_param, u32 in_modifier, u8 op_modifier,
2062 			      u16 op, u16 token, int event)
2063 {
2064 	struct hns_roce_cmq_desc desc;
2065 	struct hns_roce_post_mbox *mb = (struct hns_roce_post_mbox *)desc.data;
2066 
2067 	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false);
2068 
2069 	mb->in_param_l = cpu_to_le32(in_param);
2070 	mb->in_param_h = cpu_to_le32(in_param >> 32);
2071 	mb->out_param_l = cpu_to_le32(out_param);
2072 	mb->out_param_h = cpu_to_le32(out_param >> 32);
2073 	mb->cmd_tag = cpu_to_le32(in_modifier << 8 | op);
2074 	mb->token_event_en = cpu_to_le32(event << 16 | token);
2075 
2076 	return hns_roce_cmq_send(hr_dev, &desc, 1);
2077 }
2078 
2079 static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
2080 				 u64 out_param, u32 in_modifier, u8 op_modifier,
2081 				 u16 op, u16 token, int event)
2082 {
2083 	struct device *dev = hr_dev->dev;
2084 	unsigned long end;
2085 	int ret;
2086 
2087 	end = msecs_to_jiffies(HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS) + jiffies;
2088 	while (hns_roce_v2_cmd_pending(hr_dev)) {
2089 		if (time_after(jiffies, end)) {
2090 			dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
2091 				(int)end);
2092 			return -EAGAIN;
2093 		}
2094 		cond_resched();
2095 	}
2096 
2097 	ret = hns_roce_mbox_post(hr_dev, in_param, out_param, in_modifier,
2098 				 op_modifier, op, token, event);
2099 	if (ret)
2100 		dev_err(dev, "Post mailbox fail(%d)\n", ret);
2101 
2102 	return ret;
2103 }
2104 
2105 static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
2106 				unsigned long timeout)
2107 {
2108 	struct device *dev = hr_dev->dev;
2109 	unsigned long end;
2110 	u32 status;
2111 
2112 	end = msecs_to_jiffies(timeout) + jiffies;
2113 	while (hns_roce_v2_cmd_pending(hr_dev) && time_before(jiffies, end))
2114 		cond_resched();
2115 
2116 	if (hns_roce_v2_cmd_pending(hr_dev)) {
2117 		dev_err(dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
2118 		return -ETIMEDOUT;
2119 	}
2120 
2121 	status = hns_roce_v2_cmd_complete(hr_dev);
2122 	if (status != 0x1) {
2123 		if (status == CMD_RST_PRC_EBUSY)
2124 			return status;
2125 
2126 		dev_err(dev, "mailbox status 0x%x!\n", status);
2127 		return -EBUSY;
2128 	}
2129 
2130 	return 0;
2131 }
2132 
2133 static int hns_roce_config_sgid_table(struct hns_roce_dev *hr_dev,
2134 				      int gid_index, const union ib_gid *gid,
2135 				      enum hns_roce_sgid_type sgid_type)
2136 {
2137 	struct hns_roce_cmq_desc desc;
2138 	struct hns_roce_cfg_sgid_tb *sgid_tb =
2139 				    (struct hns_roce_cfg_sgid_tb *)desc.data;
2140 	u32 *p;
2141 
2142 	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false);
2143 
2144 	roce_set_field(sgid_tb->table_idx_rsv,
2145 		       CFG_SGID_TB_TABLE_IDX_M,
2146 		       CFG_SGID_TB_TABLE_IDX_S, gid_index);
2147 	roce_set_field(sgid_tb->vf_sgid_type_rsv,
2148 		       CFG_SGID_TB_VF_SGID_TYPE_M,
2149 		       CFG_SGID_TB_VF_SGID_TYPE_S, sgid_type);
2150 
2151 	p = (u32 *)&gid->raw[0];
2152 	sgid_tb->vf_sgid_l = cpu_to_le32(*p);
2153 
2154 	p = (u32 *)&gid->raw[4];
2155 	sgid_tb->vf_sgid_ml = cpu_to_le32(*p);
2156 
2157 	p = (u32 *)&gid->raw[8];
2158 	sgid_tb->vf_sgid_mh = cpu_to_le32(*p);
2159 
2160 	p = (u32 *)&gid->raw[0xc];
2161 	sgid_tb->vf_sgid_h = cpu_to_le32(*p);
2162 
2163 	return hns_roce_cmq_send(hr_dev, &desc, 1);
2164 }
2165 
2166 static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
2167 			       int gid_index, const union ib_gid *gid,
2168 			       const struct ib_gid_attr *attr)
2169 {
2170 	enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
2171 	int ret;
2172 
2173 	if (!gid || !attr)
2174 		return -EINVAL;
2175 
2176 	if (attr->gid_type == IB_GID_TYPE_ROCE)
2177 		sgid_type = GID_TYPE_FLAG_ROCE_V1;
2178 
2179 	if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
2180 		if (ipv6_addr_v4mapped((void *)gid))
2181 			sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV4;
2182 		else
2183 			sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6;
2184 	}
2185 
2186 	ret = hns_roce_config_sgid_table(hr_dev, gid_index, gid, sgid_type);
2187 	if (ret)
2188 		dev_err(hr_dev->dev, "Configure sgid table failed(%d)!\n", ret);
2189 
2190 	return ret;
2191 }
2192 
2193 static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
2194 			       u8 *addr)
2195 {
2196 	struct hns_roce_cmq_desc desc;
2197 	struct hns_roce_cfg_smac_tb *smac_tb =
2198 				    (struct hns_roce_cfg_smac_tb *)desc.data;
2199 	u16 reg_smac_h;
2200 	u32 reg_smac_l;
2201 
2202 	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SMAC_TB, false);
2203 
2204 	reg_smac_l = *(u32 *)(&addr[0]);
2205 	reg_smac_h = *(u16 *)(&addr[4]);
2206 
2207 	memset(smac_tb, 0, sizeof(*smac_tb));
2208 	roce_set_field(smac_tb->tb_idx_rsv,
2209 		       CFG_SMAC_TB_IDX_M,
2210 		       CFG_SMAC_TB_IDX_S, phy_port);
2211 	roce_set_field(smac_tb->vf_smac_h_rsv,
2212 		       CFG_SMAC_TB_VF_SMAC_H_M,
2213 		       CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h);
2214 	smac_tb->vf_smac_l = cpu_to_le32(reg_smac_l);
2215 
2216 	return hns_roce_cmq_send(hr_dev, &desc, 1);
2217 }
2218 
2219 static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
2220 			struct hns_roce_mr *mr)
2221 {
2222 	struct sg_dma_page_iter sg_iter;
2223 	u64 page_addr;
2224 	u64 *pages;
2225 	int i;
2226 
2227 	mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
2228 	mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
2229 	roce_set_field(mpt_entry->byte_48_mode_ba,
2230 		       V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S,
2231 		       upper_32_bits(mr->pbl_ba >> 3));
2232 
2233 	pages = (u64 *)__get_free_page(GFP_KERNEL);
2234 	if (!pages)
2235 		return -ENOMEM;
2236 
2237 	i = 0;
2238 	for_each_sg_dma_page(mr->umem->sg_head.sgl, &sg_iter, mr->umem->nmap, 0) {
2239 		page_addr = sg_page_iter_dma_address(&sg_iter);
2240 		pages[i] = page_addr >> 6;
2241 
2242 		/* Record the first 2 entry directly to MTPT table */
2243 		if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
2244 			goto found;
2245 		i++;
2246 	}
2247 found:
2248 	mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
2249 	roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
2250 		       V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0]));
2251 
2252 	mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
2253 	roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
2254 		       V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
2255 	roce_set_field(mpt_entry->byte_64_buf_pa1,
2256 		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
2257 		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
2258 		       mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
2259 
2260 	free_page((unsigned long)pages);
2261 
2262 	return 0;
2263 }
2264 
2265 static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
2266 				  unsigned long mtpt_idx)
2267 {
2268 	struct hns_roce_v2_mpt_entry *mpt_entry;
2269 	int ret;
2270 
2271 	mpt_entry = mb_buf;
2272 	memset(mpt_entry, 0, sizeof(*mpt_entry));
2273 
2274 	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2275 		       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
2276 	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
2277 		       V2_MPT_BYTE_4_PBL_HOP_NUM_S, mr->pbl_hop_num ==
2278 		       HNS_ROCE_HOP_NUM_0 ? 0 : mr->pbl_hop_num);
2279 	roce_set_field(mpt_entry->byte_4_pd_hop_st,
2280 		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
2281 		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
2282 		       mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
2283 	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2284 		       V2_MPT_BYTE_4_PD_S, mr->pd);
2285 
2286 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
2287 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 0);
2288 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
2289 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
2290 		     (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
2291 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S,
2292 		     mr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
2293 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
2294 		     (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
2295 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
2296 		     (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
2297 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
2298 		     (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
2299 
2300 	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
2301 		     mr->type == MR_TYPE_MR ? 0 : 1);
2302 	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
2303 		     1);
2304 
2305 	mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
2306 	mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
2307 	mpt_entry->lkey = cpu_to_le32(mr->key);
2308 	mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
2309 	mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
2310 
2311 	if (mr->type == MR_TYPE_DMA)
2312 		return 0;
2313 
2314 	ret = set_mtpt_pbl(mpt_entry, mr);
2315 
2316 	return ret;
2317 }
2318 
2319 static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
2320 					struct hns_roce_mr *mr, int flags,
2321 					u32 pdn, int mr_access_flags, u64 iova,
2322 					u64 size, void *mb_buf)
2323 {
2324 	struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
2325 	int ret = 0;
2326 
2327 	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2328 		       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
2329 
2330 	if (flags & IB_MR_REREG_PD) {
2331 		roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2332 			       V2_MPT_BYTE_4_PD_S, pdn);
2333 		mr->pd = pdn;
2334 	}
2335 
2336 	if (flags & IB_MR_REREG_ACCESS) {
2337 		roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
2338 			     V2_MPT_BYTE_8_BIND_EN_S,
2339 			     (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
2340 		roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
2341 			     V2_MPT_BYTE_8_ATOMIC_EN_S,
2342 			     mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
2343 		roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
2344 			     mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0);
2345 		roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
2346 			     mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
2347 		roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
2348 			     mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
2349 	}
2350 
2351 	if (flags & IB_MR_REREG_TRANS) {
2352 		mpt_entry->va_l = cpu_to_le32(lower_32_bits(iova));
2353 		mpt_entry->va_h = cpu_to_le32(upper_32_bits(iova));
2354 		mpt_entry->len_l = cpu_to_le32(lower_32_bits(size));
2355 		mpt_entry->len_h = cpu_to_le32(upper_32_bits(size));
2356 
2357 		mr->iova = iova;
2358 		mr->size = size;
2359 
2360 		ret = set_mtpt_pbl(mpt_entry, mr);
2361 	}
2362 
2363 	return ret;
2364 }
2365 
2366 static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
2367 {
2368 	struct hns_roce_v2_mpt_entry *mpt_entry;
2369 
2370 	mpt_entry = mb_buf;
2371 	memset(mpt_entry, 0, sizeof(*mpt_entry));
2372 
2373 	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2374 		       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
2375 	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
2376 		       V2_MPT_BYTE_4_PBL_HOP_NUM_S, 1);
2377 	roce_set_field(mpt_entry->byte_4_pd_hop_st,
2378 		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
2379 		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
2380 		       mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
2381 	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2382 		       V2_MPT_BYTE_4_PD_S, mr->pd);
2383 
2384 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 1);
2385 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
2386 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
2387 
2388 	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_FRE_S, 1);
2389 	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
2390 	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 0);
2391 	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
2392 
2393 	mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
2394 
2395 	mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
2396 	roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
2397 		       V2_MPT_BYTE_48_PBL_BA_H_S,
2398 		       upper_32_bits(mr->pbl_ba >> 3));
2399 
2400 	roce_set_field(mpt_entry->byte_64_buf_pa1,
2401 		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
2402 		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
2403 		       mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
2404 
2405 	return 0;
2406 }
2407 
2408 static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
2409 {
2410 	struct hns_roce_v2_mpt_entry *mpt_entry;
2411 
2412 	mpt_entry = mb_buf;
2413 	memset(mpt_entry, 0, sizeof(*mpt_entry));
2414 
2415 	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2416 		       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
2417 	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2418 		       V2_MPT_BYTE_4_PD_S, mw->pdn);
2419 	roce_set_field(mpt_entry->byte_4_pd_hop_st,
2420 		       V2_MPT_BYTE_4_PBL_HOP_NUM_M,
2421 		       V2_MPT_BYTE_4_PBL_HOP_NUM_S,
2422 		       mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ?
2423 		       0 : mw->pbl_hop_num);
2424 	roce_set_field(mpt_entry->byte_4_pd_hop_st,
2425 		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
2426 		       V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
2427 		       mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
2428 
2429 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
2430 	roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
2431 
2432 	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
2433 	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1);
2434 	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
2435 	roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BQP_S,
2436 		     mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1);
2437 
2438 	roce_set_field(mpt_entry->byte_64_buf_pa1,
2439 		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
2440 		       V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
2441 		       mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
2442 
2443 	mpt_entry->lkey = cpu_to_le32(mw->rkey);
2444 
2445 	return 0;
2446 }
2447 
2448 static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
2449 {
2450 	return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
2451 				   n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
2452 }
2453 
2454 static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
2455 {
2456 	struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);
2457 
2458 	/* Get cqe when Owner bit is Conversely with the MSB of cons_idx */
2459 	return (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_OWNER_S) ^
2460 		!!(n & (hr_cq->ib_cq.cqe + 1))) ? cqe : NULL;
2461 }
2462 
2463 static struct hns_roce_v2_cqe *next_cqe_sw_v2(struct hns_roce_cq *hr_cq)
2464 {
2465 	return get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
2466 }
2467 
2468 static void *get_srq_wqe(struct hns_roce_srq *srq, int n)
2469 {
2470 	return hns_roce_buf_offset(&srq->buf, n << srq->wqe_shift);
2471 }
2472 
2473 static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
2474 {
2475 	/* always called with interrupts disabled. */
2476 	spin_lock(&srq->lock);
2477 
2478 	bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
2479 	srq->tail++;
2480 
2481 	spin_unlock(&srq->lock);
2482 }
2483 
2484 static void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
2485 {
2486 	*hr_cq->set_ci_db = cons_index & 0xffffff;
2487 }
2488 
2489 static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
2490 				   struct hns_roce_srq *srq)
2491 {
2492 	struct hns_roce_v2_cqe *cqe, *dest;
2493 	u32 prod_index;
2494 	int nfreed = 0;
2495 	int wqe_index;
2496 	u8 owner_bit;
2497 
2498 	for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
2499 	     ++prod_index) {
2500 		if (prod_index > hr_cq->cons_index + hr_cq->ib_cq.cqe)
2501 			break;
2502 	}
2503 
2504 	/*
2505 	 * Now backwards through the CQ, removing CQ entries
2506 	 * that match our QP by overwriting them with next entries.
2507 	 */
2508 	while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
2509 		cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe);
2510 		if ((roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
2511 				    V2_CQE_BYTE_16_LCL_QPN_S) &
2512 				    HNS_ROCE_V2_CQE_QPN_MASK) == qpn) {
2513 			if (srq &&
2514 			    roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S)) {
2515 				wqe_index = roce_get_field(cqe->byte_4,
2516 						     V2_CQE_BYTE_4_WQE_INDX_M,
2517 						     V2_CQE_BYTE_4_WQE_INDX_S);
2518 				hns_roce_free_srq_wqe(srq, wqe_index);
2519 			}
2520 			++nfreed;
2521 		} else if (nfreed) {
2522 			dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
2523 					  hr_cq->ib_cq.cqe);
2524 			owner_bit = roce_get_bit(dest->byte_4,
2525 						 V2_CQE_BYTE_4_OWNER_S);
2526 			memcpy(dest, cqe, sizeof(*cqe));
2527 			roce_set_bit(dest->byte_4, V2_CQE_BYTE_4_OWNER_S,
2528 				     owner_bit);
2529 		}
2530 	}
2531 
2532 	if (nfreed) {
2533 		hr_cq->cons_index += nfreed;
2534 		/*
2535 		 * Make sure update of buffer contents is done before
2536 		 * updating consumer index.
2537 		 */
2538 		wmb();
2539 		hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
2540 	}
2541 }
2542 
2543 static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
2544 				 struct hns_roce_srq *srq)
2545 {
2546 	spin_lock_irq(&hr_cq->lock);
2547 	__hns_roce_v2_cq_clean(hr_cq, qpn, srq);
2548 	spin_unlock_irq(&hr_cq->lock);
2549 }
2550 
2551 static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
2552 				  struct hns_roce_cq *hr_cq, void *mb_buf,
2553 				  u64 *mtts, dma_addr_t dma_handle, int nent,
2554 				  u32 vector)
2555 {
2556 	struct hns_roce_v2_cq_context *cq_context;
2557 
2558 	cq_context = mb_buf;
2559 	memset(cq_context, 0, sizeof(*cq_context));
2560 
2561 	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CQ_ST_M,
2562 		       V2_CQC_BYTE_4_CQ_ST_S, V2_CQ_STATE_VALID);
2563 	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M,
2564 		       V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE);
2565 	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
2566 		       V2_CQC_BYTE_4_SHIFT_S, ilog2((unsigned int)nent));
2567 	roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
2568 		       V2_CQC_BYTE_4_CEQN_S, vector);
2569 
2570 	roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
2571 		       V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);
2572 
2573 	cq_context->cqe_cur_blk_addr = cpu_to_le32(mtts[0] >> PAGE_ADDR_SHIFT);
2574 
2575 	roce_set_field(cq_context->byte_16_hop_addr,
2576 		       V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M,
2577 		       V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S,
2578 		       mtts[0] >> (32 + PAGE_ADDR_SHIFT));
2579 	roce_set_field(cq_context->byte_16_hop_addr,
2580 		       V2_CQC_BYTE_16_CQE_HOP_NUM_M,
2581 		       V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num ==
2582 		       HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);
2583 
2584 	cq_context->cqe_nxt_blk_addr = cpu_to_le32(mtts[1] >> PAGE_ADDR_SHIFT);
2585 	roce_set_field(cq_context->byte_24_pgsz_addr,
2586 		       V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M,
2587 		       V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S,
2588 		       mtts[1] >> (32 + PAGE_ADDR_SHIFT));
2589 	roce_set_field(cq_context->byte_24_pgsz_addr,
2590 		       V2_CQC_BYTE_24_CQE_BA_PG_SZ_M,
2591 		       V2_CQC_BYTE_24_CQE_BA_PG_SZ_S,
2592 		       hr_dev->caps.cqe_ba_pg_sz + PG_SHIFT_OFFSET);
2593 	roce_set_field(cq_context->byte_24_pgsz_addr,
2594 		       V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M,
2595 		       V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S,
2596 		       hr_dev->caps.cqe_buf_pg_sz + PG_SHIFT_OFFSET);
2597 
2598 	cq_context->cqe_ba = cpu_to_le32(dma_handle >> 3);
2599 
2600 	roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
2601 		       V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));
2602 
2603 	if (hr_cq->db_en)
2604 		roce_set_bit(cq_context->byte_44_db_record,
2605 			     V2_CQC_BYTE_44_DB_RECORD_EN_S, 1);
2606 
2607 	roce_set_field(cq_context->byte_44_db_record,
2608 		       V2_CQC_BYTE_44_DB_RECORD_ADDR_M,
2609 		       V2_CQC_BYTE_44_DB_RECORD_ADDR_S,
2610 		       ((u32)hr_cq->db.dma) >> 1);
2611 	cq_context->db_record_addr = cpu_to_le32(hr_cq->db.dma >> 32);
2612 
2613 	roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
2614 		       V2_CQC_BYTE_56_CQ_MAX_CNT_M,
2615 		       V2_CQC_BYTE_56_CQ_MAX_CNT_S,
2616 		       HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
2617 	roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
2618 		       V2_CQC_BYTE_56_CQ_PERIOD_M,
2619 		       V2_CQC_BYTE_56_CQ_PERIOD_S,
2620 		       HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
2621 }
2622 
2623 static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
2624 				     enum ib_cq_notify_flags flags)
2625 {
2626 	struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
2627 	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2628 	u32 notification_flag;
2629 	__le32 doorbell[2];
2630 
2631 	doorbell[0] = 0;
2632 	doorbell[1] = 0;
2633 
2634 	notification_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
2635 			     V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
2636 	/*
2637 	 * flags = 0; Notification Flag = 1, next
2638 	 * flags = 1; Notification Flag = 0, solocited
2639 	 */
2640 	roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_TAG_M, V2_DB_BYTE_4_TAG_S,
2641 		       hr_cq->cqn);
2642 	roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_CMD_M, V2_DB_BYTE_4_CMD_S,
2643 		       HNS_ROCE_V2_CQ_DB_NTR);
2644 	roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CONS_IDX_M,
2645 		       V2_CQ_DB_PARAMETER_CONS_IDX_S,
2646 		       hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
2647 	roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CMD_SN_M,
2648 		       V2_CQ_DB_PARAMETER_CMD_SN_S, hr_cq->arm_sn & 0x3);
2649 	roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S,
2650 		     notification_flag);
2651 
2652 	hns_roce_write64(hr_dev, doorbell, hr_cq->cq_db_l);
2653 
2654 	return 0;
2655 }
2656 
2657 static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
2658 						    struct hns_roce_qp **cur_qp,
2659 						    struct ib_wc *wc)
2660 {
2661 	struct hns_roce_rinl_sge *sge_list;
2662 	u32 wr_num, wr_cnt, sge_num;
2663 	u32 sge_cnt, data_len, size;
2664 	void *wqe_buf;
2665 
2666 	wr_num = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
2667 				V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff;
2668 	wr_cnt = wr_num & ((*cur_qp)->rq.wqe_cnt - 1);
2669 
2670 	sge_list = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sg_list;
2671 	sge_num = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
2672 	wqe_buf = get_recv_wqe(*cur_qp, wr_cnt);
2673 	data_len = wc->byte_len;
2674 
2675 	for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
2676 		size = min(sge_list[sge_cnt].len, data_len);
2677 		memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);
2678 
2679 		data_len -= size;
2680 		wqe_buf += size;
2681 	}
2682 
2683 	if (data_len) {
2684 		wc->status = IB_WC_LOC_LEN_ERR;
2685 		return -EAGAIN;
2686 	}
2687 
2688 	return 0;
2689 }
2690 
2691 static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
2692 				struct hns_roce_qp **cur_qp, struct ib_wc *wc)
2693 {
2694 	struct hns_roce_srq *srq = NULL;
2695 	struct hns_roce_dev *hr_dev;
2696 	struct hns_roce_v2_cqe *cqe;
2697 	struct hns_roce_qp *hr_qp;
2698 	struct hns_roce_wq *wq;
2699 	struct ib_qp_attr attr;
2700 	int attr_mask;
2701 	int is_send;
2702 	u16 wqe_ctr;
2703 	u32 opcode;
2704 	u32 status;
2705 	int qpn;
2706 	int ret;
2707 
2708 	/* Find cqe according to consumer index */
2709 	cqe = next_cqe_sw_v2(hr_cq);
2710 	if (!cqe)
2711 		return -EAGAIN;
2712 
2713 	++hr_cq->cons_index;
2714 	/* Memory barrier */
2715 	rmb();
2716 
2717 	/* 0->SQ, 1->RQ */
2718 	is_send = !roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S);
2719 
2720 	qpn = roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
2721 				V2_CQE_BYTE_16_LCL_QPN_S);
2722 
2723 	if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) {
2724 		hr_dev = to_hr_dev(hr_cq->ib_cq.device);
2725 		hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
2726 		if (unlikely(!hr_qp)) {
2727 			dev_err(hr_dev->dev, "CQ %06lx with entry for unknown QPN %06x\n",
2728 				hr_cq->cqn, (qpn & HNS_ROCE_V2_CQE_QPN_MASK));
2729 			return -EINVAL;
2730 		}
2731 		*cur_qp = hr_qp;
2732 	}
2733 
2734 	wc->qp = &(*cur_qp)->ibqp;
2735 	wc->vendor_err = 0;
2736 
2737 	if (is_send) {
2738 		wq = &(*cur_qp)->sq;
2739 		if ((*cur_qp)->sq_signal_bits) {
2740 			/*
2741 			 * If sg_signal_bit is 1,
2742 			 * firstly tail pointer updated to wqe
2743 			 * which current cqe correspond to
2744 			 */
2745 			wqe_ctr = (u16)roce_get_field(cqe->byte_4,
2746 						      V2_CQE_BYTE_4_WQE_INDX_M,
2747 						      V2_CQE_BYTE_4_WQE_INDX_S);
2748 			wq->tail += (wqe_ctr - (u16)wq->tail) &
2749 				    (wq->wqe_cnt - 1);
2750 		}
2751 
2752 		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2753 		++wq->tail;
2754 	} else if ((*cur_qp)->ibqp.srq) {
2755 		srq = to_hr_srq((*cur_qp)->ibqp.srq);
2756 		wqe_ctr = (u16)roce_get_field(cqe->byte_4,
2757 					      V2_CQE_BYTE_4_WQE_INDX_M,
2758 					      V2_CQE_BYTE_4_WQE_INDX_S);
2759 		wc->wr_id = srq->wrid[wqe_ctr];
2760 		hns_roce_free_srq_wqe(srq, wqe_ctr);
2761 	} else {
2762 		/* Update tail pointer, record wr_id */
2763 		wq = &(*cur_qp)->rq;
2764 		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2765 		++wq->tail;
2766 	}
2767 
2768 	status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
2769 				V2_CQE_BYTE_4_STATUS_S);
2770 	switch (status & HNS_ROCE_V2_CQE_STATUS_MASK) {
2771 	case HNS_ROCE_CQE_V2_SUCCESS:
2772 		wc->status = IB_WC_SUCCESS;
2773 		break;
2774 	case HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR:
2775 		wc->status = IB_WC_LOC_LEN_ERR;
2776 		break;
2777 	case HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR:
2778 		wc->status = IB_WC_LOC_QP_OP_ERR;
2779 		break;
2780 	case HNS_ROCE_CQE_V2_LOCAL_PROT_ERR:
2781 		wc->status = IB_WC_LOC_PROT_ERR;
2782 		break;
2783 	case HNS_ROCE_CQE_V2_WR_FLUSH_ERR:
2784 		wc->status = IB_WC_WR_FLUSH_ERR;
2785 		break;
2786 	case HNS_ROCE_CQE_V2_MW_BIND_ERR:
2787 		wc->status = IB_WC_MW_BIND_ERR;
2788 		break;
2789 	case HNS_ROCE_CQE_V2_BAD_RESP_ERR:
2790 		wc->status = IB_WC_BAD_RESP_ERR;
2791 		break;
2792 	case HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR:
2793 		wc->status = IB_WC_LOC_ACCESS_ERR;
2794 		break;
2795 	case HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR:
2796 		wc->status = IB_WC_REM_INV_REQ_ERR;
2797 		break;
2798 	case HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR:
2799 		wc->status = IB_WC_REM_ACCESS_ERR;
2800 		break;
2801 	case HNS_ROCE_CQE_V2_REMOTE_OP_ERR:
2802 		wc->status = IB_WC_REM_OP_ERR;
2803 		break;
2804 	case HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR:
2805 		wc->status = IB_WC_RETRY_EXC_ERR;
2806 		break;
2807 	case HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR:
2808 		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
2809 		break;
2810 	case HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR:
2811 		wc->status = IB_WC_REM_ABORT_ERR;
2812 		break;
2813 	default:
2814 		wc->status = IB_WC_GENERAL_ERR;
2815 		break;
2816 	}
2817 
2818 	/* flush cqe if wc status is error, excluding flush error */
2819 	if ((wc->status != IB_WC_SUCCESS) &&
2820 	    (wc->status != IB_WC_WR_FLUSH_ERR)) {
2821 		attr_mask = IB_QP_STATE;
2822 		attr.qp_state = IB_QPS_ERR;
2823 		return hns_roce_v2_modify_qp(&(*cur_qp)->ibqp,
2824 					     &attr, attr_mask,
2825 					     (*cur_qp)->state, IB_QPS_ERR);
2826 	}
2827 
2828 	if (wc->status == IB_WC_WR_FLUSH_ERR)
2829 		return 0;
2830 
2831 	if (is_send) {
2832 		wc->wc_flags = 0;
2833 		/* SQ corresponding to CQE */
2834 		switch (roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
2835 				       V2_CQE_BYTE_4_OPCODE_S) & 0x1f) {
2836 		case HNS_ROCE_SQ_OPCODE_SEND:
2837 			wc->opcode = IB_WC_SEND;
2838 			break;
2839 		case HNS_ROCE_SQ_OPCODE_SEND_WITH_INV:
2840 			wc->opcode = IB_WC_SEND;
2841 			break;
2842 		case HNS_ROCE_SQ_OPCODE_SEND_WITH_IMM:
2843 			wc->opcode = IB_WC_SEND;
2844 			wc->wc_flags |= IB_WC_WITH_IMM;
2845 			break;
2846 		case HNS_ROCE_SQ_OPCODE_RDMA_READ:
2847 			wc->opcode = IB_WC_RDMA_READ;
2848 			wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2849 			break;
2850 		case HNS_ROCE_SQ_OPCODE_RDMA_WRITE:
2851 			wc->opcode = IB_WC_RDMA_WRITE;
2852 			break;
2853 		case HNS_ROCE_SQ_OPCODE_RDMA_WRITE_WITH_IMM:
2854 			wc->opcode = IB_WC_RDMA_WRITE;
2855 			wc->wc_flags |= IB_WC_WITH_IMM;
2856 			break;
2857 		case HNS_ROCE_SQ_OPCODE_LOCAL_INV:
2858 			wc->opcode = IB_WC_LOCAL_INV;
2859 			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2860 			break;
2861 		case HNS_ROCE_SQ_OPCODE_ATOMIC_COMP_AND_SWAP:
2862 			wc->opcode = IB_WC_COMP_SWAP;
2863 			wc->byte_len  = 8;
2864 			break;
2865 		case HNS_ROCE_SQ_OPCODE_ATOMIC_FETCH_AND_ADD:
2866 			wc->opcode = IB_WC_FETCH_ADD;
2867 			wc->byte_len  = 8;
2868 			break;
2869 		case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_COMP_AND_SWAP:
2870 			wc->opcode = IB_WC_MASKED_COMP_SWAP;
2871 			wc->byte_len  = 8;
2872 			break;
2873 		case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_FETCH_AND_ADD:
2874 			wc->opcode = IB_WC_MASKED_FETCH_ADD;
2875 			wc->byte_len  = 8;
2876 			break;
2877 		case HNS_ROCE_SQ_OPCODE_FAST_REG_WR:
2878 			wc->opcode = IB_WC_REG_MR;
2879 			break;
2880 		case HNS_ROCE_SQ_OPCODE_BIND_MW:
2881 			wc->opcode = IB_WC_REG_MR;
2882 			break;
2883 		default:
2884 			wc->status = IB_WC_GENERAL_ERR;
2885 			break;
2886 		}
2887 	} else {
2888 		/* RQ correspond to CQE */
2889 		wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2890 
2891 		opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
2892 					V2_CQE_BYTE_4_OPCODE_S);
2893 		switch (opcode & 0x1f) {
2894 		case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
2895 			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2896 			wc->wc_flags = IB_WC_WITH_IMM;
2897 			wc->ex.imm_data =
2898 				cpu_to_be32(le32_to_cpu(cqe->immtdata));
2899 			break;
2900 		case HNS_ROCE_V2_OPCODE_SEND:
2901 			wc->opcode = IB_WC_RECV;
2902 			wc->wc_flags = 0;
2903 			break;
2904 		case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
2905 			wc->opcode = IB_WC_RECV;
2906 			wc->wc_flags = IB_WC_WITH_IMM;
2907 			wc->ex.imm_data =
2908 				cpu_to_be32(le32_to_cpu(cqe->immtdata));
2909 			break;
2910 		case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
2911 			wc->opcode = IB_WC_RECV;
2912 			wc->wc_flags = IB_WC_WITH_INVALIDATE;
2913 			wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
2914 			break;
2915 		default:
2916 			wc->status = IB_WC_GENERAL_ERR;
2917 			break;
2918 		}
2919 
2920 		if ((wc->qp->qp_type == IB_QPT_RC ||
2921 		     wc->qp->qp_type == IB_QPT_UC) &&
2922 		    (opcode == HNS_ROCE_V2_OPCODE_SEND ||
2923 		    opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
2924 		    opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
2925 		    (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S))) {
2926 			ret = hns_roce_handle_recv_inl_wqe(cqe, cur_qp, wc);
2927 			if (ret)
2928 				return -EAGAIN;
2929 		}
2930 
2931 		wc->sl = (u8)roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_SL_M,
2932 					    V2_CQE_BYTE_32_SL_S);
2933 		wc->src_qp = (u8)roce_get_field(cqe->byte_32,
2934 						V2_CQE_BYTE_32_RMT_QPN_M,
2935 						V2_CQE_BYTE_32_RMT_QPN_S);
2936 		wc->slid = 0;
2937 		wc->wc_flags |= (roce_get_bit(cqe->byte_32,
2938 					      V2_CQE_BYTE_32_GRH_S) ?
2939 					      IB_WC_GRH : 0);
2940 		wc->port_num = roce_get_field(cqe->byte_32,
2941 				V2_CQE_BYTE_32_PORTN_M, V2_CQE_BYTE_32_PORTN_S);
2942 		wc->pkey_index = 0;
2943 		memcpy(wc->smac, cqe->smac, 4);
2944 		wc->smac[4] = roce_get_field(cqe->byte_28,
2945 					     V2_CQE_BYTE_28_SMAC_4_M,
2946 					     V2_CQE_BYTE_28_SMAC_4_S);
2947 		wc->smac[5] = roce_get_field(cqe->byte_28,
2948 					     V2_CQE_BYTE_28_SMAC_5_M,
2949 					     V2_CQE_BYTE_28_SMAC_5_S);
2950 		wc->wc_flags |= IB_WC_WITH_SMAC;
2951 		if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) {
2952 			wc->vlan_id = (u16)roce_get_field(cqe->byte_28,
2953 							  V2_CQE_BYTE_28_VID_M,
2954 							  V2_CQE_BYTE_28_VID_S);
2955 			wc->wc_flags |= IB_WC_WITH_VLAN;
2956 		} else {
2957 			wc->vlan_id = 0xffff;
2958 		}
2959 
2960 		wc->network_hdr_type = roce_get_field(cqe->byte_28,
2961 						    V2_CQE_BYTE_28_PORT_TYPE_M,
2962 						    V2_CQE_BYTE_28_PORT_TYPE_S);
2963 	}
2964 
2965 	return 0;
2966 }
2967 
2968 static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
2969 			       struct ib_wc *wc)
2970 {
2971 	struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2972 	struct hns_roce_qp *cur_qp = NULL;
2973 	unsigned long flags;
2974 	int npolled;
2975 
2976 	spin_lock_irqsave(&hr_cq->lock, flags);
2977 
2978 	for (npolled = 0; npolled < num_entries; ++npolled) {
2979 		if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled))
2980 			break;
2981 	}
2982 
2983 	if (npolled) {
2984 		/* Memory barrier */
2985 		wmb();
2986 		hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
2987 	}
2988 
2989 	spin_unlock_irqrestore(&hr_cq->lock, flags);
2990 
2991 	return npolled;
2992 }
2993 
2994 static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type,
2995 			      int step_idx)
2996 {
2997 	int op;
2998 
2999 	if (type == HEM_TYPE_SCCC && step_idx)
3000 		return -EINVAL;
3001 
3002 	switch (type) {
3003 	case HEM_TYPE_QPC:
3004 		op = HNS_ROCE_CMD_WRITE_QPC_BT0;
3005 		break;
3006 	case HEM_TYPE_MTPT:
3007 		op = HNS_ROCE_CMD_WRITE_MPT_BT0;
3008 		break;
3009 	case HEM_TYPE_CQC:
3010 		op = HNS_ROCE_CMD_WRITE_CQC_BT0;
3011 		break;
3012 	case HEM_TYPE_SRQC:
3013 		op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
3014 		break;
3015 	case HEM_TYPE_SCCC:
3016 		op = HNS_ROCE_CMD_WRITE_SCCC_BT0;
3017 		break;
3018 	case HEM_TYPE_QPC_TIMER:
3019 		op = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0;
3020 		break;
3021 	case HEM_TYPE_CQC_TIMER:
3022 		op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
3023 		break;
3024 	default:
3025 		dev_warn(hr_dev->dev,
3026 			 "Table %d not to be written by mailbox!\n", type);
3027 		return -EINVAL;
3028 	}
3029 
3030 	return op + step_idx;
3031 }
3032 
3033 static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
3034 			       struct hns_roce_hem_table *table, int obj,
3035 			       int step_idx)
3036 {
3037 	struct hns_roce_cmd_mailbox *mailbox;
3038 	struct hns_roce_hem_iter iter;
3039 	struct hns_roce_hem_mhop mhop;
3040 	struct hns_roce_hem *hem;
3041 	unsigned long mhop_obj = obj;
3042 	int i, j, k;
3043 	int ret = 0;
3044 	u64 hem_idx = 0;
3045 	u64 l1_idx = 0;
3046 	u64 bt_ba = 0;
3047 	u32 chunk_ba_num;
3048 	u32 hop_num;
3049 	int op;
3050 
3051 	if (!hns_roce_check_whether_mhop(hr_dev, table->type))
3052 		return 0;
3053 
3054 	hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
3055 	i = mhop.l0_idx;
3056 	j = mhop.l1_idx;
3057 	k = mhop.l2_idx;
3058 	hop_num = mhop.hop_num;
3059 	chunk_ba_num = mhop.bt_chunk_size / 8;
3060 
3061 	if (hop_num == 2) {
3062 		hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num +
3063 			  k;
3064 		l1_idx = i * chunk_ba_num + j;
3065 	} else if (hop_num == 1) {
3066 		hem_idx = i * chunk_ba_num + j;
3067 	} else if (hop_num == HNS_ROCE_HOP_NUM_0) {
3068 		hem_idx = i;
3069 	}
3070 
3071 	op = get_op_for_set_hem(hr_dev, table->type, step_idx);
3072 	if (op == -EINVAL)
3073 		return 0;
3074 
3075 	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3076 	if (IS_ERR(mailbox))
3077 		return PTR_ERR(mailbox);
3078 
3079 	if (table->type == HEM_TYPE_SCCC)
3080 		obj = mhop.l0_idx;
3081 
3082 	if (check_whether_last_step(hop_num, step_idx)) {
3083 		hem = table->hem[hem_idx];
3084 		for (hns_roce_hem_first(hem, &iter);
3085 		     !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
3086 			bt_ba = hns_roce_hem_addr(&iter);
3087 
3088 			/* configure the ba, tag, and op */
3089 			ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma,
3090 						obj, 0, op,
3091 						HNS_ROCE_CMD_TIMEOUT_MSECS);
3092 		}
3093 	} else {
3094 		if (step_idx == 0)
3095 			bt_ba = table->bt_l0_dma_addr[i];
3096 		else if (step_idx == 1 && hop_num == 2)
3097 			bt_ba = table->bt_l1_dma_addr[l1_idx];
3098 
3099 		/* configure the ba, tag, and op */
3100 		ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma, obj,
3101 					0, op, HNS_ROCE_CMD_TIMEOUT_MSECS);
3102 	}
3103 
3104 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3105 	return ret;
3106 }
3107 
3108 static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
3109 				 struct hns_roce_hem_table *table, int obj,
3110 				 int step_idx)
3111 {
3112 	struct device *dev = hr_dev->dev;
3113 	struct hns_roce_cmd_mailbox *mailbox;
3114 	int ret;
3115 	u16 op = 0xff;
3116 
3117 	if (!hns_roce_check_whether_mhop(hr_dev, table->type))
3118 		return 0;
3119 
3120 	switch (table->type) {
3121 	case HEM_TYPE_QPC:
3122 		op = HNS_ROCE_CMD_DESTROY_QPC_BT0;
3123 		break;
3124 	case HEM_TYPE_MTPT:
3125 		op = HNS_ROCE_CMD_DESTROY_MPT_BT0;
3126 		break;
3127 	case HEM_TYPE_CQC:
3128 		op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
3129 		break;
3130 	case HEM_TYPE_SCCC:
3131 	case HEM_TYPE_QPC_TIMER:
3132 	case HEM_TYPE_CQC_TIMER:
3133 		break;
3134 	case HEM_TYPE_SRQC:
3135 		op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
3136 		break;
3137 	default:
3138 		dev_warn(dev, "Table %d not to be destroyed by mailbox!\n",
3139 			 table->type);
3140 		return 0;
3141 	}
3142 
3143 	if (table->type == HEM_TYPE_SCCC ||
3144 	    table->type == HEM_TYPE_QPC_TIMER ||
3145 	    table->type == HEM_TYPE_CQC_TIMER)
3146 		return 0;
3147 
3148 	op += step_idx;
3149 
3150 	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3151 	if (IS_ERR(mailbox))
3152 		return PTR_ERR(mailbox);
3153 
3154 	/* configure the tag and op */
3155 	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, obj, 0, op,
3156 				HNS_ROCE_CMD_TIMEOUT_MSECS);
3157 
3158 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3159 	return ret;
3160 }
3161 
3162 static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
3163 				 enum ib_qp_state cur_state,
3164 				 enum ib_qp_state new_state,
3165 				 struct hns_roce_v2_qp_context *context,
3166 				 struct hns_roce_qp *hr_qp)
3167 {
3168 	struct hns_roce_cmd_mailbox *mailbox;
3169 	int ret;
3170 
3171 	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3172 	if (IS_ERR(mailbox))
3173 		return PTR_ERR(mailbox);
3174 
3175 	memcpy(mailbox->buf, context, sizeof(*context) * 2);
3176 
3177 	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
3178 				HNS_ROCE_CMD_MODIFY_QPC,
3179 				HNS_ROCE_CMD_TIMEOUT_MSECS);
3180 
3181 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3182 
3183 	return ret;
3184 }
3185 
3186 static void set_access_flags(struct hns_roce_qp *hr_qp,
3187 			     struct hns_roce_v2_qp_context *context,
3188 			     struct hns_roce_v2_qp_context *qpc_mask,
3189 			     const struct ib_qp_attr *attr, int attr_mask)
3190 {
3191 	u8 dest_rd_atomic;
3192 	u32 access_flags;
3193 
3194 	dest_rd_atomic = (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ?
3195 			 attr->max_dest_rd_atomic : hr_qp->resp_depth;
3196 
3197 	access_flags = (attr_mask & IB_QP_ACCESS_FLAGS) ?
3198 		       attr->qp_access_flags : hr_qp->atomic_rd_en;
3199 
3200 	if (!dest_rd_atomic)
3201 		access_flags &= IB_ACCESS_REMOTE_WRITE;
3202 
3203 	roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3204 		     !!(access_flags & IB_ACCESS_REMOTE_READ));
3205 	roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
3206 
3207 	roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3208 		     !!(access_flags & IB_ACCESS_REMOTE_WRITE));
3209 	roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);
3210 
3211 	roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3212 		     !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
3213 	roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
3214 }
3215 
3216 static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp,
3217 			    struct hns_roce_v2_qp_context *context,
3218 			    struct hns_roce_v2_qp_context *qpc_mask)
3219 {
3220 	if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
3221 		roce_set_field(context->byte_4_sqpn_tst,
3222 			       V2_QPC_BYTE_4_SGE_SHIFT_M,
3223 			       V2_QPC_BYTE_4_SGE_SHIFT_S,
3224 			       ilog2((unsigned int)hr_qp->sge.sge_cnt));
3225 	else
3226 		roce_set_field(context->byte_4_sqpn_tst,
3227 			       V2_QPC_BYTE_4_SGE_SHIFT_M,
3228 			       V2_QPC_BYTE_4_SGE_SHIFT_S,
3229 			       hr_qp->sq.max_gs >
3230 			       HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE ?
3231 			       ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
3232 
3233 	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
3234 		       V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
3235 
3236 	roce_set_field(context->byte_20_smac_sgid_idx,
3237 		       V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
3238 		       ilog2((unsigned int)hr_qp->sq.wqe_cnt));
3239 	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3240 		       V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);
3241 
3242 	roce_set_field(context->byte_20_smac_sgid_idx,
3243 		       V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
3244 		       (hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
3245 		       hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT ||
3246 		       hr_qp->ibqp.srq) ? 0 :
3247 		       ilog2((unsigned int)hr_qp->rq.wqe_cnt));
3248 
3249 	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3250 		       V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
3251 }
3252 
3253 static void modify_qp_reset_to_init(struct ib_qp *ibqp,
3254 				    const struct ib_qp_attr *attr,
3255 				    int attr_mask,
3256 				    struct hns_roce_v2_qp_context *context,
3257 				    struct hns_roce_v2_qp_context *qpc_mask)
3258 {
3259 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3260 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3261 
3262 	/*
3263 	 * In v2 engine, software pass context and context mask to hardware
3264 	 * when modifying qp. If software need modify some fields in context,
3265 	 * we should set all bits of the relevant fields in context mask to
3266 	 * 0 at the same time, else set them to 0x1.
3267 	 */
3268 	roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3269 		       V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
3270 	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3271 		       V2_QPC_BYTE_4_TST_S, 0);
3272 
3273 	roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3274 		       V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
3275 	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3276 		       V2_QPC_BYTE_4_SQPN_S, 0);
3277 
3278 	roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3279 		       V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
3280 	roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3281 		       V2_QPC_BYTE_16_PD_S, 0);
3282 
3283 	roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
3284 		       V2_QPC_BYTE_20_RQWS_S, ilog2(hr_qp->rq.max_gs));
3285 	roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
3286 		       V2_QPC_BYTE_20_RQWS_S, 0);
3287 
3288 	set_qpc_wqe_cnt(hr_qp, context, qpc_mask);
3289 
3290 	/* No VLAN need to set 0xFFF */
3291 	roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
3292 		       V2_QPC_BYTE_24_VLAN_ID_S, 0xfff);
3293 	roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
3294 		       V2_QPC_BYTE_24_VLAN_ID_S, 0);
3295 
3296 	/*
3297 	 * Set some fields in context to zero, Because the default values
3298 	 * of all fields in context are zero, we need not set them to 0 again.
3299 	 * but we should set the relevant fields of context mask to 0.
3300 	 */
3301 	roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_TX_ERR_S, 0);
3302 	roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_RX_ERR_S, 0);
3303 	roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_TX_ERR_S, 0);
3304 	roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_RX_ERR_S, 0);
3305 
3306 	roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_TEMPID_M,
3307 		       V2_QPC_BYTE_60_TEMPID_S, 0);
3308 
3309 	roce_set_field(qpc_mask->byte_60_qpst_tempid,
3310 		       V2_QPC_BYTE_60_SCC_TOKEN_M, V2_QPC_BYTE_60_SCC_TOKEN_S,
3311 		       0);
3312 	roce_set_bit(qpc_mask->byte_60_qpst_tempid,
3313 		     V2_QPC_BYTE_60_SQ_DB_DOING_S, 0);
3314 	roce_set_bit(qpc_mask->byte_60_qpst_tempid,
3315 		     V2_QPC_BYTE_60_RQ_DB_DOING_S, 0);
3316 	roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0);
3317 	roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0);
3318 
3319 	if (hr_qp->rdb_en) {
3320 		roce_set_bit(context->byte_68_rq_db,
3321 			     V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1);
3322 		roce_set_bit(qpc_mask->byte_68_rq_db,
3323 			     V2_QPC_BYTE_68_RQ_RECORD_EN_S, 0);
3324 	}
3325 
3326 	roce_set_field(context->byte_68_rq_db,
3327 		       V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
3328 		       V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S,
3329 		       ((u32)hr_qp->rdb.dma) >> 1);
3330 	roce_set_field(qpc_mask->byte_68_rq_db,
3331 		       V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
3332 		       V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S, 0);
3333 	context->rq_db_record_addr = cpu_to_le32(hr_qp->rdb.dma >> 32);
3334 	qpc_mask->rq_db_record_addr = 0;
3335 
3336 	roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S,
3337 		    (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) ? 1 : 0);
3338 	roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);
3339 
3340 	roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3341 		       V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
3342 	roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3343 		       V2_QPC_BYTE_80_RX_CQN_S, 0);
3344 	if (ibqp->srq) {
3345 		roce_set_field(context->byte_76_srqn_op_en,
3346 			       V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
3347 			       to_hr_srq(ibqp->srq)->srqn);
3348 		roce_set_field(qpc_mask->byte_76_srqn_op_en,
3349 			       V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
3350 		roce_set_bit(context->byte_76_srqn_op_en,
3351 			     V2_QPC_BYTE_76_SRQ_EN_S, 1);
3352 		roce_set_bit(qpc_mask->byte_76_srqn_op_en,
3353 			     V2_QPC_BYTE_76_SRQ_EN_S, 0);
3354 	}
3355 
3356 	roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3357 		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3358 		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
3359 	roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3360 		       V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
3361 		       V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
3362 
3363 	roce_set_field(qpc_mask->byte_92_srq_info, V2_QPC_BYTE_92_SRQ_INFO_M,
3364 		       V2_QPC_BYTE_92_SRQ_INFO_S, 0);
3365 
3366 	roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
3367 		       V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
3368 
3369 	roce_set_field(qpc_mask->byte_104_rq_sge,
3370 		       V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_M,
3371 		       V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_S, 0);
3372 
3373 	roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
3374 		     V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
3375 	roce_set_field(qpc_mask->byte_108_rx_reqepsn,
3376 		       V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
3377 		       V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
3378 	roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
3379 		     V2_QPC_BYTE_108_RX_REQ_RNR_S, 0);
3380 
3381 	qpc_mask->rq_rnr_timer = 0;
3382 	qpc_mask->rx_msg_len = 0;
3383 	qpc_mask->rx_rkey_pkt_info = 0;
3384 	qpc_mask->rx_va = 0;
3385 
3386 	roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
3387 		       V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
3388 	roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
3389 		       V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
3390 
3391 	roce_set_bit(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RQ_RTY_WAIT_DO_S,
3392 		     0);
3393 	roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M,
3394 		       V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S, 0);
3395 	roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_TAIL_M,
3396 		       V2_QPC_BYTE_140_RAQ_TRRL_TAIL_S, 0);
3397 
3398 	roce_set_field(qpc_mask->byte_144_raq,
3399 		       V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M,
3400 		       V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S, 0);
3401 	roce_set_field(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_CREDIT_M,
3402 		       V2_QPC_BYTE_144_RAQ_CREDIT_S, 0);
3403 	roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RESP_RTY_FLG_S, 0);
3404 
3405 	roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RQ_MSN_M,
3406 		       V2_QPC_BYTE_148_RQ_MSN_S, 0);
3407 	roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RAQ_SYNDROME_M,
3408 		       V2_QPC_BYTE_148_RAQ_SYNDROME_S, 0);
3409 
3410 	roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
3411 		       V2_QPC_BYTE_152_RAQ_PSN_S, 0);
3412 	roce_set_field(qpc_mask->byte_152_raq,
3413 		       V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M,
3414 		       V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S, 0);
3415 
3416 	roce_set_field(qpc_mask->byte_156_raq, V2_QPC_BYTE_156_RAQ_USE_PKTN_M,
3417 		       V2_QPC_BYTE_156_RAQ_USE_PKTN_S, 0);
3418 
3419 	roce_set_field(qpc_mask->byte_160_sq_ci_pi,
3420 		       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
3421 		       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
3422 	roce_set_field(qpc_mask->byte_160_sq_ci_pi,
3423 		       V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M,
3424 		       V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S, 0);
3425 
3426 	roce_set_bit(qpc_mask->byte_168_irrl_idx,
3427 		     V2_QPC_BYTE_168_POLL_DB_WAIT_DO_S, 0);
3428 	roce_set_bit(qpc_mask->byte_168_irrl_idx,
3429 		     V2_QPC_BYTE_168_SCC_TOKEN_FORBID_SQ_DEQ_S, 0);
3430 	roce_set_bit(qpc_mask->byte_168_irrl_idx,
3431 		     V2_QPC_BYTE_168_WAIT_ACK_TIMEOUT_S, 0);
3432 	roce_set_bit(qpc_mask->byte_168_irrl_idx,
3433 		     V2_QPC_BYTE_168_MSG_RTY_LP_FLG_S, 0);
3434 	roce_set_bit(qpc_mask->byte_168_irrl_idx,
3435 		     V2_QPC_BYTE_168_SQ_INVLD_FLG_S, 0);
3436 	roce_set_field(qpc_mask->byte_168_irrl_idx,
3437 		       V2_QPC_BYTE_168_IRRL_IDX_LSB_M,
3438 		       V2_QPC_BYTE_168_IRRL_IDX_LSB_S, 0);
3439 
3440 	roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
3441 		       V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 4);
3442 	roce_set_field(qpc_mask->byte_172_sq_psn,
3443 		       V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
3444 		       V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0);
3445 
3446 	roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_MSG_RNR_FLG_S,
3447 		     0);
3448 
3449 	roce_set_bit(context->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 1);
3450 	roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 0);
3451 
3452 	roce_set_field(qpc_mask->byte_176_msg_pktn,
3453 		       V2_QPC_BYTE_176_MSG_USE_PKTN_M,
3454 		       V2_QPC_BYTE_176_MSG_USE_PKTN_S, 0);
3455 	roce_set_field(qpc_mask->byte_176_msg_pktn,
3456 		       V2_QPC_BYTE_176_IRRL_HEAD_PRE_M,
3457 		       V2_QPC_BYTE_176_IRRL_HEAD_PRE_S, 0);
3458 
3459 	roce_set_field(qpc_mask->byte_184_irrl_idx,
3460 		       V2_QPC_BYTE_184_IRRL_IDX_MSB_M,
3461 		       V2_QPC_BYTE_184_IRRL_IDX_MSB_S, 0);
3462 
3463 	qpc_mask->cur_sge_offset = 0;
3464 
3465 	roce_set_field(qpc_mask->byte_192_ext_sge,
3466 		       V2_QPC_BYTE_192_CUR_SGE_IDX_M,
3467 		       V2_QPC_BYTE_192_CUR_SGE_IDX_S, 0);
3468 	roce_set_field(qpc_mask->byte_192_ext_sge,
3469 		       V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_M,
3470 		       V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_S, 0);
3471 
3472 	roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
3473 		       V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
3474 
3475 	roce_set_field(qpc_mask->byte_200_sq_max, V2_QPC_BYTE_200_SQ_MAX_IDX_M,
3476 		       V2_QPC_BYTE_200_SQ_MAX_IDX_S, 0);
3477 	roce_set_field(qpc_mask->byte_200_sq_max,
3478 		       V2_QPC_BYTE_200_LCL_OPERATED_CNT_M,
3479 		       V2_QPC_BYTE_200_LCL_OPERATED_CNT_S, 0);
3480 
3481 	roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RNR_FLG_S, 0);
3482 	roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RTY_FLG_S, 0);
3483 
3484 	roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
3485 		       V2_QPC_BYTE_212_CHECK_FLG_S, 0);
3486 
3487 	qpc_mask->sq_timer = 0;
3488 
3489 	roce_set_field(qpc_mask->byte_220_retry_psn_msn,
3490 		       V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
3491 		       V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
3492 	roce_set_field(qpc_mask->byte_232_irrl_sge,
3493 		       V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
3494 		       V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
3495 
3496 	roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_SO_LP_VLD_S,
3497 		     0);
3498 	roce_set_bit(qpc_mask->byte_232_irrl_sge,
3499 		     V2_QPC_BYTE_232_FENCE_LP_VLD_S, 0);
3500 	roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_IRRL_LP_VLD_S,
3501 		     0);
3502 
3503 	qpc_mask->irrl_cur_sge_offset = 0;
3504 
3505 	roce_set_field(qpc_mask->byte_240_irrl_tail,
3506 		       V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
3507 		       V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
3508 	roce_set_field(qpc_mask->byte_240_irrl_tail,
3509 		       V2_QPC_BYTE_240_IRRL_TAIL_RD_M,
3510 		       V2_QPC_BYTE_240_IRRL_TAIL_RD_S, 0);
3511 	roce_set_field(qpc_mask->byte_240_irrl_tail,
3512 		       V2_QPC_BYTE_240_RX_ACK_MSN_M,
3513 		       V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
3514 
3515 	roce_set_field(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_M,
3516 		       V2_QPC_BYTE_248_IRRL_PSN_S, 0);
3517 	roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_ACK_PSN_ERR_S,
3518 		     0);
3519 	roce_set_field(qpc_mask->byte_248_ack_psn,
3520 		       V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
3521 		       V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
3522 	roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_VLD_S,
3523 		     0);
3524 	roce_set_bit(qpc_mask->byte_248_ack_psn,
3525 		     V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
3526 	roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_CQ_ERR_IND_S,
3527 		     0);
3528 
3529 	hr_qp->access_flags = attr->qp_access_flags;
3530 	roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3531 		       V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
3532 	roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3533 		       V2_QPC_BYTE_252_TX_CQN_S, 0);
3534 
3535 	roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_ERR_TYPE_M,
3536 		       V2_QPC_BYTE_252_ERR_TYPE_S, 0);
3537 
3538 	roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
3539 		       V2_QPC_BYTE_256_RQ_CQE_IDX_M,
3540 		       V2_QPC_BYTE_256_RQ_CQE_IDX_S, 0);
3541 	roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
3542 		       V2_QPC_BYTE_256_SQ_FLUSH_IDX_M,
3543 		       V2_QPC_BYTE_256_SQ_FLUSH_IDX_S, 0);
3544 }
3545 
3546 static void modify_qp_init_to_init(struct ib_qp *ibqp,
3547 				   const struct ib_qp_attr *attr, int attr_mask,
3548 				   struct hns_roce_v2_qp_context *context,
3549 				   struct hns_roce_v2_qp_context *qpc_mask)
3550 {
3551 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3552 
3553 	/*
3554 	 * In v2 engine, software pass context and context mask to hardware
3555 	 * when modifying qp. If software need modify some fields in context,
3556 	 * we should set all bits of the relevant fields in context mask to
3557 	 * 0 at the same time, else set them to 0x1.
3558 	 */
3559 	roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3560 		       V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
3561 	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3562 		       V2_QPC_BYTE_4_TST_S, 0);
3563 
3564 	if (attr_mask & IB_QP_ACCESS_FLAGS) {
3565 		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3566 			     !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
3567 		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3568 			     0);
3569 
3570 		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3571 			     !!(attr->qp_access_flags &
3572 			     IB_ACCESS_REMOTE_WRITE));
3573 		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3574 			     0);
3575 
3576 		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3577 			     !!(attr->qp_access_flags &
3578 			     IB_ACCESS_REMOTE_ATOMIC));
3579 		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3580 			     0);
3581 	} else {
3582 		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3583 			     !!(hr_qp->access_flags & IB_ACCESS_REMOTE_READ));
3584 		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3585 			     0);
3586 
3587 		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3588 			     !!(hr_qp->access_flags & IB_ACCESS_REMOTE_WRITE));
3589 		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3590 			     0);
3591 
3592 		roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3593 			     !!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC));
3594 		roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3595 			     0);
3596 	}
3597 
3598 	roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3599 		       V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
3600 	roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3601 		       V2_QPC_BYTE_16_PD_S, 0);
3602 
3603 	roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3604 		       V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
3605 	roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3606 		       V2_QPC_BYTE_80_RX_CQN_S, 0);
3607 
3608 	roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3609 		       V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
3610 	roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3611 		       V2_QPC_BYTE_252_TX_CQN_S, 0);
3612 
3613 	if (ibqp->srq) {
3614 		roce_set_bit(context->byte_76_srqn_op_en,
3615 			     V2_QPC_BYTE_76_SRQ_EN_S, 1);
3616 		roce_set_bit(qpc_mask->byte_76_srqn_op_en,
3617 			     V2_QPC_BYTE_76_SRQ_EN_S, 0);
3618 		roce_set_field(context->byte_76_srqn_op_en,
3619 			       V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
3620 			       to_hr_srq(ibqp->srq)->srqn);
3621 		roce_set_field(qpc_mask->byte_76_srqn_op_en,
3622 			       V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
3623 	}
3624 
3625 	roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3626 		       V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
3627 	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3628 		       V2_QPC_BYTE_4_SQPN_S, 0);
3629 
3630 	if (attr_mask & IB_QP_DEST_QPN) {
3631 		roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
3632 			       V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn);
3633 		roce_set_field(qpc_mask->byte_56_dqpn_err,
3634 			       V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
3635 	}
3636 }
3637 
3638 static bool check_wqe_rq_mtt_count(struct hns_roce_dev *hr_dev,
3639 				   struct hns_roce_qp *hr_qp, int mtt_cnt,
3640 				   u32 page_size)
3641 {
3642 	struct device *dev = hr_dev->dev;
3643 
3644 	if (hr_qp->rq.wqe_cnt < 1)
3645 		return true;
3646 
3647 	if (mtt_cnt < 1) {
3648 		dev_err(dev, "qp(0x%lx) rqwqe buf ba find failed\n",
3649 			hr_qp->qpn);
3650 		return false;
3651 	}
3652 
3653 	if (mtt_cnt < MTT_MIN_COUNT &&
3654 		(hr_qp->rq.offset + page_size) < hr_qp->buff_size) {
3655 		dev_err(dev, "qp(0x%lx) next rqwqe buf ba find failed\n",
3656 			hr_qp->qpn);
3657 		return false;
3658 	}
3659 
3660 	return true;
3661 }
3662 
3663 static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
3664 				 const struct ib_qp_attr *attr, int attr_mask,
3665 				 struct hns_roce_v2_qp_context *context,
3666 				 struct hns_roce_v2_qp_context *qpc_mask)
3667 {
3668 	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
3669 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3670 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3671 	struct device *dev = hr_dev->dev;
3672 	u64 mtts[MTT_MIN_COUNT] = { 0 };
3673 	dma_addr_t dma_handle_3;
3674 	dma_addr_t dma_handle_2;
3675 	u64 wqe_sge_ba;
3676 	u32 page_size;
3677 	u8 port_num;
3678 	u64 *mtts_3;
3679 	u64 *mtts_2;
3680 	int count;
3681 	u8 *dmac;
3682 	u8 *smac;
3683 	int port;
3684 
3685 	/* Search qp buf's mtts */
3686 	page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
3687 	count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
3688 				  hr_qp->rq.offset / page_size, mtts,
3689 				  MTT_MIN_COUNT, &wqe_sge_ba);
3690 	if (!ibqp->srq)
3691 		if (!check_wqe_rq_mtt_count(hr_dev, hr_qp, count, page_size))
3692 			return -EINVAL;
3693 
3694 	/* Search IRRL's mtts */
3695 	mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
3696 				     hr_qp->qpn, &dma_handle_2);
3697 	if (!mtts_2) {
3698 		dev_err(dev, "qp irrl_table find failed\n");
3699 		return -EINVAL;
3700 	}
3701 
3702 	/* Search TRRL's mtts */
3703 	mtts_3 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
3704 				     hr_qp->qpn, &dma_handle_3);
3705 	if (!mtts_3) {
3706 		dev_err(dev, "qp trrl_table find failed\n");
3707 		return -EINVAL;
3708 	}
3709 
3710 	if (attr_mask & IB_QP_ALT_PATH) {
3711 		dev_err(dev, "INIT2RTR attr_mask (0x%x) error\n", attr_mask);
3712 		return -EINVAL;
3713 	}
3714 
3715 	dmac = (u8 *)attr->ah_attr.roce.dmac;
3716 	context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3);
3717 	qpc_mask->wqe_sge_ba = 0;
3718 
3719 	/*
3720 	 * In v2 engine, software pass context and context mask to hardware
3721 	 * when modifying qp. If software need modify some fields in context,
3722 	 * we should set all bits of the relevant fields in context mask to
3723 	 * 0 at the same time, else set them to 0x1.
3724 	 */
3725 	roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
3726 		       V2_QPC_BYTE_12_WQE_SGE_BA_S, wqe_sge_ba >> (32 + 3));
3727 	roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
3728 		       V2_QPC_BYTE_12_WQE_SGE_BA_S, 0);
3729 
3730 	roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
3731 		       V2_QPC_BYTE_12_SQ_HOP_NUM_S,
3732 		       hr_dev->caps.wqe_sq_hop_num == HNS_ROCE_HOP_NUM_0 ?
3733 		       0 : hr_dev->caps.wqe_sq_hop_num);
3734 	roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
3735 		       V2_QPC_BYTE_12_SQ_HOP_NUM_S, 0);
3736 
3737 	roce_set_field(context->byte_20_smac_sgid_idx,
3738 		       V2_QPC_BYTE_20_SGE_HOP_NUM_M,
3739 		       V2_QPC_BYTE_20_SGE_HOP_NUM_S,
3740 		       ((ibqp->qp_type == IB_QPT_GSI) ||
3741 		       hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
3742 		       hr_dev->caps.wqe_sge_hop_num : 0);
3743 	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3744 		       V2_QPC_BYTE_20_SGE_HOP_NUM_M,
3745 		       V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0);
3746 
3747 	roce_set_field(context->byte_20_smac_sgid_idx,
3748 		       V2_QPC_BYTE_20_RQ_HOP_NUM_M,
3749 		       V2_QPC_BYTE_20_RQ_HOP_NUM_S,
3750 		       hr_dev->caps.wqe_rq_hop_num == HNS_ROCE_HOP_NUM_0 ?
3751 		       0 : hr_dev->caps.wqe_rq_hop_num);
3752 	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3753 		       V2_QPC_BYTE_20_RQ_HOP_NUM_M,
3754 		       V2_QPC_BYTE_20_RQ_HOP_NUM_S, 0);
3755 
3756 	roce_set_field(context->byte_16_buf_ba_pg_sz,
3757 		       V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
3758 		       V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S,
3759 		       hr_qp->wqe_bt_pg_shift + PG_SHIFT_OFFSET);
3760 	roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
3761 		       V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
3762 		       V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, 0);
3763 
3764 	roce_set_field(context->byte_16_buf_ba_pg_sz,
3765 		       V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
3766 		       V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S,
3767 		       hr_dev->caps.mtt_buf_pg_sz + PG_SHIFT_OFFSET);
3768 	roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
3769 		       V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
3770 		       V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);
3771 
3772 	context->rq_cur_blk_addr = cpu_to_le32(mtts[0] >> PAGE_ADDR_SHIFT);
3773 	qpc_mask->rq_cur_blk_addr = 0;
3774 
3775 	roce_set_field(context->byte_92_srq_info,
3776 		       V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
3777 		       V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S,
3778 		       mtts[0] >> (32 + PAGE_ADDR_SHIFT));
3779 	roce_set_field(qpc_mask->byte_92_srq_info,
3780 		       V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
3781 		       V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0);
3782 
3783 	context->rq_nxt_blk_addr = cpu_to_le32(mtts[1] >> PAGE_ADDR_SHIFT);
3784 	qpc_mask->rq_nxt_blk_addr = 0;
3785 
3786 	roce_set_field(context->byte_104_rq_sge,
3787 		       V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
3788 		       V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S,
3789 		       mtts[1] >> (32 + PAGE_ADDR_SHIFT));
3790 	roce_set_field(qpc_mask->byte_104_rq_sge,
3791 		       V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
3792 		       V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);
3793 
3794 	roce_set_field(context->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
3795 		       V2_QPC_BYTE_132_TRRL_BA_S, dma_handle_3 >> 4);
3796 	roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
3797 		       V2_QPC_BYTE_132_TRRL_BA_S, 0);
3798 	context->trrl_ba = cpu_to_le32(dma_handle_3 >> (16 + 4));
3799 	qpc_mask->trrl_ba = 0;
3800 	roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
3801 		       V2_QPC_BYTE_140_TRRL_BA_S,
3802 		       (u32)(dma_handle_3 >> (32 + 16 + 4)));
3803 	roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
3804 		       V2_QPC_BYTE_140_TRRL_BA_S, 0);
3805 
3806 	context->irrl_ba = cpu_to_le32(dma_handle_2 >> 6);
3807 	qpc_mask->irrl_ba = 0;
3808 	roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
3809 		       V2_QPC_BYTE_208_IRRL_BA_S,
3810 		       dma_handle_2 >> (32 + 6));
3811 	roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
3812 		       V2_QPC_BYTE_208_IRRL_BA_S, 0);
3813 
3814 	roce_set_bit(context->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 1);
3815 	roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 0);
3816 
3817 	roce_set_bit(context->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
3818 		     hr_qp->sq_signal_bits);
3819 	roce_set_bit(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
3820 		     0);
3821 
3822 	port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;
3823 
3824 	smac = (u8 *)hr_dev->dev_addr[port];
3825 	/* when dmac equals smac or loop_idc is 1, it should loopback */
3826 	if (ether_addr_equal_unaligned(dmac, smac) ||
3827 	    hr_dev->loop_idc == 0x1) {
3828 		roce_set_bit(context->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 1);
3829 		roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
3830 	}
3831 
3832 	if (attr_mask & IB_QP_DEST_QPN) {
3833 		roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
3834 			       V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
3835 		roce_set_field(qpc_mask->byte_56_dqpn_err,
3836 			       V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
3837 	}
3838 
3839 	/* Configure GID index */
3840 	port_num = rdma_ah_get_port_num(&attr->ah_attr);
3841 	roce_set_field(context->byte_20_smac_sgid_idx,
3842 		       V2_QPC_BYTE_20_SGID_IDX_M,
3843 		       V2_QPC_BYTE_20_SGID_IDX_S,
3844 		       hns_get_gid_index(hr_dev, port_num - 1,
3845 					 grh->sgid_index));
3846 	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3847 		       V2_QPC_BYTE_20_SGID_IDX_M,
3848 		       V2_QPC_BYTE_20_SGID_IDX_S, 0);
3849 	memcpy(&(context->dmac), dmac, sizeof(u32));
3850 	roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
3851 		       V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
3852 	qpc_mask->dmac = 0;
3853 	roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
3854 		       V2_QPC_BYTE_52_DMAC_S, 0);
3855 
3856 	/* mtu*(2^LP_PKTN_INI) should not bigger than 1 message length 64kb */
3857 	roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
3858 		       V2_QPC_BYTE_56_LP_PKTN_INI_S, 4);
3859 	roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
3860 		       V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
3861 
3862 	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
3863 		roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
3864 			       V2_QPC_BYTE_24_MTU_S, IB_MTU_4096);
3865 	else if (attr_mask & IB_QP_PATH_MTU)
3866 		roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
3867 			       V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
3868 
3869 	roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
3870 		       V2_QPC_BYTE_24_MTU_S, 0);
3871 
3872 	roce_set_field(context->byte_84_rq_ci_pi,
3873 		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3874 		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
3875 	roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3876 		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3877 		       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
3878 
3879 	roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3880 		       V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
3881 		       V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
3882 	roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
3883 		     V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
3884 	roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
3885 		       V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
3886 	roce_set_field(qpc_mask->byte_108_rx_reqepsn,
3887 		       V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
3888 		       V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
3889 
3890 	context->rq_rnr_timer = 0;
3891 	qpc_mask->rq_rnr_timer = 0;
3892 
3893 	roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
3894 		       V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
3895 	roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
3896 		       V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
3897 
3898 	/* rocee send 2^lp_sgen_ini segs every time */
3899 	roce_set_field(context->byte_168_irrl_idx,
3900 		       V2_QPC_BYTE_168_LP_SGEN_INI_M,
3901 		       V2_QPC_BYTE_168_LP_SGEN_INI_S, 3);
3902 	roce_set_field(qpc_mask->byte_168_irrl_idx,
3903 		       V2_QPC_BYTE_168_LP_SGEN_INI_M,
3904 		       V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);
3905 
3906 	return 0;
3907 }
3908 
3909 static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
3910 				const struct ib_qp_attr *attr, int attr_mask,
3911 				struct hns_roce_v2_qp_context *context,
3912 				struct hns_roce_v2_qp_context *qpc_mask)
3913 {
3914 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3915 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3916 	struct device *dev = hr_dev->dev;
3917 	u64 sge_cur_blk = 0;
3918 	u64 sq_cur_blk = 0;
3919 	u32 page_size;
3920 	int count;
3921 
3922 	/* Search qp buf's mtts */
3923 	count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL);
3924 	if (count < 1) {
3925 		dev_err(dev, "qp(0x%lx) buf pa find failed\n", hr_qp->qpn);
3926 		return -EINVAL;
3927 	}
3928 
3929 	if (hr_qp->sge.offset) {
3930 		page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
3931 		count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
3932 					  hr_qp->sge.offset / page_size,
3933 					  &sge_cur_blk, 1, NULL);
3934 		if (count < 1) {
3935 			dev_err(dev, "qp(0x%lx) sge pa find failed\n",
3936 				hr_qp->qpn);
3937 			return -EINVAL;
3938 		}
3939 	}
3940 
3941 	/* Not support alternate path and path migration */
3942 	if ((attr_mask & IB_QP_ALT_PATH) ||
3943 	    (attr_mask & IB_QP_PATH_MIG_STATE)) {
3944 		dev_err(dev, "RTR2RTS attr_mask (0x%x)error\n", attr_mask);
3945 		return -EINVAL;
3946 	}
3947 
3948 	/*
3949 	 * In v2 engine, software pass context and context mask to hardware
3950 	 * when modifying qp. If software need modify some fields in context,
3951 	 * we should set all bits of the relevant fields in context mask to
3952 	 * 0 at the same time, else set them to 0x1.
3953 	 */
3954 	context->sq_cur_blk_addr = cpu_to_le32(sq_cur_blk >> PAGE_ADDR_SHIFT);
3955 	roce_set_field(context->byte_168_irrl_idx,
3956 		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
3957 		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S,
3958 		       sq_cur_blk >> (32 + PAGE_ADDR_SHIFT));
3959 	qpc_mask->sq_cur_blk_addr = 0;
3960 	roce_set_field(qpc_mask->byte_168_irrl_idx,
3961 		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
3962 		       V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);
3963 
3964 	context->sq_cur_sge_blk_addr = ((ibqp->qp_type == IB_QPT_GSI) ||
3965 		       hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
3966 		       cpu_to_le32(sge_cur_blk >>
3967 		       PAGE_ADDR_SHIFT) : 0;
3968 	roce_set_field(context->byte_184_irrl_idx,
3969 		       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
3970 		       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
3971 		       ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs >
3972 		       HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
3973 		       (sge_cur_blk >>
3974 		       (32 + PAGE_ADDR_SHIFT)) : 0);
3975 	qpc_mask->sq_cur_sge_blk_addr = 0;
3976 	roce_set_field(qpc_mask->byte_184_irrl_idx,
3977 		       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
3978 		       V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0);
3979 
3980 	context->rx_sq_cur_blk_addr =
3981 		cpu_to_le32(sq_cur_blk >> PAGE_ADDR_SHIFT);
3982 	roce_set_field(context->byte_232_irrl_sge,
3983 		       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
3984 		       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S,
3985 		       sq_cur_blk >> (32 + PAGE_ADDR_SHIFT));
3986 	qpc_mask->rx_sq_cur_blk_addr = 0;
3987 	roce_set_field(qpc_mask->byte_232_irrl_sge,
3988 		       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
3989 		       V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, 0);
3990 
3991 	/*
3992 	 * Set some fields in context to zero, Because the default values
3993 	 * of all fields in context are zero, we need not set them to 0 again.
3994 	 * but we should set the relevant fields of context mask to 0.
3995 	 */
3996 	roce_set_field(qpc_mask->byte_232_irrl_sge,
3997 		       V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
3998 		       V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
3999 
4000 	roce_set_field(qpc_mask->byte_240_irrl_tail,
4001 		       V2_QPC_BYTE_240_RX_ACK_MSN_M,
4002 		       V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
4003 
4004 	roce_set_field(qpc_mask->byte_248_ack_psn,
4005 		       V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
4006 		       V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
4007 	roce_set_bit(qpc_mask->byte_248_ack_psn,
4008 		     V2_QPC_BYTE_248_IRRL_PSN_VLD_S, 0);
4009 	roce_set_field(qpc_mask->byte_248_ack_psn,
4010 		       V2_QPC_BYTE_248_IRRL_PSN_M,
4011 		       V2_QPC_BYTE_248_IRRL_PSN_S, 0);
4012 
4013 	roce_set_field(qpc_mask->byte_240_irrl_tail,
4014 		       V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
4015 		       V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
4016 
4017 	roce_set_field(qpc_mask->byte_220_retry_psn_msn,
4018 		       V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
4019 		       V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
4020 
4021 	roce_set_bit(qpc_mask->byte_248_ack_psn,
4022 		     V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
4023 
4024 	roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
4025 		       V2_QPC_BYTE_212_CHECK_FLG_S, 0);
4026 
4027 	roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
4028 		       V2_QPC_BYTE_212_LSN_S, 0x100);
4029 	roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
4030 		       V2_QPC_BYTE_212_LSN_S, 0);
4031 
4032 	roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
4033 		       V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
4034 
4035 	return 0;
4036 }
4037 
4038 static inline bool hns_roce_v2_check_qp_stat(enum ib_qp_state cur_state,
4039 					     enum ib_qp_state new_state)
4040 {
4041 
4042 	if ((cur_state != IB_QPS_RESET &&
4043 	    (new_state == IB_QPS_ERR || new_state == IB_QPS_RESET)) ||
4044 	    ((cur_state == IB_QPS_RTS || cur_state == IB_QPS_SQD) &&
4045 	    (new_state == IB_QPS_RTS || new_state == IB_QPS_SQD)) ||
4046 	    (cur_state == IB_QPS_SQE && new_state == IB_QPS_RTS))
4047 		return true;
4048 
4049 	return false;
4050 
4051 }
4052 
4053 static int hns_roce_v2_set_path(struct ib_qp *ibqp,
4054 				const struct ib_qp_attr *attr,
4055 				int attr_mask,
4056 				struct hns_roce_v2_qp_context *context,
4057 				struct hns_roce_v2_qp_context *qpc_mask)
4058 {
4059 	const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
4060 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4061 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4062 	const struct ib_gid_attr *gid_attr = NULL;
4063 	int is_roce_protocol;
4064 	bool is_udp = false;
4065 	u16 vlan = 0xffff;
4066 	u8 ib_port;
4067 	u8 hr_port;
4068 	int ret;
4069 
4070 	ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num : hr_qp->port + 1;
4071 	hr_port = ib_port - 1;
4072 	is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) &&
4073 			   rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
4074 
4075 	if (is_roce_protocol) {
4076 		gid_attr = attr->ah_attr.grh.sgid_attr;
4077 		ret = rdma_read_gid_l2_fields(gid_attr, &vlan, NULL);
4078 		if (ret)
4079 			return ret;
4080 
4081 		if (gid_attr)
4082 			is_udp = (gid_attr->gid_type ==
4083 				 IB_GID_TYPE_ROCE_UDP_ENCAP);
4084 	}
4085 
4086 	if (vlan < VLAN_CFI_MASK) {
4087 		roce_set_bit(context->byte_76_srqn_op_en,
4088 			     V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1);
4089 		roce_set_bit(qpc_mask->byte_76_srqn_op_en,
4090 			     V2_QPC_BYTE_76_RQ_VLAN_EN_S, 0);
4091 		roce_set_bit(context->byte_168_irrl_idx,
4092 			     V2_QPC_BYTE_168_SQ_VLAN_EN_S, 1);
4093 		roce_set_bit(qpc_mask->byte_168_irrl_idx,
4094 			     V2_QPC_BYTE_168_SQ_VLAN_EN_S, 0);
4095 	}
4096 
4097 	roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
4098 		       V2_QPC_BYTE_24_VLAN_ID_S, vlan);
4099 	roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
4100 		       V2_QPC_BYTE_24_VLAN_ID_S, 0);
4101 
4102 	if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
4103 		dev_err(hr_dev->dev, "sgid_index(%u) too large. max is %d\n",
4104 			grh->sgid_index, hr_dev->caps.gid_table_len[hr_port]);
4105 		return -EINVAL;
4106 	}
4107 
4108 	if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) {
4109 		dev_err(hr_dev->dev, "ah attr is not RDMA roce type\n");
4110 		return -EINVAL;
4111 	}
4112 
4113 	roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M,
4114 		       V2_QPC_BYTE_52_UDPSPN_S,
4115 		       is_udp ? 0x12b7 : 0);
4116 
4117 	roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M,
4118 		       V2_QPC_BYTE_52_UDPSPN_S, 0);
4119 
4120 	roce_set_field(context->byte_20_smac_sgid_idx,
4121 		       V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S,
4122 		       grh->sgid_index);
4123 
4124 	roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
4125 		       V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, 0);
4126 
4127 	roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
4128 		       V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
4129 	roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
4130 		       V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
4131 
4132 	if (hr_dev->pci_dev->revision == 0x21 && is_udp)
4133 		roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
4134 			       V2_QPC_BYTE_24_TC_S, grh->traffic_class >> 2);
4135 	else
4136 		roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
4137 			       V2_QPC_BYTE_24_TC_S, grh->traffic_class);
4138 	roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
4139 		       V2_QPC_BYTE_24_TC_S, 0);
4140 	roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
4141 		       V2_QPC_BYTE_28_FL_S, grh->flow_label);
4142 	roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
4143 		       V2_QPC_BYTE_28_FL_S, 0);
4144 	memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
4145 	memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
4146 	roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
4147 		       V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr));
4148 	roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
4149 		       V2_QPC_BYTE_28_SL_S, 0);
4150 	hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
4151 
4152 	return 0;
4153 }
4154 
4155 static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
4156 				      const struct ib_qp_attr *attr,
4157 				      int attr_mask,
4158 				      enum ib_qp_state cur_state,
4159 				      enum ib_qp_state new_state,
4160 				      struct hns_roce_v2_qp_context *context,
4161 				      struct hns_roce_v2_qp_context *qpc_mask)
4162 {
4163 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4164 	int ret = 0;
4165 
4166 	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
4167 		memset(qpc_mask, 0, sizeof(*qpc_mask));
4168 		modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
4169 					qpc_mask);
4170 	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
4171 		modify_qp_init_to_init(ibqp, attr, attr_mask, context,
4172 				       qpc_mask);
4173 	} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
4174 		ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
4175 					    qpc_mask);
4176 		if (ret)
4177 			goto out;
4178 	} else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
4179 		ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
4180 					   qpc_mask);
4181 		if (ret)
4182 			goto out;
4183 	} else if (hns_roce_v2_check_qp_stat(cur_state, new_state)) {
4184 		/* Nothing */
4185 		;
4186 	} else {
4187 		dev_err(hr_dev->dev, "Illegal state for QP!\n");
4188 		ret = -EINVAL;
4189 		goto out;
4190 	}
4191 
4192 out:
4193 	return ret;
4194 }
4195 
4196 static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
4197 				      const struct ib_qp_attr *attr,
4198 				      int attr_mask,
4199 				      struct hns_roce_v2_qp_context *context,
4200 				      struct hns_roce_v2_qp_context *qpc_mask)
4201 {
4202 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4203 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4204 	int ret = 0;
4205 
4206 	if (attr_mask & IB_QP_AV) {
4207 		ret = hns_roce_v2_set_path(ibqp, attr, attr_mask, context,
4208 					   qpc_mask);
4209 		if (ret)
4210 			return ret;
4211 	}
4212 
4213 	if (attr_mask & IB_QP_TIMEOUT) {
4214 		if (attr->timeout < 31) {
4215 			roce_set_field(context->byte_28_at_fl,
4216 				       V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
4217 				       attr->timeout);
4218 			roce_set_field(qpc_mask->byte_28_at_fl,
4219 				       V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
4220 				       0);
4221 		} else {
4222 			dev_warn(hr_dev->dev,
4223 				 "Local ACK timeout shall be 0 to 30.\n");
4224 		}
4225 	}
4226 
4227 	if (attr_mask & IB_QP_RETRY_CNT) {
4228 		roce_set_field(context->byte_212_lsn,
4229 			       V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
4230 			       V2_QPC_BYTE_212_RETRY_NUM_INIT_S,
4231 			       attr->retry_cnt);
4232 		roce_set_field(qpc_mask->byte_212_lsn,
4233 			       V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
4234 			       V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);
4235 
4236 		roce_set_field(context->byte_212_lsn,
4237 			       V2_QPC_BYTE_212_RETRY_CNT_M,
4238 			       V2_QPC_BYTE_212_RETRY_CNT_S,
4239 			       attr->retry_cnt);
4240 		roce_set_field(qpc_mask->byte_212_lsn,
4241 			       V2_QPC_BYTE_212_RETRY_CNT_M,
4242 			       V2_QPC_BYTE_212_RETRY_CNT_S, 0);
4243 	}
4244 
4245 	if (attr_mask & IB_QP_RNR_RETRY) {
4246 		roce_set_field(context->byte_244_rnr_rxack,
4247 			       V2_QPC_BYTE_244_RNR_NUM_INIT_M,
4248 			       V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
4249 		roce_set_field(qpc_mask->byte_244_rnr_rxack,
4250 			       V2_QPC_BYTE_244_RNR_NUM_INIT_M,
4251 			       V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);
4252 
4253 		roce_set_field(context->byte_244_rnr_rxack,
4254 			       V2_QPC_BYTE_244_RNR_CNT_M,
4255 			       V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
4256 		roce_set_field(qpc_mask->byte_244_rnr_rxack,
4257 			       V2_QPC_BYTE_244_RNR_CNT_M,
4258 			       V2_QPC_BYTE_244_RNR_CNT_S, 0);
4259 	}
4260 
4261 	/* RC&UC&UD required attr */
4262 	if (attr_mask & IB_QP_SQ_PSN) {
4263 		roce_set_field(context->byte_172_sq_psn,
4264 			       V2_QPC_BYTE_172_SQ_CUR_PSN_M,
4265 			       V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
4266 		roce_set_field(qpc_mask->byte_172_sq_psn,
4267 			       V2_QPC_BYTE_172_SQ_CUR_PSN_M,
4268 			       V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);
4269 
4270 		roce_set_field(context->byte_196_sq_psn,
4271 			       V2_QPC_BYTE_196_SQ_MAX_PSN_M,
4272 			       V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
4273 		roce_set_field(qpc_mask->byte_196_sq_psn,
4274 			       V2_QPC_BYTE_196_SQ_MAX_PSN_M,
4275 			       V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
4276 
4277 		roce_set_field(context->byte_220_retry_psn_msn,
4278 			       V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
4279 			       V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
4280 		roce_set_field(qpc_mask->byte_220_retry_psn_msn,
4281 			       V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
4282 			       V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);
4283 
4284 		roce_set_field(context->byte_224_retry_msg,
4285 			       V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
4286 			       V2_QPC_BYTE_224_RETRY_MSG_PSN_S,
4287 			       attr->sq_psn >> V2_QPC_BYTE_220_RETRY_MSG_PSN_S);
4288 		roce_set_field(qpc_mask->byte_224_retry_msg,
4289 			       V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
4290 			       V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);
4291 
4292 		roce_set_field(context->byte_224_retry_msg,
4293 			       V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
4294 			       V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S,
4295 			       attr->sq_psn);
4296 		roce_set_field(qpc_mask->byte_224_retry_msg,
4297 			       V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
4298 			       V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);
4299 
4300 		roce_set_field(context->byte_244_rnr_rxack,
4301 			       V2_QPC_BYTE_244_RX_ACK_EPSN_M,
4302 			       V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
4303 		roce_set_field(qpc_mask->byte_244_rnr_rxack,
4304 			       V2_QPC_BYTE_244_RX_ACK_EPSN_M,
4305 			       V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
4306 	}
4307 
4308 	if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
4309 	     attr->max_dest_rd_atomic) {
4310 		roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
4311 			       V2_QPC_BYTE_140_RR_MAX_S,
4312 			       fls(attr->max_dest_rd_atomic - 1));
4313 		roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
4314 			       V2_QPC_BYTE_140_RR_MAX_S, 0);
4315 	}
4316 
4317 	if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
4318 		roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
4319 			       V2_QPC_BYTE_208_SR_MAX_S,
4320 			       fls(attr->max_rd_atomic - 1));
4321 		roce_set_field(qpc_mask->byte_208_irrl,
4322 			       V2_QPC_BYTE_208_SR_MAX_M,
4323 			       V2_QPC_BYTE_208_SR_MAX_S, 0);
4324 	}
4325 
4326 	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
4327 		set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
4328 
4329 	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
4330 		roce_set_field(context->byte_80_rnr_rx_cqn,
4331 			       V2_QPC_BYTE_80_MIN_RNR_TIME_M,
4332 			       V2_QPC_BYTE_80_MIN_RNR_TIME_S,
4333 			       attr->min_rnr_timer);
4334 		roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
4335 			       V2_QPC_BYTE_80_MIN_RNR_TIME_M,
4336 			       V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
4337 	}
4338 
4339 	/* RC&UC required attr */
4340 	if (attr_mask & IB_QP_RQ_PSN) {
4341 		roce_set_field(context->byte_108_rx_reqepsn,
4342 			       V2_QPC_BYTE_108_RX_REQ_EPSN_M,
4343 			       V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
4344 		roce_set_field(qpc_mask->byte_108_rx_reqepsn,
4345 			       V2_QPC_BYTE_108_RX_REQ_EPSN_M,
4346 			       V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);
4347 
4348 		roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
4349 			       V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
4350 		roce_set_field(qpc_mask->byte_152_raq,
4351 			       V2_QPC_BYTE_152_RAQ_PSN_M,
4352 			       V2_QPC_BYTE_152_RAQ_PSN_S, 0);
4353 	}
4354 
4355 	if (attr_mask & IB_QP_QKEY) {
4356 		context->qkey_xrcd = cpu_to_le32(attr->qkey);
4357 		qpc_mask->qkey_xrcd = 0;
4358 		hr_qp->qkey = attr->qkey;
4359 	}
4360 
4361 	return ret;
4362 }
4363 
4364 static void hns_roce_v2_record_opt_fields(struct ib_qp *ibqp,
4365 					  const struct ib_qp_attr *attr,
4366 					  int attr_mask)
4367 {
4368 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4369 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4370 
4371 	if (attr_mask & IB_QP_ACCESS_FLAGS)
4372 		hr_qp->atomic_rd_en = attr->qp_access_flags;
4373 
4374 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
4375 		hr_qp->resp_depth = attr->max_dest_rd_atomic;
4376 	if (attr_mask & IB_QP_PORT) {
4377 		hr_qp->port = attr->port_num - 1;
4378 		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
4379 	}
4380 }
4381 
4382 static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
4383 				 const struct ib_qp_attr *attr,
4384 				 int attr_mask, enum ib_qp_state cur_state,
4385 				 enum ib_qp_state new_state)
4386 {
4387 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4388 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4389 	struct hns_roce_v2_qp_context ctx[2];
4390 	struct hns_roce_v2_qp_context *context = ctx;
4391 	struct hns_roce_v2_qp_context *qpc_mask = ctx + 1;
4392 	struct device *dev = hr_dev->dev;
4393 	int ret;
4394 
4395 	/*
4396 	 * In v2 engine, software pass context and context mask to hardware
4397 	 * when modifying qp. If software need modify some fields in context,
4398 	 * we should set all bits of the relevant fields in context mask to
4399 	 * 0 at the same time, else set them to 0x1.
4400 	 */
4401 	memset(context, 0, sizeof(*context));
4402 	memset(qpc_mask, 0xff, sizeof(*qpc_mask));
4403 	ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state,
4404 					 new_state, context, qpc_mask);
4405 	if (ret)
4406 		goto out;
4407 
4408 	/* When QP state is err, SQ and RQ WQE should be flushed */
4409 	if (new_state == IB_QPS_ERR) {
4410 		roce_set_field(context->byte_160_sq_ci_pi,
4411 			       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
4412 			       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S,
4413 			       hr_qp->sq.head);
4414 		roce_set_field(qpc_mask->byte_160_sq_ci_pi,
4415 			       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
4416 			       V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
4417 
4418 		if (!ibqp->srq) {
4419 			roce_set_field(context->byte_84_rq_ci_pi,
4420 			       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
4421 			       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
4422 			       hr_qp->rq.head);
4423 			roce_set_field(qpc_mask->byte_84_rq_ci_pi,
4424 			       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
4425 			       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
4426 		}
4427 	}
4428 
4429 	/* Configure the optional fields */
4430 	ret = hns_roce_v2_set_opt_fields(ibqp, attr, attr_mask, context,
4431 					 qpc_mask);
4432 	if (ret)
4433 		goto out;
4434 
4435 	roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S,
4436 		     ibqp->srq ? 1 : 0);
4437 	roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
4438 		     V2_QPC_BYTE_108_INV_CREDIT_S, 0);
4439 
4440 	/* Every status migrate must change state */
4441 	roce_set_field(context->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
4442 		       V2_QPC_BYTE_60_QP_ST_S, new_state);
4443 	roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
4444 		       V2_QPC_BYTE_60_QP_ST_S, 0);
4445 
4446 	/* SW pass context to HW */
4447 	ret = hns_roce_v2_qp_modify(hr_dev, cur_state, new_state, ctx, hr_qp);
4448 	if (ret) {
4449 		dev_err(dev, "hns_roce_qp_modify failed(%d)\n", ret);
4450 		goto out;
4451 	}
4452 
4453 	hr_qp->state = new_state;
4454 
4455 	hns_roce_v2_record_opt_fields(ibqp, attr, attr_mask);
4456 
4457 	if (new_state == IB_QPS_RESET && !ibqp->uobject) {
4458 		hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
4459 				     ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
4460 		if (ibqp->send_cq != ibqp->recv_cq)
4461 			hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
4462 					     hr_qp->qpn, NULL);
4463 
4464 		hr_qp->rq.head = 0;
4465 		hr_qp->rq.tail = 0;
4466 		hr_qp->sq.head = 0;
4467 		hr_qp->sq.tail = 0;
4468 		hr_qp->sq_next_wqe = 0;
4469 		hr_qp->next_sge = 0;
4470 		if (hr_qp->rq.wqe_cnt)
4471 			*hr_qp->rdb.db_record = 0;
4472 	}
4473 
4474 out:
4475 	return ret;
4476 }
4477 
4478 static inline enum ib_qp_state to_ib_qp_st(enum hns_roce_v2_qp_state state)
4479 {
4480 	switch (state) {
4481 	case HNS_ROCE_QP_ST_RST:	return IB_QPS_RESET;
4482 	case HNS_ROCE_QP_ST_INIT:	return IB_QPS_INIT;
4483 	case HNS_ROCE_QP_ST_RTR:	return IB_QPS_RTR;
4484 	case HNS_ROCE_QP_ST_RTS:	return IB_QPS_RTS;
4485 	case HNS_ROCE_QP_ST_SQ_DRAINING:
4486 	case HNS_ROCE_QP_ST_SQD:	return IB_QPS_SQD;
4487 	case HNS_ROCE_QP_ST_SQER:	return IB_QPS_SQE;
4488 	case HNS_ROCE_QP_ST_ERR:	return IB_QPS_ERR;
4489 	default:			return -1;
4490 	}
4491 }
4492 
4493 static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
4494 				 struct hns_roce_qp *hr_qp,
4495 				 struct hns_roce_v2_qp_context *hr_context)
4496 {
4497 	struct hns_roce_cmd_mailbox *mailbox;
4498 	int ret;
4499 
4500 	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
4501 	if (IS_ERR(mailbox))
4502 		return PTR_ERR(mailbox);
4503 
4504 	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
4505 				HNS_ROCE_CMD_QUERY_QPC,
4506 				HNS_ROCE_CMD_TIMEOUT_MSECS);
4507 	if (ret) {
4508 		dev_err(hr_dev->dev, "QUERY QP cmd process error\n");
4509 		goto out;
4510 	}
4511 
4512 	memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
4513 
4514 out:
4515 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
4516 	return ret;
4517 }
4518 
4519 static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
4520 				int qp_attr_mask,
4521 				struct ib_qp_init_attr *qp_init_attr)
4522 {
4523 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4524 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4525 	struct hns_roce_v2_qp_context context = {};
4526 	struct device *dev = hr_dev->dev;
4527 	int tmp_qp_state;
4528 	int state;
4529 	int ret;
4530 
4531 	memset(qp_attr, 0, sizeof(*qp_attr));
4532 	memset(qp_init_attr, 0, sizeof(*qp_init_attr));
4533 
4534 	mutex_lock(&hr_qp->mutex);
4535 
4536 	if (hr_qp->state == IB_QPS_RESET) {
4537 		qp_attr->qp_state = IB_QPS_RESET;
4538 		ret = 0;
4539 		goto done;
4540 	}
4541 
4542 	ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, &context);
4543 	if (ret) {
4544 		dev_err(dev, "query qpc error\n");
4545 		ret = -EINVAL;
4546 		goto out;
4547 	}
4548 
4549 	state = roce_get_field(context.byte_60_qpst_tempid,
4550 			       V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S);
4551 	tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
4552 	if (tmp_qp_state == -1) {
4553 		dev_err(dev, "Illegal ib_qp_state\n");
4554 		ret = -EINVAL;
4555 		goto out;
4556 	}
4557 	hr_qp->state = (u8)tmp_qp_state;
4558 	qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
4559 	qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context.byte_24_mtu_tc,
4560 							V2_QPC_BYTE_24_MTU_M,
4561 							V2_QPC_BYTE_24_MTU_S);
4562 	qp_attr->path_mig_state = IB_MIG_ARMED;
4563 	qp_attr->ah_attr.type   = RDMA_AH_ATTR_TYPE_ROCE;
4564 	if (hr_qp->ibqp.qp_type == IB_QPT_UD)
4565 		qp_attr->qkey = V2_QKEY_VAL;
4566 
4567 	qp_attr->rq_psn = roce_get_field(context.byte_108_rx_reqepsn,
4568 					 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
4569 					 V2_QPC_BYTE_108_RX_REQ_EPSN_S);
4570 	qp_attr->sq_psn = (u32)roce_get_field(context.byte_172_sq_psn,
4571 					      V2_QPC_BYTE_172_SQ_CUR_PSN_M,
4572 					      V2_QPC_BYTE_172_SQ_CUR_PSN_S);
4573 	qp_attr->dest_qp_num = (u8)roce_get_field(context.byte_56_dqpn_err,
4574 						  V2_QPC_BYTE_56_DQPN_M,
4575 						  V2_QPC_BYTE_56_DQPN_S);
4576 	qp_attr->qp_access_flags = ((roce_get_bit(context.byte_76_srqn_op_en,
4577 				    V2_QPC_BYTE_76_RRE_S)) << V2_QP_RRE_S) |
4578 				    ((roce_get_bit(context.byte_76_srqn_op_en,
4579 				    V2_QPC_BYTE_76_RWE_S)) << V2_QP_RWE_S) |
4580 				    ((roce_get_bit(context.byte_76_srqn_op_en,
4581 				    V2_QPC_BYTE_76_ATE_S)) << V2_QP_ATE_S);
4582 
4583 	if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
4584 	    hr_qp->ibqp.qp_type == IB_QPT_UC) {
4585 		struct ib_global_route *grh =
4586 				rdma_ah_retrieve_grh(&qp_attr->ah_attr);
4587 
4588 		rdma_ah_set_sl(&qp_attr->ah_attr,
4589 			       roce_get_field(context.byte_28_at_fl,
4590 					      V2_QPC_BYTE_28_SL_M,
4591 					      V2_QPC_BYTE_28_SL_S));
4592 		grh->flow_label = roce_get_field(context.byte_28_at_fl,
4593 						 V2_QPC_BYTE_28_FL_M,
4594 						 V2_QPC_BYTE_28_FL_S);
4595 		grh->sgid_index = roce_get_field(context.byte_20_smac_sgid_idx,
4596 						 V2_QPC_BYTE_20_SGID_IDX_M,
4597 						 V2_QPC_BYTE_20_SGID_IDX_S);
4598 		grh->hop_limit = roce_get_field(context.byte_24_mtu_tc,
4599 						V2_QPC_BYTE_24_HOP_LIMIT_M,
4600 						V2_QPC_BYTE_24_HOP_LIMIT_S);
4601 		grh->traffic_class = roce_get_field(context.byte_24_mtu_tc,
4602 						    V2_QPC_BYTE_24_TC_M,
4603 						    V2_QPC_BYTE_24_TC_S);
4604 
4605 		memcpy(grh->dgid.raw, context.dgid, sizeof(grh->dgid.raw));
4606 	}
4607 
4608 	qp_attr->port_num = hr_qp->port + 1;
4609 	qp_attr->sq_draining = 0;
4610 	qp_attr->max_rd_atomic = 1 << roce_get_field(context.byte_208_irrl,
4611 						     V2_QPC_BYTE_208_SR_MAX_M,
4612 						     V2_QPC_BYTE_208_SR_MAX_S);
4613 	qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context.byte_140_raq,
4614 						     V2_QPC_BYTE_140_RR_MAX_M,
4615 						     V2_QPC_BYTE_140_RR_MAX_S);
4616 	qp_attr->min_rnr_timer = (u8)roce_get_field(context.byte_80_rnr_rx_cqn,
4617 						 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
4618 						 V2_QPC_BYTE_80_MIN_RNR_TIME_S);
4619 	qp_attr->timeout = (u8)roce_get_field(context.byte_28_at_fl,
4620 					      V2_QPC_BYTE_28_AT_M,
4621 					      V2_QPC_BYTE_28_AT_S);
4622 	qp_attr->retry_cnt = roce_get_field(context.byte_212_lsn,
4623 					    V2_QPC_BYTE_212_RETRY_CNT_M,
4624 					    V2_QPC_BYTE_212_RETRY_CNT_S);
4625 	qp_attr->rnr_retry = le32_to_cpu(context.rq_rnr_timer);
4626 
4627 done:
4628 	qp_attr->cur_qp_state = qp_attr->qp_state;
4629 	qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
4630 	qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
4631 
4632 	if (!ibqp->uobject) {
4633 		qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
4634 		qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
4635 	} else {
4636 		qp_attr->cap.max_send_wr = 0;
4637 		qp_attr->cap.max_send_sge = 0;
4638 	}
4639 
4640 	qp_init_attr->cap = qp_attr->cap;
4641 
4642 out:
4643 	mutex_unlock(&hr_qp->mutex);
4644 	return ret;
4645 }
4646 
4647 static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
4648 					 struct hns_roce_qp *hr_qp,
4649 					 struct ib_udata *udata)
4650 {
4651 	struct hns_roce_cq *send_cq, *recv_cq;
4652 	struct ib_device *ibdev = &hr_dev->ib_dev;
4653 	int ret;
4654 
4655 	if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) {
4656 		/* Modify qp to reset before destroying qp */
4657 		ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
4658 					    hr_qp->state, IB_QPS_RESET);
4659 		if (ret) {
4660 			ibdev_err(ibdev, "modify QP to Reset failed.\n");
4661 			return ret;
4662 		}
4663 	}
4664 
4665 	send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
4666 	recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);
4667 
4668 	hns_roce_lock_cqs(send_cq, recv_cq);
4669 
4670 	if (!udata) {
4671 		__hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
4672 				       to_hr_srq(hr_qp->ibqp.srq) : NULL);
4673 		if (send_cq != recv_cq)
4674 			__hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
4675 	}
4676 
4677 	hns_roce_qp_remove(hr_dev, hr_qp);
4678 
4679 	hns_roce_unlock_cqs(send_cq, recv_cq);
4680 
4681 	hns_roce_qp_free(hr_dev, hr_qp);
4682 
4683 	/* Not special_QP, free their QPN */
4684 	if ((hr_qp->ibqp.qp_type == IB_QPT_RC) ||
4685 	    (hr_qp->ibqp.qp_type == IB_QPT_UC) ||
4686 	    (hr_qp->ibqp.qp_type == IB_QPT_UD))
4687 		hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
4688 
4689 	hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr);
4690 
4691 	if (udata) {
4692 		struct hns_roce_ucontext *context =
4693 			rdma_udata_to_drv_context(
4694 				udata,
4695 				struct hns_roce_ucontext,
4696 				ibucontext);
4697 
4698 		if (hr_qp->sq.wqe_cnt && (hr_qp->sdb_en == 1))
4699 			hns_roce_db_unmap_user(context, &hr_qp->sdb);
4700 
4701 		if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1))
4702 			hns_roce_db_unmap_user(context, &hr_qp->rdb);
4703 	} else {
4704 		kfree(hr_qp->sq.wrid);
4705 		kfree(hr_qp->rq.wrid);
4706 		hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
4707 		if (hr_qp->rq.wqe_cnt)
4708 			hns_roce_free_db(hr_dev, &hr_qp->rdb);
4709 	}
4710 	ib_umem_release(hr_qp->umem);
4711 
4712 	if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
4713 	     hr_qp->rq.wqe_cnt) {
4714 		kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
4715 		kfree(hr_qp->rq_inl_buf.wqe_list);
4716 	}
4717 
4718 	return 0;
4719 }
4720 
4721 static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
4722 {
4723 	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4724 	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4725 	int ret;
4726 
4727 	ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
4728 	if (ret) {
4729 		ibdev_err(&hr_dev->ib_dev, "Destroy qp 0x%06lx failed(%d)\n",
4730 			  hr_qp->qpn, ret);
4731 		return ret;
4732 	}
4733 
4734 	if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
4735 		kfree(hr_to_hr_sqp(hr_qp));
4736 	else
4737 		kfree(hr_qp);
4738 
4739 	return 0;
4740 }
4741 
4742 static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
4743 						struct hns_roce_qp *hr_qp)
4744 {
4745 	struct hns_roce_sccc_clr_done *resp;
4746 	struct hns_roce_sccc_clr *clr;
4747 	struct hns_roce_cmq_desc desc;
4748 	int ret, i;
4749 
4750 	mutex_lock(&hr_dev->qp_table.scc_mutex);
4751 
4752 	/* set scc ctx clear done flag */
4753 	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
4754 	ret =  hns_roce_cmq_send(hr_dev, &desc, 1);
4755 	if (ret) {
4756 		dev_err(hr_dev->dev, "Reset SCC ctx  failed(%d)\n", ret);
4757 		goto out;
4758 	}
4759 
4760 	/* clear scc context */
4761 	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLR_SCCC, false);
4762 	clr = (struct hns_roce_sccc_clr *)desc.data;
4763 	clr->qpn = cpu_to_le32(hr_qp->qpn);
4764 	ret =  hns_roce_cmq_send(hr_dev, &desc, 1);
4765 	if (ret) {
4766 		dev_err(hr_dev->dev, "Clear SCC ctx failed(%d)\n", ret);
4767 		goto out;
4768 	}
4769 
4770 	/* query scc context clear is done or not */
4771 	resp = (struct hns_roce_sccc_clr_done *)desc.data;
4772 	for (i = 0; i <= HNS_ROCE_CMQ_SCC_CLR_DONE_CNT; i++) {
4773 		hns_roce_cmq_setup_basic_desc(&desc,
4774 					      HNS_ROCE_OPC_QUERY_SCCC, true);
4775 		ret = hns_roce_cmq_send(hr_dev, &desc, 1);
4776 		if (ret) {
4777 			dev_err(hr_dev->dev, "Query clr cmq failed(%d)\n", ret);
4778 			goto out;
4779 		}
4780 
4781 		if (resp->clr_done)
4782 			goto out;
4783 
4784 		msleep(20);
4785 	}
4786 
4787 	dev_err(hr_dev->dev, "Query SCC clr done flag overtime.\n");
4788 	ret = -ETIMEDOUT;
4789 
4790 out:
4791 	mutex_unlock(&hr_dev->qp_table.scc_mutex);
4792 	return ret;
4793 }
4794 
4795 static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
4796 {
4797 	struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
4798 	struct hns_roce_v2_cq_context *cq_context;
4799 	struct hns_roce_cq *hr_cq = to_hr_cq(cq);
4800 	struct hns_roce_v2_cq_context *cqc_mask;
4801 	struct hns_roce_cmd_mailbox *mailbox;
4802 	int ret;
4803 
4804 	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
4805 	if (IS_ERR(mailbox))
4806 		return PTR_ERR(mailbox);
4807 
4808 	cq_context = mailbox->buf;
4809 	cqc_mask = (struct hns_roce_v2_cq_context *)mailbox->buf + 1;
4810 
4811 	memset(cqc_mask, 0xff, sizeof(*cqc_mask));
4812 
4813 	roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
4814 		       V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
4815 		       cq_count);
4816 	roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
4817 		       V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
4818 		       0);
4819 	roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
4820 		       V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
4821 		       cq_period);
4822 	roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
4823 		       V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
4824 		       0);
4825 
4826 	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 1,
4827 				HNS_ROCE_CMD_MODIFY_CQC,
4828 				HNS_ROCE_CMD_TIMEOUT_MSECS);
4829 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
4830 	if (ret)
4831 		dev_err(hr_dev->dev, "MODIFY CQ Failed to cmd mailbox.\n");
4832 
4833 	return ret;
4834 }
4835 
4836 static void hns_roce_set_qps_to_err(struct hns_roce_dev *hr_dev, u32 qpn)
4837 {
4838 	struct hns_roce_qp *hr_qp;
4839 	struct ib_qp_attr attr;
4840 	int attr_mask;
4841 	int ret;
4842 
4843 	hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
4844 	if (!hr_qp) {
4845 		dev_warn(hr_dev->dev, "no hr_qp can be found!\n");
4846 		return;
4847 	}
4848 
4849 	if (hr_qp->ibqp.uobject) {
4850 		if (hr_qp->sdb_en == 1) {
4851 			hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
4852 			if (hr_qp->rdb_en == 1)
4853 				hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
4854 		} else {
4855 			dev_warn(hr_dev->dev, "flush cqe is unsupported in userspace!\n");
4856 			return;
4857 		}
4858 	}
4859 
4860 	attr_mask = IB_QP_STATE;
4861 	attr.qp_state = IB_QPS_ERR;
4862 	ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, &attr, attr_mask,
4863 				    hr_qp->state, IB_QPS_ERR);
4864 	if (ret)
4865 		dev_err(hr_dev->dev, "failed to modify qp %d to err state.\n",
4866 			qpn);
4867 }
4868 
4869 static void hns_roce_irq_work_handle(struct work_struct *work)
4870 {
4871 	struct hns_roce_work *irq_work =
4872 				container_of(work, struct hns_roce_work, work);
4873 	struct device *dev = irq_work->hr_dev->dev;
4874 	u32 qpn = irq_work->qpn;
4875 	u32 cqn = irq_work->cqn;
4876 
4877 	switch (irq_work->event_type) {
4878 	case HNS_ROCE_EVENT_TYPE_PATH_MIG:
4879 		dev_info(dev, "Path migrated succeeded.\n");
4880 		break;
4881 	case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
4882 		dev_warn(dev, "Path migration failed.\n");
4883 		break;
4884 	case HNS_ROCE_EVENT_TYPE_COMM_EST:
4885 		break;
4886 	case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
4887 		dev_warn(dev, "Send queue drained.\n");
4888 		break;
4889 	case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
4890 		dev_err(dev, "Local work queue 0x%x catas error, sub_type:%d\n",
4891 			qpn, irq_work->sub_type);
4892 		hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
4893 		break;
4894 	case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
4895 		dev_err(dev, "Invalid request local work queue 0x%x error.\n",
4896 			qpn);
4897 		hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
4898 		break;
4899 	case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
4900 		dev_err(dev, "Local access violation work queue 0x%x error, sub_type:%d\n",
4901 			qpn, irq_work->sub_type);
4902 		hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
4903 		break;
4904 	case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
4905 		dev_warn(dev, "SRQ limit reach.\n");
4906 		break;
4907 	case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
4908 		dev_warn(dev, "SRQ last wqe reach.\n");
4909 		break;
4910 	case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
4911 		dev_err(dev, "SRQ catas error.\n");
4912 		break;
4913 	case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
4914 		dev_err(dev, "CQ 0x%x access err.\n", cqn);
4915 		break;
4916 	case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
4917 		dev_warn(dev, "CQ 0x%x overflow\n", cqn);
4918 		break;
4919 	case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
4920 		dev_warn(dev, "DB overflow.\n");
4921 		break;
4922 	case HNS_ROCE_EVENT_TYPE_FLR:
4923 		dev_warn(dev, "Function level reset.\n");
4924 		break;
4925 	default:
4926 		break;
4927 	}
4928 
4929 	kfree(irq_work);
4930 }
4931 
4932 static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
4933 				      struct hns_roce_eq *eq,
4934 				      u32 qpn, u32 cqn)
4935 {
4936 	struct hns_roce_work *irq_work;
4937 
4938 	irq_work = kzalloc(sizeof(struct hns_roce_work), GFP_ATOMIC);
4939 	if (!irq_work)
4940 		return;
4941 
4942 	INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle);
4943 	irq_work->hr_dev = hr_dev;
4944 	irq_work->qpn = qpn;
4945 	irq_work->cqn = cqn;
4946 	irq_work->event_type = eq->event_type;
4947 	irq_work->sub_type = eq->sub_type;
4948 	queue_work(hr_dev->irq_workq, &(irq_work->work));
4949 }
4950 
4951 static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
4952 {
4953 	struct hns_roce_dev *hr_dev = eq->hr_dev;
4954 	__le32 doorbell[2];
4955 
4956 	doorbell[0] = 0;
4957 	doorbell[1] = 0;
4958 
4959 	if (eq->type_flag == HNS_ROCE_AEQ) {
4960 		roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
4961 			       HNS_ROCE_V2_EQ_DB_CMD_S,
4962 			       eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
4963 			       HNS_ROCE_EQ_DB_CMD_AEQ :
4964 			       HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
4965 	} else {
4966 		roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_TAG_M,
4967 			       HNS_ROCE_V2_EQ_DB_TAG_S, eq->eqn);
4968 
4969 		roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
4970 			       HNS_ROCE_V2_EQ_DB_CMD_S,
4971 			       eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
4972 			       HNS_ROCE_EQ_DB_CMD_CEQ :
4973 			       HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
4974 	}
4975 
4976 	roce_set_field(doorbell[1], HNS_ROCE_V2_EQ_DB_PARA_M,
4977 		       HNS_ROCE_V2_EQ_DB_PARA_S,
4978 		       (eq->cons_index & HNS_ROCE_V2_CONS_IDX_M));
4979 
4980 	hns_roce_write64(hr_dev, doorbell, eq->doorbell);
4981 }
4982 
4983 static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry)
4984 {
4985 	u32 buf_chk_sz;
4986 	unsigned long off;
4987 
4988 	buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4989 	off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
4990 
4991 	return (struct hns_roce_aeqe *)((char *)(eq->buf_list->buf) +
4992 		off % buf_chk_sz);
4993 }
4994 
4995 static struct hns_roce_aeqe *mhop_get_aeqe(struct hns_roce_eq *eq, u32 entry)
4996 {
4997 	u32 buf_chk_sz;
4998 	unsigned long off;
4999 
5000 	buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
5001 
5002 	off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
5003 
5004 	if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
5005 		return (struct hns_roce_aeqe *)((u8 *)(eq->bt_l0) +
5006 			off % buf_chk_sz);
5007 	else
5008 		return (struct hns_roce_aeqe *)((u8 *)
5009 			(eq->buf[off / buf_chk_sz]) + off % buf_chk_sz);
5010 }
5011 
5012 static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
5013 {
5014 	struct hns_roce_aeqe *aeqe;
5015 
5016 	if (!eq->hop_num)
5017 		aeqe = get_aeqe_v2(eq, eq->cons_index);
5018 	else
5019 		aeqe = mhop_get_aeqe(eq, eq->cons_index);
5020 
5021 	return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
5022 		!!(eq->cons_index & eq->entries)) ? aeqe : NULL;
5023 }
5024 
5025 static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
5026 			       struct hns_roce_eq *eq)
5027 {
5028 	struct device *dev = hr_dev->dev;
5029 	struct hns_roce_aeqe *aeqe = next_aeqe_sw_v2(eq);
5030 	int aeqe_found = 0;
5031 	int event_type;
5032 	int sub_type;
5033 	u32 srqn;
5034 	u32 qpn;
5035 	u32 cqn;
5036 
5037 	while (aeqe) {
5038 		/* Make sure we read AEQ entry after we have checked the
5039 		 * ownership bit
5040 		 */
5041 		dma_rmb();
5042 
5043 		event_type = roce_get_field(aeqe->asyn,
5044 					    HNS_ROCE_V2_AEQE_EVENT_TYPE_M,
5045 					    HNS_ROCE_V2_AEQE_EVENT_TYPE_S);
5046 		sub_type = roce_get_field(aeqe->asyn,
5047 					  HNS_ROCE_V2_AEQE_SUB_TYPE_M,
5048 					  HNS_ROCE_V2_AEQE_SUB_TYPE_S);
5049 		qpn = roce_get_field(aeqe->event.qp_event.qp,
5050 				     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
5051 				     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
5052 		cqn = roce_get_field(aeqe->event.cq_event.cq,
5053 				     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
5054 				     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
5055 		srqn = roce_get_field(aeqe->event.srq_event.srq,
5056 				     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
5057 				     HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
5058 
5059 		switch (event_type) {
5060 		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
5061 		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
5062 		case HNS_ROCE_EVENT_TYPE_COMM_EST:
5063 		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
5064 		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
5065 		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
5066 		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
5067 		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
5068 			hns_roce_qp_event(hr_dev, qpn, event_type);
5069 			break;
5070 		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
5071 		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
5072 			hns_roce_srq_event(hr_dev, srqn, event_type);
5073 			break;
5074 		case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
5075 		case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
5076 			hns_roce_cq_event(hr_dev, cqn, event_type);
5077 			break;
5078 		case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
5079 			break;
5080 		case HNS_ROCE_EVENT_TYPE_MB:
5081 			hns_roce_cmd_event(hr_dev,
5082 					le16_to_cpu(aeqe->event.cmd.token),
5083 					aeqe->event.cmd.status,
5084 					le64_to_cpu(aeqe->event.cmd.out_param));
5085 			break;
5086 		case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
5087 			break;
5088 		case HNS_ROCE_EVENT_TYPE_FLR:
5089 			break;
5090 		default:
5091 			dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
5092 				event_type, eq->eqn, eq->cons_index);
5093 			break;
5094 		}
5095 
5096 		eq->event_type = event_type;
5097 		eq->sub_type = sub_type;
5098 		++eq->cons_index;
5099 		aeqe_found = 1;
5100 
5101 		if (eq->cons_index > (2 * eq->entries - 1))
5102 			eq->cons_index = 0;
5103 
5104 		hns_roce_v2_init_irq_work(hr_dev, eq, qpn, cqn);
5105 
5106 		aeqe = next_aeqe_sw_v2(eq);
5107 	}
5108 
5109 	set_eq_cons_index_v2(eq);
5110 	return aeqe_found;
5111 }
5112 
5113 static struct hns_roce_ceqe *get_ceqe_v2(struct hns_roce_eq *eq, u32 entry)
5114 {
5115 	u32 buf_chk_sz;
5116 	unsigned long off;
5117 
5118 	buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
5119 	off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
5120 
5121 	return (struct hns_roce_ceqe *)((char *)(eq->buf_list->buf) +
5122 		off % buf_chk_sz);
5123 }
5124 
5125 static struct hns_roce_ceqe *mhop_get_ceqe(struct hns_roce_eq *eq, u32 entry)
5126 {
5127 	u32 buf_chk_sz;
5128 	unsigned long off;
5129 
5130 	buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
5131 
5132 	off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
5133 
5134 	if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
5135 		return (struct hns_roce_ceqe *)((u8 *)(eq->bt_l0) +
5136 			off % buf_chk_sz);
5137 	else
5138 		return (struct hns_roce_ceqe *)((u8 *)(eq->buf[off /
5139 			buf_chk_sz]) + off % buf_chk_sz);
5140 }
5141 
5142 static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
5143 {
5144 	struct hns_roce_ceqe *ceqe;
5145 
5146 	if (!eq->hop_num)
5147 		ceqe = get_ceqe_v2(eq, eq->cons_index);
5148 	else
5149 		ceqe = mhop_get_ceqe(eq, eq->cons_index);
5150 
5151 	return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^
5152 		(!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
5153 }
5154 
5155 static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
5156 			       struct hns_roce_eq *eq)
5157 {
5158 	struct device *dev = hr_dev->dev;
5159 	struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq);
5160 	int ceqe_found = 0;
5161 	u32 cqn;
5162 
5163 	while (ceqe) {
5164 		/* Make sure we read CEQ entry after we have checked the
5165 		 * ownership bit
5166 		 */
5167 		dma_rmb();
5168 
5169 		cqn = roce_get_field(ceqe->comp,
5170 				     HNS_ROCE_V2_CEQE_COMP_CQN_M,
5171 				     HNS_ROCE_V2_CEQE_COMP_CQN_S);
5172 
5173 		hns_roce_cq_completion(hr_dev, cqn);
5174 
5175 		++eq->cons_index;
5176 		ceqe_found = 1;
5177 
5178 		if (eq->cons_index > (EQ_DEPTH_COEFF * eq->entries - 1)) {
5179 			dev_warn(dev, "cons_index overflow, set back to 0.\n");
5180 			eq->cons_index = 0;
5181 		}
5182 
5183 		ceqe = next_ceqe_sw_v2(eq);
5184 	}
5185 
5186 	set_eq_cons_index_v2(eq);
5187 
5188 	return ceqe_found;
5189 }
5190 
5191 static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
5192 {
5193 	struct hns_roce_eq *eq = eq_ptr;
5194 	struct hns_roce_dev *hr_dev = eq->hr_dev;
5195 	int int_work = 0;
5196 
5197 	if (eq->type_flag == HNS_ROCE_CEQ)
5198 		/* Completion event interrupt */
5199 		int_work = hns_roce_v2_ceq_int(hr_dev, eq);
5200 	else
5201 		/* Asychronous event interrupt */
5202 		int_work = hns_roce_v2_aeq_int(hr_dev, eq);
5203 
5204 	return IRQ_RETVAL(int_work);
5205 }
5206 
5207 static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
5208 {
5209 	struct hns_roce_dev *hr_dev = dev_id;
5210 	struct device *dev = hr_dev->dev;
5211 	int int_work = 0;
5212 	u32 int_st;
5213 	u32 int_en;
5214 
5215 	/* Abnormal interrupt */
5216 	int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
5217 	int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
5218 
5219 	if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
5220 		struct pci_dev *pdev = hr_dev->pci_dev;
5221 		struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
5222 		const struct hnae3_ae_ops *ops = ae_dev->ops;
5223 
5224 		dev_err(dev, "AEQ overflow!\n");
5225 
5226 		int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S;
5227 		roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
5228 
5229 		/* Set reset level for reset_event() */
5230 		if (ops->set_default_reset_request)
5231 			ops->set_default_reset_request(ae_dev,
5232 						       HNAE3_FUNC_RESET);
5233 		if (ops->reset_event)
5234 			ops->reset_event(pdev, NULL);
5235 
5236 		int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
5237 		roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5238 
5239 		int_work = 1;
5240 	} else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S)) {
5241 		dev_err(dev, "BUS ERR!\n");
5242 
5243 		int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S;
5244 		roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
5245 
5246 		int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
5247 		roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5248 
5249 		int_work = 1;
5250 	} else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S)) {
5251 		dev_err(dev, "OTHER ERR!\n");
5252 
5253 		int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S;
5254 		roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
5255 
5256 		int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
5257 		roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5258 
5259 		int_work = 1;
5260 	} else
5261 		dev_err(dev, "There is no abnormal irq found!\n");
5262 
5263 	return IRQ_RETVAL(int_work);
5264 }
5265 
5266 static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
5267 					int eq_num, int enable_flag)
5268 {
5269 	int i;
5270 
5271 	if (enable_flag == EQ_ENABLE) {
5272 		for (i = 0; i < eq_num; i++)
5273 			roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
5274 				   i * EQ_REG_OFFSET,
5275 				   HNS_ROCE_V2_VF_EVENT_INT_EN_M);
5276 
5277 		roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
5278 			   HNS_ROCE_V2_VF_ABN_INT_EN_M);
5279 		roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
5280 			   HNS_ROCE_V2_VF_ABN_INT_CFG_M);
5281 	} else {
5282 		for (i = 0; i < eq_num; i++)
5283 			roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
5284 				   i * EQ_REG_OFFSET,
5285 				   HNS_ROCE_V2_VF_EVENT_INT_EN_M & 0x0);
5286 
5287 		roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
5288 			   HNS_ROCE_V2_VF_ABN_INT_EN_M & 0x0);
5289 		roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
5290 			   HNS_ROCE_V2_VF_ABN_INT_CFG_M & 0x0);
5291 	}
5292 }
5293 
5294 static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
5295 {
5296 	struct device *dev = hr_dev->dev;
5297 	int ret;
5298 
5299 	if (eqn < hr_dev->caps.num_comp_vectors)
5300 		ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
5301 					0, HNS_ROCE_CMD_DESTROY_CEQC,
5302 					HNS_ROCE_CMD_TIMEOUT_MSECS);
5303 	else
5304 		ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
5305 					0, HNS_ROCE_CMD_DESTROY_AEQC,
5306 					HNS_ROCE_CMD_TIMEOUT_MSECS);
5307 	if (ret)
5308 		dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
5309 }
5310 
5311 static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev,
5312 				  struct hns_roce_eq *eq)
5313 {
5314 	struct device *dev = hr_dev->dev;
5315 	u64 idx;
5316 	u64 size;
5317 	u32 buf_chk_sz;
5318 	u32 bt_chk_sz;
5319 	u32 mhop_num;
5320 	int eqe_alloc;
5321 	int i = 0;
5322 	int j = 0;
5323 
5324 	mhop_num = hr_dev->caps.eqe_hop_num;
5325 	buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
5326 	bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
5327 
5328 	if (mhop_num == HNS_ROCE_HOP_NUM_0) {
5329 		dma_free_coherent(dev, (unsigned int)(eq->entries *
5330 				  eq->eqe_size), eq->bt_l0, eq->l0_dma);
5331 		return;
5332 	}
5333 
5334 	dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
5335 	if (mhop_num == 1) {
5336 		for (i = 0; i < eq->l0_last_num; i++) {
5337 			if (i == eq->l0_last_num - 1) {
5338 				eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
5339 				size = (eq->entries - eqe_alloc) * eq->eqe_size;
5340 				dma_free_coherent(dev, size, eq->buf[i],
5341 						  eq->buf_dma[i]);
5342 				break;
5343 			}
5344 			dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
5345 					  eq->buf_dma[i]);
5346 		}
5347 	} else if (mhop_num == 2) {
5348 		for (i = 0; i < eq->l0_last_num; i++) {
5349 			dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
5350 					  eq->l1_dma[i]);
5351 
5352 			for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
5353 				idx = i * (bt_chk_sz / BA_BYTE_LEN) + j;
5354 				if ((i == eq->l0_last_num - 1)
5355 				     && j == eq->l1_last_num - 1) {
5356 					eqe_alloc = (buf_chk_sz / eq->eqe_size)
5357 						    * idx;
5358 					size = (eq->entries - eqe_alloc)
5359 						* eq->eqe_size;
5360 					dma_free_coherent(dev, size,
5361 							  eq->buf[idx],
5362 							  eq->buf_dma[idx]);
5363 					break;
5364 				}
5365 				dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
5366 						  eq->buf_dma[idx]);
5367 			}
5368 		}
5369 	}
5370 	kfree(eq->buf_dma);
5371 	kfree(eq->buf);
5372 	kfree(eq->l1_dma);
5373 	kfree(eq->bt_l1);
5374 	eq->buf_dma = NULL;
5375 	eq->buf = NULL;
5376 	eq->l1_dma = NULL;
5377 	eq->bt_l1 = NULL;
5378 }
5379 
5380 static void hns_roce_v2_free_eq(struct hns_roce_dev *hr_dev,
5381 				struct hns_roce_eq *eq)
5382 {
5383 	u32 buf_chk_sz;
5384 
5385 	buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
5386 
5387 	if (hr_dev->caps.eqe_hop_num) {
5388 		hns_roce_mhop_free_eq(hr_dev, eq);
5389 		return;
5390 	}
5391 
5392 	dma_free_coherent(hr_dev->dev, buf_chk_sz, eq->buf_list->buf,
5393 			  eq->buf_list->map);
5394 	kfree(eq->buf_list);
5395 }
5396 
5397 static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
5398 				struct hns_roce_eq *eq,
5399 				void *mb_buf)
5400 {
5401 	struct hns_roce_eq_context *eqc;
5402 
5403 	eqc = mb_buf;
5404 	memset(eqc, 0, sizeof(struct hns_roce_eq_context));
5405 
5406 	/* init eqc */
5407 	eq->doorbell = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
5408 	eq->hop_num = hr_dev->caps.eqe_hop_num;
5409 	eq->cons_index = 0;
5410 	eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
5411 	eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
5412 	eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
5413 	eq->eqe_ba_pg_sz = hr_dev->caps.eqe_ba_pg_sz;
5414 	eq->eqe_buf_pg_sz = hr_dev->caps.eqe_buf_pg_sz;
5415 	eq->shift = ilog2((unsigned int)eq->entries);
5416 
5417 	if (!eq->hop_num)
5418 		eq->eqe_ba = eq->buf_list->map;
5419 	else
5420 		eq->eqe_ba = eq->l0_dma;
5421 
5422 	/* set eqc state */
5423 	roce_set_field(eqc->byte_4,
5424 		       HNS_ROCE_EQC_EQ_ST_M,
5425 		       HNS_ROCE_EQC_EQ_ST_S,
5426 		       HNS_ROCE_V2_EQ_STATE_VALID);
5427 
5428 	/* set eqe hop num */
5429 	roce_set_field(eqc->byte_4,
5430 		       HNS_ROCE_EQC_HOP_NUM_M,
5431 		       HNS_ROCE_EQC_HOP_NUM_S, eq->hop_num);
5432 
5433 	/* set eqc over_ignore */
5434 	roce_set_field(eqc->byte_4,
5435 		       HNS_ROCE_EQC_OVER_IGNORE_M,
5436 		       HNS_ROCE_EQC_OVER_IGNORE_S, eq->over_ignore);
5437 
5438 	/* set eqc coalesce */
5439 	roce_set_field(eqc->byte_4,
5440 		       HNS_ROCE_EQC_COALESCE_M,
5441 		       HNS_ROCE_EQC_COALESCE_S, eq->coalesce);
5442 
5443 	/* set eqc arm_state */
5444 	roce_set_field(eqc->byte_4,
5445 		       HNS_ROCE_EQC_ARM_ST_M,
5446 		       HNS_ROCE_EQC_ARM_ST_S, eq->arm_st);
5447 
5448 	/* set eqn */
5449 	roce_set_field(eqc->byte_4,
5450 		       HNS_ROCE_EQC_EQN_M,
5451 		       HNS_ROCE_EQC_EQN_S, eq->eqn);
5452 
5453 	/* set eqe_cnt */
5454 	roce_set_field(eqc->byte_4,
5455 		       HNS_ROCE_EQC_EQE_CNT_M,
5456 		       HNS_ROCE_EQC_EQE_CNT_S,
5457 		       HNS_ROCE_EQ_INIT_EQE_CNT);
5458 
5459 	/* set eqe_ba_pg_sz */
5460 	roce_set_field(eqc->byte_8,
5461 		       HNS_ROCE_EQC_BA_PG_SZ_M,
5462 		       HNS_ROCE_EQC_BA_PG_SZ_S,
5463 		       eq->eqe_ba_pg_sz + PG_SHIFT_OFFSET);
5464 
5465 	/* set eqe_buf_pg_sz */
5466 	roce_set_field(eqc->byte_8,
5467 		       HNS_ROCE_EQC_BUF_PG_SZ_M,
5468 		       HNS_ROCE_EQC_BUF_PG_SZ_S,
5469 		       eq->eqe_buf_pg_sz + PG_SHIFT_OFFSET);
5470 
5471 	/* set eq_producer_idx */
5472 	roce_set_field(eqc->byte_8,
5473 		       HNS_ROCE_EQC_PROD_INDX_M,
5474 		       HNS_ROCE_EQC_PROD_INDX_S,
5475 		       HNS_ROCE_EQ_INIT_PROD_IDX);
5476 
5477 	/* set eq_max_cnt */
5478 	roce_set_field(eqc->byte_12,
5479 		       HNS_ROCE_EQC_MAX_CNT_M,
5480 		       HNS_ROCE_EQC_MAX_CNT_S, eq->eq_max_cnt);
5481 
5482 	/* set eq_period */
5483 	roce_set_field(eqc->byte_12,
5484 		       HNS_ROCE_EQC_PERIOD_M,
5485 		       HNS_ROCE_EQC_PERIOD_S, eq->eq_period);
5486 
5487 	/* set eqe_report_timer */
5488 	roce_set_field(eqc->eqe_report_timer,
5489 		       HNS_ROCE_EQC_REPORT_TIMER_M,
5490 		       HNS_ROCE_EQC_REPORT_TIMER_S,
5491 		       HNS_ROCE_EQ_INIT_REPORT_TIMER);
5492 
5493 	/* set eqe_ba [34:3] */
5494 	roce_set_field(eqc->eqe_ba0,
5495 		       HNS_ROCE_EQC_EQE_BA_L_M,
5496 		       HNS_ROCE_EQC_EQE_BA_L_S, eq->eqe_ba >> 3);
5497 
5498 	/* set eqe_ba [64:35] */
5499 	roce_set_field(eqc->eqe_ba1,
5500 		       HNS_ROCE_EQC_EQE_BA_H_M,
5501 		       HNS_ROCE_EQC_EQE_BA_H_S, eq->eqe_ba >> 35);
5502 
5503 	/* set eq shift */
5504 	roce_set_field(eqc->byte_28,
5505 		       HNS_ROCE_EQC_SHIFT_M,
5506 		       HNS_ROCE_EQC_SHIFT_S, eq->shift);
5507 
5508 	/* set eq MSI_IDX */
5509 	roce_set_field(eqc->byte_28,
5510 		       HNS_ROCE_EQC_MSI_INDX_M,
5511 		       HNS_ROCE_EQC_MSI_INDX_S,
5512 		       HNS_ROCE_EQ_INIT_MSI_IDX);
5513 
5514 	/* set cur_eqe_ba [27:12] */
5515 	roce_set_field(eqc->byte_28,
5516 		       HNS_ROCE_EQC_CUR_EQE_BA_L_M,
5517 		       HNS_ROCE_EQC_CUR_EQE_BA_L_S, eq->cur_eqe_ba >> 12);
5518 
5519 	/* set cur_eqe_ba [59:28] */
5520 	roce_set_field(eqc->byte_32,
5521 		       HNS_ROCE_EQC_CUR_EQE_BA_M_M,
5522 		       HNS_ROCE_EQC_CUR_EQE_BA_M_S, eq->cur_eqe_ba >> 28);
5523 
5524 	/* set cur_eqe_ba [63:60] */
5525 	roce_set_field(eqc->byte_36,
5526 		       HNS_ROCE_EQC_CUR_EQE_BA_H_M,
5527 		       HNS_ROCE_EQC_CUR_EQE_BA_H_S, eq->cur_eqe_ba >> 60);
5528 
5529 	/* set eq consumer idx */
5530 	roce_set_field(eqc->byte_36,
5531 		       HNS_ROCE_EQC_CONS_INDX_M,
5532 		       HNS_ROCE_EQC_CONS_INDX_S,
5533 		       HNS_ROCE_EQ_INIT_CONS_IDX);
5534 
5535 	/* set nex_eqe_ba[43:12] */
5536 	roce_set_field(eqc->nxt_eqe_ba0,
5537 		       HNS_ROCE_EQC_NXT_EQE_BA_L_M,
5538 		       HNS_ROCE_EQC_NXT_EQE_BA_L_S, eq->nxt_eqe_ba >> 12);
5539 
5540 	/* set nex_eqe_ba[63:44] */
5541 	roce_set_field(eqc->nxt_eqe_ba1,
5542 		       HNS_ROCE_EQC_NXT_EQE_BA_H_M,
5543 		       HNS_ROCE_EQC_NXT_EQE_BA_H_S, eq->nxt_eqe_ba >> 44);
5544 }
5545 
5546 static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
5547 				  struct hns_roce_eq *eq)
5548 {
5549 	struct device *dev = hr_dev->dev;
5550 	int eq_alloc_done = 0;
5551 	int eq_buf_cnt = 0;
5552 	int eqe_alloc;
5553 	u32 buf_chk_sz;
5554 	u32 bt_chk_sz;
5555 	u32 mhop_num;
5556 	u64 size;
5557 	u64 idx;
5558 	int ba_num;
5559 	int bt_num;
5560 	int record_i;
5561 	int record_j;
5562 	int i = 0;
5563 	int j = 0;
5564 
5565 	mhop_num = hr_dev->caps.eqe_hop_num;
5566 	buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
5567 	bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
5568 
5569 	ba_num = DIV_ROUND_UP(PAGE_ALIGN(eq->entries * eq->eqe_size),
5570 			      buf_chk_sz);
5571 	bt_num = DIV_ROUND_UP(ba_num, bt_chk_sz / BA_BYTE_LEN);
5572 
5573 	if (mhop_num == HNS_ROCE_HOP_NUM_0) {
5574 		if (eq->entries > buf_chk_sz / eq->eqe_size) {
5575 			dev_err(dev, "eq entries %d is larger than buf_pg_sz!",
5576 				eq->entries);
5577 			return -EINVAL;
5578 		}
5579 		eq->bt_l0 = dma_alloc_coherent(dev, eq->entries * eq->eqe_size,
5580 					       &(eq->l0_dma), GFP_KERNEL);
5581 		if (!eq->bt_l0)
5582 			return -ENOMEM;
5583 
5584 		eq->cur_eqe_ba = eq->l0_dma;
5585 		eq->nxt_eqe_ba = 0;
5586 
5587 		return 0;
5588 	}
5589 
5590 	eq->buf_dma = kcalloc(ba_num, sizeof(*eq->buf_dma), GFP_KERNEL);
5591 	if (!eq->buf_dma)
5592 		return -ENOMEM;
5593 	eq->buf = kcalloc(ba_num, sizeof(*eq->buf), GFP_KERNEL);
5594 	if (!eq->buf)
5595 		goto err_kcalloc_buf;
5596 
5597 	if (mhop_num == 2) {
5598 		eq->l1_dma = kcalloc(bt_num, sizeof(*eq->l1_dma), GFP_KERNEL);
5599 		if (!eq->l1_dma)
5600 			goto err_kcalloc_l1_dma;
5601 
5602 		eq->bt_l1 = kcalloc(bt_num, sizeof(*eq->bt_l1), GFP_KERNEL);
5603 		if (!eq->bt_l1)
5604 			goto err_kcalloc_bt_l1;
5605 	}
5606 
5607 	/* alloc L0 BT */
5608 	eq->bt_l0 = dma_alloc_coherent(dev, bt_chk_sz, &eq->l0_dma, GFP_KERNEL);
5609 	if (!eq->bt_l0)
5610 		goto err_dma_alloc_l0;
5611 
5612 	if (mhop_num == 1) {
5613 		if (ba_num > (bt_chk_sz / BA_BYTE_LEN))
5614 			dev_err(dev, "ba_num %d is too large for 1 hop\n",
5615 				ba_num);
5616 
5617 		/* alloc buf */
5618 		for (i = 0; i < bt_chk_sz / BA_BYTE_LEN; i++) {
5619 			if (eq_buf_cnt + 1 < ba_num) {
5620 				size = buf_chk_sz;
5621 			} else {
5622 				eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
5623 				size = (eq->entries - eqe_alloc) * eq->eqe_size;
5624 			}
5625 			eq->buf[i] = dma_alloc_coherent(dev, size,
5626 							&(eq->buf_dma[i]),
5627 							GFP_KERNEL);
5628 			if (!eq->buf[i])
5629 				goto err_dma_alloc_buf;
5630 
5631 			*(eq->bt_l0 + i) = eq->buf_dma[i];
5632 
5633 			eq_buf_cnt++;
5634 			if (eq_buf_cnt >= ba_num)
5635 				break;
5636 		}
5637 		eq->cur_eqe_ba = eq->buf_dma[0];
5638 		if (ba_num > 1)
5639 			eq->nxt_eqe_ba = eq->buf_dma[1];
5640 
5641 	} else if (mhop_num == 2) {
5642 		/* alloc L1 BT and buf */
5643 		for (i = 0; i < bt_chk_sz / BA_BYTE_LEN; i++) {
5644 			eq->bt_l1[i] = dma_alloc_coherent(dev, bt_chk_sz,
5645 							  &(eq->l1_dma[i]),
5646 							  GFP_KERNEL);
5647 			if (!eq->bt_l1[i])
5648 				goto err_dma_alloc_l1;
5649 			*(eq->bt_l0 + i) = eq->l1_dma[i];
5650 
5651 			for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
5652 				idx = i * bt_chk_sz / BA_BYTE_LEN + j;
5653 				if (eq_buf_cnt + 1 < ba_num) {
5654 					size = buf_chk_sz;
5655 				} else {
5656 					eqe_alloc = (buf_chk_sz / eq->eqe_size)
5657 						    * idx;
5658 					size = (eq->entries - eqe_alloc)
5659 						* eq->eqe_size;
5660 				}
5661 				eq->buf[idx] = dma_alloc_coherent(dev, size,
5662 								  &(eq->buf_dma[idx]),
5663 								  GFP_KERNEL);
5664 				if (!eq->buf[idx])
5665 					goto err_dma_alloc_buf;
5666 
5667 				*(eq->bt_l1[i] + j) = eq->buf_dma[idx];
5668 
5669 				eq_buf_cnt++;
5670 				if (eq_buf_cnt >= ba_num) {
5671 					eq_alloc_done = 1;
5672 					break;
5673 				}
5674 			}
5675 
5676 			if (eq_alloc_done)
5677 				break;
5678 		}
5679 		eq->cur_eqe_ba = eq->buf_dma[0];
5680 		if (ba_num > 1)
5681 			eq->nxt_eqe_ba = eq->buf_dma[1];
5682 	}
5683 
5684 	eq->l0_last_num = i + 1;
5685 	if (mhop_num == 2)
5686 		eq->l1_last_num = j + 1;
5687 
5688 	return 0;
5689 
5690 err_dma_alloc_l1:
5691 	dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
5692 	eq->bt_l0 = NULL;
5693 	eq->l0_dma = 0;
5694 	for (i -= 1; i >= 0; i--) {
5695 		dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
5696 				  eq->l1_dma[i]);
5697 
5698 		for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
5699 			idx = i * bt_chk_sz / BA_BYTE_LEN + j;
5700 			dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
5701 					  eq->buf_dma[idx]);
5702 		}
5703 	}
5704 	goto err_dma_alloc_l0;
5705 
5706 err_dma_alloc_buf:
5707 	dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
5708 	eq->bt_l0 = NULL;
5709 	eq->l0_dma = 0;
5710 
5711 	if (mhop_num == 1)
5712 		for (i -= 1; i >= 0; i--)
5713 			dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
5714 					  eq->buf_dma[i]);
5715 	else if (mhop_num == 2) {
5716 		record_i = i;
5717 		record_j = j;
5718 		for (; i >= 0; i--) {
5719 			dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
5720 					  eq->l1_dma[i]);
5721 
5722 			for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
5723 				if (i == record_i && j >= record_j)
5724 					break;
5725 
5726 				idx = i * bt_chk_sz / BA_BYTE_LEN + j;
5727 				dma_free_coherent(dev, buf_chk_sz,
5728 						  eq->buf[idx],
5729 						  eq->buf_dma[idx]);
5730 			}
5731 		}
5732 	}
5733 
5734 err_dma_alloc_l0:
5735 	kfree(eq->bt_l1);
5736 	eq->bt_l1 = NULL;
5737 
5738 err_kcalloc_bt_l1:
5739 	kfree(eq->l1_dma);
5740 	eq->l1_dma = NULL;
5741 
5742 err_kcalloc_l1_dma:
5743 	kfree(eq->buf);
5744 	eq->buf = NULL;
5745 
5746 err_kcalloc_buf:
5747 	kfree(eq->buf_dma);
5748 	eq->buf_dma = NULL;
5749 
5750 	return -ENOMEM;
5751 }
5752 
5753 static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
5754 				 struct hns_roce_eq *eq,
5755 				 unsigned int eq_cmd)
5756 {
5757 	struct device *dev = hr_dev->dev;
5758 	struct hns_roce_cmd_mailbox *mailbox;
5759 	u32 buf_chk_sz = 0;
5760 	int ret;
5761 
5762 	/* Allocate mailbox memory */
5763 	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5764 	if (IS_ERR(mailbox))
5765 		return PTR_ERR(mailbox);
5766 
5767 	if (!hr_dev->caps.eqe_hop_num) {
5768 		buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
5769 
5770 		eq->buf_list = kzalloc(sizeof(struct hns_roce_buf_list),
5771 				       GFP_KERNEL);
5772 		if (!eq->buf_list) {
5773 			ret = -ENOMEM;
5774 			goto free_cmd_mbox;
5775 		}
5776 
5777 		eq->buf_list->buf = dma_alloc_coherent(dev, buf_chk_sz,
5778 						       &(eq->buf_list->map),
5779 						       GFP_KERNEL);
5780 		if (!eq->buf_list->buf) {
5781 			ret = -ENOMEM;
5782 			goto err_alloc_buf;
5783 		}
5784 
5785 	} else {
5786 		ret = hns_roce_mhop_alloc_eq(hr_dev, eq);
5787 		if (ret) {
5788 			ret = -ENOMEM;
5789 			goto free_cmd_mbox;
5790 		}
5791 	}
5792 
5793 	hns_roce_config_eqc(hr_dev, eq, mailbox->buf);
5794 
5795 	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
5796 				eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
5797 	if (ret) {
5798 		dev_err(dev, "[mailbox cmd] create eqc failed.\n");
5799 		goto err_cmd_mbox;
5800 	}
5801 
5802 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5803 
5804 	return 0;
5805 
5806 err_cmd_mbox:
5807 	if (!hr_dev->caps.eqe_hop_num)
5808 		dma_free_coherent(dev, buf_chk_sz, eq->buf_list->buf,
5809 				  eq->buf_list->map);
5810 	else {
5811 		hns_roce_mhop_free_eq(hr_dev, eq);
5812 		goto free_cmd_mbox;
5813 	}
5814 
5815 err_alloc_buf:
5816 	kfree(eq->buf_list);
5817 
5818 free_cmd_mbox:
5819 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5820 
5821 	return ret;
5822 }
5823 
5824 static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
5825 				  int comp_num, int aeq_num, int other_num)
5826 {
5827 	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
5828 	int i, j;
5829 	int ret;
5830 
5831 	for (i = 0; i < irq_num; i++) {
5832 		hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
5833 					       GFP_KERNEL);
5834 		if (!hr_dev->irq_names[i]) {
5835 			ret = -ENOMEM;
5836 			goto err_kzalloc_failed;
5837 		}
5838 	}
5839 
5840 	/* irq contains: abnormal + AEQ + CEQ */
5841 	for (j = 0; j < other_num; j++)
5842 		snprintf((char *)hr_dev->irq_names[j],
5843 			 HNS_ROCE_INT_NAME_LEN, "hns-abn-%d", j);
5844 
5845 	for (j = other_num; j < (other_num + aeq_num); j++)
5846 		snprintf((char *)hr_dev->irq_names[j],
5847 			 HNS_ROCE_INT_NAME_LEN, "hns-aeq-%d",
5848 			 j - other_num);
5849 
5850 	for (j = (other_num + aeq_num); j < irq_num; j++)
5851 		snprintf((char *)hr_dev->irq_names[j],
5852 			 HNS_ROCE_INT_NAME_LEN, "hns-ceq-%d",
5853 			 j - other_num - aeq_num);
5854 
5855 	for (j = 0; j < irq_num; j++) {
5856 		if (j < other_num)
5857 			ret = request_irq(hr_dev->irq[j],
5858 					  hns_roce_v2_msix_interrupt_abn,
5859 					  0, hr_dev->irq_names[j], hr_dev);
5860 
5861 		else if (j < (other_num + comp_num))
5862 			ret = request_irq(eq_table->eq[j - other_num].irq,
5863 					  hns_roce_v2_msix_interrupt_eq,
5864 					  0, hr_dev->irq_names[j + aeq_num],
5865 					  &eq_table->eq[j - other_num]);
5866 		else
5867 			ret = request_irq(eq_table->eq[j - other_num].irq,
5868 					  hns_roce_v2_msix_interrupt_eq,
5869 					  0, hr_dev->irq_names[j - comp_num],
5870 					  &eq_table->eq[j - other_num]);
5871 		if (ret) {
5872 			dev_err(hr_dev->dev, "Request irq error!\n");
5873 			goto err_request_failed;
5874 		}
5875 	}
5876 
5877 	return 0;
5878 
5879 err_request_failed:
5880 	for (j -= 1; j >= 0; j--)
5881 		if (j < other_num)
5882 			free_irq(hr_dev->irq[j], hr_dev);
5883 		else
5884 			free_irq(eq_table->eq[j - other_num].irq,
5885 				 &eq_table->eq[j - other_num]);
5886 
5887 err_kzalloc_failed:
5888 	for (i -= 1; i >= 0; i--)
5889 		kfree(hr_dev->irq_names[i]);
5890 
5891 	return ret;
5892 }
5893 
5894 static void __hns_roce_free_irq(struct hns_roce_dev *hr_dev)
5895 {
5896 	int irq_num;
5897 	int eq_num;
5898 	int i;
5899 
5900 	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
5901 	irq_num = eq_num + hr_dev->caps.num_other_vectors;
5902 
5903 	for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
5904 		free_irq(hr_dev->irq[i], hr_dev);
5905 
5906 	for (i = 0; i < eq_num; i++)
5907 		free_irq(hr_dev->eq_table.eq[i].irq, &hr_dev->eq_table.eq[i]);
5908 
5909 	for (i = 0; i < irq_num; i++)
5910 		kfree(hr_dev->irq_names[i]);
5911 }
5912 
5913 static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
5914 {
5915 	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
5916 	struct device *dev = hr_dev->dev;
5917 	struct hns_roce_eq *eq;
5918 	unsigned int eq_cmd;
5919 	int irq_num;
5920 	int eq_num;
5921 	int other_num;
5922 	int comp_num;
5923 	int aeq_num;
5924 	int i;
5925 	int ret;
5926 
5927 	other_num = hr_dev->caps.num_other_vectors;
5928 	comp_num = hr_dev->caps.num_comp_vectors;
5929 	aeq_num = hr_dev->caps.num_aeq_vectors;
5930 
5931 	eq_num = comp_num + aeq_num;
5932 	irq_num = eq_num + other_num;
5933 
5934 	eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
5935 	if (!eq_table->eq)
5936 		return -ENOMEM;
5937 
5938 	/* create eq */
5939 	for (i = 0; i < eq_num; i++) {
5940 		eq = &eq_table->eq[i];
5941 		eq->hr_dev = hr_dev;
5942 		eq->eqn = i;
5943 		if (i < comp_num) {
5944 			/* CEQ */
5945 			eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
5946 			eq->type_flag = HNS_ROCE_CEQ;
5947 			eq->entries = hr_dev->caps.ceqe_depth;
5948 			eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
5949 			eq->irq = hr_dev->irq[i + other_num + aeq_num];
5950 			eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
5951 			eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
5952 		} else {
5953 			/* AEQ */
5954 			eq_cmd = HNS_ROCE_CMD_CREATE_AEQC;
5955 			eq->type_flag = HNS_ROCE_AEQ;
5956 			eq->entries = hr_dev->caps.aeqe_depth;
5957 			eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
5958 			eq->irq = hr_dev->irq[i - comp_num + other_num];
5959 			eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
5960 			eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
5961 		}
5962 
5963 		ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd);
5964 		if (ret) {
5965 			dev_err(dev, "eq create failed.\n");
5966 			goto err_create_eq_fail;
5967 		}
5968 	}
5969 
5970 	/* enable irq */
5971 	hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);
5972 
5973 	ret = __hns_roce_request_irq(hr_dev, irq_num, comp_num,
5974 				     aeq_num, other_num);
5975 	if (ret) {
5976 		dev_err(dev, "Request irq failed.\n");
5977 		goto err_request_irq_fail;
5978 	}
5979 
5980 	hr_dev->irq_workq =
5981 		create_singlethread_workqueue("hns_roce_irq_workqueue");
5982 	if (!hr_dev->irq_workq) {
5983 		dev_err(dev, "Create irq workqueue failed!\n");
5984 		ret = -ENOMEM;
5985 		goto err_create_wq_fail;
5986 	}
5987 
5988 	return 0;
5989 
5990 err_create_wq_fail:
5991 	__hns_roce_free_irq(hr_dev);
5992 
5993 err_request_irq_fail:
5994 	hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
5995 
5996 err_create_eq_fail:
5997 	for (i -= 1; i >= 0; i--)
5998 		hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]);
5999 	kfree(eq_table->eq);
6000 
6001 	return ret;
6002 }
6003 
6004 static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
6005 {
6006 	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
6007 	int eq_num;
6008 	int i;
6009 
6010 	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
6011 
6012 	/* Disable irq */
6013 	hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
6014 
6015 	__hns_roce_free_irq(hr_dev);
6016 
6017 	for (i = 0; i < eq_num; i++) {
6018 		hns_roce_v2_destroy_eqc(hr_dev, i);
6019 
6020 		hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]);
6021 	}
6022 
6023 	kfree(eq_table->eq);
6024 
6025 	flush_workqueue(hr_dev->irq_workq);
6026 	destroy_workqueue(hr_dev->irq_workq);
6027 }
6028 
6029 static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
6030 				   struct hns_roce_srq *srq, u32 pdn, u16 xrcd,
6031 				   u32 cqn, void *mb_buf, u64 *mtts_wqe,
6032 				   u64 *mtts_idx, dma_addr_t dma_handle_wqe,
6033 				   dma_addr_t dma_handle_idx)
6034 {
6035 	struct hns_roce_srq_context *srq_context;
6036 
6037 	srq_context = mb_buf;
6038 	memset(srq_context, 0, sizeof(*srq_context));
6039 
6040 	roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQ_ST_M,
6041 		       SRQC_BYTE_4_SRQ_ST_S, 1);
6042 
6043 	roce_set_field(srq_context->byte_4_srqn_srqst,
6044 		       SRQC_BYTE_4_SRQ_WQE_HOP_NUM_M,
6045 		       SRQC_BYTE_4_SRQ_WQE_HOP_NUM_S,
6046 		       (hr_dev->caps.srqwqe_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
6047 		       hr_dev->caps.srqwqe_hop_num));
6048 	roce_set_field(srq_context->byte_4_srqn_srqst,
6049 		       SRQC_BYTE_4_SRQ_SHIFT_M, SRQC_BYTE_4_SRQ_SHIFT_S,
6050 		       ilog2(srq->max));
6051 
6052 	roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQN_M,
6053 		       SRQC_BYTE_4_SRQN_S, srq->srqn);
6054 
6055 	roce_set_field(srq_context->byte_8_limit_wl, SRQC_BYTE_8_SRQ_LIMIT_WL_M,
6056 		       SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
6057 
6058 	roce_set_field(srq_context->byte_12_xrcd, SRQC_BYTE_12_SRQ_XRCD_M,
6059 		       SRQC_BYTE_12_SRQ_XRCD_S, xrcd);
6060 
6061 	srq_context->wqe_bt_ba = cpu_to_le32((u32)(dma_handle_wqe >> 3));
6062 
6063 	roce_set_field(srq_context->byte_24_wqe_bt_ba,
6064 		       SRQC_BYTE_24_SRQ_WQE_BT_BA_M,
6065 		       SRQC_BYTE_24_SRQ_WQE_BT_BA_S,
6066 		       dma_handle_wqe >> 35);
6067 
6068 	roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_PD_M,
6069 		       SRQC_BYTE_28_PD_S, pdn);
6070 	roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_RQWS_M,
6071 		       SRQC_BYTE_28_RQWS_S, srq->max_gs <= 0 ? 0 :
6072 		       fls(srq->max_gs - 1));
6073 
6074 	srq_context->idx_bt_ba = cpu_to_le32(dma_handle_idx >> 3);
6075 	roce_set_field(srq_context->rsv_idx_bt_ba,
6076 		       SRQC_BYTE_36_SRQ_IDX_BT_BA_M,
6077 		       SRQC_BYTE_36_SRQ_IDX_BT_BA_S,
6078 		       dma_handle_idx >> 35);
6079 
6080 	srq_context->idx_cur_blk_addr =
6081 		cpu_to_le32(mtts_idx[0] >> PAGE_ADDR_SHIFT);
6082 	roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
6083 		       SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_M,
6084 		       SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_S,
6085 		       mtts_idx[0] >> (32 + PAGE_ADDR_SHIFT));
6086 	roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
6087 		       SRQC_BYTE_44_SRQ_IDX_HOP_NUM_M,
6088 		       SRQC_BYTE_44_SRQ_IDX_HOP_NUM_S,
6089 		       hr_dev->caps.idx_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
6090 		       hr_dev->caps.idx_hop_num);
6091 
6092 	roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
6093 		       SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M,
6094 		       SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S,
6095 		       hr_dev->caps.idx_ba_pg_sz);
6096 	roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
6097 		       SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M,
6098 		       SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S,
6099 		       hr_dev->caps.idx_buf_pg_sz);
6100 
6101 	srq_context->idx_nxt_blk_addr =
6102 		cpu_to_le32(mtts_idx[1] >> PAGE_ADDR_SHIFT);
6103 	roce_set_field(srq_context->rsv_idxnxtblkaddr,
6104 		       SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_M,
6105 		       SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_S,
6106 		       mtts_idx[1] >> (32 + PAGE_ADDR_SHIFT));
6107 	roce_set_field(srq_context->byte_56_xrc_cqn,
6108 		       SRQC_BYTE_56_SRQ_XRC_CQN_M, SRQC_BYTE_56_SRQ_XRC_CQN_S,
6109 		       cqn);
6110 	roce_set_field(srq_context->byte_56_xrc_cqn,
6111 		       SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_M,
6112 		       SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_S,
6113 		       hr_dev->caps.srqwqe_ba_pg_sz + PG_SHIFT_OFFSET);
6114 	roce_set_field(srq_context->byte_56_xrc_cqn,
6115 		       SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_M,
6116 		       SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_S,
6117 		       hr_dev->caps.srqwqe_buf_pg_sz + PG_SHIFT_OFFSET);
6118 
6119 	roce_set_bit(srq_context->db_record_addr_record_en,
6120 		     SRQC_BYTE_60_SRQ_RECORD_EN_S, 0);
6121 }
6122 
6123 static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
6124 				  struct ib_srq_attr *srq_attr,
6125 				  enum ib_srq_attr_mask srq_attr_mask,
6126 				  struct ib_udata *udata)
6127 {
6128 	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
6129 	struct hns_roce_srq *srq = to_hr_srq(ibsrq);
6130 	struct hns_roce_srq_context *srq_context;
6131 	struct hns_roce_srq_context *srqc_mask;
6132 	struct hns_roce_cmd_mailbox *mailbox;
6133 	int ret;
6134 
6135 	if (srq_attr_mask & IB_SRQ_LIMIT) {
6136 		if (srq_attr->srq_limit >= srq->max)
6137 			return -EINVAL;
6138 
6139 		mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
6140 		if (IS_ERR(mailbox))
6141 			return PTR_ERR(mailbox);
6142 
6143 		srq_context = mailbox->buf;
6144 		srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1;
6145 
6146 		memset(srqc_mask, 0xff, sizeof(*srqc_mask));
6147 
6148 		roce_set_field(srq_context->byte_8_limit_wl,
6149 			       SRQC_BYTE_8_SRQ_LIMIT_WL_M,
6150 			       SRQC_BYTE_8_SRQ_LIMIT_WL_S, srq_attr->srq_limit);
6151 		roce_set_field(srqc_mask->byte_8_limit_wl,
6152 			       SRQC_BYTE_8_SRQ_LIMIT_WL_M,
6153 			       SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
6154 
6155 		ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, srq->srqn, 0,
6156 					HNS_ROCE_CMD_MODIFY_SRQC,
6157 					HNS_ROCE_CMD_TIMEOUT_MSECS);
6158 		hns_roce_free_cmd_mailbox(hr_dev, mailbox);
6159 		if (ret) {
6160 			dev_err(hr_dev->dev,
6161 				"MODIFY SRQ Failed to cmd mailbox.\n");
6162 			return ret;
6163 		}
6164 	}
6165 
6166 	return 0;
6167 }
6168 
6169 static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
6170 {
6171 	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
6172 	struct hns_roce_srq *srq = to_hr_srq(ibsrq);
6173 	struct hns_roce_srq_context *srq_context;
6174 	struct hns_roce_cmd_mailbox *mailbox;
6175 	int limit_wl;
6176 	int ret;
6177 
6178 	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
6179 	if (IS_ERR(mailbox))
6180 		return PTR_ERR(mailbox);
6181 
6182 	srq_context = mailbox->buf;
6183 	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srq->srqn, 0,
6184 				HNS_ROCE_CMD_QUERY_SRQC,
6185 				HNS_ROCE_CMD_TIMEOUT_MSECS);
6186 	if (ret) {
6187 		dev_err(hr_dev->dev, "QUERY SRQ cmd process error\n");
6188 		goto out;
6189 	}
6190 
6191 	limit_wl = roce_get_field(srq_context->byte_8_limit_wl,
6192 				  SRQC_BYTE_8_SRQ_LIMIT_WL_M,
6193 				  SRQC_BYTE_8_SRQ_LIMIT_WL_S);
6194 
6195 	attr->srq_limit = limit_wl;
6196 	attr->max_wr    = srq->max - 1;
6197 	attr->max_sge   = srq->max_gs;
6198 
6199 	memcpy(srq_context, mailbox->buf, sizeof(*srq_context));
6200 
6201 out:
6202 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
6203 	return ret;
6204 }
6205 
6206 static int find_empty_entry(struct hns_roce_idx_que *idx_que,
6207 			    unsigned long size)
6208 {
6209 	int wqe_idx;
6210 
6211 	if (unlikely(bitmap_full(idx_que->bitmap, size)))
6212 		return -ENOSPC;
6213 
6214 	wqe_idx = find_first_zero_bit(idx_que->bitmap, size);
6215 
6216 	bitmap_set(idx_que->bitmap, wqe_idx, 1);
6217 
6218 	return wqe_idx;
6219 }
6220 
6221 static void fill_idx_queue(struct hns_roce_idx_que *idx_que,
6222 			   int cur_idx, int wqe_idx)
6223 {
6224 	unsigned int *addr;
6225 
6226 	addr = (unsigned int *)hns_roce_buf_offset(&idx_que->idx_buf,
6227 						   cur_idx * idx_que->entry_sz);
6228 	*addr = wqe_idx;
6229 }
6230 
6231 static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
6232 				     const struct ib_recv_wr *wr,
6233 				     const struct ib_recv_wr **bad_wr)
6234 {
6235 	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
6236 	struct hns_roce_srq *srq = to_hr_srq(ibsrq);
6237 	struct hns_roce_v2_wqe_data_seg *dseg;
6238 	struct hns_roce_v2_db srq_db;
6239 	unsigned long flags;
6240 	int ret = 0;
6241 	int wqe_idx;
6242 	void *wqe;
6243 	int nreq;
6244 	int ind;
6245 	int i;
6246 
6247 	spin_lock_irqsave(&srq->lock, flags);
6248 
6249 	ind = srq->head & (srq->max - 1);
6250 
6251 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
6252 		if (unlikely(wr->num_sge > srq->max_gs)) {
6253 			ret = -EINVAL;
6254 			*bad_wr = wr;
6255 			break;
6256 		}
6257 
6258 		if (unlikely(srq->head == srq->tail)) {
6259 			ret = -ENOMEM;
6260 			*bad_wr = wr;
6261 			break;
6262 		}
6263 
6264 		wqe_idx = find_empty_entry(&srq->idx_que, srq->max);
6265 		if (wqe_idx < 0) {
6266 			ret = -ENOMEM;
6267 			*bad_wr = wr;
6268 			break;
6269 		}
6270 
6271 		fill_idx_queue(&srq->idx_que, ind, wqe_idx);
6272 		wqe = get_srq_wqe(srq, wqe_idx);
6273 		dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
6274 
6275 		for (i = 0; i < wr->num_sge; ++i) {
6276 			dseg[i].len = cpu_to_le32(wr->sg_list[i].length);
6277 			dseg[i].lkey = cpu_to_le32(wr->sg_list[i].lkey);
6278 			dseg[i].addr = cpu_to_le64(wr->sg_list[i].addr);
6279 		}
6280 
6281 		if (i < srq->max_gs) {
6282 			dseg[i].len = 0;
6283 			dseg[i].lkey = cpu_to_le32(0x100);
6284 			dseg[i].addr = 0;
6285 		}
6286 
6287 		srq->wrid[wqe_idx] = wr->wr_id;
6288 		ind = (ind + 1) & (srq->max - 1);
6289 	}
6290 
6291 	if (likely(nreq)) {
6292 		srq->head += nreq;
6293 
6294 		/*
6295 		 * Make sure that descriptors are written before
6296 		 * doorbell record.
6297 		 */
6298 		wmb();
6299 
6300 		srq_db.byte_4 =
6301 			cpu_to_le32(HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S |
6302 				    (srq->srqn & V2_DB_BYTE_4_TAG_M));
6303 		srq_db.parameter = cpu_to_le32(srq->head);
6304 
6305 		hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);
6306 
6307 	}
6308 
6309 	spin_unlock_irqrestore(&srq->lock, flags);
6310 
6311 	return ret;
6312 }
6313 
6314 static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = {
6315 	.query_cqc_info = hns_roce_v2_query_cqc_info,
6316 };
6317 
6318 static const struct ib_device_ops hns_roce_v2_dev_ops = {
6319 	.destroy_qp = hns_roce_v2_destroy_qp,
6320 	.modify_cq = hns_roce_v2_modify_cq,
6321 	.poll_cq = hns_roce_v2_poll_cq,
6322 	.post_recv = hns_roce_v2_post_recv,
6323 	.post_send = hns_roce_v2_post_send,
6324 	.query_qp = hns_roce_v2_query_qp,
6325 	.req_notify_cq = hns_roce_v2_req_notify_cq,
6326 };
6327 
6328 static const struct ib_device_ops hns_roce_v2_dev_srq_ops = {
6329 	.modify_srq = hns_roce_v2_modify_srq,
6330 	.post_srq_recv = hns_roce_v2_post_srq_recv,
6331 	.query_srq = hns_roce_v2_query_srq,
6332 };
6333 
6334 static const struct hns_roce_hw hns_roce_hw_v2 = {
6335 	.cmq_init = hns_roce_v2_cmq_init,
6336 	.cmq_exit = hns_roce_v2_cmq_exit,
6337 	.hw_profile = hns_roce_v2_profile,
6338 	.hw_init = hns_roce_v2_init,
6339 	.hw_exit = hns_roce_v2_exit,
6340 	.post_mbox = hns_roce_v2_post_mbox,
6341 	.chk_mbox = hns_roce_v2_chk_mbox,
6342 	.rst_prc_mbox = hns_roce_v2_rst_process_cmd,
6343 	.set_gid = hns_roce_v2_set_gid,
6344 	.set_mac = hns_roce_v2_set_mac,
6345 	.write_mtpt = hns_roce_v2_write_mtpt,
6346 	.rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
6347 	.frmr_write_mtpt = hns_roce_v2_frmr_write_mtpt,
6348 	.mw_write_mtpt = hns_roce_v2_mw_write_mtpt,
6349 	.write_cqc = hns_roce_v2_write_cqc,
6350 	.set_hem = hns_roce_v2_set_hem,
6351 	.clear_hem = hns_roce_v2_clear_hem,
6352 	.modify_qp = hns_roce_v2_modify_qp,
6353 	.query_qp = hns_roce_v2_query_qp,
6354 	.destroy_qp = hns_roce_v2_destroy_qp,
6355 	.qp_flow_control_init = hns_roce_v2_qp_flow_control_init,
6356 	.modify_cq = hns_roce_v2_modify_cq,
6357 	.post_send = hns_roce_v2_post_send,
6358 	.post_recv = hns_roce_v2_post_recv,
6359 	.req_notify_cq = hns_roce_v2_req_notify_cq,
6360 	.poll_cq = hns_roce_v2_poll_cq,
6361 	.init_eq = hns_roce_v2_init_eq_table,
6362 	.cleanup_eq = hns_roce_v2_cleanup_eq_table,
6363 	.write_srqc = hns_roce_v2_write_srqc,
6364 	.modify_srq = hns_roce_v2_modify_srq,
6365 	.query_srq = hns_roce_v2_query_srq,
6366 	.post_srq_recv = hns_roce_v2_post_srq_recv,
6367 	.hns_roce_dev_ops = &hns_roce_v2_dev_ops,
6368 	.hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
6369 };
6370 
6371 static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
6372 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
6373 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
6374 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
6375 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
6376 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
6377 	/* required last entry */
6378 	{0, }
6379 };
6380 
6381 MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
6382 
6383 static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
6384 				  struct hnae3_handle *handle)
6385 {
6386 	struct hns_roce_v2_priv *priv = hr_dev->priv;
6387 	int i;
6388 
6389 	hr_dev->hw = &hns_roce_hw_v2;
6390 	hr_dev->dfx = &hns_roce_dfx_hw_v2;
6391 	hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
6392 	hr_dev->odb_offset = hr_dev->sdb_offset;
6393 
6394 	/* Get info from NIC driver. */
6395 	hr_dev->reg_base = handle->rinfo.roce_io_base;
6396 	hr_dev->caps.num_ports = 1;
6397 	hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
6398 	hr_dev->iboe.phy_port[0] = 0;
6399 
6400 	addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
6401 			    hr_dev->iboe.netdevs[0]->dev_addr);
6402 
6403 	for (i = 0; i < HNS_ROCE_V2_MAX_IRQ_NUM; i++)
6404 		hr_dev->irq[i] = pci_irq_vector(handle->pdev,
6405 						i + handle->rinfo.base_vector);
6406 
6407 	/* cmd issue mode: 0 is poll, 1 is event */
6408 	hr_dev->cmd_mod = 1;
6409 	hr_dev->loop_idc = 0;
6410 
6411 	hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle);
6412 	priv->handle = handle;
6413 
6414 	return 0;
6415 }
6416 
6417 static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
6418 {
6419 	struct hns_roce_dev *hr_dev;
6420 	int ret;
6421 
6422 	hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
6423 	if (!hr_dev)
6424 		return -ENOMEM;
6425 
6426 	hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL);
6427 	if (!hr_dev->priv) {
6428 		ret = -ENOMEM;
6429 		goto error_failed_kzalloc;
6430 	}
6431 
6432 	hr_dev->pci_dev = handle->pdev;
6433 	hr_dev->dev = &handle->pdev->dev;
6434 
6435 	ret = hns_roce_hw_v2_get_cfg(hr_dev, handle);
6436 	if (ret) {
6437 		dev_err(hr_dev->dev, "Get Configuration failed!\n");
6438 		goto error_failed_get_cfg;
6439 	}
6440 
6441 	ret = hns_roce_init(hr_dev);
6442 	if (ret) {
6443 		dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
6444 		goto error_failed_get_cfg;
6445 	}
6446 
6447 	handle->priv = hr_dev;
6448 
6449 	return 0;
6450 
6451 error_failed_get_cfg:
6452 	kfree(hr_dev->priv);
6453 
6454 error_failed_kzalloc:
6455 	ib_dealloc_device(&hr_dev->ib_dev);
6456 
6457 	return ret;
6458 }
6459 
6460 static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
6461 					   bool reset)
6462 {
6463 	struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
6464 
6465 	if (!hr_dev)
6466 		return;
6467 
6468 	handle->priv = NULL;
6469 	hns_roce_exit(hr_dev);
6470 	kfree(hr_dev->priv);
6471 	ib_dealloc_device(&hr_dev->ib_dev);
6472 }
6473 
6474 static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
6475 {
6476 	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
6477 	const struct pci_device_id *id;
6478 	struct device *dev = &handle->pdev->dev;
6479 	int ret;
6480 
6481 	handle->rinfo.instance_state = HNS_ROCE_STATE_INIT;
6482 
6483 	if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle)) {
6484 		handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
6485 		goto reset_chk_err;
6486 	}
6487 
6488 	id = pci_match_id(hns_roce_hw_v2_pci_tbl, handle->pdev);
6489 	if (!id)
6490 		return 0;
6491 
6492 	ret = __hns_roce_hw_v2_init_instance(handle);
6493 	if (ret) {
6494 		handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
6495 		dev_err(dev, "RoCE instance init failed! ret = %d\n", ret);
6496 		if (ops->ae_dev_resetting(handle) ||
6497 		    ops->get_hw_reset_stat(handle))
6498 			goto reset_chk_err;
6499 		else
6500 			return ret;
6501 	}
6502 
6503 	handle->rinfo.instance_state = HNS_ROCE_STATE_INITED;
6504 
6505 
6506 	return 0;
6507 
6508 reset_chk_err:
6509 	dev_err(dev, "Device is busy in resetting state.\n"
6510 		     "please retry later.\n");
6511 
6512 	return -EBUSY;
6513 }
6514 
6515 static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
6516 					   bool reset)
6517 {
6518 	if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED)
6519 		return;
6520 
6521 	handle->rinfo.instance_state = HNS_ROCE_STATE_UNINIT;
6522 
6523 	__hns_roce_hw_v2_uninit_instance(handle, reset);
6524 
6525 	handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
6526 }
6527 static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
6528 {
6529 	struct hns_roce_dev *hr_dev;
6530 	struct ib_event event;
6531 
6532 	if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) {
6533 		set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
6534 		return 0;
6535 	}
6536 
6537 	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN;
6538 	clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
6539 
6540 	hr_dev = (struct hns_roce_dev *)handle->priv;
6541 	if (!hr_dev)
6542 		return 0;
6543 
6544 	hr_dev->is_reset = true;
6545 	hr_dev->active = false;
6546 	hr_dev->dis_db = true;
6547 
6548 	event.event = IB_EVENT_DEVICE_FATAL;
6549 	event.device = &hr_dev->ib_dev;
6550 	event.element.port_num = 1;
6551 	ib_dispatch_event(&event);
6552 
6553 	return 0;
6554 }
6555 
6556 static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
6557 {
6558 	struct device *dev = &handle->pdev->dev;
6559 	int ret;
6560 
6561 	if (test_and_clear_bit(HNS_ROCE_RST_DIRECT_RETURN,
6562 			       &handle->rinfo.state)) {
6563 		handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
6564 		return 0;
6565 	}
6566 
6567 	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INIT;
6568 
6569 	dev_info(&handle->pdev->dev, "In reset process RoCE client reinit.\n");
6570 	ret = __hns_roce_hw_v2_init_instance(handle);
6571 	if (ret) {
6572 		/* when reset notify type is HNAE3_INIT_CLIENT In reset notify
6573 		 * callback function, RoCE Engine reinitialize. If RoCE reinit
6574 		 * failed, we should inform NIC driver.
6575 		 */
6576 		handle->priv = NULL;
6577 		dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret);
6578 	} else {
6579 		handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
6580 		dev_info(dev, "Reset done, RoCE client reinit finished.\n");
6581 	}
6582 
6583 	return ret;
6584 }
6585 
6586 static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
6587 {
6588 	if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state))
6589 		return 0;
6590 
6591 	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT;
6592 	dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n");
6593 	msleep(HNS_ROCE_V2_HW_RST_UNINT_DELAY);
6594 	__hns_roce_hw_v2_uninit_instance(handle, false);
6595 
6596 	return 0;
6597 }
6598 
6599 static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle,
6600 				       enum hnae3_reset_notify_type type)
6601 {
6602 	int ret = 0;
6603 
6604 	switch (type) {
6605 	case HNAE3_DOWN_CLIENT:
6606 		ret = hns_roce_hw_v2_reset_notify_down(handle);
6607 		break;
6608 	case HNAE3_INIT_CLIENT:
6609 		ret = hns_roce_hw_v2_reset_notify_init(handle);
6610 		break;
6611 	case HNAE3_UNINIT_CLIENT:
6612 		ret = hns_roce_hw_v2_reset_notify_uninit(handle);
6613 		break;
6614 	default:
6615 		break;
6616 	}
6617 
6618 	return ret;
6619 }
6620 
6621 static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
6622 	.init_instance = hns_roce_hw_v2_init_instance,
6623 	.uninit_instance = hns_roce_hw_v2_uninit_instance,
6624 	.reset_notify = hns_roce_hw_v2_reset_notify,
6625 };
6626 
6627 static struct hnae3_client hns_roce_hw_v2_client = {
6628 	.name = "hns_roce_hw_v2",
6629 	.type = HNAE3_CLIENT_ROCE,
6630 	.ops = &hns_roce_hw_v2_ops,
6631 };
6632 
6633 static int __init hns_roce_hw_v2_init(void)
6634 {
6635 	return hnae3_register_client(&hns_roce_hw_v2_client);
6636 }
6637 
6638 static void __exit hns_roce_hw_v2_exit(void)
6639 {
6640 	hnae3_unregister_client(&hns_roce_hw_v2_client);
6641 }
6642 
6643 module_init(hns_roce_hw_v2_init);
6644 module_exit(hns_roce_hw_v2_exit);
6645 
6646 MODULE_LICENSE("Dual BSD/GPL");
6647 MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
6648 MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
6649 MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>");
6650 MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");
6651