xref: /linux/drivers/infiniband/hw/hns/hns_roce_cq.c (revision 021bc4b9)
/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_common.h"

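/*
 * Pick the bank whose "inuse" counter is lowest so that CQNs spread
 * evenly across all banks. The caller (alloc_cqn()) holds
 * cq_table->bank_mutex, which is what keeps the inuse counters stable
 * while they are scanned here.
 */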
static u8 get_least_load_bankid_for_cq(struct hns_roce_bank *bank)
{
	u32 least_load = bank[0].inuse;
	u8 bankid = 0;
	u32 bankcnt;
	u8 i;

	for (i = 1; i < HNS_ROCE_CQ_BANK_NUM; i++) {
		bankcnt = bank[i].inuse;
		if (bankcnt < least_load) {
			least_load = bankcnt;
			bankid = i;
		}
	}

	return bankid;
}

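/*
 * A CQN is composed of a per-bank IDA id in the upper bits and the
 * bankid in the lower CQ_BANKID_SHIFT (i.e. 2) bits. For example, id 5
 * allocated from bank 2 yields cqn = (5 << 2) | 2 = 22. get_cq_bankid()
 * below recovers the bank from a CQN by masking those two bits.
 */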
static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct hns_roce_bank *bank;
	u8 bankid;
	int id;

	mutex_lock(&cq_table->bank_mutex);
	bankid = get_least_load_bankid_for_cq(cq_table->bank);
	bank = &cq_table->bank[bankid];

	id = ida_alloc_range(&bank->ida, bank->min, bank->max, GFP_KERNEL);
	if (id < 0) {
		mutex_unlock(&cq_table->bank_mutex);
		return id;
	}

	/* the lower 2 bits of the CQN are the bankid */
	hr_cq->cqn = (id << CQ_BANKID_SHIFT) | bankid;
	bank->inuse++;
	mutex_unlock(&cq_table->bank_mutex);

	return 0;
}

static inline u8 get_cq_bankid(unsigned long cqn)
{
	/* The lower 2 bits of CQN are used to hash to different banks */
	return (u8)(cqn & GENMASK(1, 0));
}

static void free_cqn(struct hns_roce_dev *hr_dev, unsigned long cqn)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct hns_roce_bank *bank;

	bank = &cq_table->bank[get_cq_bankid(cqn)];

	ida_free(&bank->ida, cqn >> CQ_BANKID_SHIFT);

	mutex_lock(&cq_table->bank_mutex);
	bank->inuse--;
	mutex_unlock(&cq_table->bank_mutex);
}

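/*
 * Build a CQ context (CQC) in a command mailbox and hand it to the
 * hardware. The hardware-specific write_cqc() hook fills the mailbox
 * buffer from the mtts/dma_handle describing the CQE buffer, and the
 * mailbox command installs the context for hr_cq->cqn.
 */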
static int hns_roce_create_cqc(struct hns_roce_dev *hr_dev,
			       struct hns_roce_cq *hr_cq,
			       u64 *mtts, dma_addr_t dma_handle)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ibdev_err(ibdev, "failed to alloc mailbox for CQC.\n");
		return PTR_ERR(mailbox);
	}

	hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle);

	ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_CQC,
				     hr_cq->cqn);
	if (ret)
		ibdev_err(ibdev,
			  "failed to send create cmd for CQ(0x%lx), ret = %d.\n",
			  hr_cq->cqn, ret);

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}

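/*
 * Glue the pieces of a CQ context together: look up the DMA addresses of
 * the CQE buffer, reserve a CQC slot in the HEM table, publish the CQ in
 * the xarray so the event handlers can find it, then ask the hardware to
 * create the context. Note the return convention of hns_roce_mtr_find()
 * at this revision: it reports how many MTT entries were found, so 0
 * rather than a negative errno is the failure case checked below.
 */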
static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u64 mtts[MTT_MIN_COUNT] = {};
	dma_addr_t dma_handle;
	int ret;

	ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts),
				&dma_handle);
	if (!ret) {
		ibdev_err(ibdev, "failed to find CQ mtr, ret = %d.\n", ret);
		return -EINVAL;
	}

	/* Get CQC memory HEM (Hardware Entry Memory) table */
	ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
	if (ret) {
		ibdev_err(ibdev, "failed to get CQ(0x%lx) context, ret = %d.\n",
			  hr_cq->cqn, ret);
		return ret;
	}

	ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
	if (ret) {
		ibdev_err(ibdev, "failed to xa_store CQ, ret = %d.\n", ret);
		goto err_put;
	}

	ret = hns_roce_create_cqc(hr_dev, hr_cq, mtts, dma_handle);
	if (ret)
		goto err_xa;

	return 0;

err_xa:
	xa_erase(&cq_table->array, hr_cq->cqn);
err_put:
	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);

	return ret;
}

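/*
 * Tear-down mirrors alloc_cqc() in reverse, with care taken around
 * concurrent events: destroy the hardware context first so no new events
 * are generated, remove the CQ from the xarray so the event handlers can
 * no longer look it up, flush any handler already running on this CQ's
 * EQ interrupt, then wait for every outstanding reference (taken by
 * hns_roce_cq_event()) to be dropped before releasing the HEM slot.
 */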
static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	struct device *dev = hr_dev->dev;
	int ret;

	ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_CQC,
				      hr_cq->cqn);
	if (ret)
		dev_err(dev, "DESTROY_CQ failed (%d) for CQN %06lx\n", ret,
			hr_cq->cqn);

	xa_erase(&cq_table->array, hr_cq->cqn);

	/* Wait until any in-flight interrupt handling has completed */
	synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);

	/* wait until all references taken by event handlers are dropped */
	if (refcount_dec_and_test(&hr_cq->refcount))
		complete(&hr_cq->free);
	wait_for_completion(&hr_cq->free);

	hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
}

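/*
 * The CQE ring is a single region of cq_depth * cqe_size bytes managed
 * through the driver's MTR layer. For a user CQ, udata/addr describe the
 * userspace buffer to pin; for a kernel CQ both are NULL/0 and the MTR
 * layer allocates the pages itself.
 */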
static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
			struct ib_udata *udata, unsigned long addr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_attr buf_attr = {};
	int ret;

	buf_attr.page_shift = hr_dev->caps.cqe_buf_pg_sz + PAGE_SHIFT;
	buf_attr.region[0].size = hr_cq->cq_depth * hr_cq->cqe_size;
	buf_attr.region[0].hopnum = hr_dev->caps.cqe_hop_num;
	buf_attr.region_count = 1;

	ret = hns_roce_mtr_create(hr_dev, &hr_cq->mtr, &buf_attr,
				  hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT,
				  udata, addr);
	if (ret)
		ibdev_err(ibdev, "failed to alloc CQ mtr, ret = %d.\n", ret);

	return ret;
}

static void free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	hns_roce_mtr_destroy(hr_dev, &hr_cq->mtr);
}

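/*
 * Set up the consumer-index doorbell. When the device supports record
 * doorbells (HNS_ROCE_CAP_FLAG_CQ_RECORD_DB), the consumer index is
 * written to a DMA-visible memory location rather than a register: a
 * user CQ maps the location provided by userspace (only if the response
 * buffer is large enough to report cap_flags back), while a kernel CQ
 * allocates one from the driver's doorbell pool. Kernel CQs also record
 * the address of the hardware doorbell register.
 */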
static int alloc_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
		       struct ib_udata *udata, unsigned long addr,
		       struct hns_roce_ib_create_cq_resp *resp)
{
	bool has_db = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB;
	struct hns_roce_ucontext *uctx;
	int err;

	if (udata) {
		if (has_db &&
		    udata->outlen >= offsetofend(typeof(*resp), cap_flags)) {
			uctx = rdma_udata_to_drv_context(udata,
					struct hns_roce_ucontext, ibucontext);
			err = hns_roce_db_map_user(uctx, addr, &hr_cq->db);
			if (err)
				return err;
			hr_cq->flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
			resp->cap_flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
		}
	} else {
		if (has_db) {
			err = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
			if (err)
				return err;
			hr_cq->set_ci_db = hr_cq->db.db_record;
			*hr_cq->set_ci_db = 0;
			hr_cq->flags |= HNS_ROCE_CQ_FLAG_RECORD_DB;
		}
		hr_cq->db_reg = hr_dev->reg_base + hr_dev->odb_offset +
				DB_REG_OFFSET * hr_dev->priv_uar.index;
	}

	return 0;
}

static void free_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
		       struct ib_udata *udata)
{
	struct hns_roce_ucontext *uctx;

	if (!(hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB))
		return;

	hr_cq->flags &= ~HNS_ROCE_CQ_FLAG_RECORD_DB;
	if (udata) {
		uctx = rdma_udata_to_drv_context(udata,
						 struct hns_roce_ucontext,
						 ibucontext);
		hns_roce_db_unmap_user(uctx, &hr_cq->db);
	} else {
		hns_roce_free_db(hr_dev, &hr_cq->db);
	}
}

static int verify_cq_create_attr(struct hns_roce_dev *hr_dev,
				 const struct ib_cq_init_attr *attr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;

	if (!attr->cqe || attr->cqe > hr_dev->caps.max_cqes) {
		ibdev_err(ibdev, "failed to check CQ count %u, max = %u.\n",
			  attr->cqe, hr_dev->caps.max_cqes);
		return -EINVAL;
	}

	if (attr->comp_vector >= hr_dev->caps.num_comp_vectors) {
		ibdev_err(ibdev, "failed to check CQ vector = %u, max = %d.\n",
			  attr->comp_vector, hr_dev->caps.num_comp_vectors);
		return -EINVAL;
	}

	return 0;
}

static int get_cq_ucmd(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
		       struct hns_roce_ib_create_cq *ucmd)
{
	struct ib_device *ibdev = hr_cq->ib_cq.device;
	int ret;

	ret = ib_copy_from_udata(ucmd, udata, min(udata->inlen, sizeof(*ucmd)));
	if (ret) {
		ibdev_err(ibdev, "failed to copy CQ udata, ret = %d.\n", ret);
		return ret;
	}

	return 0;
}

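/*
 * The hardware requires a power-of-two ring, so the requested depth is
 * clamped to at least caps.min_cqes and rounded up. The verbs layer
 * expects ib_cq.cqe to be the largest usable CQE index, i.e. depth - 1.
 * As a worked example, assuming min_cqes were 64 (a hypothetical value;
 * the real one comes from the firmware caps), a request for 100 entries
 * becomes max(100, 64) = 100, rounded up to 128, with ib_cq.cqe = 127.
 */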
static void set_cq_param(struct hns_roce_cq *hr_cq, u32 cq_entries, int vector,
			 struct hns_roce_ib_create_cq *ucmd)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);

	cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
	cq_entries = roundup_pow_of_two(cq_entries);
	hr_cq->ib_cq.cqe = cq_entries - 1; /* used as cqe index */
	hr_cq->cq_depth = cq_entries;
	hr_cq->vector = vector;

	spin_lock_init(&hr_cq->lock);
	INIT_LIST_HEAD(&hr_cq->sq_list);
	INIT_LIST_HEAD(&hr_cq->rq_list);
}

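/*
 * Choose the CQE size. Kernel CQs simply take the device default. For
 * user CQs the size comes from the ucmd ABI: old userspace that does not
 * know about the cqe_size field (detected via udata->inlen) implicitly
 * gets the V2 CQE size, which keeps the ABI backward compatible; newer
 * userspace may pick either the V2 or the V3 size.
 */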
static int set_cqe_size(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
			struct hns_roce_ib_create_cq *ucmd)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);

	if (!udata) {
		hr_cq->cqe_size = hr_dev->caps.cqe_sz;
		return 0;
	}

	if (udata->inlen >= offsetofend(typeof(*ucmd), cqe_size)) {
		if (ucmd->cqe_size != HNS_ROCE_V2_CQE_SIZE &&
		    ucmd->cqe_size != HNS_ROCE_V3_CQE_SIZE) {
			ibdev_err(&hr_dev->ib_dev,
				  "invalid cqe size %u.\n", ucmd->cqe_size);
			return -EINVAL;
		}

		hr_cq->cqe_size = ucmd->cqe_size;
	} else {
		hr_cq->cqe_size = HNS_ROCE_V2_CQE_SIZE;
	}

	return 0;
}

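/*
 * Verbs entry point for CQ creation. Resources are acquired in the
 * order buffer -> doorbell -> CQN -> context, and each error label
 * unwinds exactly the steps completed before it, in reverse order. The
 * DFX counter at err_out tracks every failed creation attempt for
 * diagnostics.
 */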
int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
		       struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
	struct hns_roce_ib_create_cq_resp resp = {};
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_ib_create_cq ucmd = {};
	int ret;

	if (attr->flags) {
		ret = -EOPNOTSUPP;
		goto err_out;
	}

	ret = verify_cq_create_attr(hr_dev, attr);
	if (ret)
		goto err_out;

	if (udata) {
		ret = get_cq_ucmd(hr_cq, udata, &ucmd);
		if (ret)
			goto err_out;
	}

	set_cq_param(hr_cq, attr->cqe, attr->comp_vector, &ucmd);

	ret = set_cqe_size(hr_cq, udata, &ucmd);
	if (ret)
		goto err_out;

	ret = alloc_cq_buf(hr_dev, hr_cq, udata, ucmd.buf_addr);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc CQ buf, ret = %d.\n", ret);
		goto err_out;
	}

	ret = alloc_cq_db(hr_dev, hr_cq, udata, ucmd.db_addr, &resp);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc CQ db, ret = %d.\n", ret);
		goto err_cq_buf;
	}

	ret = alloc_cqn(hr_dev, hr_cq);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc CQN, ret = %d.\n", ret);
		goto err_cq_db;
	}

	ret = alloc_cqc(hr_dev, hr_cq);
	if (ret) {
		ibdev_err(ibdev,
			  "failed to alloc CQ context, ret = %d.\n", ret);
		goto err_cqn;
	}

	if (udata) {
		resp.cqn = hr_cq->cqn;
		ret = ib_copy_to_udata(udata, &resp,
				       min(udata->outlen, sizeof(resp)));
		if (ret)
			goto err_cqc;
	}

	hr_cq->cons_index = 0;
	hr_cq->arm_sn = 1;
	refcount_set(&hr_cq->refcount, 1);
	init_completion(&hr_cq->free);

	return 0;

err_cqc:
	free_cqc(hr_dev, hr_cq);
err_cqn:
	free_cqn(hr_dev, hr_cq->cqn);
err_cq_db:
	free_cq_db(hr_dev, hr_cq, udata);
err_cq_buf:
	free_cq_buf(hr_dev, hr_cq);
err_out:
	atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CQ_CREATE_ERR_CNT]);

	return ret;
}

int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);

	free_cqc(hr_dev, hr_cq);
	free_cqn(hr_dev, hr_cq->cqn);
	free_cq_db(hr_dev, hr_cq, udata);
	free_cq_buf(hr_dev, hr_cq);

	return 0;
}

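/*
 * Completion-event dispatch, called from EQ interrupt handling. The CQN
 * reported by the event queue is masked with (num_cqs - 1) before the
 * xarray lookup, matching the size of the CQ table. arm_sn is bumped so
 * that the next re-arm doorbell carries a fresh sequence number.
 */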
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
{
	struct hns_roce_cq *hr_cq;
	struct ib_cq *ibcq;

	hr_cq = xa_load(&hr_dev->cq_table.array,
			cqn & (hr_dev->caps.num_cqs - 1));
	if (!hr_cq) {
		dev_warn(hr_dev->dev, "completion event for bogus CQ 0x%06x\n",
			 cqn);
		return;
	}

	++hr_cq->arm_sn;
	ibcq = &hr_cq->ib_cq;
	if (ibcq->comp_handler)
		ibcq->comp_handler(ibcq, ibcq->cq_context);
}

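/*
 * Asynchronous (error) event dispatch. Only the three CQ error event
 * types are forwarded to the consumer, all reported as IB_EVENT_CQ_ERR.
 * The reference held across the handler call pairs with the wait in
 * free_cqc(), so a CQ cannot be freed while its event handler runs.
 */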
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_cq *hr_cq;
	struct ib_event event;
	struct ib_cq *ibcq;

	hr_cq = xa_load(&hr_dev->cq_table.array,
			cqn & (hr_dev->caps.num_cqs - 1));
	if (!hr_cq) {
		dev_warn(dev, "async event for bogus CQ 0x%06x\n", cqn);
		return;
	}

	if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
	    event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
	    event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
		dev_err(dev, "unexpected event type 0x%x on CQ 0x%06x\n",
			event_type, cqn);
		return;
	}

	refcount_inc(&hr_cq->refcount);

	ibcq = &hr_cq->ib_cq;
	if (ibcq->event_handler) {
		event.device = ibcq->device;
		event.element.cq = ibcq;
		event.event = IB_EVENT_CQ_ERR;
		ibcq->event_handler(&event, ibcq->cq_context);
	}

	if (refcount_dec_and_test(&hr_cq->refcount))
		complete(&hr_cq->free);
}

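/*
 * Initialize the CQ table at device load. The firmware-reserved CQNs
 * (0 .. reserved_cqs - 1) are distributed over the banks exactly as
 * alloc_cqn() would map them, by raising each bank's minimum IDA id and
 * pre-counting them as in-use; each bank then gets an equal share of
 * num_cqs as its id range.
 */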
void hns_roce_init_cq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
	unsigned int reserved_from_bot;
	unsigned int i;

	mutex_init(&cq_table->bank_mutex);
	xa_init(&cq_table->array);

	reserved_from_bot = hr_dev->caps.reserved_cqs;

	for (i = 0; i < reserved_from_bot; i++) {
		cq_table->bank[get_cq_bankid(i)].inuse++;
		cq_table->bank[get_cq_bankid(i)].min++;
	}

	for (i = 0; i < HNS_ROCE_CQ_BANK_NUM; i++) {
		ida_init(&cq_table->bank[i].ida);
		cq_table->bank[i].max = hr_dev->caps.num_cqs /
					HNS_ROCE_CQ_BANK_NUM - 1;
	}
}

void hns_roce_cleanup_cq_table(struct hns_roce_dev *hr_dev)
{
	int i;

	for (i = 0; i < HNS_ROCE_CQ_BANK_NUM; i++)
		ida_destroy(&hr_dev->cq_table.bank[i].ida);
}