/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/vmalloc.h>
#include <linux/count_zeros.h>
#include <rdma/ib_umem.h>
#include <linux/math.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"

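/*
 * The MR key handed to the verbs layer is the MTPT index rotated left by
 * 8 bits; key_to_hw_index() performs the inverse rotation to recover the
 * hardware index.
 */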
static u32 hw_index_to_key(int ind)
{
	return ((u32)ind >> 24) | ((u32)ind << 8);
}

unsigned long key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}

static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
	struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int err;
	int id;

	/* Allocate a key for mr from mr_table */
	id = ida_alloc_range(&mtpt_ida->ida, mtpt_ida->min, mtpt_ida->max,
			     GFP_KERNEL);
	if (id < 0) {
		ibdev_err(ibdev, "failed to alloc id for MR key, id(%d)\n", id);
		return -ENOMEM;
	}

	mr->key = hw_index_to_key(id); /* MR key */

	err = hns_roce_table_get(hr_dev, &hr_dev->mr_table.mtpt_table,
				 (unsigned long)id);
	if (err) {
		ibdev_err(ibdev, "failed to alloc mtpt, ret = %d.\n", err);
		goto err_free_bitmap;
	}

	return 0;
err_free_bitmap:
	ida_free(&mtpt_ida->ida, id);
	return err;
}

static void free_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
	unsigned long obj = key_to_hw_index(mr->key);

	hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table, obj);
	ida_free(&hr_dev->mr_table.mtpt_ida.ida, (int)obj);
}

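/*
 * Set up the PBL (page buffer list) MTR that translates the MR's virtual
 * range into buffer page addresses. For a fast-reg MR (FRMR) only the MTT
 * is allocated here; the pages are supplied later via hns_roce_map_mr_sg().
 */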
static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
			struct ib_udata *udata, u64 start)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	bool is_fast = mr->type == MR_TYPE_FRMR;
	struct hns_roce_buf_attr buf_attr = {};
	int err;

	mr->pbl_hop_num = is_fast ? 1 : hr_dev->caps.pbl_hop_num;
	buf_attr.page_shift = is_fast ? PAGE_SHIFT :
			      hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT;
	buf_attr.region[0].size = mr->size;
	buf_attr.region[0].hopnum = mr->pbl_hop_num;
	buf_attr.region_count = 1;
	buf_attr.user_access = mr->access;
	/* a fast MR's buffer is allocated before mapping, not at creation */
	buf_attr.mtt_only = is_fast;
	buf_attr.iova = mr->iova;
	/* page size and hop num are fixed for a fast MR */
	buf_attr.adaptive = !is_fast;
	buf_attr.type = MTR_PBL;

	err = hns_roce_mtr_create(hr_dev, &mr->pbl_mtr, &buf_attr,
				  hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT,
				  udata, start);
	if (err) {
		ibdev_err(ibdev, "failed to alloc pbl mtr, ret = %d.\n", err);
		return err;
	}

	mr->npages = mr->pbl_mtr.hem_cfg.buf_pg_count;
	mr->pbl_hop_num = buf_attr.region[0].hopnum;

	return err;
}

static void free_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
	hns_roce_mtr_destroy(hr_dev, &mr->pbl_mtr);
}

static void hns_roce_mr_free(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int ret;

	if (mr->enabled) {
		ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_MPT,
					      key_to_hw_index(mr->key) &
					      (hr_dev->caps.num_mtpts - 1));
		if (ret)
			ibdev_warn(ibdev, "failed to destroy mpt, ret = %d.\n",
				   ret);
	}

	free_mr_pbl(hr_dev, mr);
	free_mr_key(hr_dev, mr);
}

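/*
 * Program the MPT (memory protection table) entry for this MR through a
 * mailbox command and mark the MR as enabled on success.
 */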
static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mr *mr)
{
	unsigned long mtpt_idx = key_to_hw_index(mr->key);
	struct hns_roce_cmd_mailbox *mailbox;
	struct device *dev = hr_dev->dev;
	int ret;

	/* Allocate mailbox memory */
	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (mr->type != MR_TYPE_FRMR)
		ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr);
	else
		ret = hr_dev->hw->frmr_write_mtpt(mailbox->buf, mr);
	if (ret) {
		dev_err(dev, "failed to write mtpt, ret = %d.\n", ret);
		goto err_page;
	}

	ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_MPT,
				     mtpt_idx & (hr_dev->caps.num_mtpts - 1));
	if (ret) {
		dev_err(dev, "failed to create mpt, ret = %d.\n", ret);
		goto err_page;
	}

	mr->enabled = 1;

err_page:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}

void hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida;

	ida_init(&mtpt_ida->ida);
	mtpt_ida->max = hr_dev->caps.num_mtpts - 1;
	mtpt_ida->min = hr_dev->caps.reserved_mrws;
}

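/* A DMA MR covers the whole DMA address space, so no PBL is built here. */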
struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct hns_roce_mr *mr;
	int ret;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->type = MR_TYPE_DMA;
	mr->pd = to_hr_pd(pd)->pdn;
	mr->access = acc;

	/* Allocate memory region key */
	hns_roce_hem_list_init(&mr->pbl_mtr.hem_list);
	ret = alloc_mr_key(hr_dev, mr);
	if (ret)
		goto err_free;

	ret = hns_roce_mr_enable(hr_dev, mr);
	if (ret)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;

	return &mr->ibmr;
err_mr:
	free_mr_key(hr_dev, mr);

err_free:
	kfree(mr);
	return ERR_PTR(ret);
}

struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				   u64 virt_addr, int access_flags,
				   struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct hns_roce_mr *mr;
	int ret;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		ret = -ENOMEM;
		goto err_out;
	}

	mr->iova = virt_addr;
	mr->size = length;
	mr->pd = to_hr_pd(pd)->pdn;
	mr->access = access_flags;
	mr->type = MR_TYPE_MR;

	ret = alloc_mr_key(hr_dev, mr);
	if (ret)
		goto err_alloc_mr;

	ret = alloc_mr_pbl(hr_dev, mr, udata, start);
	if (ret)
		goto err_alloc_key;

	ret = hns_roce_mr_enable(hr_dev, mr);
	if (ret)
		goto err_alloc_pbl;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;

	return &mr->ibmr;

err_alloc_pbl:
	free_mr_pbl(hr_dev, mr);
err_alloc_key:
	free_mr_key(hr_dev, mr);
err_alloc_mr:
	kfree(mr);
err_out:
	atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MR_REG_ERR_CNT]);

	return ERR_PTR(ret);
}

struct ib_mr *hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start,
				     u64 length, u64 virt_addr,
				     int mr_access_flags, struct ib_pd *pd,
				     struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct ib_device *ib_dev = &hr_dev->ib_dev;
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	struct hns_roce_cmd_mailbox *mailbox;
	unsigned long mtpt_idx;
	int ret;

	if (!mr->enabled) {
		ret = -EINVAL;
		goto err_out;
	}

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	ret = PTR_ERR_OR_ZERO(mailbox);
	if (ret)
		goto err_out;

	mtpt_idx = key_to_hw_index(mr->key) & (hr_dev->caps.num_mtpts - 1);

	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_MPT,
				mtpt_idx);
	if (ret)
		goto free_cmd_mbox;

	ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_MPT,
				      mtpt_idx);
	if (ret)
		ibdev_warn(ib_dev, "failed to destroy MPT, ret = %d.\n", ret);

	mr->enabled = 0;
	mr->iova = virt_addr;
	mr->size = length;

	if (flags & IB_MR_REREG_PD)
		mr->pd = to_hr_pd(pd)->pdn;

	if (flags & IB_MR_REREG_ACCESS)
		mr->access = mr_access_flags;

	if (flags & IB_MR_REREG_TRANS) {
		free_mr_pbl(hr_dev, mr);
		ret = alloc_mr_pbl(hr_dev, mr, udata, start);
		if (ret) {
			ibdev_err(ib_dev, "failed to alloc mr PBL, ret = %d.\n",
				  ret);
			goto free_cmd_mbox;
		}
	}

	ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, mailbox->buf);
	if (ret) {
		ibdev_err(ib_dev, "failed to write mtpt, ret = %d.\n", ret);
		goto free_cmd_mbox;
	}

	ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_MPT,
				     mtpt_idx);
	if (ret) {
		ibdev_err(ib_dev, "failed to create MPT, ret = %d.\n", ret);
		goto free_cmd_mbox;
	}

	mr->enabled = 1;

free_cmd_mbox:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

err_out:
	if (ret) {
		atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_MR_REREG_ERR_CNT]);
		return ERR_PTR(ret);
	}

	return NULL;
}

int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct hns_roce_mr *mr = to_hr_mr(ibmr);

	if (hr_dev->hw->dereg_mr)
		hr_dev->hw->dereg_mr(hr_dev);

	hns_roce_mr_free(hr_dev, mr);
	kfree(mr);

	return 0;
}

struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
				u32 max_num_sg)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct device *dev = hr_dev->dev;
	struct hns_roce_mr *mr;
	int ret;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	if (max_num_sg > HNS_ROCE_FRMR_MAX_PA) {
		dev_err(dev, "max_num_sg larger than %d\n",
			HNS_ROCE_FRMR_MAX_PA);
		return ERR_PTR(-EINVAL);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->type = MR_TYPE_FRMR;
	mr->pd = to_hr_pd(pd)->pdn;
	mr->size = max_num_sg * (1 << PAGE_SHIFT);

	/* Allocate memory region key */
	ret = alloc_mr_key(hr_dev, mr);
	if (ret)
		goto err_free;

	ret = alloc_mr_pbl(hr_dev, mr, NULL, 0);
	if (ret)
		goto err_key;

	ret = hns_roce_mr_enable(hr_dev, mr);
	if (ret)
		goto err_pbl;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
	mr->ibmr.length = mr->size;

	return &mr->ibmr;

err_pbl:
	free_mr_pbl(hr_dev, mr);
err_key:
	free_mr_key(hr_dev, mr);
err_free:
	kfree(mr);
	return ERR_PTR(ret);
}

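/*
 * ib_sg_to_pages() callback: record one page address in the temporary page
 * list, failing once the PBL capacity is exceeded.
 */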
static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct hns_roce_mr *mr = to_hr_mr(ibmr);

	if (likely(mr->npages < mr->pbl_mtr.hem_cfg.buf_pg_count)) {
		mr->page_list[mr->npages++] = addr;
		return 0;
	}

	return -ENOBUFS;
}

int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		       unsigned int *sg_offset)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	struct hns_roce_mtr *mtr = &mr->pbl_mtr;
	int ret, sg_num = 0;

	mr->npages = 0;
	mr->page_list = kvcalloc(mr->pbl_mtr.hem_cfg.buf_pg_count,
				 sizeof(dma_addr_t), GFP_KERNEL);
	if (!mr->page_list)
		return sg_num;

	sg_num = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
	if (sg_num < 1) {
		ibdev_err(ibdev, "failed to store sg pages %u %u, cnt = %d.\n",
			  mr->npages, mr->pbl_mtr.hem_cfg.buf_pg_count, sg_num);
		goto err_page_list;
	}

	mtr->hem_cfg.region[0].offset = 0;
	mtr->hem_cfg.region[0].count = mr->npages;
	mtr->hem_cfg.region[0].hopnum = mr->pbl_hop_num;
	mtr->hem_cfg.region_count = 1;
	ret = hns_roce_mtr_map(hr_dev, mtr, mr->page_list, mr->npages);
	if (ret) {
		ibdev_err(ibdev, "failed to map sg mtr, ret = %d.\n", ret);
		sg_num = 0;
	} else {
		mr->pbl_mtr.hem_cfg.buf_pg_shift = (u32)ilog2(ibmr->page_size);
	}

err_page_list:
	kvfree(mr->page_list);
	mr->page_list = NULL;

	return sg_num;
}

static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
			     struct hns_roce_mw *mw)
{
	struct device *dev = hr_dev->dev;
	int ret;

	if (mw->enabled) {
		ret = hns_roce_destroy_hw_ctx(hr_dev, HNS_ROCE_CMD_DESTROY_MPT,
					      key_to_hw_index(mw->rkey) &
					      (hr_dev->caps.num_mtpts - 1));
		if (ret)
			dev_warn(dev, "MW DESTROY_MPT failed (%d)\n", ret);

		hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
				   key_to_hw_index(mw->rkey));
	}

	ida_free(&hr_dev->mr_table.mtpt_ida.ida,
		 (int)key_to_hw_index(mw->rkey));
}

static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mw *mw)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
	struct hns_roce_cmd_mailbox *mailbox;
	struct device *dev = hr_dev->dev;
	unsigned long mtpt_idx = key_to_hw_index(mw->rkey);
	int ret;

	/* prepare HEM entry memory */
	ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
	if (ret)
		return ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ret = PTR_ERR(mailbox);
		goto err_table;
	}

	ret = hr_dev->hw->mw_write_mtpt(mailbox->buf, mw);
	if (ret) {
		dev_err(dev, "MW write mtpt fail!\n");
		goto err_page;
	}

	ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_MPT,
				     mtpt_idx & (hr_dev->caps.num_mtpts - 1));
	if (ret) {
		dev_err(dev, "MW CREATE_MPT failed (%d)\n", ret);
		goto err_page;
	}

	mw->enabled = 1;

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

err_page:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

err_table:
	hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);

	return ret;
}

int hns_roce_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
	struct hns_roce_ida *mtpt_ida = &hr_dev->mr_table.mtpt_ida;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_mw *mw = to_hr_mw(ibmw);
	int ret;
	int id;

	/* Allocate a key for mw from mr_table */
	id = ida_alloc_range(&mtpt_ida->ida, mtpt_ida->min, mtpt_ida->max,
			     GFP_KERNEL);
	if (id < 0) {
		ibdev_err(ibdev, "failed to alloc id for MW key, id(%d)\n", id);
		return -ENOMEM;
	}

	mw->rkey = hw_index_to_key(id);

	ibmw->rkey = mw->rkey;
	mw->pdn = to_hr_pd(ibmw->pd)->pdn;
	mw->pbl_hop_num = hr_dev->caps.pbl_hop_num;
	mw->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
	mw->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;

	ret = hns_roce_mw_enable(hr_dev, mw);
	if (ret)
		goto err_mw;

	return 0;

err_mw:
	hns_roce_mw_free(hr_dev, mw);
	return ret;
}

int hns_roce_dealloc_mw(struct ib_mw *ibmw)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
	struct hns_roce_mw *mw = to_hr_mw(ibmw);

	hns_roce_mw_free(hr_dev, mw);
	return 0;
}

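/*
 * Copy the page addresses of one region into the MTT entries found in the
 * multi-hop HEM list; returns the number of pages written, or a negative
 * errno on failure.
 */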
static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			  struct hns_roce_buf_region *region, dma_addr_t *pages,
			  int max_count)
{
	int count, npage;
	int offset, end;
	__le64 *mtts;
	u64 addr;
	int i;

	offset = region->offset;
	end = offset + region->count;
	npage = 0;
	while (offset < end && npage < max_count) {
		count = 0;
		mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
						  offset, &count);
		if (!mtts)
			return -ENOBUFS;

		for (i = 0; i < count && npage < max_count; i++) {
			addr = pages[npage];

			mtts[i] = cpu_to_le64(addr);
			npage++;
		}
		offset += count;
	}

	return npage;
}

static inline bool mtr_has_mtt(struct hns_roce_buf_attr *attr)
{
	int i;

	for (i = 0; i < attr->region_count; i++)
		if (attr->region[i].hopnum != HNS_ROCE_HOP_NUM_0 &&
		    attr->region[i].hopnum > 0)
			return true;

	/* Because the mtr has only one root base address, a hopnum of 0 means
	 * the root base address equals the first buffer address, so all of
	 * the allocated memory must lie in a contiguous space accessed in
	 * direct mode.
	 */
	return false;
}

static inline size_t mtr_bufs_size(struct hns_roce_buf_attr *attr)
{
	size_t size = 0;
	int i;

	for (i = 0; i < attr->region_count; i++)
		size += attr->region[i].size;

	return size;
}

/*
 * Check that the given pages occupy a contiguous address space.
 * Returns 0 on success, or the index of the first non-contiguous page.
 */
static inline int mtr_check_direct_pages(dma_addr_t *pages, int page_count,
					 unsigned int page_shift)
{
	size_t page_size = 1 << page_shift;
	int i;

	for (i = 1; i < page_count; i++)
		if (pages[i] - pages[i - 1] != page_size)
			return i;

	return 0;
}

static void mtr_free_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
{
	/* release user buffers */
	if (mtr->umem) {
		ib_umem_release(mtr->umem);
		mtr->umem = NULL;
	}

	/* release kernel buffers */
	if (mtr->kmem) {
		hns_roce_buf_free(hr_dev, mtr->kmem);
		mtr->kmem = NULL;
	}
}

static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			  struct hns_roce_buf_attr *buf_attr,
			  struct ib_udata *udata, unsigned long user_addr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	size_t total_size;

	total_size = mtr_bufs_size(buf_attr);

	if (udata) {
		mtr->kmem = NULL;
		mtr->umem = ib_umem_get(ibdev, user_addr, total_size,
					buf_attr->user_access);
		if (IS_ERR(mtr->umem)) {
			ibdev_err(ibdev, "failed to get umem, ret = %ld.\n",
				  PTR_ERR(mtr->umem));
			return -ENOMEM;
		}
	} else {
		mtr->umem = NULL;
		mtr->kmem = hns_roce_buf_alloc(hr_dev, total_size,
					       buf_attr->page_shift,
					       !mtr_has_mtt(buf_attr) ?
					       HNS_ROCE_BUF_DIRECT : 0);
		if (IS_ERR(mtr->kmem)) {
			ibdev_err(ibdev, "failed to alloc kmem, ret = %ld.\n",
				  PTR_ERR(mtr->kmem));
			return PTR_ERR(mtr->kmem);
		}
	}

	return 0;
}

static int cal_mtr_pg_cnt(struct hns_roce_mtr *mtr)
{
	struct hns_roce_buf_region *region;
	int page_cnt = 0;
	int i;

	for (i = 0; i < mtr->hem_cfg.region_count; i++) {
		region = &mtr->hem_cfg.region[i];
		page_cnt += region->count;
	}

	return page_cnt;
}

static bool need_split_huge_page(struct hns_roce_mtr *mtr)
{
	/* When the HEM buffer uses 0-level addressing, the page size is
	 * equal to the whole buffer size. If the current MTR has multiple
	 * regions, we split the buffer into small pages (4K, required by the
	 * hns ROCEE). These pages will be used in multiple regions.
	 */
	return mtr->hem_cfg.is_direct && mtr->hem_cfg.region_count > 1;
}

static int mtr_map_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int page_count = cal_mtr_pg_cnt(mtr);
	unsigned int page_shift;
	dma_addr_t *pages;
	int npage;
	int ret;

	page_shift = need_split_huge_page(mtr) ? HNS_HW_PAGE_SHIFT :
						 mtr->hem_cfg.buf_pg_shift;
	/* alloc a tmp array to store buffer's dma address */
	pages = kvcalloc(page_count, sizeof(dma_addr_t), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	if (mtr->umem)
		npage = hns_roce_get_umem_bufs(pages, page_count,
					       mtr->umem, page_shift);
	else
		npage = hns_roce_get_kmem_bufs(hr_dev, pages, page_count,
					       mtr->kmem, page_shift);

	if (npage != page_count) {
		ibdev_err(ibdev, "failed to get mtr page %d != %d.\n", npage,
			  page_count);
		ret = -ENOBUFS;
		goto err_alloc_list;
	}

	if (need_split_huge_page(mtr) && npage > 1) {
		ret = mtr_check_direct_pages(pages, npage, page_shift);
		if (ret) {
			ibdev_err(ibdev, "failed to check %s page: %d / %d.\n",
				  mtr->umem ? "umtr" : "kmtr", ret, npage);
			ret = -ENOBUFS;
			goto err_alloc_list;
		}
	}

	ret = hns_roce_mtr_map(hr_dev, mtr, pages, page_count);
	if (ret)
		ibdev_err(ibdev, "failed to map mtr pages, ret = %d.\n", ret);

err_alloc_list:
	kvfree(pages);

	return ret;
}

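/*
 * Publish the buffer page addresses to the MTR: in direct mode only the
 * root base address is recorded; otherwise the per-region MTT entries are
 * filled in via mtr_map_region().
 */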
int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		     dma_addr_t *pages, unsigned int page_cnt)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_region *r;
	unsigned int i, mapped_cnt;
	int ret = 0;

	/*
	 * Only use the first page address as the root ba when hopnum is 0;
	 * in that case the addresses of all pages are consecutive.
	 */
	if (mtr->hem_cfg.is_direct) {
		mtr->hem_cfg.root_ba = pages[0];
		return 0;
	}

	for (i = 0, mapped_cnt = 0; i < mtr->hem_cfg.region_count &&
	     mapped_cnt < page_cnt; i++) {
		r = &mtr->hem_cfg.region[i];
		/* if hopnum is 0, no need to map pages in this region */
		if (!r->hopnum) {
			mapped_cnt += r->count;
			continue;
		}

		if (r->offset + r->count > page_cnt) {
			ret = -EINVAL;
			ibdev_err(ibdev,
				  "failed to check mtr%u count %u + %u > %u.\n",
				  i, r->offset, r->count, page_cnt);
			return ret;
		}

		ret = mtr_map_region(hr_dev, mtr, r, &pages[r->offset],
				     page_cnt - mapped_cnt);
		if (ret < 0) {
			ibdev_err(ibdev,
				  "failed to map mtr%u offset %u, ret = %d.\n",
				  i, r->offset, ret);
			return ret;
		}
		mapped_cnt += ret;
		ret = 0;
	}

	if (mapped_cnt < page_cnt) {
		ret = -ENOBUFS;
		ibdev_err(ibdev, "failed to map mtr pages count: %u < %u.\n",
			  mapped_cnt, page_cnt);
	}

	return ret;
}

static int hns_roce_get_direct_addr_mtt(struct hns_roce_hem_cfg *cfg,
					u32 start_index, u64 *mtt_buf,
					int mtt_cnt)
{
	int mtt_count;
	int total = 0;
	u32 npage;
	u64 addr;

	if (mtt_cnt > cfg->region_count)
		return -EINVAL;

	for (mtt_count = 0; mtt_count < cfg->region_count && total < mtt_cnt;
	     mtt_count++) {
		npage = cfg->region[mtt_count].offset;
		if (npage < start_index)
			continue;

		addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT);
		mtt_buf[total] = addr;

		total++;
	}

	if (!total)
		return -ENOENT;

	return 0;
}

static int hns_roce_get_mhop_mtt(struct hns_roce_dev *hr_dev,
				 struct hns_roce_mtr *mtr, u32 start_index,
				 u64 *mtt_buf, int mtt_cnt)
{
	int left = mtt_cnt;
	int total = 0;
	int mtt_count;
	__le64 *mtts;
	u32 npage;

	while (left > 0) {
		mtt_count = 0;
		mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
						  start_index + total,
						  &mtt_count);
		if (!mtts || !mtt_count)
			break;

		npage = min(mtt_count, left);
		left -= npage;
		for (mtt_count = 0; mtt_count < npage; mtt_count++)
			mtt_buf[total++] = le64_to_cpu(mtts[mtt_count]);
	}

	if (!total)
		return -ENOENT;

	return 0;
}

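/*
 * Look up the DMA addresses that back an MTR at the given byte offset and
 * copy up to mtt_max of them into mtt_buf.
 */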
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		      u32 offset, u64 *mtt_buf, int mtt_max)
{
	struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
	u32 start_index;
	int ret;

	if (!mtt_buf || mtt_max < 1)
		return -EINVAL;

	/* no mtt memory in direct mode, so just return the buffer address */
	if (cfg->is_direct) {
		start_index = offset >> HNS_HW_PAGE_SHIFT;
		ret = hns_roce_get_direct_addr_mtt(cfg, start_index,
						   mtt_buf, mtt_max);
	} else {
		start_index = offset >> cfg->buf_pg_shift;
		ret = hns_roce_get_mhop_mtt(hr_dev, mtr, start_index,
					    mtt_buf, mtt_max);
	}
	return ret;
}

static int get_best_page_shift(struct hns_roce_dev *hr_dev,
			       struct hns_roce_mtr *mtr,
			       struct hns_roce_buf_attr *buf_attr)
{
	unsigned int page_sz;

	if (!buf_attr->adaptive || buf_attr->type != MTR_PBL || !mtr->umem)
		return 0;

	page_sz = ib_umem_find_best_pgsz(mtr->umem,
					 hr_dev->caps.page_size_cap,
					 buf_attr->iova);
	if (!page_sz)
		return -EINVAL;

	buf_attr->page_shift = order_base_2(page_sz);
	return 0;
}

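/*
 * Pick the smallest hop number whose base-address tables can address all
 * of the buffer pages, assuming ba_pg_sz / BA_BYTE_LEN BAs per table.
 */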
static int get_best_hop_num(struct hns_roce_dev *hr_dev,
			    struct hns_roce_mtr *mtr,
			    struct hns_roce_buf_attr *buf_attr,
			    unsigned int ba_pg_shift)
{
#define INVALID_HOPNUM -1
#define MIN_BA_CNT 1
	size_t buf_pg_sz = 1 << buf_attr->page_shift;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	size_t ba_pg_sz = 1 << ba_pg_shift;
	int hop_num = INVALID_HOPNUM;
	size_t unit = MIN_BA_CNT;
	size_t ba_cnt;
	int j;

	if (!buf_attr->adaptive || buf_attr->type != MTR_PBL)
		return 0;

	/* Calculate the number of buf pages; each buf page needs a BA */
	if (mtr->umem)
		ba_cnt = ib_umem_num_dma_blocks(mtr->umem, buf_pg_sz);
	else
		ba_cnt = DIV_ROUND_UP(buf_attr->region[0].size, buf_pg_sz);

	for (j = 0; j <= HNS_ROCE_MAX_HOP_NUM; j++) {
		if (ba_cnt <= unit) {
			hop_num = j;
			break;
		}
		/* Number of BAs that can be represented per hop */
		unit *= ba_pg_sz / BA_BYTE_LEN;
	}

	if (hop_num < 0) {
		ibdev_err(ibdev,
			  "failed to calculate a valid hopnum.\n");
		return -EINVAL;
	}

	buf_attr->region[0].hopnum = hop_num;

	return 0;
}

static bool is_buf_attr_valid(struct hns_roce_dev *hr_dev,
			      struct hns_roce_buf_attr *attr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;

	if (attr->region_count > ARRAY_SIZE(attr->region) ||
	    attr->region_count < 1 || attr->page_shift < HNS_HW_PAGE_SHIFT) {
		ibdev_err(ibdev,
			  "invalid buf attr, region count %d, page shift %u.\n",
			  attr->region_count, attr->page_shift);
		return false;
	}

	return true;
}

static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev,
			    struct hns_roce_mtr *mtr,
			    struct hns_roce_buf_attr *attr)
{
	struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
	struct hns_roce_buf_region *r;
	size_t buf_pg_sz;
	size_t buf_size;
	int page_cnt, i;
	u64 pgoff = 0;

	if (!is_buf_attr_valid(hr_dev, attr))
		return -EINVAL;

	/* If mtt is disabled, all pages must be within a contiguous range */
	cfg->is_direct = !mtr_has_mtt(attr);
	cfg->region_count = attr->region_count;
	buf_size = mtr_bufs_size(attr);
	if (need_split_huge_page(mtr)) {
		buf_pg_sz = HNS_HW_PAGE_SIZE;
		cfg->buf_pg_count = 1;
		/* The ROCEE requires the page size to be 4K * 2 ^ N. */
		cfg->buf_pg_shift = HNS_HW_PAGE_SHIFT +
			order_base_2(DIV_ROUND_UP(buf_size, HNS_HW_PAGE_SIZE));
	} else {
		buf_pg_sz = 1 << attr->page_shift;
		cfg->buf_pg_count = mtr->umem ?
				    ib_umem_num_dma_blocks(mtr->umem, buf_pg_sz) :
				    DIV_ROUND_UP(buf_size, buf_pg_sz);
		cfg->buf_pg_shift = attr->page_shift;
		pgoff = mtr->umem ? mtr->umem->address & ~PAGE_MASK : 0;
	}

	/* Convert the buffer size into a page index and page count for each
	 * region, and append the buffer's offset to the first region.
	 */
	for (page_cnt = 0, i = 0; i < attr->region_count; i++) {
		r = &cfg->region[i];
		r->offset = page_cnt;
		buf_size = hr_hw_page_align(attr->region[i].size + pgoff);
		if (attr->type == MTR_PBL && mtr->umem)
			r->count = ib_umem_num_dma_blocks(mtr->umem, buf_pg_sz);
		else
			r->count = DIV_ROUND_UP(buf_size, buf_pg_sz);

		pgoff = 0;
		page_cnt += r->count;
		r->hopnum = to_hr_hem_hopnum(attr->region[i].hopnum, r->count);
	}

	return 0;
}

static u64 cal_pages_per_l1ba(unsigned int ba_per_bt, unsigned int hopnum)
{
	return int_pow(ba_per_bt, hopnum - 1);
}

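/*
 * Choose the smallest supported BT page size for which all of the regions'
 * level-1 BAs fit into a single base-address table.
 */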
static unsigned int cal_best_bt_pg_sz(struct hns_roce_dev *hr_dev,
				      struct hns_roce_mtr *mtr,
				      unsigned int pg_shift)
{
	unsigned long cap = hr_dev->caps.page_size_cap;
	struct hns_roce_buf_region *re;
	unsigned int pgs_per_l1ba;
	unsigned int ba_per_bt;
	unsigned int ba_num;
	int i;

	for_each_set_bit_from(pg_shift, &cap, sizeof(cap) * BITS_PER_BYTE) {
		if (!(BIT(pg_shift) & cap))
			continue;

		ba_per_bt = BIT(pg_shift) / BA_BYTE_LEN;
		ba_num = 0;
		for (i = 0; i < mtr->hem_cfg.region_count; i++) {
			re = &mtr->hem_cfg.region[i];
			if (re->hopnum == 0)
				continue;

			pgs_per_l1ba = cal_pages_per_l1ba(ba_per_bt, re->hopnum);
			ba_num += DIV_ROUND_UP(re->count, pgs_per_l1ba);
		}

		if (ba_num <= ba_per_bt)
			return pg_shift;
	}

	return 0;
}

static int mtr_alloc_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			 unsigned int ba_page_shift)
{
	struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
	int ret;

	hns_roce_hem_list_init(&mtr->hem_list);
	if (!cfg->is_direct) {
		ba_page_shift = cal_best_bt_pg_sz(hr_dev, mtr, ba_page_shift);
		if (!ba_page_shift)
			return -ERANGE;

		ret = hns_roce_hem_list_request(hr_dev, &mtr->hem_list,
						cfg->region, cfg->region_count,
						ba_page_shift);
		if (ret)
			return ret;
		cfg->root_ba = mtr->hem_list.root_ba;
		cfg->ba_pg_shift = ba_page_shift;
	} else {
		cfg->ba_pg_shift = cfg->buf_pg_shift;
	}

	return 0;
}

static void mtr_free_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
{
	hns_roce_hem_list_release(hr_dev, &mtr->hem_list);
}

/**
 * hns_roce_mtr_create - Create hns memory translate region.
 *
 * @hr_dev: RoCE device struct pointer
 * @mtr: memory translate region
 * @buf_attr: buffer attribute for creating mtr
 * @ba_page_shift: page shift for multi-hop base address table
 * @udata: user space context, if it's NULL, means kernel space
 * @user_addr: userspace virtual address to start at
 */
int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			struct hns_roce_buf_attr *buf_attr,
			unsigned int ba_page_shift, struct ib_udata *udata,
			unsigned long user_addr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int ret;

	/* The caller has its own buffer list and invokes the hns_roce_mtr_map()
	 * to finish the MTT configuration.
	 */
	if (buf_attr->mtt_only) {
		mtr->umem = NULL;
		mtr->kmem = NULL;
	} else {
		ret = mtr_alloc_bufs(hr_dev, mtr, buf_attr, udata, user_addr);
		if (ret) {
			ibdev_err(ibdev,
				  "failed to alloc mtr bufs, ret = %d.\n", ret);
			return ret;
		}

		ret = get_best_page_shift(hr_dev, mtr, buf_attr);
		if (ret)
			goto err_init_buf;

		ret = get_best_hop_num(hr_dev, mtr, buf_attr, ba_page_shift);
		if (ret)
			goto err_init_buf;
	}

	ret = mtr_init_buf_cfg(hr_dev, mtr, buf_attr);
	if (ret)
		goto err_init_buf;

	ret = mtr_alloc_mtt(hr_dev, mtr, ba_page_shift);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc mtr mtt, ret = %d.\n", ret);
		goto err_init_buf;
	}

	if (buf_attr->mtt_only)
		return 0;

	/* Write buffer's dma address to MTT */
	ret = mtr_map_bufs(hr_dev, mtr);
	if (ret) {
		ibdev_err(ibdev, "failed to map mtr bufs, ret = %d.\n", ret);
		goto err_alloc_mtt;
	}

	return 0;

err_alloc_mtt:
	mtr_free_mtt(hr_dev, mtr);
err_init_buf:
	mtr_free_bufs(hr_dev, mtr);

	return ret;
}

void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
{
	/* release multi-hop addressing resource */
	hns_roce_hem_list_release(hr_dev, &mtr->hem_list);

	/* free buffers */
	mtr_free_bufs(hr_dev, mtr);
}