xref: /linux/drivers/vdpa/mlx5/core/mr.c (revision e91c37f1)
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Ltd. */

#include <linux/vhost_types.h>
#include <linux/vdpa.h>
#include <linux/gcd.h>
#include <linux/string.h>
#include <linux/mlx5/qp.h>
#include "mlx5_vdpa.h"

/* DIV_ROUND_UP where the divisor is a power of 2 given by its log base 2
 * value, e.g. MLX5_DIV_ROUND_UP_POW2(5000, 12) == (5000 + 4095) >> 12 == 2
 */
#define MLX5_DIV_ROUND_UP_POW2(_n, _s) \
({ \
	u64 __s = _s; \
	u64 _res; \
	_res = (((_n) + (1 << (__s)) - 1) >> (__s)); \
	_res; \
})

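/* Number of 16-byte translation octwords needed to hold the MTT entries
 * for len bytes: one 8-byte MTT entry per page, two entries per octword.
 */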
static int get_octo_len(u64 len, int page_shift)
{
	u64 page_size = 1ULL << page_shift;
	int npages;

	npages = ALIGN(len, page_size) >> page_shift;
	return (npages + 1) / 2;
}

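/* The mkey access mode is split across two context fields: bits 1:0 and
 * bits 4:2 of the mode value.
 */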
static void mlx5_set_access_mode(void *mkc, int mode)
{
	MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2, mode >> 2);
}

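/* Walk the DMA-mapped scatterlist and emit one MTT entry (a DMA address)
 * for each 2^log_size sized chunk of each mapped segment, up to mr->nsg
 * entries in total.
 */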
static void populate_mtts(struct mlx5_vdpa_direct_mr *mr, __be64 *mtt)
{
	struct scatterlist *sg;
	int nsg = mr->nsg;
	u64 dma_addr;
	u64 dma_len;
	int j = 0;
	int i;

	for_each_sg(mr->sg_head.sgl, sg, mr->nent, i) {
		for (dma_addr = sg_dma_address(sg), dma_len = sg_dma_len(sg);
		     nsg && dma_len;
		     nsg--, dma_addr += BIT(mr->log_size), dma_len -= BIT(mr->log_size))
			mtt[j++] = cpu_to_be64(dma_addr);
	}
}

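/* Create a direct MTT mkey spanning [mr->start, mr->end). Local
 * read/write permissions are derived from the vhost perm bits, and the
 * translation entries are laid out by populate_mtts() before the mkey is
 * created.
 */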
static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
{
	int inlen;
	void *mkc;
	void *in;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + roundup(MLX5_ST_SZ_BYTES(mtt) * mr->nsg, 16);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, lw, !!(mr->perm & VHOST_MAP_WO));
	MLX5_SET(mkc, mkc, lr, !!(mr->perm & VHOST_MAP_RO));
	mlx5_set_access_mode(mkc, MLX5_MKC_ACCESS_MODE_MTT);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, mvdev->res.pdn);
	MLX5_SET64(mkc, mkc, start_addr, mr->offset);
	MLX5_SET64(mkc, mkc, len, mr->end - mr->start);
	MLX5_SET(mkc, mkc, log_page_size, mr->log_size);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 get_octo_len(mr->end - mr->start, mr->log_size));
	MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
		 get_octo_len(mr->end - mr->start, mr->log_size));
	populate_mtts(mr, MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt));
	err = mlx5_vdpa_create_mkey(mvdev, &mr->mr, in, inlen);
	kvfree(in);
	if (err) {
		mlx5_vdpa_warn(mvdev, "Failed to create direct MR\n");
		return err;
	}

	return 0;
}

static void destroy_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
{
	mlx5_vdpa_destroy_mkey(mvdev, mr->mr);
}

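/* Helpers to clamp an iotlb map to the boundaries of a direct MR and to
 * compute the length of the overlap.
 */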
static u64 map_start(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
{
	return max_t(u64, map->start, mr->start);
}

static u64 map_end(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
{
	return min_t(u64, map->last + 1, mr->end);
}

static u64 maplen(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
{
	return map_end(map, mr) - map_start(map, mr);
}

#define MLX5_VDPA_INVALID_START_ADDR ((u64)-1)
#define MLX5_VDPA_INVALID_LEN ((u64)-1)

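/* The indirect mkey spans the range covered by the chain of direct MRs:
 * from the start of the first entry to the end of the last one.
 */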
static u64 indir_start_addr(struct mlx5_vdpa_mr *mkey)
{
	struct mlx5_vdpa_direct_mr *s;

	s = list_first_entry_or_null(&mkey->head, struct mlx5_vdpa_direct_mr, list);
	if (!s)
		return MLX5_VDPA_INVALID_START_ADDR;

	return s->start;
}

static u64 indir_len(struct mlx5_vdpa_mr *mkey)
{
	struct mlx5_vdpa_direct_mr *s;
	struct mlx5_vdpa_direct_mr *e;

	s = list_first_entry_or_null(&mkey->head, struct mlx5_vdpa_direct_mr, list);
	if (!s)
		return MLX5_VDPA_INVALID_LEN;

	e = list_last_entry(&mkey->head, struct mlx5_vdpa_direct_mr, list);

	return e->end - s->start;
}

#define LOG_MAX_KLM_SIZE 30
#define MAX_KLM_SIZE BIT(LOG_MAX_KLM_SIZE)

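/* A KLM byte count is a 32-bit field. Direct MRs are built in chunks of
 * at most MAX_KLM_SIZE (1 GB), so the truncation cannot lose bits.
 */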
static u32 klm_bcount(u64 size)
{
	return (u32)size;
}

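/* Fill the KLM array of the indirect mkey. Each direct MR contributes an
 * entry referring to its mkey; a hole between two direct MRs is covered
 * by an entry pointing at the null mkey, after which the current direct
 * MR is emitted (the "again" label).
 */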
static void fill_indir(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mkey, void *in)
{
	struct mlx5_vdpa_direct_mr *dmr;
	struct mlx5_klm *klmarr;
	struct mlx5_klm *klm;
	bool first = true;
	u64 preve;
	int i;

	klmarr = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
	i = 0;
	list_for_each_entry(dmr, &mkey->head, list) {
again:
		klm = &klmarr[i++];
		if (first) {
			preve = dmr->start;
			first = false;
		}

		if (preve == dmr->start) {
			klm->key = cpu_to_be32(dmr->mr);
			klm->bcount = cpu_to_be32(klm_bcount(dmr->end - dmr->start));
			preve = dmr->end;
		} else {
			klm->key = cpu_to_be32(mvdev->res.null_mkey);
			klm->bcount = cpu_to_be32(klm_bcount(dmr->start - preve));
			preve = dmr->start;
			goto again;
		}
	}
}

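/* Each KLM entry is 16 bytes; the array is padded to a multiple of four
 * entries (64 bytes).
 */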
static int klm_byte_size(int nklms)
{
	return 16 * ALIGN(nklms, 4);
}

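/* Create the indirect (KLM) mkey that stitches the direct mkeys together
 * into one contiguous address space for the device.
 */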
static int create_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
	int inlen;
	void *mkc;
	void *in;
	int err;
	u64 start;
	u64 len;

	start = indir_start_addr(mr);
	len = indir_len(mr);
	if (start == MLX5_VDPA_INVALID_START_ADDR || len == MLX5_VDPA_INVALID_LEN)
		return -EINVAL;

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + klm_byte_size(mr->num_klms);
	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	mlx5_set_access_mode(mkc, MLX5_MKC_ACCESS_MODE_KLMS);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, mvdev->res.pdn);
	MLX5_SET64(mkc, mkc, start_addr, start);
	MLX5_SET64(mkc, mkc, len, len);
	MLX5_SET(mkc, mkc, translations_octword_size, klm_byte_size(mr->num_klms) / 16);
	MLX5_SET(create_mkey_in, in, translations_octword_actual_size, mr->num_klms);
	fill_indir(mvdev, mr, in);
	err = mlx5_vdpa_create_mkey(mvdev, &mr->mkey, in, inlen);
	kfree(in);
	return err;
}

static void destroy_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mkey)
{
	mlx5_vdpa_destroy_mkey(mvdev, mkey->mkey);
}

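/* Build a scatterlist for the guest memory backing a direct MR and
 * DMA-map it. The scatterlist entry size is derived from the GCD of the
 * lengths of all iotlb maps overlapping the MR, rounded down to a power
 * of two, so a single MTT page size can be used for the whole range.
 */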
static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr,
			 struct vhost_iotlb *iotlb)
{
	struct vhost_iotlb_map *map;
	unsigned long lgcd = 0;
	int log_entity_size;
	unsigned long size;
	u64 start = 0;
	int err;
	struct page *pg;
	unsigned int nsg;
	int sglen;
	u64 pa;
	u64 paend;
	struct scatterlist *sg;
	struct device *dma = mvdev->vdev.dma_dev;

	for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
	     map; map = vhost_iotlb_itree_next(map, start, mr->end - 1)) {
		size = maplen(map, mr);
		lgcd = gcd(lgcd, size);
		start += size;
	}
	log_entity_size = ilog2(lgcd);

	sglen = 1 << log_entity_size;
	nsg = MLX5_DIV_ROUND_UP_POW2(mr->end - mr->start, log_entity_size);

	err = sg_alloc_table(&mr->sg_head, nsg, GFP_KERNEL);
	if (err)
		return err;

	sg = mr->sg_head.sgl;
	for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
	     map; map = vhost_iotlb_itree_next(map, mr->start, mr->end - 1)) {
		paend = map->addr + maplen(map, mr);
		for (pa = map->addr; pa < paend; pa += sglen) {
			pg = pfn_to_page(__phys_to_pfn(pa));
			if (!sg) {
				mlx5_vdpa_warn(mvdev, "sg null. start 0x%llx, end 0x%llx\n",
					       map->start, map->last + 1);
				err = -ENOMEM;
				goto err_map;
			}
			sg_set_page(sg, pg, sglen, 0);
			sg = sg_next(sg);
			if (!sg)
				goto done;
		}
	}
done:
	mr->log_size = log_entity_size;
	mr->nsg = nsg;
	mr->nent = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
	if (!mr->nent) {
		err = -ENOMEM;
		goto err_map;
	}

	err = create_direct_mr(mvdev, mr);
	if (err)
		goto err_direct;

	return 0;

err_direct:
	dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
err_map:
	sg_free_table(&mr->sg_head);
	return err;
}

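/* Tear down a direct MR: destroy its mkey, then undo the DMA mapping and
 * free the scatterlist.
 */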
static void unmap_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
{
	struct device *dma = mvdev->vdev.dma_dev;

	destroy_direct_mr(mvdev, mr);
	dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
	sg_free_table(&mr->sg_head);
}

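/* Cover [start, start + size) with a chain of direct MRs, each at most
 * MAX_KLM_SIZE bytes long, accounting one KLM entry per direct MR. On
 * failure, unwind all direct MRs already linked into mr->head.
 */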
static int add_direct_chain(struct mlx5_vdpa_dev *mvdev,
			    struct mlx5_vdpa_mr *mr,
			    u64 start,
			    u64 size,
			    u8 perm,
			    struct vhost_iotlb *iotlb)
{
	struct mlx5_vdpa_direct_mr *dmr;
	struct mlx5_vdpa_direct_mr *n;
	LIST_HEAD(tmp);
	u64 st;
	u64 sz;
	int err;

	st = start;
	while (size) {
		sz = (u32)min_t(u64, MAX_KLM_SIZE, size);
		dmr = kzalloc(sizeof(*dmr), GFP_KERNEL);
		if (!dmr) {
			err = -ENOMEM;
			goto err_alloc;
		}

		dmr->start = st;
		dmr->end = st + sz;
		dmr->perm = perm;
		err = map_direct_mr(mvdev, dmr, iotlb);
		if (err) {
			kfree(dmr);
			goto err_alloc;
		}

		list_add_tail(&dmr->list, &tmp);
		size -= sz;
		mr->num_directs++;
		mr->num_klms++;
		st += sz;
	}
	list_splice_tail(&tmp, &mr->head);
	return 0;

err_alloc:
	list_for_each_entry_safe(dmr, n, &mr->head, list) {
		list_del_init(&dmr->list);
		unmap_direct_mr(mvdev, dmr);
		kfree(dmr);
	}
	return err;
}

/* The iotlb pointer contains a list of maps. Go over the maps, merging
 * adjacent maps where possible, and create direct memory keys that
 * provide the device access to memory. The direct mkeys are then
 * referred to by the indirect memory key that provides access to the
 * entire address space given by iotlb.
 */
static int create_user_mr(struct mlx5_vdpa_dev *mvdev,
			  struct mlx5_vdpa_mr *mr,
			  struct vhost_iotlb *iotlb)
{
	struct mlx5_vdpa_direct_mr *dmr;
	struct mlx5_vdpa_direct_mr *n;
	struct vhost_iotlb_map *map;
	u32 pperm = U16_MAX;
	u64 last = U64_MAX;
	u64 ps = U64_MAX;
	u64 pe = U64_MAX;
	u64 start = 0;
	int err = 0;
	int nnuls;

	INIT_LIST_HEAD(&mr->head);
	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		start = map->start;
		if (pe == map->start && pperm == map->perm) {
			pe = map->last + 1;
		} else {
			if (ps != U64_MAX) {
				if (pe < map->start) {
					/* We have a hole in the map. Check how
					 * many null keys are required to fill it.
					 */
					nnuls = MLX5_DIV_ROUND_UP_POW2(map->start - pe,
								       LOG_MAX_KLM_SIZE);
					mr->num_klms += nnuls;
				}
				err = add_direct_chain(mvdev, mr, ps, pe - ps, pperm, iotlb);
				if (err)
					goto err_chain;
			}
			ps = map->start;
			pe = map->last + 1;
			pperm = map->perm;
		}
	}
	err = add_direct_chain(mvdev, mr, ps, pe - ps, pperm, iotlb);
	if (err)
		goto err_chain;

	/* Create the memory key that defines the guest's address space. This
	 * memory key refers to the direct keys that contain the MTT
	 * translations.
	 */
	err = create_indirect_key(mvdev, mr);
	if (err)
		goto err_chain;

	mr->user_mr = true;
	return 0;

err_chain:
	list_for_each_entry_safe_reverse(dmr, n, &mr->head, list) {
		list_del_init(&dmr->list);
		unmap_direct_mr(mvdev, dmr);
		kfree(dmr);
	}
	return err;
}

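/* Create a passthrough mkey: access mode PA with length64 set means the
 * device uses physical addresses directly over the whole address space,
 * with no translation table.
 */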
static int create_dma_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	void *mkc;
	u32 *in;
	int err;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
	MLX5_SET(mkc, mkc, length64, 1);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, pd, mvdev->res.pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);

	err = mlx5_vdpa_create_mkey(mvdev, &mr->mkey, in, inlen);
	if (!err)
		mr->user_mr = false;

	kfree(in);
	return err;
}

static void destroy_dma_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
	mlx5_vdpa_destroy_mkey(mvdev, mr->mkey);
}

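/* Copy all map entries from src to dst. A NULL src stands for the
 * identity mapping: a single RW range covering the whole address space.
 */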
static int dup_iotlb(struct vhost_iotlb *dst, struct vhost_iotlb *src)
{
	struct vhost_iotlb_map *map;
	u64 start = 0, last = ULLONG_MAX;
	int err;

	if (dst == src)
		return -EINVAL;

	if (!src) {
		err = vhost_iotlb_add_range(dst, start, last, start, VHOST_ACCESS_RW);
		return err;
	}

	for (map = vhost_iotlb_itree_first(src, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		err = vhost_iotlb_add_range(dst, map->start, map->last,
					    map->addr, map->perm);
		if (err)
			return err;
	}
	return 0;
}

static void prune_iotlb(struct vhost_iotlb *iotlb)
{
	vhost_iotlb_del_range(iotlb, 0, ULLONG_MAX);
}

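/* Destroy the indirect mkey, then unwind and free every direct MR in the
 * chain.
 */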
static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
	struct mlx5_vdpa_direct_mr *dmr;
	struct mlx5_vdpa_direct_mr *n;

	destroy_indirect_key(mvdev, mr);
	list_for_each_entry_safe_reverse(dmr, n, &mr->head, list) {
		list_del_init(&dmr->list);
		unmap_direct_mr(mvdev, dmr);
		kfree(dmr);
	}
}

static void _mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
	if (WARN_ON(!mr))
		return;

	if (mr->user_mr)
		destroy_user_mr(mvdev, mr);
	else
		destroy_dma_mr(mvdev, mr);

	vhost_iotlb_free(mr->iotlb);

	list_del(&mr->mr_list);

	kfree(mr);
}

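/* Drop a reference to the MR, destroying it on the last put. The caller
 * must hold mvdev->mr_mtx.
 */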
static void _mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
			      struct mlx5_vdpa_mr *mr)
{
	if (!mr)
		return;

	if (refcount_dec_and_test(&mr->refcount))
		_mlx5_vdpa_destroy_mr(mvdev, mr);
}

void mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
		      struct mlx5_vdpa_mr *mr)
{
	mutex_lock(&mvdev->mr_mtx);
	_mlx5_vdpa_put_mr(mvdev, mr);
	mutex_unlock(&mvdev->mr_mtx);
}

static void _mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
			      struct mlx5_vdpa_mr *mr)
{
	if (!mr)
		return;

	refcount_inc(&mr->refcount);
}

void mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
		      struct mlx5_vdpa_mr *mr)
{
	mutex_lock(&mvdev->mr_mtx);
	_mlx5_vdpa_get_mr(mvdev, mr);
	mutex_unlock(&mvdev->mr_mtx);
}

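/* Swap the MR installed for an address space: drop the reference on the
 * old MR and install new_mr, which may be NULL to leave the ASID without
 * an MR.
 */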
void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev,
			 struct mlx5_vdpa_mr *new_mr,
			 unsigned int asid)
{
	struct mlx5_vdpa_mr *old_mr = mvdev->mr[asid];

	mutex_lock(&mvdev->mr_mtx);

	_mlx5_vdpa_put_mr(mvdev, old_mr);
	mvdev->mr[asid] = new_mr;

	mutex_unlock(&mvdev->mr_mtx);
}

static void mlx5_vdpa_show_mr_leaks(struct mlx5_vdpa_dev *mvdev)
{
	struct mlx5_vdpa_mr *mr;

	mutex_lock(&mvdev->mr_mtx);

	list_for_each_entry(mr, &mvdev->mr_list_head, mr_list) {
		mlx5_vdpa_warn(mvdev, "mkey still alive after resource delete: "
				      "mr: %p, mkey: 0x%x, refcount: %u\n",
				      mr, mr->mkey, refcount_read(&mr->refcount));
	}

	mutex_unlock(&mvdev->mr_mtx);
}

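/* Drop every per-ASID MR and the CVQ iotlb, then report any mkeys that
 * are still alive.
 */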
void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev)
{
	for (int i = 0; i < MLX5_VDPA_NUM_AS; i++)
		mlx5_vdpa_update_mr(mvdev, NULL, i);

	prune_iotlb(mvdev->cvq.iotlb);

	mlx5_vdpa_show_mr_leaks(mvdev);
}

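/* Create the MR (a user MR if an iotlb is given, a DMA MR otherwise) and
 * keep a private copy of the iotlb. Called with mvdev->mr_mtx held.
 */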
static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
				struct mlx5_vdpa_mr *mr,
				struct vhost_iotlb *iotlb)
{
	int err;

	if (iotlb)
		err = create_user_mr(mvdev, mr, iotlb);
	else
		err = create_dma_mr(mvdev, mr);

	if (err)
		return err;

	mr->iotlb = vhost_iotlb_alloc(0, 0);
	if (!mr->iotlb) {
		err = -ENOMEM;
		goto err_mr;
	}

	err = dup_iotlb(mr->iotlb, iotlb);
	if (err)
		goto err_iotlb;

	list_add_tail(&mr->mr_list, &mvdev->mr_list_head);

	return 0;

err_iotlb:
	vhost_iotlb_free(mr->iotlb);

err_mr:
	if (iotlb)
		destroy_user_mr(mvdev, mr);
	else
		destroy_dma_mr(mvdev, mr);

	return err;
}

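/* Allocate and build an MR, returning it with an initial reference held. */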
struct mlx5_vdpa_mr *mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
					 struct vhost_iotlb *iotlb)
{
	struct mlx5_vdpa_mr *mr;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&mvdev->mr_mtx);
	err = _mlx5_vdpa_create_mr(mvdev, mr, iotlb);
	mutex_unlock(&mvdev->mr_mtx);

	if (err)
		goto out_err;

	refcount_set(&mr->refcount, 1);

	return mr;

out_err:
	kfree(mr);
	return ERR_PTR(err);
}

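/* Refresh the CVQ iotlb copy, but only if the CVQ group is mapped to the
 * given ASID.
 */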
int mlx5_vdpa_update_cvq_iotlb(struct mlx5_vdpa_dev *mvdev,
			       struct vhost_iotlb *iotlb,
			       unsigned int asid)
{
	int err;

	if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
		return 0;

	spin_lock(&mvdev->cvq.iommu_lock);

	prune_iotlb(mvdev->cvq.iotlb);
	err = dup_iotlb(mvdev->cvq.iotlb, iotlb);

	spin_unlock(&mvdev->cvq.iommu_lock);

	return err;
}

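/* Install a fresh DMA MR on ASID 0 and reset the CVQ iotlb to the
 * identity mapping.
 */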
int mlx5_vdpa_create_dma_mr(struct mlx5_vdpa_dev *mvdev)
{
	struct mlx5_vdpa_mr *mr;

	mr = mlx5_vdpa_create_mr(mvdev, NULL);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	mlx5_vdpa_update_mr(mvdev, mr, 0);

	return mlx5_vdpa_update_cvq_iotlb(mvdev, NULL, 0);
}

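/* Detach the MR of the given ASID. When ASID 0 is reset and the device
 * reports the umem_uid_0 capability, a default DMA MR is recreated in
 * its place; otherwise only the CVQ iotlb is reset.
 */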
int mlx5_vdpa_reset_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
{
	if (asid >= MLX5_VDPA_NUM_AS)
		return -EINVAL;

	mlx5_vdpa_update_mr(mvdev, NULL, asid);

	if (asid == 0 && MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
		if (mlx5_vdpa_create_dma_mr(mvdev))
			mlx5_vdpa_warn(mvdev, "create DMA MR failed\n");
	} else {
		mlx5_vdpa_update_cvq_iotlb(mvdev, NULL, asid);
	}

	return 0;
}