// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022, Microsoft Corporation. All rights reserved.
 */

#include "mana_ib.h"

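/* Userspace-visible access flags accepted by mana_ib_reg_user_mr(). */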
#define VALID_MR_FLAGS                                                         \
	(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ)

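/*
 * Convert IB verbs access flags to their GDMA hardware equivalents.
 * Local read is always granted, as it is implied for every MR.
 */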
static enum gdma_mr_access_flags
mana_ib_verbs_to_gdma_access_flags(int access_flags)
{
	enum gdma_mr_access_flags flags = GDMA_ACCESS_FLAG_LOCAL_READ;

	if (access_flags & IB_ACCESS_LOCAL_WRITE)
		flags |= GDMA_ACCESS_FLAG_LOCAL_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		flags |= GDMA_ACCESS_FLAG_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		flags |= GDMA_ACCESS_FLAG_REMOTE_READ;

	return flags;
}

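/*
 * Send a GDMA_CREATE_MR request to the device and, on success, record the
 * returned lkey, rkey and MR handle in @mr. Only GDMA_MR_TYPE_GVA regions
 * are supported here; any other type fails with -EINVAL.
 */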
static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr,
				struct gdma_create_mr_params *mr_params)
{
	struct gdma_create_mr_response resp = {};
	struct gdma_create_mr_request req = {};
	struct gdma_context *gc = mdev_to_gc(dev);
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_MR, sizeof(req),
			     sizeof(resp));
	req.pd_handle = mr_params->pd_handle;
	req.mr_type = mr_params->mr_type;

	switch (mr_params->mr_type) {
	case GDMA_MR_TYPE_GVA:
		req.gva.dma_region_handle = mr_params->gva.dma_region_handle;
		req.gva.virtual_address = mr_params->gva.virtual_address;
		req.gva.access_flags = mr_params->gva.access_flags;
		break;

	default:
		ibdev_dbg(&dev->ib_dev,
			  "invalid param (GDMA_MR_TYPE) passed, type %d\n",
			  req.mr_type);
		return -EINVAL;
	}

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		ibdev_dbg(&dev->ib_dev, "Failed to create mr %d, %u\n", err,
			  resp.hdr.status);
		if (!err)
			err = -EPROTO;

		return err;
	}

	mr->ibmr.lkey = resp.lkey;
	mr->ibmr.rkey = resp.rkey;
	mr->mr_handle = resp.mr_handle;

	return 0;
}

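/*
 * Send a GDMA_DESTROY_MR request to release the device MR identified by
 * @mr_handle.
 */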
static int mana_ib_gd_destroy_mr(struct mana_ib_dev *dev, u64 mr_handle)
{
	struct gdma_destroy_mr_response resp = {};
	struct gdma_destroy_mr_request req = {};
	struct gdma_context *gc = mdev_to_gc(dev);
	int err;

	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_MR, sizeof(req),
			     sizeof(resp));

	req.mr_handle = mr_handle;

	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
	if (err || resp.hdr.status) {
		dev_err(gc->dev, "Failed to destroy MR: %d, 0x%x\n", err,
			resp.hdr.status);
		if (!err)
			err = -EPROTO;
		return err;
	}

	return 0;
}

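/*
 * Register a user-space memory region: pin the pages with ib_umem_get(),
 * create a GDMA DMA region describing them, then create a GVA MR in the
 * caller's protection domain. Returns the new ib_mr or an ERR_PTR.
 */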
struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
				  u64 iova, int access_flags,
				  struct ib_udata *udata)
{
	struct mana_ib_pd *pd = container_of(ibpd, struct mana_ib_pd, ibpd);
	struct gdma_create_mr_params mr_params = {};
	struct ib_device *ibdev = ibpd->device;
	struct mana_ib_dev *dev;
	struct mana_ib_mr *mr;
	u64 dma_region_handle;
	int err;

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	ibdev_dbg(ibdev,
		  "start 0x%llx, iova 0x%llx, length 0x%llx, access_flags 0x%x\n",
		  start, iova, length, access_flags);

	if (access_flags & ~VALID_MR_FLAGS)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

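	/* Pin the user address range and build its scatter/gather list. */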
	mr->umem = ib_umem_get(ibdev, start, length, access_flags);
	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		ibdev_dbg(ibdev,
			  "Failed to get umem for register user-mr, %d\n", err);
		goto err_free;
	}

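	/* Map the pinned pages into a GDMA DMA region the device can access. */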
	err = mana_ib_create_dma_region(dev, mr->umem, &dma_region_handle, iova);
	if (err) {
		ibdev_dbg(ibdev, "Failed to create dma region for user-mr, %d\n",
			  err);
		goto err_umem;
	}

	ibdev_dbg(ibdev, "created dma region 0x%llx\n", dma_region_handle);

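	/* Describe the new MR to the hardware: a GVA region in the caller's PD. */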
	mr_params.pd_handle = pd->pd_handle;
	mr_params.mr_type = GDMA_MR_TYPE_GVA;
	mr_params.gva.dma_region_handle = dma_region_handle;
	mr_params.gva.virtual_address = iova;
	mr_params.gva.access_flags =
		mana_ib_verbs_to_gdma_access_flags(access_flags);

	err = mana_ib_gd_create_mr(dev, mr, &mr_params);
	if (err)
		goto err_dma_region;

	/*
	 * There is no need to keep track of dma_region_handle after the MR is
	 * successfully created. The dma_region_handle is tracked in the PF
	 * as part of the lifecycle of this MR.
	 */

	return &mr->ibmr;

err_dma_region:
	mana_gd_destroy_dma_region(mdev_to_gc(dev), dma_region_handle);

err_umem:
	ib_umem_release(mr->umem);

err_free:
	kfree(mr);
	return ERR_PTR(err);
}

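/*
 * Deregister a user MR: destroy the device MR first, then unpin the user
 * memory and free the driver object. If the hardware destroy fails, the
 * error is returned and the MR is left intact.
 */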
int mana_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct mana_ib_mr *mr = container_of(ibmr, struct mana_ib_mr, ibmr);
	struct ib_device *ibdev = ibmr->device;
	struct mana_ib_dev *dev;
	int err;

	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);

	err = mana_ib_gd_destroy_mr(dev, mr->mr_handle);
	if (err)
		return err;

	if (mr->umem)
		ib_umem_release(mr->umem);

	kfree(mr);

	return 0;
}