xref: /freebsd/sys/dev/mlx5/mlx5_core/mlx5_uar.c (revision 10ff414c)
/*-
 * Copyright (c) 2013-2020, Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io-mapping.h>
#include <dev/mlx5/driver.h>
#include "mlx5_core.h"

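/*
 * Ask the firmware for a User Access Region (UAR). On success the
 * UAR index reported by the ALLOC_UAR command is stored in *uarn.
 */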
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
{
	u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(alloc_uar_in)]   = {0};
	int err;

	MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*uarn = MLX5_GET(alloc_uar_out, out, uar);
	return err;
}
EXPORT_SYMBOL(mlx5_cmd_alloc_uar);

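/*
 * Return a UAR index to the firmware via the DEALLOC_UAR command.
 */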
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
{
	u32 out[MLX5_ST_SZ_DW(dealloc_uar_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)]   = {0};

	MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR);
	MLX5_SET(dealloc_uar_in, in, uar, uarn);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_cmd_free_uar);

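/*
 * Number of 4K UARs sharing one system page. When the uar_4k
 * capability is off, a UAR is the size of a system page, so each
 * system page holds exactly one UAR.
 */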
static int uars_per_sys_page(struct mlx5_core_dev *mdev)
{
	if (MLX5_CAP_GEN(mdev, uar_4k))
		return MLX5_CAP_GEN(mdev, num_of_uars_per_page);

	return 1;
}

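/*
 * Translate a UAR index into the page frame number backing it within
 * BAR 0 of the device.
 */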
static u64 uar2pfn(struct mlx5_core_dev *mdev, u32 index)
{
	u32 system_page_index;

	if (MLX5_CAP_GEN(mdev, uar_4k))
		system_page_index = index >> (PAGE_SHIFT - MLX5_ADAPTER_PAGE_SHIFT);
	else
		system_page_index = index;

	return (pci_resource_start(mdev->pdev, 0) >> PAGE_SHIFT) + system_page_index;
}

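/*
 * kref release callback: unlink the UAR page, unmap it, hand its
 * index back to the firmware and free the bookkeeping. Runs with the
 * list lock held, since both callers of kref_put() hold it.
 */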
static void up_rel_func(struct kref *kref)
{
	struct mlx5_uars_page *up = container_of(kref, struct mlx5_uars_page, ref_count);

	list_del(&up->list);
	iounmap(up->map);
	if (mlx5_cmd_free_uar(up->mdev, up->index))
		mlx5_core_warn(up->mdev, "failed to free uar index %d\n", up->index);
	bitmap_free(up->reg_bitmap);
	bitmap_free(up->fp_bitmap);
	kfree(up);
}

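/*
 * Allocate one system page worth of UARs: get an index from the
 * firmware, map the page and carve it into regular and fast-path
 * blue flame register slots tracked by two bitmaps. A failed
 * write-combining mapping reports -EAGAIN so that mlx5_alloc_bfreg()
 * can retry without write combining.
 */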
static struct mlx5_uars_page *alloc_uars_page(struct mlx5_core_dev *mdev,
					      bool map_wc)
{
	struct mlx5_uars_page *up;
	int err = -ENOMEM;
	phys_addr_t pfn;
	int bfregs;
	int i;

	bfregs = uars_per_sys_page(mdev) * MLX5_BFREGS_PER_UAR;
	up = kzalloc(sizeof(*up), GFP_KERNEL);
	if (!up)
		return ERR_PTR(err);

	up->mdev = mdev;
	up->reg_bitmap = bitmap_zalloc(bfregs, GFP_KERNEL);
	if (!up->reg_bitmap)
		goto error1;

	up->fp_bitmap = bitmap_zalloc(bfregs, GFP_KERNEL);
	if (!up->fp_bitmap)
		goto error1;

	for (i = 0; i < bfregs; i++)
		if ((i % MLX5_BFREGS_PER_UAR) < MLX5_NON_FP_BFREGS_PER_UAR)
			set_bit(i, up->reg_bitmap);
		else
			set_bit(i, up->fp_bitmap);

	up->bfregs = bfregs;
	up->fp_avail = bfregs * MLX5_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR;
	up->reg_avail = bfregs * MLX5_NON_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR;

	err = mlx5_cmd_alloc_uar(mdev, &up->index);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err);
		goto error1;
	}

	pfn = uar2pfn(mdev, up->index);
	if (map_wc) {
		up->map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
		if (!up->map) {
			err = -EAGAIN;
			goto error2;
		}
	} else {
		up->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
		if (!up->map) {
			err = -ENOMEM;
			goto error2;
		}
	}
	kref_init(&up->ref_count);
	mlx5_core_dbg(mdev, "allocated UAR page: index %d, total bfregs %d\n",
		      up->index, up->bfregs);
	return up;

error2:
	if (mlx5_cmd_free_uar(mdev, up->index))
		mlx5_core_warn(mdev, "failed to free uar index %d\n", up->index);
error1:
	bitmap_free(up->fp_bitmap);
	bitmap_free(up->reg_bitmap);
	kfree(up);
	return ERR_PTR(err);
}

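/*
 * Return a reference-counted UAR page for regular (non
 * write-combining) use: reuse the first page on the reg_head list
 * when one exists, otherwise allocate a fresh one and add it.
 */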
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev)
{
	struct mlx5_uars_page *ret;

	mutex_lock(&mdev->priv.bfregs.reg_head.lock);
	if (!list_empty(&mdev->priv.bfregs.reg_head.list)) {
		ret = list_first_entry(&mdev->priv.bfregs.reg_head.list,
				       struct mlx5_uars_page, list);
		kref_get(&ret->ref_count);
		goto out;
	}
	ret = alloc_uars_page(mdev, false);
	if (IS_ERR(ret))
		goto out;
	list_add(&ret->list, &mdev->priv.bfregs.reg_head.list);
out:
	mutex_unlock(&mdev->priv.bfregs.reg_head.lock);

	return ret;
}
EXPORT_SYMBOL(mlx5_get_uars_page);

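/*
 * Drop a reference taken by mlx5_get_uars_page(); the page is torn
 * down by up_rel_func() once the last reference goes away.
 */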
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up)
{
	mutex_lock(&mdev->priv.bfregs.reg_head.lock);
	kref_put(&up->ref_count, up_rel_func);
	mutex_unlock(&mdev->priv.bfregs.reg_head.lock);
}
EXPORT_SYMBOL(mlx5_put_uars_page);

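/*
 * Convert a bfreg index within a system page into a byte offset:
 * skip whole 4K UARs first, then select the blue flame register
 * inside the UAR, whose area starts at MLX5_BF_OFFSET.
 */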
static unsigned long map_offset(struct mlx5_core_dev *mdev, int dbi)
{
	/* return the offset in bytes from the start of the page to the
	 * blue flame area of the UAR
	 */
	return dbi / MLX5_BFREGS_PER_UAR * MLX5_ADAPTER_PAGE_SIZE +
	       (dbi % MLX5_BFREGS_PER_UAR) *
	       (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) + MLX5_BF_OFFSET;
}

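/*
 * Carve one blue flame register slot out of a UAR page. The first
 * page on the per-type list is used when available; a page whose
 * last slot of the requested kind is handed out is dropped from the
 * list until a slot is returned by mlx5_free_bfreg().
 */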
static int alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
		       bool map_wc, bool fast_path)
{
	struct mlx5_bfreg_data *bfregs;
	struct mlx5_uars_page *up;
	struct list_head *head;
	unsigned long *bitmap;
	unsigned int *avail;
	struct mutex *lock;  /* pointer to right mutex */
	int dbi;

	bfregs = &mdev->priv.bfregs;
	if (map_wc) {
		head = &bfregs->wc_head.list;
		lock = &bfregs->wc_head.lock;
	} else {
		head = &bfregs->reg_head.list;
		lock = &bfregs->reg_head.lock;
	}
	mutex_lock(lock);
	if (list_empty(head)) {
		up = alloc_uars_page(mdev, map_wc);
		if (IS_ERR(up)) {
			mutex_unlock(lock);
			return PTR_ERR(up);
		}
		list_add(&up->list, head);
	} else {
		up = list_entry(head->next, struct mlx5_uars_page, list);
		kref_get(&up->ref_count);
	}
	if (fast_path) {
		bitmap = up->fp_bitmap;
		avail = &up->fp_avail;
	} else {
		bitmap = up->reg_bitmap;
		avail = &up->reg_avail;
	}
	dbi = find_first_bit(bitmap, up->bfregs);
	clear_bit(dbi, bitmap);
	(*avail)--;
	if (!(*avail))
		list_del(&up->list);

	bfreg->map = up->map + map_offset(mdev, dbi);
	bfreg->up = up;
	bfreg->wc = map_wc;
	bfreg->index = up->index + dbi / MLX5_BFREGS_PER_UAR;
	mutex_unlock(lock);

	return 0;
}

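/*
 * Allocate a blue flame register for a send queue. If a
 * write-combining mapping was requested but could not be set up
 * (alloc_uars_page() reports -EAGAIN), fall back to a regular
 * mapping.
 */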
int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
		     bool map_wc, bool fast_path)
{
	int err;

	err = alloc_bfreg(mdev, bfreg, map_wc, fast_path);
	if (!err)
		return 0;

	if (err == -EAGAIN && map_wc)
		return alloc_bfreg(mdev, bfreg, false, fast_path);

	return err;
}
EXPORT_SYMBOL(mlx5_alloc_bfreg);

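/*
 * Recover the bfreg index within its system page from the mapped
 * address: the distance from the start of the page identifies the
 * UAR, and the offset past MLX5_BF_OFFSET identifies the register
 * within it. This is the inverse of map_offset().
 */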
static unsigned int addr_to_dbi_in_syspage(struct mlx5_core_dev *dev,
					   struct mlx5_uars_page *up,
					   struct mlx5_sq_bfreg *bfreg)
{
	unsigned int uar_idx;
	unsigned int bfreg_idx;
	unsigned int bf_reg_size;

	bf_reg_size = 1 << MLX5_CAP_GEN(dev, log_bf_reg_size);

	uar_idx = (bfreg->map - up->map) >> MLX5_ADAPTER_PAGE_SHIFT;
	bfreg_idx = (((uintptr_t)bfreg->map % MLX5_ADAPTER_PAGE_SIZE) - MLX5_BF_OFFSET) / bf_reg_size;

	return uar_idx * MLX5_BFREGS_PER_UAR + bfreg_idx;
}

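/*
 * Return a blue flame register to its UAR page: mark the slot free,
 * put the page back on the proper list if this is its first free
 * slot of that kind, and drop the page reference.
 */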
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg)
{
	struct mlx5_bfreg_data *bfregs;
	struct mlx5_uars_page *up;
	struct mutex *lock; /* pointer to right mutex */
	unsigned int dbi;
	bool fp;
	unsigned int *avail;
	unsigned long *bitmap;
	struct list_head *head;

	bfregs = &mdev->priv.bfregs;
	if (bfreg->wc) {
		head = &bfregs->wc_head.list;
		lock = &bfregs->wc_head.lock;
	} else {
		head = &bfregs->reg_head.list;
		lock = &bfregs->reg_head.lock;
	}
	up = bfreg->up;
	dbi = addr_to_dbi_in_syspage(mdev, up, bfreg);
	fp = (dbi % MLX5_BFREGS_PER_UAR) >= MLX5_NON_FP_BFREGS_PER_UAR;
	if (fp) {
		avail = &up->fp_avail;
		bitmap = up->fp_bitmap;
	} else {
		avail = &up->reg_avail;
		bitmap = up->reg_bitmap;
	}
	mutex_lock(lock);
	(*avail)++;
	set_bit(dbi, bitmap);
	if (*avail == 1)
		list_add_tail(&up->list, head);

	kref_put(&up->ref_count, up_rel_func);
	mutex_unlock(lock);
}
EXPORT_SYMBOL(mlx5_free_bfreg);

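/*
 * A minimal usage sketch (hypothetical caller, not part of this
 * driver), pairing mlx5_alloc_bfreg() with mlx5_free_bfreg():
 *
 *	struct mlx5_sq_bfreg bfreg;
 *	int err;
 *
 *	err = mlx5_alloc_bfreg(mdev, &bfreg, true, false);
 *	if (err)
 *		return err;
 *	(write doorbell records through bfreg.map)
 *	mlx5_free_bfreg(mdev, &bfreg);
 */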