/*-
 * Copyright (c) 2013-2020, Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io-mapping.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/mlx5_core/mlx5_core.h>

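/*
 * Allocate a User Access Region (UAR) index from the firmware via the
 * ALLOC_UAR command.  On success the new index is returned in *uarn.
 */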
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
{
	u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(alloc_uar_in)]   = {0};
	int err;

	MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*uarn = MLX5_GET(alloc_uar_out, out, uar);
	return err;
}
EXPORT_SYMBOL(mlx5_cmd_alloc_uar);

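/*
 * Return a previously allocated UAR index to the firmware via the
 * DEALLOC_UAR command.
 */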
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
{
	u32 out[MLX5_ST_SZ_DW(dealloc_uar_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)]   = {0};

	MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR);
	MLX5_SET(dealloc_uar_in, in, uar, uarn);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_cmd_free_uar);

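/*
 * Number of UARs that share one system page.  Devices with 4KB UARs can
 * pack several UARs into a larger system page; otherwise a system page
 * holds exactly one UAR.
 */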
static int uars_per_sys_page(struct mlx5_core_dev *mdev)
{
	if (MLX5_CAP_GEN(mdev, uar_4k))
		return MLX5_CAP_GEN(mdev, num_of_uars_per_page);

	return 1;
}

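/*
 * Translate a UAR index into the page frame number of the system page
 * backing it within PCI BAR 0.
 */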
static u64 uar2pfn(struct mlx5_core_dev *mdev, u32 index)
{
	u32 system_page_index;

	if (MLX5_CAP_GEN(mdev, uar_4k))
		system_page_index = index >> (PAGE_SHIFT - MLX5_ADAPTER_PAGE_SHIFT);
	else
		system_page_index = index;

	return (pci_resource_start(mdev->pdev, 0) >> PAGE_SHIFT) + system_page_index;
}

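/*
 * kref release callback: unlink, unmap and free a UARs page once its
 * last reference is dropped.  Called with the lock protecting the list
 * the page is linked on held.
 */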
static void up_rel_func(struct kref *kref)
{
	struct mlx5_uars_page *up = container_of(kref, struct mlx5_uars_page, ref_count);

	list_del(&up->list);
	iounmap(up->map);
	if (mlx5_cmd_free_uar(up->mdev, up->index))
		mlx5_core_warn(up->mdev, "failed to free uar index %d\n", up->index);
	bitmap_free(up->reg_bitmap);
	bitmap_free(up->fp_bitmap);
	kfree(up);
}

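/*
 * Allocate one system page worth of UARs, map it (write-combining when
 * map_wc is set) and split its blue flame registers into a regular and
 * a fast path bitmap: within each UAR the first
 * MLX5_NON_FP_BFREGS_PER_UAR bfregs are regular, the rest fast path.
 * A failed write-combining mapping returns -EAGAIN so the caller can
 * retry with a non-WC mapping.
 */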
static struct mlx5_uars_page *alloc_uars_page(struct mlx5_core_dev *mdev,
					      bool map_wc)
{
	struct mlx5_uars_page *up;
	int err = -ENOMEM;
	phys_addr_t pfn;
	int bfregs;
	int i;

	bfregs = uars_per_sys_page(mdev) * MLX5_BFREGS_PER_UAR;
	up = kzalloc(sizeof(*up), GFP_KERNEL);
	if (!up)
		return ERR_PTR(err);

	up->mdev = mdev;
	up->reg_bitmap = bitmap_zalloc(bfregs, GFP_KERNEL);
	if (!up->reg_bitmap)
		goto error1;

	up->fp_bitmap = bitmap_zalloc(bfregs, GFP_KERNEL);
	if (!up->fp_bitmap)
		goto error1;

	for (i = 0; i < bfregs; i++)
		if ((i % MLX5_BFREGS_PER_UAR) < MLX5_NON_FP_BFREGS_PER_UAR)
			set_bit(i, up->reg_bitmap);
		else
			set_bit(i, up->fp_bitmap);

	up->bfregs = bfregs;
	up->fp_avail = bfregs * MLX5_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR;
	up->reg_avail = bfregs * MLX5_NON_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR;

	err = mlx5_cmd_alloc_uar(mdev, &up->index);
	if (err) {
		mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err);
		goto error1;
	}

	pfn = uar2pfn(mdev, up->index);
	if (map_wc) {
		up->map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
		if (!up->map) {
			err = -EAGAIN;
			goto error2;
		}
	} else {
		up->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
		if (!up->map) {
			err = -ENOMEM;
			goto error2;
		}
	}
	kref_init(&up->ref_count);
	mlx5_core_dbg(mdev, "allocated UAR page: index %d, total bfregs %d\n",
		      up->index, up->bfregs);
	return up;

error2:
	if (mlx5_cmd_free_uar(mdev, up->index))
		mlx5_core_warn(mdev, "failed to free uar index %d\n", up->index);
error1:
	bitmap_free(up->fp_bitmap);
	bitmap_free(up->reg_bitmap);
	kfree(up);
	return ERR_PTR(err);
}

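/*
 * Get a referenced UARs page from the regular (non-WC) list, allocating
 * and linking a new one if the list is empty.  Returns an ERR_PTR on
 * allocation failure.
 */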
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev)
{
	struct mlx5_uars_page *ret;

	mutex_lock(&mdev->priv.bfregs.reg_head.lock);
	if (!list_empty(&mdev->priv.bfregs.reg_head.list)) {
		ret = list_first_entry(&mdev->priv.bfregs.reg_head.list,
				       struct mlx5_uars_page, list);
		kref_get(&ret->ref_count);
		goto out;
	}
	ret = alloc_uars_page(mdev, false);
	if (IS_ERR(ret))
		goto out;
	list_add(&ret->list, &mdev->priv.bfregs.reg_head.list);
out:
	mutex_unlock(&mdev->priv.bfregs.reg_head.lock);

	return ret;
}
EXPORT_SYMBOL(mlx5_get_uars_page);

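/*
 * Drop a reference taken by mlx5_get_uars_page(); the page is torn down
 * by up_rel_func() when the last reference goes away.
 */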
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up)
{
	mutex_lock(&mdev->priv.bfregs.reg_head.lock);
	kref_put(&up->ref_count, up_rel_func);
	mutex_unlock(&mdev->priv.bfregs.reg_head.lock);
}
EXPORT_SYMBOL(mlx5_put_uars_page);

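/*
 * dbi indexes blue flame registers across the whole system page:
 * dbi / MLX5_BFREGS_PER_UAR selects the UAR within the page,
 * dbi % MLX5_BFREGS_PER_UAR the register within that UAR.
 */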
static unsigned long map_offset(struct mlx5_core_dev *mdev, int dbi)
{
	/* return the offset in bytes from the start of the page to the
	 * blue flame area of the UAR
	 */
	return dbi / MLX5_BFREGS_PER_UAR * MLX5_ADAPTER_PAGE_SIZE +
	       (dbi % MLX5_BFREGS_PER_UAR) *
	       (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) + MLX5_BF_OFFSET;
}

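/*
 * Carve one blue flame register out of a UARs page on the WC or regular
 * list, allocating a fresh page if the list is empty.  A page whose
 * chosen bitmap runs out of free slots is removed from the list so
 * later allocations skip it; each allocated bfreg holds a reference on
 * its page.
 */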
static int alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
		       bool map_wc, bool fast_path)
{
	struct mlx5_bfreg_data *bfregs;
	struct mlx5_uars_page *up;
	struct list_head *head;
	unsigned long *bitmap;
	unsigned int *avail;
	struct mutex *lock;  /* pointer to right mutex */
	int dbi;

	bfregs = &mdev->priv.bfregs;
	if (map_wc) {
		head = &bfregs->wc_head.list;
		lock = &bfregs->wc_head.lock;
	} else {
		head = &bfregs->reg_head.list;
		lock = &bfregs->reg_head.lock;
	}
	mutex_lock(lock);
	if (list_empty(head)) {
		up = alloc_uars_page(mdev, map_wc);
		if (IS_ERR(up)) {
			mutex_unlock(lock);
			return PTR_ERR(up);
		}
		list_add(&up->list, head);
	} else {
		up = list_entry(head->next, struct mlx5_uars_page, list);
		kref_get(&up->ref_count);
	}
	if (fast_path) {
		bitmap = up->fp_bitmap;
		avail = &up->fp_avail;
	} else {
		bitmap = up->reg_bitmap;
		avail = &up->reg_avail;
	}
	dbi = find_first_bit(bitmap, up->bfregs);
	clear_bit(dbi, bitmap);
	(*avail)--;
	if (!(*avail))
		list_del(&up->list);

	bfreg->map = up->map + map_offset(mdev, dbi);
	bfreg->up = up;
	bfreg->wc = map_wc;
	bfreg->index = up->index + dbi / MLX5_BFREGS_PER_UAR;
	mutex_unlock(lock);

	return 0;
}

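/*
 * Allocate a send queue blue flame register, falling back to a
 * non-write-combining mapping if the WC mapping fails with -EAGAIN.
 */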
int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
		     bool map_wc, bool fast_path)
{
	int err;

	err = alloc_bfreg(mdev, bfreg, map_wc, fast_path);
	if (!err)
		return 0;

	if (err == -EAGAIN && map_wc)
		return alloc_bfreg(mdev, bfreg, false, fast_path);

	return err;
}
EXPORT_SYMBOL(mlx5_alloc_bfreg);

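/*
 * Invert map_offset(): recover the bfreg's dbi within its system page
 * from its mapped address.
 */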
static unsigned int addr_to_dbi_in_syspage(struct mlx5_core_dev *dev,
					   struct mlx5_uars_page *up,
					   struct mlx5_sq_bfreg *bfreg)
{
	unsigned int uar_idx;
	unsigned int bfreg_idx;
	unsigned int bf_reg_size;

	bf_reg_size = 1 << MLX5_CAP_GEN(dev, log_bf_reg_size);

	uar_idx = (bfreg->map - up->map) >> MLX5_ADAPTER_PAGE_SHIFT;
	bfreg_idx = (((uintptr_t)bfreg->map % MLX5_ADAPTER_PAGE_SIZE) - MLX5_BF_OFFSET) / bf_reg_size;

	return uar_idx * MLX5_BFREGS_PER_UAR + bfreg_idx;
}

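/*
 * Release a blue flame register: mark its slot free again, relink the
 * page on the appropriate list if this is the first slot to come back,
 * and drop the reference the bfreg held on the page.
 */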
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg)
{
	struct mlx5_bfreg_data *bfregs;
	struct mlx5_uars_page *up;
	struct mutex *lock; /* pointer to right mutex */
	unsigned int dbi;
	bool fp;
	unsigned int *avail;
	unsigned long *bitmap;
	struct list_head *head;

	bfregs = &mdev->priv.bfregs;
	if (bfreg->wc) {
		head = &bfregs->wc_head.list;
		lock = &bfregs->wc_head.lock;
	} else {
		head = &bfregs->reg_head.list;
		lock = &bfregs->reg_head.lock;
	}
	up = bfreg->up;
	dbi = addr_to_dbi_in_syspage(mdev, up, bfreg);
	fp = (dbi % MLX5_BFREGS_PER_UAR) >= MLX5_NON_FP_BFREGS_PER_UAR;
	if (fp) {
		avail = &up->fp_avail;
		bitmap = up->fp_bitmap;
	} else {
		avail = &up->reg_avail;
		bitmap = up->reg_bitmap;
	}
	mutex_lock(lock);
	(*avail)++;
	set_bit(dbi, bitmap);
	if (*avail == 1)
		list_add_tail(&up->list, head);

	kref_put(&up->ref_count, up_rel_func);
	mutex_unlock(lock);
}
EXPORT_SYMBOL(mlx5_free_bfreg);