/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/xarray.h>
#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/tout.h"

enum {
	MLX5_PAGES_CANT_GIVE	= 0,
	MLX5_PAGES_GIVE		= 1,
	MLX5_PAGES_TAKE		= 2
};

struct mlx5_pages_req {
	struct mlx5_core_dev *dev;
	u16	func_id;
	u8	ec_function;
	s32	npages;
	struct work_struct work;
	u8	release_all;
};

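/* One system page given to firmware, carved into MLX5_NUM_4K_IN_PAGE 4K
 * chunks. Set bits in @bitmask mark chunks that are still free, and the
 * page stays on dev->priv.free_list while any chunk remains available.
 */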
struct fw_page {
	struct rb_node		rb_node;
	u64			addr;
	struct page	       *page;
	u32			function;
	unsigned long		bitmask;
	struct list_head	list;
	unsigned int free_count;
};

enum {
	MLX5_MAX_RECLAIM_TIME_MILI	= 5000,
	MLX5_NUM_4K_IN_PAGE		= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};

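/* Pack function_id and the embedded-CPU flag into the single key used for
 * page_root_xa lookups and fw_page tracking.
 */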
static u32 get_function(u16 func_id, bool ec_function)
{
	return (u32)func_id | (ec_function << 16);
}

static u16 func_id_to_type(struct mlx5_core_dev *dev, u16 func_id, bool ec_function)
{
	if (!func_id)
		return mlx5_core_is_ecpf(dev) && !ec_function ? MLX5_HOST_PF : MLX5_PF;

	if (func_id <= max(mlx5_core_max_vfs(dev), mlx5_core_max_ec_vfs(dev))) {
		if (ec_function)
			return MLX5_EC_VF;
		else
			return MLX5_VF;
	}
	return MLX5_SF;
}

static u32 mlx5_get_ec_function(u32 function)
{
	return function >> 16;
}

static u32 mlx5_get_func_id(u32 function)
{
	return function & 0xffff;
}

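/* Return the rb-tree root tracking pages of this function, allocating it
 * and inserting it into page_root_xa on first use.
 */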
static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
{
	struct rb_root *root;
	int err;

	root = xa_load(&dev->priv.page_root_xa, function);
	if (root)
		return root;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	err = xa_insert(&dev->priv.page_root_xa, function, root, GFP_KERNEL);
	if (err) {
		kfree(root);
		return ERR_PTR(err);
	}

	*root = RB_ROOT;

	return root;
}

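/* Track a freshly mapped system page in the per-function rb-tree, keyed by
 * its DMA address, with all of its 4K chunks marked free.
 */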
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u32 function)
{
	struct rb_node *parent = NULL;
	struct rb_root *root;
	struct rb_node **new;
	struct fw_page *nfp;
	struct fw_page *tfp;
	int i;

	root = page_root_per_function(dev, function);
	if (IS_ERR(root))
		return PTR_ERR(root);

	new = &root->rb_node;

	while (*new) {
		parent = *new;
		tfp = rb_entry(parent, struct fw_page, rb_node);
		if (tfp->addr < addr)
			new = &parent->rb_right;
		else if (tfp->addr > addr)
			new = &parent->rb_left;
		else
			return -EEXIST;
	}

	nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
	if (!nfp)
		return -ENOMEM;

	nfp->addr = addr;
	nfp->page = page;
	nfp->function = function;
	nfp->free_count = MLX5_NUM_4K_IN_PAGE;
	for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
		set_bit(i, &nfp->bitmask);

	rb_link_node(&nfp->rb_node, parent, new);
	rb_insert_color(&nfp->rb_node, root);
	list_add(&nfp->list, &dev->priv.free_list);

	return 0;
}

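/* Look up the fw_page that covers the given page-aligned DMA address for
 * this function.
 */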
static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr,
				    u32 function)
{
	struct fw_page *result = NULL;
	struct rb_root *root;
	struct rb_node *tmp;
	struct fw_page *tfp;

	root = xa_load(&dev->priv.page_root_xa, function);
	if (WARN_ON_ONCE(!root))
		return NULL;

	tmp = root->rb_node;

	while (tmp) {
		tfp = rb_entry(tmp, struct fw_page, rb_node);
		if (tfp->addr < addr) {
			tmp = tmp->rb_right;
		} else if (tfp->addr > addr) {
			tmp = tmp->rb_left;
		} else {
			result = tfp;
			break;
		}
	}

	return result;
}

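/* Ask firmware how many pages it needs at this stage (boot or init) and
 * which function they are for.
 */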
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
				s32 *npages, int boot)
{
	u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {};
	int err;

	MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
	MLX5_SET(query_pages_in, in, op_mod, boot ?
		 MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
		 MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
	MLX5_SET(query_pages_in, in, embedded_cpu_function, mlx5_core_is_ecpf(dev));

	err = mlx5_cmd_exec_inout(dev, query_pages, in, out);
	if (err)
		return err;

	*npages = MLX5_GET(query_pages_out, out, num_pages);
	*func_id = MLX5_GET(query_pages_out, out, function_id);

	return err;
}

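/* Hand out one free 4K chunk belonging to this function from the free list
 * and return its DMA address; -ENOMEM means the caller must first allocate
 * a new system page.
 */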
static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u32 function)
{
	struct fw_page *fp = NULL;
	struct fw_page *iter;
	unsigned n;

	list_for_each_entry(iter, &dev->priv.free_list, list) {
		if (iter->function != function)
			continue;
		fp = iter;
	}

	if (list_empty(&dev->priv.free_list) || !fp)
		return -ENOMEM;

	n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
	if (n >= MLX5_NUM_4K_IN_PAGE) {
		mlx5_core_warn(dev, "alloc 4k bug: fw page = 0x%llx, n = %u, bitmask: %lu, max num of 4K pages: %d\n",
			       fp->addr, n, fp->bitmask,  MLX5_NUM_4K_IN_PAGE);
		return -ENOENT;
	}
	clear_bit(n, &fp->bitmask);
	fp->free_count--;
	if (!fp->free_count)
		list_del(&fp->list);

	*addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;

	return 0;
}

#define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT)

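/* Stop tracking a fw_page: unlink it from the rb-tree (and the free list if
 * needed), unmap its DMA mapping and release the system page.
 */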
static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
		     bool in_free_list)
{
	struct rb_root *root;

	root = xa_load(&dev->priv.page_root_xa, fwp->function);
	if (WARN_ON_ONCE(!root))
		return;

	rb_erase(&fwp->rb_node, root);
	if (in_free_list)
		list_del(&fwp->list);
	dma_unmap_page(mlx5_core_dma_dev(dev), fwp->addr & MLX5_U64_4K_PAGE_MASK,
		       PAGE_SIZE, DMA_BIDIRECTIONAL);
	__free_page(fwp->page);
	kfree(fwp);
}

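/* Return one 4K chunk to its fw_page; once every chunk of the page is free
 * again, release the whole system page back to the kernel.
 */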
static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 function)
{
	struct fw_page *fwp;
	int n;

	fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, function);
	if (!fwp) {
		mlx5_core_warn_rl(dev, "page not found\n");
		return;
	}
	n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
	fwp->free_count++;
	set_bit(n, &fwp->bitmask);
	if (fwp->free_count == MLX5_NUM_4K_IN_PAGE)
		free_fwp(dev, fwp, fwp->free_count != 1);
	else if (fwp->free_count == 1)
		list_add(&fwp->list, &dev->priv.free_list);
}

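/* Allocate and DMA-map one system page for this function and start tracking
 * it. A mapping at bus address 0 is redone, since firmware cannot use a
 * page at address 0.
 */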
static int alloc_system_page(struct mlx5_core_dev *dev, u32 function)
{
	struct device *device = mlx5_core_dma_dev(dev);
	int nid = dev_to_node(device);
	struct page *page;
	u64 zero_addr = 1;
	u64 addr;
	int err;

	page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
	if (!page) {
		mlx5_core_warn(dev, "failed to allocate page\n");
		return -ENOMEM;
	}
map:
	addr = dma_map_page(device, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(device, addr)) {
		mlx5_core_warn(dev, "failed dma mapping page\n");
		err = -ENOMEM;
		goto err_mapping;
	}

	/* Firmware doesn't support page with physical address 0 */
	if (addr == 0) {
		zero_addr = addr;
		goto map;
	}

	err = insert_page(dev, addr, page, function);
	if (err) {
		mlx5_core_err(dev, "failed to track allocated page\n");
		dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
	}

err_mapping:
	if (err)
		__free_page(page);

	if (zero_addr == 0)
		dma_unmap_page(device, zero_addr, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);

	return err;
}

static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
			     bool ec_function)
{
	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
	int err;

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

	err = mlx5_cmd_exec_in(dev, manage_pages, in);
	if (err)
		mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n",
			       func_id, err);
}

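/* Allocate npages 4K pages for func_id and post them to firmware with a
 * MANAGE_PAGES(GIVE) command, rolling the allocation back on failure.
 */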
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
		      int event, bool ec_function)
{
	u32 function = get_function(func_id, ec_function);
	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
	int notify_fail = event;
	u16 func_type;
	u64 addr;
	int err;
	u32 *in;
	int i;

	inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
		goto out_free;
	}

	for (i = 0; i < npages; i++) {
retry:
		err = alloc_4k(dev, &addr, function);
		if (err) {
			if (err == -ENOMEM)
				err = alloc_system_page(dev, function);
			if (err) {
				dev->priv.fw_pages_alloc_failed += (npages - i);
				goto out_4k;
			}

			goto retry;
		}
		MLX5_ARRAY_SET64(manage_pages_in, in, pas, i, addr);
	}

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

	err = mlx5_cmd_do(dev, in, inlen, out, sizeof(out));
	if (err == -EREMOTEIO) {
		notify_fail = 0;
		/* if triggered by FW and failed by FW ignore */
		if (event) {
			err = 0;
			goto out_dropped;
		}
	}
	err = mlx5_cmd_check(dev, err, in, out);
	if (err) {
		mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
			       func_id, npages, err);
		goto out_dropped;
	}

	func_type = func_id_to_type(dev, func_id, ec_function);
	dev->priv.page_counters[func_type] += npages;
	dev->priv.fw_pages += npages;

	mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n",
		      npages, ec_function, func_id, err);

	kvfree(in);
	return 0;

out_dropped:
	dev->priv.give_pages_dropped += npages;
out_4k:
	for (i--; i >= 0; i--)
		free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), function);
out_free:
	kvfree(in);
	if (notify_fail)
		page_notify_fail(dev, func_id, ec_function);
	return err;
}

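/* Free every page tracked for func_id without issuing firmware commands;
 * used when firmware asks the host to release all of its pages at once.
 */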
static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
			      bool ec_function)
{
	u32 function = get_function(func_id, ec_function);
	struct rb_root *root;
	struct rb_node *p;
	int npages = 0;
	u16 func_type;

	root = xa_load(&dev->priv.page_root_xa, function);
	if (WARN_ON_ONCE(!root))
		return;

	p = rb_first(root);
	while (p) {
		struct fw_page *fwp = rb_entry(p, struct fw_page, rb_node);

		p = rb_next(p);
		npages += (MLX5_NUM_4K_IN_PAGE - fwp->free_count);
		free_fwp(dev, fwp, fwp->free_count);
	}

	func_type = func_id_to_type(dev, func_id, ec_function);
	dev->priv.page_counters[func_type] -= npages;
	dev->priv.fw_pages -= npages;

	mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n",
		      npages, ec_function, func_id);
}

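/* Fill the reclaim output array with the in-use 4K addresses of this
 * fw_page, starting at index and writing at most npages entries.
 */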
static u32 fwp_fill_manage_pages_out(struct fw_page *fwp, u32 *out, u32 index,
				     u32 npages)
{
	u32 pages_set = 0;
	unsigned int n;

	for_each_clear_bit(n, &fwp->bitmask, MLX5_NUM_4K_IN_PAGE) {
		MLX5_ARRAY_SET64(manage_pages_out, out, pas, index + pages_set,
				 fwp->addr + (n * MLX5_ADAPTER_PAGE_SIZE));
		pages_set++;

		if (!--npages)
			break;
	}

	return pages_set;
}

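/* Issue the reclaim command or, if the command interface is down,
 * synthesize the reply locally so the pages can still be freed.
 */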
static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
			     u32 *in, int in_size, u32 *out, int out_size)
{
	struct rb_root *root;
	struct fw_page *fwp;
	struct rb_node *p;
	bool ec_function;
	u32 func_id;
	u32 npages;
	u32 i = 0;

	if (!mlx5_cmd_is_down(dev))
		return mlx5_cmd_do(dev, in, in_size, out, out_size);

	/* No hard feelings, we want our pages back! */
	npages = MLX5_GET(manage_pages_in, in, input_num_entries);
	func_id = MLX5_GET(manage_pages_in, in, function_id);
	ec_function = MLX5_GET(manage_pages_in, in, embedded_cpu_function);

	root = xa_load(&dev->priv.page_root_xa, get_function(func_id, ec_function));
	if (WARN_ON_ONCE(!root))
		return -EEXIST;

	p = rb_first(root);
	while (p && i < npages) {
		fwp = rb_entry(p, struct fw_page, rb_node);
		p = rb_next(p);

		i += fwp_fill_manage_pages_out(fwp, out, i, npages - i);
	}

	MLX5_SET(manage_pages_out, out, output_num_entries, i);
	return 0;
}

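/* Reclaim up to npages pages from firmware for func_id and release the
 * corresponding 4K chunks back to the allocator.
 */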
static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
			 int *nclaimed, bool event, bool ec_function)
{
	u32 function = get_function(func_id, ec_function);
	int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
	int num_claimed;
	u16 func_type;
	u32 *out;
	int err;
	int i;

	if (nclaimed)
		*nclaimed = 0;

	outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

	mlx5_core_dbg(dev, "func 0x%x, npages %d, outlen %d\n",
		      func_id, npages, outlen);
	err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
	if (err) {
		npages = MLX5_GET(manage_pages_in, in, input_num_entries);
		dev->priv.reclaim_pages_discard += npages;
	}
	/* if triggered by FW event and failed by FW then ignore */
	if (event && err == -EREMOTEIO) {
		err = 0;
		goto out_free;
	}

	err = mlx5_cmd_check(dev, err, in, out);
	if (err) {
		mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
		goto out_free;
	}

	num_claimed = MLX5_GET(manage_pages_out, out, output_num_entries);
	if (num_claimed > npages) {
		mlx5_core_warn(dev, "fw returned %d, driver asked %d => corruption\n",
			       num_claimed, npages);
		err = -EINVAL;
		goto out_free;
	}

	for (i = 0; i < num_claimed; i++)
		free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), function);

	if (nclaimed)
		*nclaimed = num_claimed;

	func_type = func_id_to_type(dev, func_id, ec_function);
	dev->priv.page_counters[func_type] -= num_claimed;
	dev->priv.fw_pages -= num_claimed;

out_free:
	kvfree(out);
	return err;
}

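/* Work item servicing a firmware page request: give, reclaim or release
 * all pages depending on the queued request.
 */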
static void pages_work_handler(struct work_struct *work)
{
	struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
	struct mlx5_core_dev *dev = req->dev;
	int err = 0;

	if (req->release_all)
		release_all_pages(dev, req->func_id, req->ec_function);
	else if (req->npages < 0)
		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL,
				    true, req->ec_function);
	else if (req->npages > 0)
		err = give_pages(dev, req->func_id, req->npages, 1, req->ec_function);

	if (err)
		mlx5_core_warn(dev, "%s fail %d\n",
			       req->npages < 0 ? "reclaim" : "give", err);

	kfree(req);
}

enum {
	EC_FUNCTION_MASK = 0x8000,
	RELEASE_ALL_PAGES_MASK = 0x4000,
};

/* This limit is based on the capability of the firmware as it cannot release
 * more than 50000 back to the host in one go.
 */
#define MAX_RECLAIM_NPAGES (-50000)

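/* PAGE_REQUEST event handler: decode the EQE and queue a work item, since
 * the actual page operations may sleep.
 */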
static int req_pages_handler(struct notifier_block *nb,
			     unsigned long type, void *data)
{
	struct mlx5_pages_req *req;
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	struct mlx5_eqe *eqe;
	bool ec_function;
	bool release_all;
	u16 func_id;
	s32 npages;

	priv = mlx5_nb_cof(nb, struct mlx5_priv, pg_nb);
	dev  = container_of(priv, struct mlx5_core_dev, priv);
	eqe  = data;

	func_id = be16_to_cpu(eqe->data.req_pages.func_id);
	npages  = be32_to_cpu(eqe->data.req_pages.num_pages);
	ec_function = be16_to_cpu(eqe->data.req_pages.ec_function) & EC_FUNCTION_MASK;
	release_all = be16_to_cpu(eqe->data.req_pages.ec_function) &
		      RELEASE_ALL_PAGES_MASK;
	mlx5_core_dbg(dev, "page request for func 0x%x, npages %d, release_all %d\n",
		      func_id, npages, release_all);
	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		mlx5_core_warn(dev, "failed to allocate pages request\n");
		return NOTIFY_DONE;
	}

	req->dev = dev;
	req->func_id = func_id;

	/* npages > 0 means HCA asking host to allocate/give pages,
	 * npages < 0 means HCA asking host to reclaim back the pages allocated.
	 * Here we are restricting the maximum number of pages that can be
	 * reclaimed to be MAX_RECLAIM_NPAGES. Note that MAX_RECLAIM_NPAGES is
	 * a negative value.
	 * Since MAX_RECLAIM is negative, we are using max() to restrict
	 * req->npages (and not min ()).
	 */
	req->npages = max_t(s32, npages, MAX_RECLAIM_NPAGES);
	req->ec_function = ec_function;
	req->release_all = release_all;
	INIT_WORK(&req->work, pages_work_handler);
	queue_work(dev->priv.pg_wq, &req->work);
	return NOTIFY_OK;
}

int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
	u16 func_id;
	s32 npages;
	int err;

	err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
	if (err)
		return err;

	mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
		      npages, boot ? "boot" : "init", func_id);

	if (!npages)
		return 0;

	return give_pages(dev, func_id, npages, 0, mlx5_core_is_ecpf(dev));
}

enum {
	MLX5_BLKS_FOR_RECLAIM_PAGES = 12
};

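/* Number of page addresses that fit in a reclaim command output built from
 * MLX5_BLKS_FOR_RECLAIM_PAGES mailbox blocks.
 */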
static int optimal_reclaimed_pages(void)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_layout *lay;
	int ret;

	ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
	       MLX5_ST_SZ_BYTES(manage_pages_out)) /
	       MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);

	return ret;
}

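/* Reclaim every page tracked in this root, retrying until the tree is empty
 * or firmware stops returning pages within the reclaim timeout.
 */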
static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
				   struct rb_root *root, u32 function)
{
	u64 recl_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_PAGES));
	unsigned long end = jiffies + recl_pages_to_jiffies;

	while (!RB_EMPTY_ROOT(root)) {
		u32 ec_function = mlx5_get_ec_function(function);
		u32 function_id = mlx5_get_func_id(function);
		int nclaimed;
		int err;

		err = reclaim_pages(dev, function_id, optimal_reclaimed_pages(),
				    &nclaimed, false, ec_function);
		if (err) {
			mlx5_core_warn(dev, "reclaim_pages err (%d) func_id=0x%x ec_func=0x%x\n",
				       err, function_id, ec_function);
			return err;
		}

		if (nclaimed)
			end = jiffies + recl_pages_to_jiffies;

		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
			break;
		}
	}

	return 0;
}

int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
	struct rb_root *root;
	unsigned long id;
	void *entry;

	xa_for_each(&dev->priv.page_root_xa, id, entry) {
		root = entry;
		mlx5_reclaim_root_pages(dev, root, id);
		xa_erase(&dev->priv.page_root_xa, id);
		kfree(root);
	}

	WARN_ON(!xa_empty(&dev->priv.page_root_xa));

	WARN(dev->priv.fw_pages,
	     "FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.fw_pages);
	WARN(dev->priv.page_counters[MLX5_VF],
	     "VFs FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.page_counters[MLX5_VF]);
	WARN(dev->priv.page_counters[MLX5_HOST_PF],
	     "External host PF FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.page_counters[MLX5_HOST_PF]);
	WARN(dev->priv.page_counters[MLX5_EC_VF],
	     "EC VFs FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.page_counters[MLX5_EC_VF]);

	return 0;
}

int mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
	INIT_LIST_HEAD(&dev->priv.free_list);
	dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
	if (!dev->priv.pg_wq)
		return -ENOMEM;

	xa_init(&dev->priv.page_root_xa);
	mlx5_pages_debugfs_init(dev);

	return 0;
}

void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
	mlx5_pages_debugfs_cleanup(dev);
	xa_destroy(&dev->priv.page_root_xa);
	destroy_workqueue(dev->priv.pg_wq);
}

void mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
	MLX5_NB_INIT(&dev->priv.pg_nb, req_pages_handler, PAGE_REQUEST);
	mlx5_eq_notifier_register(dev, &dev->priv.pg_nb);
}

void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
	mlx5_eq_notifier_unregister(dev, &dev->priv.pg_nb);
	flush_workqueue(dev->priv.pg_wq);
}

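/* Wait for the given page counter to drop to zero, extending the timeout as
 * long as progress is being made.
 */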
int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
{
	u64 recl_vf_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_VFS_PAGES));
	unsigned long end = jiffies + recl_vf_pages_to_jiffies;
	int prev_pages = *pages;

	/* In case of internal error we will free the pages manually later */
	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5_core_warn(dev, "Skipping wait for vf pages stage");
		return 0;
	}

	mlx5_core_dbg(dev, "Waiting for %d pages\n", prev_pages);
	while (*pages) {
		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "aborting while there are %d pending pages\n", *pages);
			return -ETIMEDOUT;
		}
		if (*pages < prev_pages) {
			end = jiffies + recl_vf_pages_to_jiffies;
			prev_pages = *pages;
		}
		msleep(50);
	}

	mlx5_core_dbg(dev, "All pages received\n");
	return 0;
}