/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/xarray.h>
#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/tout.h"

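/* MANAGE_PAGES op_mod values: 0 - driver cannot supply the pages FW
 * asked for, 1 - driver gives pages to FW, 2 - driver reclaims pages
 * from FW.
 */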
enum {
	MLX5_PAGES_CANT_GIVE	= 0,
	MLX5_PAGES_GIVE		= 1,
	MLX5_PAGES_TAKE		= 2
};

struct mlx5_pages_req {
	struct mlx5_core_dev *dev;
	u16	func_id;
	u8	ec_function;
	s32	npages;
	struct work_struct work;
	u8	release_all;
};

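/* One system page handed to FW, tracked in a per-function rb-tree keyed
 * by DMA address. The page is carved into MLX5_NUM_4K_IN_PAGE 4K chunks;
 * a set bit in @bitmask marks a chunk that is still free. Pages with at
 * least one free chunk are linked on dev->priv.free_list.
 */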
struct fw_page {
	struct rb_node		rb_node;
	u64			addr;
	struct page	       *page;
	u32			function;
	unsigned long		bitmask;
	struct list_head	list;
	unsigned int free_count;
};

enum {
	MLX5_MAX_RECLAIM_TIME_MILI	= 5000,
	MLX5_NUM_4K_IN_PAGE		= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};

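/* Pack function_id and the embedded-CPU flag into the single key used
 * for page_root_xa lookups and fw_page ownership checks.
 */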
static u32 get_function(u16 func_id, bool ec_function)
{
	return (u32)func_id | (ec_function << 16);
}

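/* Map a function_id to the page counter bucket it is accounted under.
 * func_id 0 is the local PF, or the external host PF when the local
 * device is the embedded CPU PF acting on the host's behalf.
 */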
static u16 func_id_to_type(struct mlx5_core_dev *dev, u16 func_id, bool ec_function)
{
	if (!func_id)
		return mlx5_core_is_ecpf(dev) && !ec_function ? MLX5_HOST_PF : MLX5_PF;

	return func_id <= mlx5_core_max_vfs(dev) ? MLX5_VF : MLX5_SF;
}

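/* Return the rb-tree root for @function, allocating it and publishing
 * it in page_root_xa on first use.
 */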
static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
{
	struct rb_root *root;
	int err;

	root = xa_load(&dev->priv.page_root_xa, function);
	if (root)
		return root;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	err = xa_insert(&dev->priv.page_root_xa, function, root, GFP_KERNEL);
	if (err) {
		kfree(root);
		return ERR_PTR(err);
	}

	*root = RB_ROOT;

	return root;
}

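/* Track a freshly DMA-mapped system page in the per-function tree. All
 * of its 4K chunks start out free, so the page also goes on the free
 * list.
 */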
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u32 function)
{
	struct rb_node *parent = NULL;
	struct rb_root *root;
	struct rb_node **new;
	struct fw_page *nfp;
	struct fw_page *tfp;
	int i;

	root = page_root_per_function(dev, function);
	if (IS_ERR(root))
		return PTR_ERR(root);

	new = &root->rb_node;

	while (*new) {
		parent = *new;
		tfp = rb_entry(parent, struct fw_page, rb_node);
		if (tfp->addr < addr)
			new = &parent->rb_right;
		else if (tfp->addr > addr)
			new = &parent->rb_left;
		else
			return -EEXIST;
	}

	nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
	if (!nfp)
		return -ENOMEM;

	nfp->addr = addr;
	nfp->page = page;
	nfp->function = function;
	nfp->free_count = MLX5_NUM_4K_IN_PAGE;
	for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
		set_bit(i, &nfp->bitmask);

	rb_link_node(&nfp->rb_node, parent, new);
	rb_insert_color(&nfp->rb_node, root);
	list_add(&nfp->list, &dev->priv.free_list);

	return 0;
}

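/* Find the fw_page whose tracked (system-page-aligned) DMA address is
 * @addr in the tree of @function.
 */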
static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr,
				    u32 function)
{
	struct fw_page *result = NULL;
	struct rb_root *root;
	struct rb_node *tmp;
	struct fw_page *tfp;

	root = xa_load(&dev->priv.page_root_xa, function);
	if (WARN_ON_ONCE(!root))
		return NULL;

	tmp = root->rb_node;

	while (tmp) {
		tfp = rb_entry(tmp, struct fw_page, rb_node);
		if (tfp->addr < addr) {
			tmp = tmp->rb_right;
		} else if (tfp->addr > addr) {
			tmp = tmp->rb_left;
		} else {
			result = tfp;
			break;
		}
	}

	return result;
}

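/* Query FW for how many pages it needs at this stage (boot or init),
 * and for which function.
 */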
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
				s32 *npages, int boot)
{
	u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {};
	int err;

	MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
	MLX5_SET(query_pages_in, in, op_mod, boot ?
		 MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
		 MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
	MLX5_SET(query_pages_in, in, embedded_cpu_function, mlx5_core_is_ecpf(dev));

	err = mlx5_cmd_exec_inout(dev, query_pages, in, out);
	if (err)
		return err;

	*npages = MLX5_GET(query_pages_out, out, num_pages);
	*func_id = MLX5_GET(query_pages_out, out, function_id);

	return err;
}

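/* Carve one free 4K chunk out of a page already owned by @function.
 * Returns -ENOMEM when no such page is on the free list, in which case
 * the caller is expected to allocate a fresh system page.
 */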
static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u32 function)
{
	struct fw_page *fp = NULL;
	struct fw_page *iter;
	unsigned int n;

	list_for_each_entry(iter, &dev->priv.free_list, list) {
		if (iter->function != function)
			continue;
		fp = iter;
	}

	if (list_empty(&dev->priv.free_list) || !fp)
		return -ENOMEM;

	n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
	if (n >= MLX5_NUM_4K_IN_PAGE) {
		mlx5_core_warn(dev, "alloc 4k bug: fw page = 0x%llx, n = %u, bitmask: %lu, max num of 4K pages: %d\n",
			       fp->addr, n, fp->bitmask, MLX5_NUM_4K_IN_PAGE);
		return -ENOENT;
	}
	clear_bit(n, &fp->bitmask);
	fp->free_count--;
	if (!fp->free_count)
		list_del(&fp->list);

	*addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;

	return 0;
}

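/* Round a 4K chunk address down to the start of its backing system page. */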
#define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT)

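/* Unmap and free a system page once FW no longer references any of its
 * 4K chunks, dropping it from the tree (and the free list if linked).
 */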
static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
		     bool in_free_list)
{
	struct rb_root *root;

	root = xa_load(&dev->priv.page_root_xa, fwp->function);
	if (WARN_ON_ONCE(!root))
		return;

	rb_erase(&fwp->rb_node, root);
	if (in_free_list)
		list_del(&fwp->list);
	dma_unmap_page(mlx5_core_dma_dev(dev), fwp->addr & MLX5_U64_4K_PAGE_MASK,
		       PAGE_SIZE, DMA_BIDIRECTIONAL);
	__free_page(fwp->page);
	kfree(fwp);
}

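/* Return one 4K chunk to its backing fw_page. The page moves onto the
 * free list when its first chunk frees up, and is released entirely
 * once all of its chunks are free.
 */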
static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 function)
{
	struct fw_page *fwp;
	int n;

	fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, function);
	if (!fwp) {
		mlx5_core_warn_rl(dev, "page not found\n");
		return;
	}
	n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
	fwp->free_count++;
	set_bit(n, &fwp->bitmask);
	if (fwp->free_count == MLX5_NUM_4K_IN_PAGE)
		free_fwp(dev, fwp, fwp->free_count != 1);
	else if (fwp->free_count == 1)
		list_add(&fwp->list, &dev->priv.free_list);
}

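/* Allocate a system page near the device's NUMA node, DMA map it and
 * start tracking it for @function. A mapping at DMA address 0 is held
 * aside and retried, since FW cannot use a page at physical address 0.
 */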
static int alloc_system_page(struct mlx5_core_dev *dev, u32 function)
{
	struct device *device = mlx5_core_dma_dev(dev);
	int nid = dev_to_node(device);
	struct page *page;
	u64 zero_addr = 1;
	u64 addr;
	int err;

	page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
	if (!page) {
		mlx5_core_warn(dev, "failed to allocate page\n");
		return -ENOMEM;
	}
map:
	addr = dma_map_page(device, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(device, addr)) {
		mlx5_core_warn(dev, "failed dma mapping page\n");
		err = -ENOMEM;
		goto err_mapping;
	}

	/* Firmware doesn't support page with physical address 0 */
	if (addr == 0) {
		zero_addr = addr;
		goto map;
	}

	err = insert_page(dev, addr, page, function);
	if (err) {
		mlx5_core_err(dev, "failed to track allocated page\n");
		dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
	}

err_mapping:
	if (err)
		__free_page(page);

	if (zero_addr == 0)
		dma_unmap_page(device, zero_addr, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);

	return err;
}

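/* Tell FW we cannot supply the pages it asked for. */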
static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id,
			     bool ec_function)
{
	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
	int err;

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

	err = mlx5_cmd_exec_in(dev, manage_pages, in);
	if (err)
		mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n",
			       func_id, err);
}

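/* Give @npages 4K pages to FW on behalf of @func_id. Chunks come from
 * partially used pages first; fresh system pages are allocated as
 * needed. @event is set when the request came from a FW page-request
 * event rather than from the init flow.
 */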
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
		      int event, bool ec_function)
{
	u32 function = get_function(func_id, ec_function);
	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
	int notify_fail = event;
	u16 func_type;
	u64 addr;
	int err;
	u32 *in;
	int i;

	inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
		goto out_free;
	}

	for (i = 0; i < npages; i++) {
retry:
		err = alloc_4k(dev, &addr, function);
		if (err) {
			if (err == -ENOMEM)
				err = alloc_system_page(dev, function);
			if (err) {
				dev->priv.fw_pages_alloc_failed += (npages - i);
				goto out_4k;
			}

			goto retry;
		}
		MLX5_ARRAY_SET64(manage_pages_in, in, pas, i, addr);
	}

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

	err = mlx5_cmd_do(dev, in, inlen, out, sizeof(out));
	if (err == -EREMOTEIO) {
		notify_fail = 0;
		/* if triggered by FW and failed by FW ignore */
		if (event) {
			err = 0;
			goto out_dropped;
		}
	}
	err = mlx5_cmd_check(dev, err, in, out);
	if (err) {
		mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
			       func_id, npages, err);
		goto out_dropped;
	}

	func_type = func_id_to_type(dev, func_id, ec_function);
	dev->priv.page_counters[func_type] += npages;
	dev->priv.fw_pages += npages;

	mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n",
		      npages, ec_function, func_id, err);

	kvfree(in);
	return 0;

out_dropped:
	dev->priv.give_pages_dropped += npages;
out_4k:
	for (i--; i >= 0; i--)
		free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), function);
out_free:
	kvfree(in);
	if (notify_fail)
		page_notify_fail(dev, func_id, ec_function);
	return err;
}

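/* Free all pages tracked for a function without issuing a FW command;
 * used when FW signals RELEASE_ALL_PAGES in a page-request event.
 */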
static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
			      bool ec_function)
{
	u32 function = get_function(func_id, ec_function);
	struct rb_root *root;
	struct rb_node *p;
	int npages = 0;
	u16 func_type;

	root = xa_load(&dev->priv.page_root_xa, function);
	if (WARN_ON_ONCE(!root))
		return;

	p = rb_first(root);
	while (p) {
		struct fw_page *fwp = rb_entry(p, struct fw_page, rb_node);

		p = rb_next(p);
		npages += (MLX5_NUM_4K_IN_PAGE - fwp->free_count);
		free_fwp(dev, fwp, fwp->free_count);
	}

	func_type = func_id_to_type(dev, func_id, ec_function);
	dev->priv.page_counters[func_type] -= npages;
	dev->priv.fw_pages -= npages;

	mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n",
		      npages, ec_function, func_id);
}

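/* Report the in-use (cleared-bit) chunks of @fwp as reclaimed pages in
 * the MANAGE_PAGES output, starting at entry @index; returns how many
 * entries were filled.
 */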
static u32 fwp_fill_manage_pages_out(struct fw_page *fwp, u32 *out, u32 index,
				     u32 npages)
{
	u32 pages_set = 0;
	unsigned int n;

	for_each_clear_bit(n, &fwp->bitmask, MLX5_NUM_4K_IN_PAGE) {
		MLX5_ARRAY_SET64(manage_pages_out, out, pas, index + pages_set,
				 fwp->addr + (n * MLX5_ADAPTER_PAGE_SIZE));
		pages_set++;

		if (!--npages)
			break;
	}

	return pages_set;
}

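/* Issue the reclaim command, or, when the command interface is down,
 * synthesize its output locally so the driver can take its pages back
 * without FW.
 */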
static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
			     u32 *in, int in_size, u32 *out, int out_size)
{
	struct rb_root *root;
	struct fw_page *fwp;
	struct rb_node *p;
	bool ec_function;
	u32 func_id;
	u32 npages;
	u32 i = 0;

	if (!mlx5_cmd_is_down(dev))
		return mlx5_cmd_do(dev, in, in_size, out, out_size);

	/* No hard feelings, we want our pages back! */
	npages = MLX5_GET(manage_pages_in, in, input_num_entries);
	func_id = MLX5_GET(manage_pages_in, in, function_id);
	ec_function = MLX5_GET(manage_pages_in, in, embedded_cpu_function);

	root = xa_load(&dev->priv.page_root_xa, get_function(func_id, ec_function));
	if (WARN_ON_ONCE(!root))
		return -EEXIST;

	p = rb_first(root);
	while (p && i < npages) {
		fwp = rb_entry(p, struct fw_page, rb_node);
		p = rb_next(p);

		i += fwp_fill_manage_pages_out(fwp, out, i, npages - i);
	}

	MLX5_SET(manage_pages_out, out, output_num_entries, i);
	return 0;
}

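/* Reclaim up to @npages pages from FW for @func_id and free the
 * returned 4K chunks. @nclaimed, when set, reports how many entries FW
 * actually returned.
 */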
static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
			 int *nclaimed, bool event, bool ec_function)
{
	u32 function = get_function(func_id, ec_function);
	int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
	int num_claimed;
	u16 func_type;
	u32 *out;
	int err;
	int i;

	if (nclaimed)
		*nclaimed = 0;

	outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
	MLX5_SET(manage_pages_in, in, embedded_cpu_function, ec_function);

	mlx5_core_dbg(dev, "func 0x%x, npages %d, outlen %d\n",
		      func_id, npages, outlen);
	err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
	if (err) {
		npages = MLX5_GET(manage_pages_in, in, input_num_entries);
		dev->priv.reclaim_pages_discard += npages;
	}
	/* if triggered by FW event and failed by FW then ignore */
	if (event && err == -EREMOTEIO) {
		err = 0;
		goto out_free;
	}

	err = mlx5_cmd_check(dev, err, in, out);
	if (err) {
		mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
		goto out_free;
	}

	num_claimed = MLX5_GET(manage_pages_out, out, output_num_entries);
	if (num_claimed > npages) {
		mlx5_core_warn(dev, "fw returned %d, driver asked %d => corruption\n",
			       num_claimed, npages);
		err = -EINVAL;
		goto out_free;
	}

	for (i = 0; i < num_claimed; i++)
		free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), function);

	if (nclaimed)
		*nclaimed = num_claimed;

	func_type = func_id_to_type(dev, func_id, ec_function);
	dev->priv.page_counters[func_type] -= num_claimed;
	dev->priv.fw_pages -= num_claimed;

out_free:
	kvfree(out);
	return err;
}

static void pages_work_handler(struct work_struct *work)
{
	struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
	struct mlx5_core_dev *dev = req->dev;
	int err = 0;

	if (req->release_all)
		release_all_pages(dev, req->func_id, req->ec_function);
	else if (req->npages < 0)
		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL,
				    true, req->ec_function);
	else if (req->npages > 0)
		err = give_pages(dev, req->func_id, req->npages, 1, req->ec_function);

	if (err)
		mlx5_core_warn(dev, "%s fail %d\n",
			       req->npages < 0 ? "reclaim" : "give", err);

	kfree(req);
}

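/* Flags FW encodes in the ec_function field of a page-request EQE. */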
enum {
	EC_FUNCTION_MASK = 0x8000,
	RELEASE_ALL_PAGES_MASK = 0x4000,
};

static int req_pages_handler(struct notifier_block *nb,
			     unsigned long type, void *data)
{
	struct mlx5_pages_req *req;
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	struct mlx5_eqe *eqe;
	bool ec_function;
	bool release_all;
	u16 func_id;
	s32 npages;

	priv = mlx5_nb_cof(nb, struct mlx5_priv, pg_nb);
	dev  = container_of(priv, struct mlx5_core_dev, priv);
	eqe  = data;

	func_id = be16_to_cpu(eqe->data.req_pages.func_id);
	npages  = be32_to_cpu(eqe->data.req_pages.num_pages);
	ec_function = be16_to_cpu(eqe->data.req_pages.ec_function) & EC_FUNCTION_MASK;
	release_all = be16_to_cpu(eqe->data.req_pages.ec_function) &
		      RELEASE_ALL_PAGES_MASK;
	mlx5_core_dbg(dev, "page request for func 0x%x, npages %d, release_all %d\n",
		      func_id, npages, release_all);
	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		mlx5_core_warn(dev, "failed to allocate pages request\n");
		return NOTIFY_DONE;
	}

	req->dev = dev;
	req->func_id = func_id;
	req->npages = npages;
	req->ec_function = ec_function;
	req->release_all = release_all;
	INIT_WORK(&req->work, pages_work_handler);
	queue_work(dev->priv.pg_wq, &req->work);
	return NOTIFY_OK;
}

int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
	u16 func_id;
	s32 npages;
	int err;

	err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
	if (err)
		return err;

	mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
		      npages, boot ? "boot" : "init", func_id);

	return give_pages(dev, func_id, npages, 0, mlx5_core_is_ecpf(dev));
}

enum {
	MLX5_BLKS_FOR_RECLAIM_PAGES = 12
};

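/* Best-effort reclaim batch size: how many page addresses fit in the
 * command layout output plus MLX5_BLKS_FOR_RECLAIM_PAGES mailbox data
 * blocks.
 */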
static int optimal_reclaimed_pages(void)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_layout *lay;
	int ret;

	ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
	       MLX5_ST_SZ_BYTES(manage_pages_out)) /
	       MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);

	return ret;
}

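/* Drain one per-function tree, reclaiming in batches until FW has
 * returned every page. The timeout restarts whenever a batch makes
 * progress.
 */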
static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
				   struct rb_root *root, u16 func_id)
{
	u64 recl_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_PAGES));
	unsigned long end = jiffies + recl_pages_to_jiffies;

	while (!RB_EMPTY_ROOT(root)) {
		int nclaimed;
		int err;

		err = reclaim_pages(dev, func_id, optimal_reclaimed_pages(),
				    &nclaimed, false, mlx5_core_is_ecpf(dev));
		if (err) {
			mlx5_core_warn(dev, "failed reclaiming pages (%d) for func id 0x%x\n",
				       err, func_id);
			return err;
		}

		if (nclaimed)
			end = jiffies + recl_pages_to_jiffies;

		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
			break;
		}
	}

	return 0;
}

int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
	struct rb_root *root;
	unsigned long id;
	void *entry;

	xa_for_each(&dev->priv.page_root_xa, id, entry) {
		root = entry;
		mlx5_reclaim_root_pages(dev, root, id);
		xa_erase(&dev->priv.page_root_xa, id);
		kfree(root);
	}

	WARN_ON(!xa_empty(&dev->priv.page_root_xa));

	WARN(dev->priv.fw_pages,
	     "FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.fw_pages);
	WARN(dev->priv.page_counters[MLX5_VF],
	     "VFs FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.page_counters[MLX5_VF]);
	WARN(dev->priv.page_counters[MLX5_HOST_PF],
	     "External host PF FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.page_counters[MLX5_HOST_PF]);

	return 0;
}

int mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
	INIT_LIST_HEAD(&dev->priv.free_list);
	dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
	if (!dev->priv.pg_wq)
		return -ENOMEM;

	xa_init(&dev->priv.page_root_xa);
	mlx5_pages_debugfs_init(dev);

	return 0;
}

void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
	mlx5_pages_debugfs_cleanup(dev);
	xa_destroy(&dev->priv.page_root_xa);
	destroy_workqueue(dev->priv.pg_wq);
}

void mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
	MLX5_NB_INIT(&dev->priv.pg_nb, req_pages_handler, PAGE_REQUEST);
	mlx5_eq_notifier_register(dev, &dev->priv.pg_nb);
}

void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
	mlx5_eq_notifier_unregister(dev, &dev->priv.pg_nb);
	flush_workqueue(dev->priv.pg_wq);
}

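/* Poll a page counter until it reaches zero, e.g. while waiting for VFs
 * to return their pages. The timeout extends as long as the count keeps
 * dropping.
 */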
int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
{
	u64 recl_vf_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_VFS_PAGES));
	unsigned long end = jiffies + recl_vf_pages_to_jiffies;
	int prev_pages = *pages;

	/* In case of internal error we will free the pages manually later */
	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5_core_warn(dev, "Skipping wait for vf pages stage\n");
		return 0;
	}

	mlx5_core_dbg(dev, "Waiting for %d pages\n", prev_pages);
	while (*pages) {
		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "aborting while there are %d pending pages\n", *pages);
			return -ETIMEDOUT;
		}
		if (*pages < prev_pages) {
			end = jiffies + recl_vf_pages_to_jiffies;
			prev_pages = *pages;
		}
		msleep(50);
	}

	mlx5_core_dbg(dev, "All pages received\n");
	return 0;
}