/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*	copyright notice, this list of conditions and the following
*	disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*	copyright notice, this list of conditions and the following
*	disclaimer in the documentation and/or other materials
*	provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/

#include "i40iw_status.h"
#include "i40iw_osdep.h"
#include "i40iw_register.h"
#include "i40iw_hmc.h"

#include "i40iw_d.h"
#include "i40iw_type.h"
#include "i40iw_p.h"

#include <linux/pci.h>
#include <linux/genalloc.h>
#include <linux/vmalloc.h>
#include "i40iw_pble.h"
#include "i40iw.h"

struct i40iw_device;
static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
					    struct i40iw_hmc_pble_rsrc *pble_rsrc);
static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chunk);

/**
 * i40iw_destroy_pble_pool - destroy pool during module unload
 * @dev: i40iw_sc_dev struct
 * @pble_rsrc: pble resources
 */
void i40iw_destroy_pble_pool(struct i40iw_sc_dev *dev, struct i40iw_hmc_pble_rsrc *pble_rsrc)
{
	struct list_head *clist;
	struct list_head *tlist;
	struct i40iw_chunk *chunk;
	struct i40iw_pble_pool *pinfo = &pble_rsrc->pinfo;

	if (pinfo->pool) {
		list_for_each_safe(clist, tlist, &pinfo->clist) {
			chunk = list_entry(clist, struct i40iw_chunk, list);
			if (chunk->type == I40IW_VMALLOC)
				i40iw_free_vmalloc_mem(dev->hw, chunk);
			kfree(chunk);
		}
		gen_pool_destroy(pinfo->pool);
	}
}

/**
 * i40iw_hmc_init_pble - Initialize pble resources during module load
 * @dev: i40iw_sc_dev struct
 * @pble_rsrc: pble resources
 */
enum i40iw_status_code i40iw_hmc_init_pble(struct i40iw_sc_dev *dev,
					   struct i40iw_hmc_pble_rsrc *pble_rsrc)
{
	struct i40iw_hmc_info *hmc_info;
	u32 fpm_idx = 0;

	hmc_info = dev->hmc_info;
	pble_rsrc->fpm_base_addr = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].base;
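	/*
	 * fpm addresses are byte offsets and each PBLE is 8 bytes, so the
	 * shifts by 3 below convert between byte offsets and PBLE counts.
	 */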
	/* Now start the pble on a 4k boundary */
	if (pble_rsrc->fpm_base_addr & 0xfff)
		fpm_idx = (PAGE_SIZE - (pble_rsrc->fpm_base_addr & 0xfff)) >> 3;

	pble_rsrc->unallocated_pble =
	    hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt - fpm_idx;
	pble_rsrc->next_fpm_addr = pble_rsrc->fpm_base_addr + (fpm_idx << 3);

	pble_rsrc->pinfo.pool_shift = POOL_SHIFT;
	pble_rsrc->pinfo.pool = gen_pool_create(pble_rsrc->pinfo.pool_shift, -1);
	INIT_LIST_HEAD(&pble_rsrc->pinfo.clist);
	if (!pble_rsrc->pinfo.pool)
		goto error;

	if (add_pble_pool(dev, pble_rsrc))
		goto error;

	return 0;

error:
	i40iw_destroy_pble_pool(dev, pble_rsrc);
	return I40IW_ERR_NO_MEMORY;
}

/**
 * get_sd_pd_idx - Returns sd index, pd index and rel_pd_idx from fpm address
 * @pble_rsrc: structure containing fpm address
 * @idx: where to return indexes
 */
static inline void get_sd_pd_idx(struct i40iw_hmc_pble_rsrc *pble_rsrc,
				 struct sd_pd_idx *idx)
{
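	/*
	 * An sd covers one 2MB direct backing page (I40IW_HMC_DIRECT_BP_SIZE)
	 * and a pd covers one 4K page (I40IW_HMC_PAGED_BP_SIZE), so dividing
	 * the fpm byte address by each size yields the sd and pd indexes.
	 */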
	idx->sd_idx = (u32)(pble_rsrc->next_fpm_addr) / I40IW_HMC_DIRECT_BP_SIZE;
	idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr) / I40IW_HMC_PAGED_BP_SIZE;
	idx->rel_pd_idx = (idx->pd_idx % I40IW_HMC_PD_CNT_IN_SD);
}

/**
 * add_sd_direct - add sd direct for pble
 * @dev: hardware control device structure
 * @pble_rsrc: pble resource ptr
 * @info: page info for sd
 */
static enum i40iw_status_code add_sd_direct(struct i40iw_sc_dev *dev,
					    struct i40iw_hmc_pble_rsrc *pble_rsrc,
					    struct i40iw_add_page_info *info)
{
	enum i40iw_status_code ret_code = 0;
	struct sd_pd_idx *idx = &info->idx;
	struct i40iw_chunk *chunk = info->chunk;
	struct i40iw_hmc_info *hmc_info = info->hmc_info;
	struct i40iw_hmc_sd_entry *sd_entry = info->sd_entry;
	u32 offset = 0;

	if (!sd_entry->valid) {
		if (dev->is_pf) {
			ret_code = i40iw_add_sd_table_entry(dev->hw, hmc_info,
							    info->idx.sd_idx,
							    I40IW_SD_TYPE_DIRECT,
							    I40IW_HMC_DIRECT_BP_SIZE);
			if (ret_code)
				return ret_code;
			chunk->type = I40IW_DMA_COHERENT;
		}
	}
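	/* byte offset of this chunk's first pd within the 2MB direct page */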
	offset = idx->rel_pd_idx << I40IW_HMC_PAGED_BP_SHIFT;
	chunk->size = info->pages << I40IW_HMC_PAGED_BP_SHIFT;
	chunk->vaddr = ((u8 *)sd_entry->u.bp.addr.va + offset);
	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	i40iw_debug(dev, I40IW_DEBUG_PBLE, "chunk_size[%d] = 0x%x vaddr=%p fpm_addr = %llx\n",
		    chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr);
	return 0;
}

/**
 * i40iw_free_vmalloc_mem - free vmalloc during close
 * @hw: hw struct
 * @chunk: chunk information for vmalloc
 */
static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chunk)
{
	struct pci_dev *pcidev = hw->pcidev;
	int i;

	if (!chunk->pg_cnt)
		goto done;
	for (i = 0; i < chunk->pg_cnt; i++)
		dma_unmap_page(&pcidev->dev, chunk->dmaaddrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);

done:
	kfree(chunk->dmaaddrs);
	chunk->dmaaddrs = NULL;
	vfree(chunk->vaddr);
	chunk->vaddr = NULL;
	chunk->type = 0;
}

/**
 * i40iw_get_vmalloc_mem - get 2M page for sd
 * @hw: hw struct
 * @chunk: chunk to add
 * @pg_cnt: # of 4K pages
 */
static enum i40iw_status_code i40iw_get_vmalloc_mem(struct i40iw_hw *hw,
						    struct i40iw_chunk *chunk,
						    int pg_cnt)
{
	struct pci_dev *pcidev = hw->pcidev;
	struct page *page;
	u8 *addr;
	u32 size;
	int i;

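	/* one DMA address per 4K page; << 3 assumes an 8-byte dma_addr_t */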
	chunk->dmaaddrs = kzalloc(pg_cnt << 3, GFP_KERNEL);
	if (!chunk->dmaaddrs)
		return I40IW_ERR_NO_MEMORY;
	size = PAGE_SIZE * pg_cnt;
	chunk->vaddr = vmalloc(size);
	if (!chunk->vaddr) {
		kfree(chunk->dmaaddrs);
		chunk->dmaaddrs = NULL;
		return I40IW_ERR_NO_MEMORY;
	}
	chunk->size = size;
	addr = (u8 *)chunk->vaddr;
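	/* vmalloc memory is only virtually contiguous; map each 4K page for DMA */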
	for (i = 0; i < pg_cnt; i++) {
		page = vmalloc_to_page((void *)addr);
		if (!page)
			break;
		chunk->dmaaddrs[i] = dma_map_page(&pcidev->dev, page, 0,
						  PAGE_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(&pcidev->dev, chunk->dmaaddrs[i]))
			break;
		addr += PAGE_SIZE;
	}

	chunk->pg_cnt = i;
	chunk->type = I40IW_VMALLOC;
	if (i == pg_cnt)
		return 0;

	i40iw_free_vmalloc_mem(hw, chunk);
	return I40IW_ERR_NO_MEMORY;
}

/**
 * fpm_to_idx - given fpm address, get pble index
 * @pble_rsrc: pble resource management
 * @addr: fpm address for index
 */
static inline u32 fpm_to_idx(struct i40iw_hmc_pble_rsrc *pble_rsrc, u64 addr)
{
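	/* fpm addresses are byte offsets; each PBLE is 8 bytes */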
	return (addr - (pble_rsrc->fpm_base_addr)) >> 3;
}

/**
 * add_bp_pages - add backing pages for sd
 * @dev: hardware control device structure
 * @pble_rsrc: pble resource management
 * @info: page info for sd
 */
static enum i40iw_status_code add_bp_pages(struct i40iw_sc_dev *dev,
					   struct i40iw_hmc_pble_rsrc *pble_rsrc,
					   struct i40iw_add_page_info *info)
{
	u8 *addr;
	struct i40iw_dma_mem mem;
	struct i40iw_hmc_pd_entry *pd_entry;
	struct i40iw_hmc_sd_entry *sd_entry = info->sd_entry;
	struct i40iw_hmc_info *hmc_info = info->hmc_info;
	struct i40iw_chunk *chunk = info->chunk;
	struct i40iw_manage_vf_pble_info vf_pble_info;
	enum i40iw_status_code status = 0;
	u32 rel_pd_idx = info->idx.rel_pd_idx;
	u32 pd_idx = info->idx.pd_idx;
	u32 i;

	status = i40iw_get_vmalloc_mem(dev->hw, chunk, info->pages);
	if (status)
		return I40IW_ERR_NO_MEMORY;
	status = i40iw_add_sd_table_entry(dev->hw, hmc_info,
					  info->idx.sd_idx, I40IW_SD_TYPE_PAGED,
					  I40IW_HMC_DIRECT_BP_SIZE);
	if (status)
		goto error;
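	/* a VF asks the PF over the virtual channel to commit its PBLE objects */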
	if (!dev->is_pf) {
		status = i40iw_vchnl_vf_add_hmc_objs(dev, I40IW_HMC_IW_PBLE,
						     fpm_to_idx(pble_rsrc,
								pble_rsrc->next_fpm_addr),
						     (info->pages << PBLE_512_SHIFT));
		if (status) {
			i40iw_pr_err("failed to allocate PBLEs in the PF. Error %i\n", status);
			goto error;
		}
	}
	addr = chunk->vaddr;
	for (i = 0; i < info->pages; i++) {
		mem.pa = chunk->dmaaddrs[i];
		mem.size = PAGE_SIZE;
		mem.va = (void *)(addr);
		pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx++];
		if (!pd_entry->valid) {
			status = i40iw_add_pd_table_entry(dev->hw, hmc_info, pd_idx++, &mem);
			if (status)
				goto error;
			addr += PAGE_SIZE;
		} else {
			i40iw_pr_err("pd entry is valid; expected invalid\n");
		}
	}
	if (!dev->is_pf) {
		vf_pble_info.first_pd_index = info->idx.rel_pd_idx;
		vf_pble_info.inv_pd_ent = false;
		vf_pble_info.pd_entry_cnt = PBLE_PER_PAGE;
		vf_pble_info.pd_pl_pba = sd_entry->u.pd_table.pd_page_addr.pa;
		vf_pble_info.sd_index = info->idx.sd_idx;
		status = i40iw_hw_manage_vf_pble_bp(dev->back_dev,
						    &vf_pble_info, true);
		if (status) {
			i40iw_pr_err("CQP manage VF PBLE BP failed.  %i\n", status);
			goto error;
		}
	}
	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	return 0;
error:
	i40iw_free_vmalloc_mem(dev->hw, chunk);
	return status;
}

/**
 * add_pble_pool - add a sd entry for pble resource
 * @dev: hardware control device structure
 * @pble_rsrc: pble resource management
 */
static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
					    struct i40iw_hmc_pble_rsrc *pble_rsrc)
{
	struct i40iw_hmc_sd_entry *sd_entry;
	struct i40iw_hmc_info *hmc_info;
	struct i40iw_chunk *chunk;
	struct i40iw_add_page_info info;
	struct sd_pd_idx *idx = &info.idx;
	enum i40iw_status_code ret_code = 0;
	enum i40iw_sd_entry_type sd_entry_type;
	u64 sd_reg_val = 0;
	u32 pages;

	if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE)
		return I40IW_ERR_NO_MEMORY;
	if (pble_rsrc->next_fpm_addr & 0xfff) {
		i40iw_pr_err("next fpm_addr %llx\n", pble_rsrc->next_fpm_addr);
		return I40IW_ERR_INVALID_PAGE_DESC_INDEX;
	}
	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
	if (!chunk)
		return I40IW_ERR_NO_MEMORY;
	hmc_info = dev->hmc_info;
	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	get_sd_pd_idx(pble_rsrc, idx);
	sd_entry = &hmc_info->sd_table.sd_entry[idx->sd_idx];
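	/* pds left in this sd, capped by remaining PBLEs (512 per 4K page) */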
	pages = (idx->rel_pd_idx) ? (I40IW_HMC_PD_CNT_IN_SD -
			idx->rel_pd_idx) : I40IW_HMC_PD_CNT_IN_SD;
	pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT);
	info.chunk = chunk;
	info.hmc_info = hmc_info;
	info.pages = pages;
	info.sd_entry = sd_entry;
	if (!sd_entry->valid) {
		sd_entry_type = (!idx->rel_pd_idx &&
				 (pages == I40IW_HMC_PD_CNT_IN_SD) &&
				 dev->is_pf) ? I40IW_SD_TYPE_DIRECT : I40IW_SD_TYPE_PAGED;
	} else {
		sd_entry_type = sd_entry->entry_type;
	}
	i40iw_debug(dev, I40IW_DEBUG_PBLE,
		    "pages = %d, unallocated_pble[%u] current_fpm_addr = %llx\n",
		    pages, pble_rsrc->unallocated_pble, pble_rsrc->next_fpm_addr);
	i40iw_debug(dev, I40IW_DEBUG_PBLE, "sd_entry_type = %d sd_entry valid = %d\n",
		    sd_entry_type, sd_entry->valid);

	if (sd_entry_type == I40IW_SD_TYPE_DIRECT)
		ret_code = add_sd_direct(dev, pble_rsrc, &info);
	if (ret_code)
		sd_entry_type = I40IW_SD_TYPE_PAGED;
	else
		pble_rsrc->stats_direct_sds++;

	if (sd_entry_type == I40IW_SD_TYPE_PAGED) {
		ret_code = add_bp_pages(dev, pble_rsrc, &info);
		if (ret_code)
			goto error;
		else
			pble_rsrc->stats_paged_sds++;
	}

	if (gen_pool_add_virt(pble_rsrc->pinfo.pool, (unsigned long)chunk->vaddr,
			      (phys_addr_t)chunk->fpm_addr, chunk->size, -1)) {
		i40iw_pr_err("could not allocate memory by gen_pool_add_virt()\n");
		ret_code = I40IW_ERR_NO_MEMORY;
		goto error;
	}
	pble_rsrc->next_fpm_addr += chunk->size;
	i40iw_debug(dev, I40IW_DEBUG_PBLE, "next_fpm_addr = %llx chunk_size[%u] = 0x%x\n",
		    pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
	pble_rsrc->unallocated_pble -= (chunk->size >> 3);
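	/* the sd is programmed with the pd-table page (paged) or the 2MB backing page (direct) */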
	sd_reg_val = (sd_entry_type == I40IW_SD_TYPE_PAGED) ?
			sd_entry->u.pd_table.pd_page_addr.pa : sd_entry->u.bp.addr.pa;
	if (dev->is_pf && !sd_entry->valid) {
		ret_code = i40iw_hmc_sd_one(dev, hmc_info->hmc_fn_id,
					    sd_reg_val, idx->sd_idx,
					    sd_entry->entry_type, true);
		if (ret_code) {
			i40iw_pr_err("cqp cmd failed for sd (pbles)\n");
			goto error;
		}
	}

	sd_entry->valid = true;
	list_add(&chunk->list, &pble_rsrc->pinfo.clist);
	return 0;
error:
	kfree(chunk);
	return ret_code;
}

/**
 * free_lvl2 - free level 2 pble
 * @pble_rsrc: pble resource management
 * @palloc: level 2 pble allocation
 */
static void free_lvl2(struct i40iw_hmc_pble_rsrc *pble_rsrc,
		      struct i40iw_pble_alloc *palloc)
{
	u32 i;
	struct gen_pool *pool;
	struct i40iw_pble_level2 *lvl2 = &palloc->level2;
	struct i40iw_pble_info *root = &lvl2->root;
	struct i40iw_pble_info *leaf = lvl2->leaf;

	pool = pble_rsrc->pinfo.pool;

	for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
		if (leaf->addr)
			gen_pool_free(pool, leaf->addr, (leaf->cnt << 3));
		else
			break;
	}

	if (root->addr)
		gen_pool_free(pool, root->addr, (root->cnt << 3));

	kfree(lvl2->leaf);
	lvl2->leaf = NULL;
}

/**
 * get_lvl2_pble - get level 2 pble resource
 * @pble_rsrc: pble resource management
 * @palloc: level 2 pble allocation
 * @pool: pool pointer
 */
static enum i40iw_status_code get_lvl2_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc,
					    struct i40iw_pble_alloc *palloc,
					    struct gen_pool *pool)
{
	u32 lf4k, lflast, total, i;
	u32 pblcnt = PBLE_PER_PAGE;
	u64 *addr;
	struct i40iw_pble_level2 *lvl2 = &palloc->level2;
	struct i40iw_pble_info *root = &lvl2->root;
	struct i40iw_pble_info *leaf;

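	/*
	 * Level 2 layout: a root page of leaf indexes, each leaf holding up
	 * to 512 PBLEs (one 4K page); the last leaf may be partially filled.
	 */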
	/* number of full 512-PBLE (4K) leaves */
	lf4k = palloc->total_cnt >> 9;
	lflast = palloc->total_cnt % PBLE_PER_PAGE;
	total = (lflast == 0) ? lf4k : lf4k + 1;
	lvl2->leaf_cnt = total;

	leaf = kzalloc((sizeof(*leaf) * total), GFP_ATOMIC);
	if (!leaf)
		return I40IW_ERR_NO_MEMORY;
	lvl2->leaf = leaf;
	/* allocate pbles for the root */
	root->addr = gen_pool_alloc(pool, (total << 3));
	if (!root->addr) {
		kfree(lvl2->leaf);
		lvl2->leaf = NULL;
		return I40IW_ERR_NO_MEMORY;
	}
	root->idx = fpm_to_idx(pble_rsrc,
			       (u64)gen_pool_virt_to_phys(pool, root->addr));
	root->cnt = total;
	addr = (u64 *)root->addr;
	for (i = 0; i < total; i++, leaf++) {
		pblcnt = (lflast && ((i + 1) == total)) ? lflast : PBLE_PER_PAGE;
		leaf->addr = gen_pool_alloc(pool, (pblcnt << 3));
		if (!leaf->addr)
			goto error;
		leaf->idx = fpm_to_idx(pble_rsrc, (u64)gen_pool_virt_to_phys(pool, leaf->addr));

		leaf->cnt = pblcnt;
		*addr = (u64)leaf->idx;
		addr++;
	}
	palloc->level = I40IW_LEVEL_2;
	pble_rsrc->stats_lvl2++;
	return 0;
error:
	free_lvl2(pble_rsrc, palloc);
	return I40IW_ERR_NO_MEMORY;
}

/**
 * get_lvl1_pble - get level 1 pble resource
 * @dev: hardware control device structure
 * @pble_rsrc: pble resource management
 * @palloc: level 1 pble allocation
 */
static enum i40iw_status_code get_lvl1_pble(struct i40iw_sc_dev *dev,
					    struct i40iw_hmc_pble_rsrc *pble_rsrc,
					    struct i40iw_pble_alloc *palloc)
{
	u64 *addr;
	struct gen_pool *pool;
	struct i40iw_pble_info *lvl1 = &palloc->level1;

	pool = pble_rsrc->pinfo.pool;
	addr = (u64 *)gen_pool_alloc(pool, (palloc->total_cnt << 3));

	if (!addr)
		return I40IW_ERR_NO_MEMORY;

	palloc->level = I40IW_LEVEL_1;
	lvl1->addr = (unsigned long)addr;
	lvl1->idx = fpm_to_idx(pble_rsrc, (u64)gen_pool_virt_to_phys(pool,
			       (unsigned long)addr));
	lvl1->cnt = palloc->total_cnt;
	pble_rsrc->stats_lvl1++;
	return 0;
}

/**
 * get_lvl1_lvl2_pble - calls get_lvl1 and get_lvl2 pble routine
 * @dev: i40iw_sc_dev struct
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble (idx + pble addr)
 * @pool: pointer to general purpose special memory pool descriptor
 */
static inline enum i40iw_status_code get_lvl1_lvl2_pble(struct i40iw_sc_dev *dev,
							struct i40iw_hmc_pble_rsrc *pble_rsrc,
							struct i40iw_pble_alloc *palloc,
							struct gen_pool *pool)
{
	enum i40iw_status_code status = 0;

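	/* prefer one contiguous level-1 range; fall back to level 2 only when
	 * the request spans more than one 4K page of PBLEs
	 */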
	status = get_lvl1_pble(dev, pble_rsrc, palloc);
	if (status && (palloc->total_cnt > PBLE_PER_PAGE))
		status = get_lvl2_pble(pble_rsrc, palloc, pool);
	return status;
}

/**
 * i40iw_get_pble - allocate pbles from the pool
 * @dev: i40iw_sc_dev struct
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble (idx + pble addr)
 * @pble_cnt: # of pbles requested
 */
enum i40iw_status_code i40iw_get_pble(struct i40iw_sc_dev *dev,
				      struct i40iw_hmc_pble_rsrc *pble_rsrc,
				      struct i40iw_pble_alloc *palloc,
				      u32 pble_cnt)
{
	struct gen_pool *pool;
	enum i40iw_status_code status = 0;
	u32 max_sds = 0;
	int i;

	pool = pble_rsrc->pinfo.pool;
	palloc->total_cnt = pble_cnt;
	palloc->level = I40IW_LEVEL_0;
	/* check first to see if we can get pbles without acquiring additional sds */
	status = get_lvl1_lvl2_pble(dev, pble_rsrc, palloc, pool);
	if (!status)
		goto exit;
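	/* each 2MB sd supplies 2^18 PBLEs (8 bytes each); add one sd of slack */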
	max_sds = (palloc->total_cnt >> 18) + 1;
	for (i = 0; i < max_sds; i++) {
		status = add_pble_pool(dev, pble_rsrc);
		if (status)
			break;
		status = get_lvl1_lvl2_pble(dev, pble_rsrc, palloc, pool);
		if (!status)
			break;
	}
exit:
	if (!status)
		pble_rsrc->stats_alloc_ok++;
	else
		pble_rsrc->stats_alloc_fail++;

	return status;
}

/**
 * i40iw_free_pble - put pbles back into pool
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble resource being freed
 */
void i40iw_free_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc,
		     struct i40iw_pble_alloc *palloc)
{
	struct gen_pool *pool;

	pool = pble_rsrc->pinfo.pool;
	if (palloc->level == I40IW_LEVEL_2)
		free_lvl2(pble_rsrc, palloc);
	else
		gen_pool_free(pool, palloc->level1.addr,
			      (palloc->level1.cnt << 3));
	pble_rsrc->stats_alloc_freed++;
}