/*-
 * SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 *
 * Copyright (c) 2015 - 2022 Intel Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenFabrics.org BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *    - Redistributions of source code must retain the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer.
 *
 *    - Redistributions in binary form must reproduce the above
 *	copyright notice, this list of conditions and the following
 *	disclaimer in the documentation and/or other materials
 *	provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*$FreeBSD$*/

#include "osdep.h"
#include "irdma_hmc.h"
#include "irdma_defs.h"
#include "irdma_type.h"
#include "irdma_protos.h"

/**
 * irdma_find_sd_index_limit - finds segment descriptor index limit
 * @hmc_info: pointer to the HMC configuration information structure
 * @type: type of HMC resources we're searching
 * @idx: starting index for the object
 * @cnt: number of objects we're trying to create
 * @sd_idx: pointer to return index of the segment descriptor in question
 * @sd_limit: pointer to return the maximum number of segment descriptors
 *
 * This function calculates the segment descriptor index and index limit
 * for the resource defined by irdma_hmc_rsrc_type.
 */
static void
irdma_find_sd_index_limit(struct irdma_hmc_info *hmc_info, u32 type,
			  u32 idx, u32 cnt, u32 *sd_idx,
			  u32 *sd_limit)
{
	u64 fpm_addr, fpm_limit;

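	/*
	 * Each object type occupies a contiguous range of FPM (function
	 * private memory) addresses, so the bytes touched by this request
	 * map directly onto a range of segment descriptors.  Each SD
	 * covers IRDMA_HMC_DIRECT_BP_SIZE (2M) of FPM space; the returned
	 * sd_limit is exclusive (one past the last SD used).
	 */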
	fpm_addr = hmc_info->hmc_obj[type].base +
	    hmc_info->hmc_obj[type].size * idx;
	fpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt;
	*sd_idx = (u32)(fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE);
	*sd_limit = (u32)((fpm_limit - 1) / IRDMA_HMC_DIRECT_BP_SIZE);
	*sd_limit += 1;
}

/**
 * irdma_find_pd_index_limit - finds page descriptor index limit
 * @hmc_info: pointer to the HMC configuration information struct
 * @type: HMC resource type we're examining
 * @idx: starting index for the object
 * @cnt: number of objects we're trying to create
 * @pd_idx: pointer to return page descriptor index
 * @pd_limit: pointer to return page descriptor index limit
 *
 * Calculates the page descriptor index and index limit for the resource
 * defined by irdma_hmc_rsrc_type.
 */
static void
irdma_find_pd_index_limit(struct irdma_hmc_info *hmc_info, u32 type,
			  u32 idx, u32 cnt, u32 *pd_idx,
			  u32 *pd_limit)
{
	u64 fpm_addr, fpm_limit;

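	/*
	 * Same computation as the SD variant, but at page granularity:
	 * each PD covers IRDMA_HMC_PAGED_BP_SIZE (4K) of FPM space and
	 * pd_limit is exclusive.
	 */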
	fpm_addr = hmc_info->hmc_obj[type].base +
	    hmc_info->hmc_obj[type].size * idx;
	fpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt;
	*pd_idx = (u32)(fpm_addr / IRDMA_HMC_PAGED_BP_SIZE);
	*pd_limit = (u32)((fpm_limit - 1) / IRDMA_HMC_PAGED_BP_SIZE);
	*pd_limit += 1;
}

/**
 * irdma_set_sd_entry - setup entry for sd programming
 * @pa: physical addr
 * @idx: sd index
 * @type: paged or direct sd
 * @entry: sd entry ptr
 */
static void
irdma_set_sd_entry(u64 pa, u32 idx, enum irdma_sd_entry_type type,
		   struct irdma_update_sd_entry *entry)
{
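	/*
	 * Build the SDDATA register image: backing-page count, the SD
	 * type (PMSDTYPE is 0 for paged, 1 for direct) and the valid
	 * bit, laid over the physical address of the backing memory.
	 */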
	entry->data = pa |
	    FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT, IRDMA_HMC_MAX_BP_COUNT) |
	    FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDTYPE,
		       type == IRDMA_SD_TYPE_PAGED ? 0 : 1) |
	    FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDVALID, 1);

	entry->cmd = idx | FIELD_PREP(IRDMA_PFHMC_SDCMD_PMSDWR, 1) |
	    IRDMA_PFHMC_SDCMD_PMSDPARTSEL;
}

/**
 * irdma_clr_sd_entry - setup entry for sd clear
 * @idx: sd index
 * @type: paged or direct sd
 * @entry: sd entry ptr
 */
static void
irdma_clr_sd_entry(u32 idx, enum irdma_sd_entry_type type,
		   struct irdma_update_sd_entry *entry)
{
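	/*
	 * Same register image as irdma_set_sd_entry(), but with no
	 * physical address and PMSDVALID left clear, which tells the
	 * hardware to tear the SD down.
	 */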
	entry->data = FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDBPCOUNT, IRDMA_HMC_MAX_BP_COUNT) |
	    FIELD_PREP(IRDMA_PFHMC_SDDATALOW_PMSDTYPE,
		       type == IRDMA_SD_TYPE_PAGED ? 0 : 1);

	entry->cmd = idx | FIELD_PREP(IRDMA_PFHMC_SDCMD_PMSDWR, 1) |
	    IRDMA_PFHMC_SDCMD_PMSDPARTSEL;
}

/**
 * irdma_invalidate_pf_hmc_pd - Invalidates the pd cache in the hardware for PF
 * @dev: pointer to our device struct
 * @sd_idx: segment descriptor index
 * @pd_idx: page descriptor index
 */
static inline void
irdma_invalidate_pf_hmc_pd(struct irdma_sc_dev *dev, u32 sd_idx,
			   u32 pd_idx)
{
	u32 val = FIELD_PREP(IRDMA_PFHMC_PDINV_PMSDIDX, sd_idx) |
	    FIELD_PREP(IRDMA_PFHMC_PDINV_PMSDPARTSEL, 1) |
	    FIELD_PREP(IRDMA_PFHMC_PDINV_PMPDIDX, pd_idx);

	writel(val, dev->hw_regs[IRDMA_PFHMC_PDINV]);
}

/**
 * irdma_hmc_sd_one - setup 1 sd entry for cqp
 * @dev: pointer to the device structure
 * @hmc_fn_id: hmc's function id
 * @pa: physical addr
 * @sd_idx: sd index
 * @type: paged or direct sd
 * @setsd: flag to set or clear sd
 */
int
irdma_hmc_sd_one(struct irdma_sc_dev *dev, u16 hmc_fn_id, u64 pa, u32 sd_idx,
		 enum irdma_sd_entry_type type, bool setsd)
{
	struct irdma_update_sds_info sdinfo;

	sdinfo.cnt = 1;
	sdinfo.hmc_fn_id = hmc_fn_id;
	if (setsd)
		irdma_set_sd_entry(pa, sd_idx, type, sdinfo.entry);
	else
		irdma_clr_sd_entry(sd_idx, type, sdinfo.entry);
	return dev->cqp->process_cqp_sds(dev, &sdinfo);
}

/**
 * irdma_hmc_sd_grp - setup group of sd entries for cqp
 * @dev: pointer to the device structure
 * @hmc_info: pointer to the HMC configuration information struct
 * @sd_index: sd index
 * @sd_cnt: number of sd entries
 * @setsd: flag to set or clear sd
 */
static int
irdma_hmc_sd_grp(struct irdma_sc_dev *dev,
		 struct irdma_hmc_info *hmc_info, u32 sd_index,
		 u32 sd_cnt, bool setsd)
{
	struct irdma_hmc_sd_entry *sd_entry;
	struct irdma_update_sds_info sdinfo = {0};
	u64 pa;
	u32 i;
	int ret_code = 0;

	sdinfo.hmc_fn_id = hmc_info->hmc_fn_id;
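	/*
	 * Walk the requested SD range, skipping entries already in the
	 * requested state, and batch the rest into CQP update-SD
	 * commands of at most IRDMA_MAX_SD_ENTRIES each.
	 */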
	for (i = sd_index; i < sd_index + sd_cnt; i++) {
		sd_entry = &hmc_info->sd_table.sd_entry[i];
		if (!sd_entry || (!sd_entry->valid && setsd) ||
		    (sd_entry->valid && !setsd))
			continue;
		if (setsd) {
			pa = (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED) ?
			    sd_entry->u.pd_table.pd_page_addr.pa :
			    sd_entry->u.bp.addr.pa;
			irdma_set_sd_entry(pa, i, sd_entry->entry_type,
					   &sdinfo.entry[sdinfo.cnt]);
		} else {
			irdma_clr_sd_entry(i, sd_entry->entry_type,
					   &sdinfo.entry[sdinfo.cnt]);
		}
		sdinfo.cnt++;
		if (sdinfo.cnt == IRDMA_MAX_SD_ENTRIES) {
			ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
			if (ret_code) {
				irdma_debug(dev, IRDMA_DEBUG_HMC,
					    "sd_programming failed err=%d\n",
					    ret_code);
				return ret_code;
			}

			sdinfo.cnt = 0;
		}
	}
	if (sdinfo.cnt)
		ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);

	return ret_code;
}

/**
 * irdma_hmc_finish_add_sd_reg - program sd entries for objects
 * @dev: pointer to the device structure
 * @info: create obj info
 */
static int
irdma_hmc_finish_add_sd_reg(struct irdma_sc_dev *dev,
			    struct irdma_hmc_create_obj_info *info)
{
	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
		return -EINVAL;

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt)
		return -EINVAL;

	if (!info->add_sd_cnt)
		return 0;
	return irdma_hmc_sd_grp(dev, info->hmc_info,
				info->hmc_info->sd_indexes[0], info->add_sd_cnt,
				true);
}

/**
 * irdma_sc_create_hmc_obj - allocate backing store for hmc objects
 * @dev: pointer to the device structure
 * @info: pointer to irdma_hmc_create_obj_info struct
 *
 * This will allocate memory for PDs and backing pages and populate
 * the sd and pd entries.
 */
int
irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
			struct irdma_hmc_create_obj_info *info)
{
	struct irdma_hmc_sd_entry *sd_entry;
	u32 sd_idx, sd_lmt;
	u32 pd_idx = 0, pd_lmt = 0;
	u32 pd_idx1 = 0, pd_lmt1 = 0;
	u32 i, j;
	bool pd_error = false;
	int ret_code = 0;

	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
		return -EINVAL;

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		irdma_debug(dev, IRDMA_DEBUG_HMC,
			    "error type %u, start = %u, req cnt %u, cnt = %u\n",
			    info->rsrc_type, info->start_idx, info->count,
			    info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return -EINVAL;
	}

	irdma_find_sd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &sd_idx,
				  &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		return -EINVAL;
	}

	irdma_find_pd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &pd_idx,
				  &pd_lmt);

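	/*
	 * For every SD in the range, make sure a segment descriptor
	 * entry (direct or paged) exists; for paged SDs belonging to
	 * this function (and not backing PBLEs), also allocate the
	 * covered page descriptors.
	 */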
	for (j = sd_idx; j < sd_lmt; j++) {
		ret_code = irdma_add_sd_table_entry(dev->hw, info->hmc_info, j,
						    info->entry_type,
						    IRDMA_HMC_DIRECT_BP_SIZE);
		if (ret_code)
			goto exit_sd_error;

		sd_entry = &info->hmc_info->sd_table.sd_entry[j];
		if (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED &&
		    (dev->hmc_info == info->hmc_info &&
		     info->rsrc_type != IRDMA_HMC_IW_PBLE)) {
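			/*
			 * Clamp the global PD range to the slice covered
			 * by this SD (IRDMA_HMC_MAX_BP_COUNT PDs per SD).
			 */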
			pd_idx1 = max(pd_idx, (j * IRDMA_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt, (j + 1) * IRDMA_HMC_MAX_BP_COUNT);
			for (i = pd_idx1; i < pd_lmt1; i++) {
				/* update the pd table entry */
				ret_code = irdma_add_pd_table_entry(dev,
								    info->hmc_info,
								    i, NULL);
				if (ret_code) {
					pd_error = true;
					break;
				}
			}
			if (pd_error) {
				while (i && (i > pd_idx1)) {
					irdma_remove_pd_bp(dev, info->hmc_info,
							   i - 1);
					i--;
				}
			}
		}
		if (sd_entry->valid)
			continue;

		info->hmc_info->sd_indexes[info->add_sd_cnt] = (u16)j;
		info->add_sd_cnt++;
		sd_entry->valid = true;
	}
	return irdma_hmc_finish_add_sd_reg(dev, info);

exit_sd_error:
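	/*
	 * Unwind in reverse: for each SD already touched, release the
	 * PDs or backing page that were set up before the failure.
	 */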
	while (j && (j > sd_idx)) {
		sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
		switch (sd_entry->entry_type) {
		case IRDMA_SD_TYPE_PAGED:
			pd_idx1 = max(pd_idx, (j - 1) * IRDMA_HMC_MAX_BP_COUNT);
			pd_lmt1 = min(pd_lmt, (j * IRDMA_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++)
				irdma_prep_remove_pd_page(info->hmc_info, i);
			break;
		case IRDMA_SD_TYPE_DIRECT:
			irdma_prep_remove_pd_page(info->hmc_info, (j - 1));
			break;
		default:
			ret_code = -EINVAL;
			break;
		}
		j--;
	}

	return ret_code;
}

/**
 * irdma_finish_del_sd_reg - delete sd entries for objects
 * @dev: pointer to the device structure
 * @info: delete obj info
 * @reset: true if called before reset
 */
static int
irdma_finish_del_sd_reg(struct irdma_sc_dev *dev,
			struct irdma_hmc_del_obj_info *info,
			bool reset)
{
	struct irdma_hmc_sd_entry *sd_entry;
	int ret_code = 0;
	struct irdma_dma_mem *mem;
	u32 i, sd_idx;

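	/*
	 * When the function is about to be reset there is no point in
	 * clearing the SDs through the CQP; only the host memory backing
	 * each SD still needs to be freed.
	 */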
	if (!reset)
		ret_code = irdma_hmc_sd_grp(dev, info->hmc_info,
					    info->hmc_info->sd_indexes[0],
					    info->del_sd_cnt, false);

	if (ret_code)
		irdma_debug(dev, IRDMA_DEBUG_HMC, "error cqp sd sd_grp\n");
	for (i = 0; i < info->del_sd_cnt; i++) {
		sd_idx = info->hmc_info->sd_indexes[i];
		sd_entry = &info->hmc_info->sd_table.sd_entry[sd_idx];

		mem = (sd_entry->entry_type == IRDMA_SD_TYPE_PAGED) ?
		    &sd_entry->u.pd_table.pd_page_addr :
		    &sd_entry->u.bp.addr;

		if (!mem || !mem->va)
			irdma_debug(dev, IRDMA_DEBUG_HMC, "error cqp sd mem\n");
		else
			irdma_free_dma_mem(dev->hw, mem);
	}

	return ret_code;
}

/**
 * irdma_sc_del_hmc_obj - remove pe hmc objects
 * @dev: pointer to the device structure
 * @info: pointer to irdma_hmc_del_obj_info struct
 * @reset: true if called before reset
 *
 * This will de-populate the SDs and PDs and free the memory for the PDs
 * and backing storage.  After this function returns, the caller should
 * deallocate the memory previously allocated for book-keeping information
 * about the PDs and backing storage.
 */
int
irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
		     struct irdma_hmc_del_obj_info *info, bool reset)
{
	struct irdma_hmc_pd_table *pd_table;
	u32 sd_idx, sd_lmt;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	u32 i, j;
	int ret_code = 0;

	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		irdma_debug(dev, IRDMA_DEBUG_HMC,
			    "error start_idx[%04d] >= [type %04d].cnt[%04d]\n",
			    info->start_idx, info->rsrc_type,
			    info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return -EINVAL;
	}

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		irdma_debug(dev, IRDMA_DEBUG_HMC,
			    "error start_idx[%04d] + count %04d >= [type %04d].cnt[%04d]\n",
			    info->start_idx, info->count, info->rsrc_type,
			    info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return -EINVAL;
	}

	irdma_find_pd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &pd_idx,
				  &pd_lmt);

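	/*
	 * First pass: free the backing page behind every valid PD that
	 * falls inside a paged SD in the affected range.
	 */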
	for (j = pd_idx; j < pd_lmt; j++) {
		sd_idx = j / IRDMA_HMC_PD_CNT_IN_SD;

		if (!info->hmc_info->sd_table.sd_entry[sd_idx].valid)
			continue;

		if (info->hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
		    IRDMA_SD_TYPE_PAGED)
			continue;

		rel_pd_idx = j % IRDMA_HMC_PD_CNT_IN_SD;
		pd_table = &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
		if (pd_table->pd_entry &&
		    pd_table->pd_entry[rel_pd_idx].valid) {
			ret_code = irdma_remove_pd_bp(dev, info->hmc_info, j);
			if (ret_code) {
				irdma_debug(dev, IRDMA_DEBUG_HMC,
					    "remove_pd_bp error\n");
				return ret_code;
			}
		}
	}

	irdma_find_sd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &sd_idx,
				  &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		irdma_debug(dev, IRDMA_DEBUG_HMC, "invalid sd_idx\n");
		return -EINVAL;
	}

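	/*
	 * Second pass: invalidate the SDs themselves and queue their
	 * indexes so irdma_finish_del_sd_reg() can clear them through
	 * the CQP and free their backing memory.
	 */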
	for (i = sd_idx; i < sd_lmt; i++) {
		pd_table = &info->hmc_info->sd_table.sd_entry[i].u.pd_table;
		if (!info->hmc_info->sd_table.sd_entry[i].valid)
			continue;
		switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
		case IRDMA_SD_TYPE_DIRECT:
			ret_code = irdma_prep_remove_sd_bp(info->hmc_info, i);
			if (!ret_code) {
				info->hmc_info->sd_indexes[info->del_sd_cnt] =
				    (u16)i;
				info->del_sd_cnt++;
			}
			break;
		case IRDMA_SD_TYPE_PAGED:
			ret_code = irdma_prep_remove_pd_page(info->hmc_info, i);
			if (ret_code)
				break;
			if (dev->hmc_info != info->hmc_info &&
			    info->rsrc_type == IRDMA_HMC_IW_PBLE &&
			    pd_table->pd_entry) {
				kfree(pd_table->pd_entry_virt_mem.va);
				pd_table->pd_entry = NULL;
			}
			info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;
			info->del_sd_cnt++;
			break;
		default:
			break;
		}
	}
	return irdma_finish_del_sd_reg(dev, info, reset);
}

/**
 * irdma_add_sd_table_entry - Adds a segment descriptor to the table
 * @hw: pointer to our hw struct
 * @hmc_info: pointer to the HMC configuration information struct
 * @sd_index: segment descriptor index to manipulate
 * @type: what type of segment descriptor we're manipulating
 * @direct_mode_sz: size to alloc in direct mode
 */
int
irdma_add_sd_table_entry(struct irdma_hw *hw,
			 struct irdma_hmc_info *hmc_info, u32 sd_index,
			 enum irdma_sd_entry_type type, u64 direct_mode_sz)
{
	struct irdma_hmc_sd_entry *sd_entry;
	struct irdma_dma_mem dma_mem;
	u64 alloc_len;

	sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
	if (!sd_entry->valid) {
		if (type == IRDMA_SD_TYPE_PAGED)
			alloc_len = IRDMA_HMC_PAGED_BP_SIZE;
		else
			alloc_len = direct_mode_sz;

		/* allocate a 4K pd page or 2M backing page */
		dma_mem.size = alloc_len;
		dma_mem.va = irdma_allocate_dma_mem(hw, &dma_mem, dma_mem.size,
						    IRDMA_HMC_PD_BP_BUF_ALIGNMENT);
		if (!dma_mem.va)
			return -ENOMEM;
		if (type == IRDMA_SD_TYPE_PAGED) {
			struct irdma_virt_mem *vmem =
			    &sd_entry->u.pd_table.pd_entry_virt_mem;

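			/*
			 * A paged SD tracks 512 PDs (4K pages, 2M total),
			 * so allocate one irdma_hmc_pd_entry per PD.
			 */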
			vmem->size = sizeof(struct irdma_hmc_pd_entry) * 512;
			vmem->va = kzalloc(vmem->size, GFP_KERNEL);
			if (!vmem->va) {
				irdma_free_dma_mem(hw, &dma_mem);
				return -ENOMEM;
			}
			sd_entry->u.pd_table.pd_entry = vmem->va;

			irdma_memcpy(&sd_entry->u.pd_table.pd_page_addr, &dma_mem,
				     sizeof(sd_entry->u.pd_table.pd_page_addr));
		} else {
			irdma_memcpy(&sd_entry->u.bp.addr, &dma_mem,
				     sizeof(sd_entry->u.bp.addr));

			sd_entry->u.bp.sd_pd_index = sd_index;
		}

		hmc_info->sd_table.sd_entry[sd_index].entry_type = type;
		hmc_info->sd_table.use_cnt++;
	}
	if (sd_entry->entry_type == IRDMA_SD_TYPE_DIRECT)
		sd_entry->u.bp.use_cnt++;

	return 0;
}

/**
 * irdma_add_pd_table_entry - Adds page descriptor to the specified table
 * @dev: pointer to our device structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @pd_index: which page descriptor index to manipulate
 * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one.
 *
 * This function:
 *	1. Initializes the pd entry
 *	2. Adds pd_entry in the pd_table
 *	3. Marks the entry valid in the irdma_hmc_pd_entry structure
 *	4. Initializes the pd_entry's ref count to 1
 * assumptions:
 *	1. The memory for the pd should be pinned down, physically
 *	   contiguous, aligned on a 4K boundary and zeroed.
 *	2. It should be 4K in size.
 */
int
irdma_add_pd_table_entry(struct irdma_sc_dev *dev,
			 struct irdma_hmc_info *hmc_info, u32 pd_index,
			 struct irdma_dma_mem *rsrc_pg)
{
	struct irdma_hmc_pd_table *pd_table;
	struct irdma_hmc_pd_entry *pd_entry;
	struct irdma_dma_mem mem;
	struct irdma_dma_mem *page = &mem;
	u32 sd_idx, rel_pd_idx;
	u64 *pd_addr;
	u64 page_desc;

	if (pd_index / IRDMA_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt)
		return -EINVAL;

	sd_idx = (pd_index / IRDMA_HMC_PD_CNT_IN_SD);
	if (hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
	    IRDMA_SD_TYPE_PAGED)
		return 0;

	rel_pd_idx = (pd_index % IRDMA_HMC_PD_CNT_IN_SD);
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	if (!pd_entry->valid) {
		if (rsrc_pg) {
			pd_entry->rsrc_pg = true;
			page = rsrc_pg;
		} else {
			page->size = IRDMA_HMC_PAGED_BP_SIZE;
			page->va = irdma_allocate_dma_mem(dev->hw, page,
							  page->size,
							  IRDMA_HMC_PD_BP_BUF_ALIGNMENT);
			if (!page->va)
				return -ENOMEM;

			pd_entry->rsrc_pg = false;
		}

		irdma_memcpy(&pd_entry->bp.addr, page, sizeof(pd_entry->bp.addr));
		pd_entry->bp.sd_pd_index = pd_index;
		pd_entry->bp.entry_type = IRDMA_SD_TYPE_PAGED;
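		/*
		 * Publish the page: write its physical address, with
		 * bit 0 set as the valid flag, into the SD's PD page,
		 * then invalidate the hardware's cached copy of this
		 * PD entry.
		 */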
		page_desc = page->pa | 0x1;
		pd_addr = pd_table->pd_page_addr.va;
		pd_addr += rel_pd_idx;
		irdma_memcpy(pd_addr, &page_desc, sizeof(*pd_addr));
		pd_entry->sd_index = sd_idx;
		pd_entry->valid = true;
		pd_table->use_cnt++;
		irdma_invalidate_pf_hmc_pd(dev, sd_idx, rel_pd_idx);
	}
	pd_entry->bp.use_cnt++;

	return 0;
}

/**
 * irdma_remove_pd_bp - remove a backing page from a page descriptor
 * @dev: pointer to our HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 *
 * This function:
 *	1. Marks the entry in the pd table (for paged address mode) or in
 *	   the sd table (for direct address mode) invalid.
 *	2. Writes to the PMPDINV register to invalidate the backing page
 *	   in the FV cache.
 *	3. Decrements the ref count for the pd_entry.
 * assumptions:
 *	1. Caller can deallocate the memory used by backing storage after this
 *	   function returns.
 */
int
irdma_remove_pd_bp(struct irdma_sc_dev *dev,
		   struct irdma_hmc_info *hmc_info, u32 idx)
{
	struct irdma_hmc_pd_entry *pd_entry;
	struct irdma_hmc_pd_table *pd_table;
	struct irdma_hmc_sd_entry *sd_entry;
	u32 sd_idx, rel_pd_idx;
	struct irdma_dma_mem *mem;
	u64 *pd_addr;

	sd_idx = idx / IRDMA_HMC_PD_CNT_IN_SD;
	rel_pd_idx = idx % IRDMA_HMC_PD_CNT_IN_SD;
	if (sd_idx >= hmc_info->sd_table.sd_cnt)
		return -EINVAL;

	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	if (sd_entry->entry_type != IRDMA_SD_TYPE_PAGED)
		return -EINVAL;

	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	if (--pd_entry->bp.use_cnt)
		return 0;

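	/*
	 * Last reference is gone: clear the PD entry in the PD page,
	 * invalidate the hardware's cached copy, and free the backing
	 * page unless it was supplied by the caller (rsrc_pg).
	 */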
	pd_entry->valid = false;
	pd_table->use_cnt--;
	pd_addr = pd_table->pd_page_addr.va;
	pd_addr += rel_pd_idx;
	irdma_memset(pd_addr, 0, sizeof(u64));
	irdma_invalidate_pf_hmc_pd(dev, sd_idx, idx);

	if (!pd_entry->rsrc_pg) {
		mem = &pd_entry->bp.addr;
		if (!mem || !mem->va)
			return -EINVAL;

		irdma_free_dma_mem(dev->hw, mem);
	}
	if (!pd_table->use_cnt)
		kfree(pd_table->pd_entry_virt_mem.va);

	return 0;
}

/**
 * irdma_prep_remove_sd_bp - Prepares to remove a backing page from an sd entry
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: the page index
 */
int
irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info, u32 idx)
{
	struct irdma_hmc_sd_entry *sd_entry;

	sd_entry = &hmc_info->sd_table.sd_entry[idx];
	if (--sd_entry->u.bp.use_cnt)
		return -EBUSY;

	hmc_info->sd_table.use_cnt--;
	sd_entry->valid = false;

	return 0;
}

/**
 * irdma_prep_remove_pd_page - Prepares to remove a PD page from an sd entry.
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: segment descriptor index to find the relevant page descriptor
 */
int
irdma_prep_remove_pd_page(struct irdma_hmc_info *hmc_info, u32 idx)
{
	struct irdma_hmc_sd_entry *sd_entry;

	sd_entry = &hmc_info->sd_table.sd_entry[idx];

	if (sd_entry->u.pd_table.use_cnt)
		return -EBUSY;

	sd_entry->valid = false;
	hmc_info->sd_table.use_cnt--;

	return 0;
}