xref: /freebsd/sys/dev/bnxt/bnxt_re/qplib_res.c (revision acd884de)
1 /*
2  * Copyright (c) 2015-2024, Broadcom. All rights reserved.  The term
3  * Broadcom refers to Broadcom Limited and/or its subsidiaries.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in
13  *    the documentation and/or other materials provided with the
14  *    distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
18  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
20  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
23  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
25  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
26  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  *
28  * Description: QPLib resource manager
29  */
30 
31 #include <linux/spinlock.h>
32 #include <linux/pci.h>
33 #include <linux/interrupt.h>
34 #include <linux/inetdevice.h>
35 #include <linux/dma-mapping.h>
36 #include <linux/if_vlan.h>
37 #include <linux/vmalloc.h>
38 
39 #include <net/ipv6.h>
40 #include <rdma/ib_verbs.h>
41 
42 #include "hsi_struct_def.h"
43 #include "qplib_res.h"
44 #include "qplib_sp.h"
45 #include "qplib_rcfw.h"
46 #include "bnxt.h"
47 #include "bnxt_ulp.h"
48 
49 uint8_t _get_chip_gen_p5_type(struct bnxt_qplib_chip_ctx *cctx)
50 {
51 	/* Extend this for granular type */
52 	return BNXT_RE_DEFAULT;
53 }
54 
55 inline bool _is_alloc_mr_unified(struct bnxt_qplib_dev_attr *dattr)
56 {
57 	return dattr->dev_cap_flags &
58 	       CREQ_QUERY_FUNC_RESP_SB_MR_REGISTER_ALLOC;
59 }
60 
61 /* PBL */
62 static void __free_pbl(struct bnxt_qplib_res *res,
63 		       struct bnxt_qplib_pbl *pbl, bool is_umem)
64 {
65 	struct pci_dev *pdev;
66 	int i;
67 
68 	pdev = res->pdev;
69 	if (is_umem == false) {
70 		for (i = 0; i < pbl->pg_count; i++) {
71 			if (pbl->pg_arr[i]) {
72 				dma_free_coherent(&pdev->dev, pbl->pg_size,
73 					(void *)((u64)pbl->pg_arr[i] &
74 						 PAGE_MASK),
75 					pbl->pg_map_arr[i]);
76 			}
77 			else
78 				dev_warn(&pdev->dev,
79 					 "QPLIB: PBL free pg_arr[%d] empty?!\n",
80 					 i);
81 			pbl->pg_arr[i] = NULL;
82 		}
83 	}
84 
85 	if (pbl->pg_arr) {
86 		vfree(pbl->pg_arr);
87 		pbl->pg_arr = NULL;
88 	}
89 	if (pbl->pg_map_arr) {
90 		vfree(pbl->pg_map_arr);
91 		pbl->pg_map_arr = NULL;
92 	}
93 	pbl->pg_count = 0;
94 	pbl->pg_size = 0;
95 }
96 
97 struct qplib_sg {
98 	dma_addr_t 	pg_map_arr;
99 	u32		size;
100 };
101 
102 static int __fill_user_dma_pages(struct bnxt_qplib_pbl *pbl,
103 				 struct bnxt_qplib_sg_info *sginfo)
104 {
105 	int sg_indx, pg_indx, tmp_size, offset;
106 	struct qplib_sg *tmp_sg = NULL;
107 	struct scatterlist *sg;
108 	u64 pmask, addr;
109 
110 	tmp_sg = vzalloc(sginfo->nmap * sizeof(struct qplib_sg));
111 	if (!tmp_sg)
112 		return -ENOMEM;
113 
114 	pmask = BIT_ULL(sginfo->pgshft) - 1;
115 	sg_indx = 0;
116 	for_each_sg(sginfo->sghead, sg, sginfo->nmap, sg_indx) {
117 		tmp_sg[sg_indx].pg_map_arr = sg_dma_address(sg);
118 		tmp_sg[sg_indx].size = sg_dma_len(sg);
119 	}
120 	pg_indx = 0;
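	/*
	 * Walk each SG segment in page-size steps and record a PBL entry at
	 * every new page boundary; the very first address is always recorded.
	 */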
121 	for (sg_indx = 0; sg_indx < sginfo->nmap; sg_indx++) {
122 		tmp_size = tmp_sg[sg_indx].size;
123 		offset = 0;
124 		while (tmp_size > 0) {
125 			addr = tmp_sg[sg_indx].pg_map_arr + offset;
126 			if ((!sg_indx && !pg_indx) || !(addr & pmask)) {
127 				pbl->pg_map_arr[pg_indx] = addr & ~pmask;
128 				pbl->pg_count++;
129 				pg_indx++;
130 			}
131 			offset += sginfo->pgsize;
132 			tmp_size -= sginfo->pgsize;
133 		}
134 	}
135 
136 	vfree(tmp_sg);
137 	return 0;
138 }
139 
140 static int bnxt_qplib_fill_user_dma_pages(struct bnxt_qplib_pbl *pbl,
141 					  struct bnxt_qplib_sg_info *sginfo)
142 {
143 	int rc = 0;
144 
145 	rc =  __fill_user_dma_pages(pbl, sginfo);
146 
147 	return rc;
148 }
149 
150 static int __alloc_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl,
151 		       struct bnxt_qplib_sg_info *sginfo)
152 {
153 	struct pci_dev *pdev;
154 	bool is_umem = false;
155 	int i;
156 
157 	if (sginfo->nopte)
158 		return 0;
159 
160 	pdev = res->pdev;
161 	/* page ptr arrays */
162 	pbl->pg_arr = vmalloc(sginfo->npages * sizeof(void *));
163 	if (!pbl->pg_arr)
164 		return -ENOMEM;
165 
166 	pbl->pg_map_arr = vmalloc(sginfo->npages * sizeof(dma_addr_t));
167 	if (!pbl->pg_map_arr) {
168 		vfree(pbl->pg_arr);
169 		return -ENOMEM;
170 	}
171 	pbl->pg_count = 0;
172 	pbl->pg_size = sginfo->pgsize;
173 	if (!sginfo->sghead) {
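		/* No umem scatterlist: allocate DMA-coherent pages directly. */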
174 		for (i = 0; i < sginfo->npages; i++) {
175 			pbl->pg_arr[i] = dma_zalloc_coherent(&pdev->dev,
176 							     pbl->pg_size,
177 							     &pbl->pg_map_arr[i],
178 							     GFP_KERNEL);
179 			if (!pbl->pg_arr[i])
180 				goto fail;
181 			pbl->pg_count++;
182 		}
183 	} else {
184 		is_umem = true;
185 		if (bnxt_qplib_fill_user_dma_pages(pbl, sginfo))
186 			goto fail;
187 	}
188 
189 	return 0;
190 fail:
191 	__free_pbl(res, pbl, is_umem);
192 	return -ENOMEM;
193 }
194 
195 /* HWQ */
196 void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res,
197 			 struct bnxt_qplib_hwq *hwq)
198 {
199 	int i;
200 
201 	if (!hwq->max_elements)
202 		return;
203 	if (hwq->level >= PBL_LVL_MAX)
204 		return;
205 
206 	for (i = 0; i < hwq->level + 1; i++) {
207 		if (i == hwq->level)
208 			__free_pbl(res, &hwq->pbl[i], hwq->is_user);
209 		else
210 			__free_pbl(res, &hwq->pbl[i], false);
211 	}
212 
213 	hwq->level = PBL_LVL_MAX;
214 	hwq->max_elements = 0;
215 	hwq->element_size = 0;
216 	hwq->prod = hwq->cons = 0;
217 	hwq->cp_bit = 0;
218 }
219 
220 /* All HWQs are power of 2 in size */
221 int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
222 			      struct bnxt_qplib_hwq_attr *hwq_attr)
223 {
224 	u32 npages = 0, depth, stride, aux_pages = 0;
225 	dma_addr_t *src_phys_ptr, **dst_virt_ptr;
226 	struct bnxt_qplib_sg_info sginfo = {};
227 	u32 aux_size = 0, npbl, npde;
228 	void *umem;
229 	struct bnxt_qplib_res *res;
230 	u32 aux_slots, pg_size;
231 	struct pci_dev *pdev;
232 	int i, rc, lvl;
233 
234 	res = hwq_attr->res;
235 	pdev = res->pdev;
236 	umem = hwq_attr->sginfo->sghead;
237 	pg_size = hwq_attr->sginfo->pgsize;
238 	hwq->level = PBL_LVL_MAX;
239 
240 	depth = roundup_pow_of_two(hwq_attr->depth);
241 	stride = roundup_pow_of_two(hwq_attr->stride);
242 	if (hwq_attr->aux_depth) {
243 		aux_slots = hwq_attr->aux_depth;
244 		aux_size = roundup_pow_of_two(hwq_attr->aux_stride);
245 		aux_pages = (aux_slots * aux_size) / pg_size;
246 		if ((aux_slots * aux_size) % pg_size)
247 			aux_pages++;
248 	}
249 
250 	if (!umem) {
251 		hwq->is_user = false;
252 		npages = (depth * stride) / pg_size + aux_pages;
253 		if ((depth * stride) % pg_size)
254 			npages++;
255 		if (!npages)
256 			return -EINVAL;
257 		hwq_attr->sginfo->npages = npages;
258 	} else {
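		/*
		 * User memory: convert the umem page count from pgsize units
		 * to pgshft-granularity pages, rounding up.
		 */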
259 		hwq->is_user = true;
260 		npages = hwq_attr->sginfo->npages;
261 		npages = (npages * (u64)pg_size) /
262 			  BIT_ULL(hwq_attr->sginfo->pgshft);
263 		if ((hwq_attr->sginfo->npages * (u64)pg_size) %
264 		     BIT_ULL(hwq_attr->sginfo->pgshft))
265 			npages++;
266 	}
267 	if (npages == MAX_PBL_LVL_0_PGS && !hwq_attr->sginfo->nopte) {
268 		/* This request is Level 0, map PTE */
269 		rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], hwq_attr->sginfo);
270 		if (rc)
271 			goto fail;
272 		hwq->level = PBL_LVL_0;
273 		goto done;
274 	}
275 
276 	if (npages >= MAX_PBL_LVL_0_PGS) {
277 		if (npages > MAX_PBL_LVL_1_PGS) {
278 			u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
279 				    0 : PTU_PTE_VALID;
280 			/* 2 levels of indirection */
281 			npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
282 			if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
283 				npbl++;
284 			npde = npbl >> MAX_PDL_LVL_SHIFT;
285 			if (npbl % BIT(MAX_PDL_LVL_SHIFT))
286 				npde++;
287 			/* Alloc PDE pages */
288 			sginfo.pgsize = npde * PAGE_SIZE;
289 			sginfo.npages = 1;
290 			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
			if (rc)
				goto fail;
291 
292 			/* Alloc PBL pages */
293 			sginfo.npages = npbl;
294 			sginfo.pgsize = PAGE_SIZE;
295 			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1], &sginfo);
296 			if (rc)
297 				goto fail;
298 			/* Fill PDL with PBL page pointers */
299 			dst_virt_ptr =
300 				(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
301 			src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
302 			if (hwq_attr->type == HWQ_TYPE_MR) {
303 			/* For MR it is expected that we supply only 1 contiguous
304 			 * page, i.e. only 1 entry in the PDL that will contain
305 			 * all the PBLs for the user-supplied memory region.
306 			 */
307 				for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
308 					dst_virt_ptr[0][i] = src_phys_ptr[i] |
309 						flag;
310 			} else {
311 				for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
312 					dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
313 						src_phys_ptr[i] | PTU_PDE_VALID;
314 			}
315 			/* Alloc or init PTEs */
316 			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_2],
317 					 hwq_attr->sginfo);
318 			if (rc)
319 				goto fail;
320 			hwq->level = PBL_LVL_2;
321 			if (hwq_attr->sginfo->nopte)
322 				goto done;
323 			/* Fill PBLs with PTE pointers */
324 			dst_virt_ptr =
325 				(dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr;
326 			src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr;
327 			for (i = 0; i < hwq->pbl[PBL_LVL_2].pg_count; i++) {
328 				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
329 					src_phys_ptr[i] | PTU_PTE_VALID;
330 			}
331 			if (hwq_attr->type == HWQ_TYPE_QUEUE) {
332 				/* Find the last pg of the size */
333 				i = hwq->pbl[PBL_LVL_2].pg_count;
334 				dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
335 								  PTU_PTE_LAST;
336 				if (i > 1)
337 					dst_virt_ptr[PTR_PG(i - 2)]
338 						    [PTR_IDX(i - 2)] |=
339 						    PTU_PTE_NEXT_TO_LAST;
340 			}
341 		} else { /* pages < 512 npbl = 1, npde = 0 */
342 			u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
343 				    0 : PTU_PTE_VALID;
344 
345 			/* 1 level of indirection */
346 			npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
347 			if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
348 				npbl++;
349 			sginfo.npages = npbl;
350 			sginfo.pgsize = PAGE_SIZE;
351 			/* Alloc PBL page */
352 			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
353 			if (rc)
354 				goto fail;
355 			/* Alloc or init PTEs */
356 			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1],
357 					 hwq_attr->sginfo);
358 			if (rc)
359 				goto fail;
360 			hwq->level = PBL_LVL_1;
361 			if (hwq_attr->sginfo->nopte)
362 				goto done;
363 			/* Fill PBL with PTE pointers */
364 			dst_virt_ptr =
365 				(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
366 			src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
367 			for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
368 				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
369 					src_phys_ptr[i] | flag;
370 			if (hwq_attr->type == HWQ_TYPE_QUEUE) {
371 				/* Find the last pg of the size */
372 				i = hwq->pbl[PBL_LVL_1].pg_count;
373 				dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
374 								  PTU_PTE_LAST;
375 				if (i > 1)
376 					dst_virt_ptr[PTR_PG(i - 2)]
377 						    [PTR_IDX(i - 2)] |=
378 						    PTU_PTE_NEXT_TO_LAST;
379 			}
380 		}
381 	}
382 done:
383 	hwq->prod = 0;
384 	hwq->cons = 0;
385 	hwq->pdev = pdev;
386 	hwq->depth = hwq_attr->depth;
387 	hwq->max_elements = depth;
388 	hwq->element_size = stride;
389 	hwq->qe_ppg = (pg_size / stride); /* queue elements per page */
390 
391 	if (hwq->level >= PBL_LVL_MAX)
392 		goto fail;
393 	/* For direct access to the elements */
394 	lvl = hwq->level;
395 	if (hwq_attr->sginfo->nopte && hwq->level)
396 		lvl = hwq->level - 1;
397 	hwq->pbl_ptr = hwq->pbl[lvl].pg_arr;
398 	hwq->pbl_dma_ptr = hwq->pbl[lvl].pg_map_arr;
399 	spin_lock_init(&hwq->lock);
400 
401 	return 0;
402 fail:
403 	bnxt_qplib_free_hwq(res, hwq);
404 	return -ENOMEM;
405 }
406 
407 /* Context Tables */
408 void bnxt_qplib_free_hwctx(struct bnxt_qplib_res *res)
409 {
410 	struct bnxt_qplib_ctx *hctx;
411 	int i;
412 
413 	hctx = res->hctx;
414 	bnxt_qplib_free_hwq(res, &hctx->qp_ctx.hwq);
415 	bnxt_qplib_free_hwq(res, &hctx->mrw_ctx.hwq);
416 	bnxt_qplib_free_hwq(res, &hctx->srq_ctx.hwq);
417 	bnxt_qplib_free_hwq(res, &hctx->cq_ctx.hwq);
418 	bnxt_qplib_free_hwq(res, &hctx->tim_ctx.hwq);
419 	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
420 		bnxt_qplib_free_hwq(res, &hctx->tqm_ctx.qtbl[i]);
421 	/* restore the original pde level before destroy */
422 	hctx->tqm_ctx.pde.level = hctx->tqm_ctx.pde_level;
423 	bnxt_qplib_free_hwq(res, &hctx->tqm_ctx.pde);
424 }
425 
426 static int bnxt_qplib_alloc_tqm_rings(struct bnxt_qplib_res *res,
427 				      struct bnxt_qplib_ctx *hctx)
428 {
429 	struct bnxt_qplib_hwq_attr hwq_attr = {};
430 	struct bnxt_qplib_sg_info sginfo = {};
431 	struct bnxt_qplib_tqm_ctx *tqmctx;
432 	int rc = 0;
433 	int i;
434 
435 	tqmctx = &hctx->tqm_ctx;
436 
437 	sginfo.pgsize = PAGE_SIZE;
438 	sginfo.pgshft = PAGE_SHIFT;
439 	hwq_attr.sginfo = &sginfo;
440 	hwq_attr.res = res;
441 	hwq_attr.type = HWQ_TYPE_CTX;
442 	hwq_attr.depth = 512;
443 	hwq_attr.stride = sizeof(u64);
444 	/* Alloc pdl buffer */
445 	rc = bnxt_qplib_alloc_init_hwq(&tqmctx->pde, &hwq_attr);
446 	if (rc)
447 		goto out;
448 	/* Save original pdl level */
449 	tqmctx->pde_level = tqmctx->pde.level;
450 
451 	hwq_attr.stride = 1;
452 	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) {
453 		if (!tqmctx->qcount[i])
454 			continue;
455 		hwq_attr.depth = hctx->qp_ctx.max * tqmctx->qcount[i];
456 		rc = bnxt_qplib_alloc_init_hwq(&tqmctx->qtbl[i], &hwq_attr);
457 		if (rc)
458 			goto out;
459 	}
460 out:
461 	return rc;
462 }
463 
464 static void bnxt_qplib_map_tqm_pgtbl(struct bnxt_qplib_tqm_ctx *ctx)
465 {
466 	struct bnxt_qplib_hwq *qtbl_hwq;
467 	dma_addr_t *dma_ptr;
468 	__le64 **pbl_ptr, *ptr;
469 	int i, j, k;
470 	int fnz_idx = -1;
471 	int pg_count;
472 
473 	pbl_ptr = (__le64 **)ctx->pde.pbl_ptr;
474 
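	/*
	 * Each TQM queue table owns MAX_TQM_ALLOC_BLK_SIZE PDE slots starting
	 * at offset j. Two-level tables publish every PBL page into the PDE;
	 * smaller tables publish their single PBL_LVL_0 page.
	 */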
475 	for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
476 	     i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
477 		qtbl_hwq = &ctx->qtbl[i];
478 		if (!qtbl_hwq->max_elements)
479 			continue;
480 		if (fnz_idx == -1)
481 			fnz_idx = i; /* first non-zero index */
482 		switch (qtbl_hwq->level) {
483 		case PBL_LVL_2:
484 			pg_count = qtbl_hwq->pbl[PBL_LVL_1].pg_count;
485 			for (k = 0; k < pg_count; k++) {
486 				ptr = &pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)];
487 				dma_ptr = &qtbl_hwq->pbl[PBL_LVL_1].pg_map_arr[k];
488 				*ptr = cpu_to_le64(*dma_ptr | PTU_PTE_VALID);
489 			}
490 			break;
491 		case PBL_LVL_1:
492 		case PBL_LVL_0:
493 		default:
494 			ptr = &pbl_ptr[PTR_PG(j)][PTR_IDX(j)];
495 			*ptr = cpu_to_le64(qtbl_hwq->pbl[PBL_LVL_0].pg_map_arr[0] |
496 					   PTU_PTE_VALID);
497 			break;
498 		}
499 	}
500 	if (fnz_idx == -1)
501 		fnz_idx = 0;
502 	/* update pde level as per page table programming */
503 	ctx->pde.level = (ctx->qtbl[fnz_idx].level == PBL_LVL_2) ? PBL_LVL_2 :
504 			  ctx->qtbl[fnz_idx].level + 1;
505 }
506 
507 static int bnxt_qplib_setup_tqm_rings(struct bnxt_qplib_res *res,
508 				      struct bnxt_qplib_ctx *hctx)
509 {
510 	int rc = 0;
511 
512 	rc = bnxt_qplib_alloc_tqm_rings(res, hctx);
513 	if (rc)
514 		goto fail;
515 
516 	bnxt_qplib_map_tqm_pgtbl(&hctx->tqm_ctx);
517 fail:
518 	return rc;
519 }
520 
521 /*
522  * Routine: bnxt_qplib_alloc_hwctx
523  * Description:
524  *     Context tables are memories which are used by the chip.
525  *     The 6 tables defined are:
526  *             QPC ctx - holds QP states
527  *             MRW ctx - holds memory region and window
528  *             SRQ ctx - holds shared RQ states
529  *             CQ ctx - holds completion queue states
530  *             TQM ctx - holds Tx Queue Manager context
531  *             TIM ctx - holds timer context
532  *     Depending on the size of the tbl requested, either a 1 Page Buffer List
533  *     or a 1-to-2-stage indirection Page Directory List + 1 PBL is used
534  *     instead.
535  *     The tables might be employed as follows:
536  *             For 0      < ctx size <= 1 PAGE, no indirection is used
537  *             For 1 PAGE < ctx size <= 512 pages, 1 level of indirection is used
538  *             For 512    < ctx size <= MAX, 2 levels of indirection are used
539  * Returns:
540  *     0 if success, else -ERRORS
541  */
542 int bnxt_qplib_alloc_hwctx(struct bnxt_qplib_res *res)
543 {
544 	struct bnxt_qplib_hwq_attr hwq_attr = {};
545 	struct bnxt_qplib_sg_info sginfo = {};
546 	struct bnxt_qplib_ctx *hctx;
547 	struct bnxt_qplib_hwq *hwq;
548 	int rc = 0;
549 
550 	hctx = res->hctx;
551 	/* QPC Tables */
552 	sginfo.pgsize = PAGE_SIZE;
553 	sginfo.pgshft = PAGE_SHIFT;
554 	hwq_attr.sginfo = &sginfo;
555 
556 	hwq_attr.res = res;
557 	hwq_attr.depth = hctx->qp_ctx.max;
558 	hwq_attr.stride = BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE;
559 	hwq_attr.type = HWQ_TYPE_CTX;
560 	hwq = &hctx->qp_ctx.hwq;
561 	rc = bnxt_qplib_alloc_init_hwq(hwq, &hwq_attr);
562 	if (rc)
563 		goto fail;
564 
565 	/* MRW Tables */
566 	hwq_attr.depth = hctx->mrw_ctx.max;
567 	hwq_attr.stride = BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE;
568 	hwq = &hctx->mrw_ctx.hwq;
569 	rc = bnxt_qplib_alloc_init_hwq(hwq, &hwq_attr);
570 	if (rc)
571 		goto fail;
572 
573 	/* SRQ Tables */
574 	hwq_attr.depth = hctx->srq_ctx.max;
575 	hwq_attr.stride = BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE;
576 	hwq = &hctx->srq_ctx.hwq;
577 	rc = bnxt_qplib_alloc_init_hwq(hwq, &hwq_attr);
578 	if (rc)
579 		goto fail;
580 
581 	/* CQ Tables */
582 	hwq_attr.depth = hctx->cq_ctx.max;
583 	hwq_attr.stride = BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE;
584 	hwq = &hctx->cq_ctx.hwq;
585 	rc = bnxt_qplib_alloc_init_hwq(hwq, &hwq_attr);
586 	if (rc)
587 		goto fail;
588 
589 	/* TQM Buffer */
590 	rc = bnxt_qplib_setup_tqm_rings(res, hctx);
591 	if (rc)
592 		goto fail;
593 	/* TIM Buffer */
594 	hwq_attr.depth = hctx->qp_ctx.max * 16;
595 	hwq_attr.stride = 1;
596 	hwq = &hctx->tim_ctx.hwq;
597 	rc = bnxt_qplib_alloc_init_hwq(hwq, &hwq_attr);
598 	if (rc)
599 		goto fail;
600 
601 	return 0;
602 fail:
603 	bnxt_qplib_free_hwctx(res);
604 	return rc;
605 }
606 
607 /* GUID */
608 void bnxt_qplib_get_guid(const u8 *dev_addr, u8 *guid)
609 {
610 	u8 mac[ETH_ALEN];
611 
612 	/* MAC-48 to EUI-64 mapping */
613 	memcpy(mac, dev_addr, ETH_ALEN);
614 	guid[0] = mac[0] ^ 2;
615 	guid[1] = mac[1];
616 	guid[2] = mac[2];
617 	guid[3] = 0xff;
618 	guid[4] = 0xfe;
619 	guid[5] = mac[3];
620 	guid[6] = mac[4];
621 	guid[7] = mac[5];
622 }
623 
624 static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res)
625 {
626 	struct bnxt_qplib_sgid_tbl *sgid_tbl;
627 
628 	sgid_tbl = &res->sgid_tbl;
629 
630 	if (sgid_tbl->tbl) {
631 		kfree(sgid_tbl->tbl);
632 		sgid_tbl->tbl = NULL;
633 		kfree(sgid_tbl->hw_id);
634 		sgid_tbl->hw_id = NULL;
635 		kfree(sgid_tbl->ctx);
636 		sgid_tbl->ctx = NULL;
637 		kfree(sgid_tbl->vlan);
638 		sgid_tbl->vlan = NULL;
639 	} else {
640 		dev_dbg(&res->pdev->dev, "QPLIB: SGID tbl not present");
641 	}
642 	sgid_tbl->max = 0;
643 	sgid_tbl->active = 0;
644 }
645 
646 static void bnxt_qplib_free_reftbls(struct bnxt_qplib_res *res)
647 {
648 	struct bnxt_qplib_reftbl *tbl;
649 
650 	tbl = &res->reftbl.srqref;
651 	vfree(tbl->rec);
652 
653 	tbl = &res->reftbl.cqref;
654 	vfree(tbl->rec);
655 
656 	tbl = &res->reftbl.qpref;
657 	vfree(tbl->rec);
658 }
659 
660 static int bnxt_qplib_alloc_reftbl(struct bnxt_qplib_reftbl *tbl, u32 max)
661 {
662 	tbl->max = max;
663 	tbl->rec = vzalloc(sizeof(*tbl->rec) * max);
664 	if (!tbl->rec)
665 		return -ENOMEM;
666 	spin_lock_init(&tbl->lock);
667 	return 0;
668 }
669 
670 static int bnxt_qplib_alloc_reftbls(struct bnxt_qplib_res *res,
671 				    struct bnxt_qplib_dev_attr *dattr)
672 {
673 	u32 max_cq = BNXT_QPLIB_MAX_CQ_COUNT;
674 	struct bnxt_qplib_reftbl *tbl;
675 	u32 res_cnt;
676 	int rc;
677 
678 	/*
679 	 * Allocate one extra entry to hold QP1 info.
680 	 * Store QP1 info at the last entry of the table.
681 	 * Decrement tbl->max by one so that the modulo
682 	 * operation that derives the qp table index from the qp id
683 	 * returns a value between 0 and max_qp - 1.
684 	 */
685 	res_cnt = max_t(u32, BNXT_QPLIB_MAX_QPC_COUNT + 1, dattr->max_qp);
686 	tbl = &res->reftbl.qpref;
687 	rc = bnxt_qplib_alloc_reftbl(tbl, res_cnt);
688 	if (rc)
689 		goto fail;
690 	tbl->max--;
691 
692 	if (_is_chip_gen_p5_p7(res->cctx))
693 		max_cq = BNXT_QPLIB_MAX_CQ_COUNT_P5;
694 	res_cnt = max_t(u32, max_cq, dattr->max_cq);
695 	tbl = &res->reftbl.cqref;
696 	rc = bnxt_qplib_alloc_reftbl(tbl, res_cnt);
697 	if (rc)
698 		goto fail;
699 
700 	res_cnt = max_t(u32, BNXT_QPLIB_MAX_SRQC_COUNT, dattr->max_cq);
701 	tbl = &res->reftbl.srqref;
702 	rc = bnxt_qplib_alloc_reftbl(tbl, res_cnt);
703 	if (rc)
704 		goto fail;
705 
706 	return 0;
707 fail:
708 	return rc;
709 }
710 
711 static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res, u16 max)
712 {
713 	struct bnxt_qplib_sgid_tbl *sgid_tbl;
714 
715 	sgid_tbl = &res->sgid_tbl;
716 
717 	sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL);
718 	if (!sgid_tbl->tbl)
719 		return -ENOMEM;
720 
721 	sgid_tbl->hw_id = kcalloc(max, sizeof(u32), GFP_KERNEL);
722 	if (!sgid_tbl->hw_id)
723 		goto free_tbl;
724 
725 	sgid_tbl->ctx = kcalloc(max, sizeof(void *), GFP_KERNEL);
726 	if (!sgid_tbl->ctx)
727 		goto free_hw_id;
728 
729 	sgid_tbl->vlan = kcalloc(max, sizeof(u8), GFP_KERNEL);
730 	if (!sgid_tbl->vlan)
731 		goto free_ctx;
732 
733 	sgid_tbl->max = max;
734 	return 0;
735 free_ctx:
736 	kfree(sgid_tbl->ctx);
737 free_hw_id:
738 	kfree(sgid_tbl->hw_id);
739 free_tbl:
740 	kfree(sgid_tbl->tbl);
741 	return -ENOMEM;
742 };
743 
744 static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
745 					struct bnxt_qplib_sgid_tbl *sgid_tbl)
746 {
747 	int i;
748 
749 	for (i = 0; i < sgid_tbl->max; i++) {
750 		if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
751 			   sizeof(bnxt_qplib_gid_zero)))
752 			bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid,
753 					    sgid_tbl->tbl[i].vlan_id, true);
754 	}
755 	memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max);
756 	memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
757 	memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
758 	sgid_tbl->active = 0;
759 }
760 
761 static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
762 				     struct ifnet *netdev)
763 {
764 	u32 i;
765 
766 	for (i = 0; i < sgid_tbl->max; i++)
767 		sgid_tbl->tbl[i].vlan_id = 0xffff;
768 	memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
769 }
770 
771 /* PDs */
772 int bnxt_qplib_alloc_pd(struct bnxt_qplib_res *res, struct bnxt_qplib_pd *pd)
773 {
774 	u32 bit_num;
775 	int rc = 0;
776 	struct bnxt_qplib_pd_tbl *pdt = &res->pd_tbl;
777 
778 	mutex_lock(&res->pd_tbl_lock);
779 	bit_num = find_first_bit(pdt->tbl, pdt->max);
780 	if (bit_num == pdt->max - 1) { /* Last bit is reserved */
781 		rc = -ENOMEM;
782 		goto fail;
783 	}
784 
785 	/* Found unused PD */
786 	clear_bit(bit_num, pdt->tbl);
787 	pd->id = bit_num;
788 fail:
789 	mutex_unlock(&res->pd_tbl_lock);
790 	return rc;
791 }
792 
793 int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
794 			  struct bnxt_qplib_pd_tbl *pdt,
795 			  struct bnxt_qplib_pd *pd)
796 {
797 	mutex_lock(&res->pd_tbl_lock);
798 	if (test_and_set_bit(pd->id, pdt->tbl)) {
799 		dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d\n",
800 			 pd->id);
801 		mutex_unlock(&res->pd_tbl_lock);
802 		return -EINVAL;
803 	}
804 	/* Reset to reserved pdid. */
805 	pd->id = pdt->max - 1;
806 
807 	mutex_unlock(&res->pd_tbl_lock);
808 	return 0;
809 }
810 
811 static void bnxt_qplib_free_pd_tbl(struct bnxt_qplib_pd_tbl *pdt)
812 {
813 	if (pdt->tbl) {
814 		kfree(pdt->tbl);
815 		pdt->tbl = NULL;
816 	}
817 	pdt->max = 0;
818 }
819 
820 static int bnxt_qplib_alloc_pd_tbl(struct bnxt_qplib_res *res, u32 max)
821 {
822 	struct bnxt_qplib_pd_tbl *pdt;
823 	u32 bytes;
824 
825 	pdt = &res->pd_tbl;
826 
827 	max++; /* One extra for reserved pdid. */
828 	bytes = DIV_ROUND_UP(max, 8);
829 
830 	if (!bytes)
831 		bytes = 1;
832 	pdt->tbl = kmalloc(bytes, GFP_KERNEL);
833 	if (!pdt->tbl) {
834 		dev_err(&res->pdev->dev,
835 			"QPLIB: PD tbl allocation failed for size = %d\n", bytes);
836 		return -ENOMEM;
837 	}
838 	pdt->max = max;
839 	memset((u8 *)pdt->tbl, 0xFF, bytes);
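	/* A set bit marks a free PD id; alloc clears it, dealloc sets it back. */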
840 	mutex_init(&res->pd_tbl_lock);
841 
842 	return 0;
843 }
844 
845 /* DPIs */
846 int bnxt_qplib_alloc_dpi(struct bnxt_qplib_res	*res,
847 			 struct bnxt_qplib_dpi	*dpi,
848 			 void *app, u8 type)
849 {
850 	struct bnxt_qplib_dpi_tbl *dpit = &res->dpi_tbl;
851 	struct bnxt_qplib_reg_desc *reg;
852 	u32 bit_num;
853 	u64 umaddr;
854 	int rc = 0;
855 
856 	reg = &dpit->wcreg;
857 	mutex_lock(&res->dpi_tbl_lock);
858 	if (type == BNXT_QPLIB_DPI_TYPE_WC && _is_chip_p7(res->cctx) &&
859 	    !dpit->avail_ppp) {
860 		rc = -ENOMEM;
861 		goto fail;
862 	}
863 	bit_num = find_first_bit(dpit->tbl, dpit->max);
864 	if (bit_num == dpit->max) {
865 		rc = -ENOMEM;
866 		goto fail;
867 	}
868 	/* Found unused DPI */
869 	clear_bit(bit_num, dpit->tbl);
870 	dpit->app_tbl[bit_num] = app;
871 	dpi->bit = bit_num;
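	/* DPI index is computed relative to the ucreg doorbell region base. */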
872 	dpi->dpi = bit_num + (reg->offset - dpit->ucreg.offset) / PAGE_SIZE;
873 
874 	umaddr = reg->bar_base + reg->offset + bit_num * PAGE_SIZE;
875 	dpi->umdbr = umaddr;
876 	switch (type) {
877 	case BNXT_QPLIB_DPI_TYPE_KERNEL:
878 		/* privileged dbr was already mapped; just initialize it. */
879 		dpi->umdbr = dpit->ucreg.bar_base +
880 			     dpit->ucreg.offset + bit_num * PAGE_SIZE;
881 		dpi->dbr = dpit->priv_db;
882 		dpi->dpi = dpi->bit;
883 		break;
884 	case BNXT_QPLIB_DPI_TYPE_WC:
885 		dpi->dbr = ioremap_wc(umaddr, PAGE_SIZE);
886 		if (_is_chip_p7(res->cctx) && dpi->dbr)
887 			dpit->avail_ppp--;
888 		break;
889 	default:
890 		dpi->dbr = ioremap(umaddr, PAGE_SIZE);
891 	}
892 	if (!dpi->dbr) {
893 		dev_err(&res->pdev->dev, "QPLIB: DB remap failed, type = %d\n",
894 			type);
895 		rc = -ENOMEM;
896 	}
897 	dpi->type = type;
898 fail:
899 	mutex_unlock(&res->dpi_tbl_lock);
900 	return rc;
901 }
902 
903 int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
904 			   struct bnxt_qplib_dpi *dpi)
905 {
906 	struct bnxt_qplib_dpi_tbl *dpit = &res->dpi_tbl;
907 	int rc = 0;
908 
909 	mutex_lock(&res->dpi_tbl_lock);
910 	if (dpi->bit >= dpit->max) {
911 		dev_warn(&res->pdev->dev,
912 			 "Invalid DPI? dpi = %d, bit = %d\n",
913 			 dpi->dpi, dpi->bit);
914 		rc = -EINVAL;
915 		goto fail;
916 	}
917 
918 	if (dpi->dpi && dpi->type != BNXT_QPLIB_DPI_TYPE_KERNEL) {
919 		if (dpi->type == BNXT_QPLIB_DPI_TYPE_WC &&
920 		    _is_chip_p7(res->cctx) && dpi->dbr)
921 			dpit->avail_ppp++;
922 		pci_iounmap(res->pdev, dpi->dbr);
923 	}
924 
925 	if (test_and_set_bit(dpi->bit, dpit->tbl)) {
926 		dev_warn(&res->pdev->dev,
927 			 "Freeing an unused DPI? dpi = %d, bit = %d\n",
928 			 dpi->dpi, dpi->bit);
929 		rc = -EINVAL;
930 		goto fail;
931 	}
932 	if (dpit->app_tbl)
933 		dpit->app_tbl[dpi->bit] = NULL;
934 	memset(dpi, 0, sizeof(*dpi));
935 fail:
936 	mutex_unlock(&res->dpi_tbl_lock);
937 	return rc;
938 }
939 
940 static void bnxt_qplib_free_dpi_tbl(struct bnxt_qplib_dpi_tbl *dpit)
941 {
942 	kfree(dpit->tbl);
943 	kfree(dpit->app_tbl);
944 	dpit->tbl = NULL;
945 	dpit->app_tbl = NULL;
946 	dpit->max = 0;
947 }
948 
949 static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
950 				    struct bnxt_qplib_dev_attr *dev_attr,
951 				    u8 pppp_factor)
952 {
953 	struct bnxt_qplib_dpi_tbl *dpit;
954 	struct bnxt_qplib_reg_desc *reg;
955 	unsigned long bar_len;
956 	u32 dbr_offset;
957 	u32 bytes;
958 
959 	dpit = &res->dpi_tbl;
960 	reg = &dpit->wcreg;
961 
962 	if (!_is_chip_gen_p5_p7(res->cctx)) {
963 		/* Offset should come from the L2 driver */
964 		dbr_offset = dev_attr->l2_db_size;
965 		dpit->ucreg.offset = dbr_offset;
966 		dpit->wcreg.offset = dbr_offset;
967 	}
968 
969 	bar_len = pci_resource_len(res->pdev, reg->bar_id);
970 	dpit->max = (bar_len - reg->offset) / PAGE_SIZE;
971 	if (dev_attr->max_dpi)
972 		dpit->max = min_t(u32, dpit->max, dev_attr->max_dpi);
973 
974 	dpit->app_tbl = kzalloc(dpit->max * sizeof(void*), GFP_KERNEL);
975 	if (!dpit->app_tbl) {
976 		dev_err(&res->pdev->dev,
977 			"QPLIB: DPI app tbl allocation failed");
978 		return -ENOMEM;
979 	}
980 
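	/* Free-DPI bitmap: one bit per doorbell page. */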
981 	bytes = dpit->max >> 3;
982 	if (!bytes)
983 		bytes = 1;
984 
985 	dpit->tbl = kmalloc(bytes, GFP_KERNEL);
986 	if (!dpit->tbl) {
987 		kfree(dpit->app_tbl);
988 		dev_err(&res->pdev->dev,
989 			"QPLIB: DPI tbl allocation failed for size = %d\n",
990 			bytes);
991 		return -ENOMEM;
992 	}
993 
994 	memset((u8 *)dpit->tbl, 0xFF, bytes);
995 	/*
996 	 * On SR2, 2nd doorbell page of each function
997 	 * is reserved for L2 PPP. Now that the tbl is
998 	 * initialized, mark it as unavailable. By default
999 	 * RoCE can make use of the 512 extended pages for
1000 	 * PPP.
1001 	 */
1002 	if (_is_chip_p7(res->cctx)) {
1003 		clear_bit(1, dpit->tbl);
1004 		if (pppp_factor)
1005 			dpit->avail_ppp =
1006 				BNXT_QPLIB_MAX_EXTENDED_PPP_PAGES / pppp_factor;
1007 	}
1008 	mutex_init(&res->dpi_tbl_lock);
1009 	dpit->priv_db = dpit->ucreg.bar_reg + dpit->ucreg.offset;
1010 
1011 	return 0;
1012 }
1013 
1014 /* Stats */
1015 void bnxt_qplib_free_stat_mem(struct bnxt_qplib_res *res,
1016 			      struct bnxt_qplib_stats *stats)
1017 {
1018 	struct pci_dev *pdev;
1019 
1020 	pdev = res->pdev;
1021 	if (stats->dma)
1022 		dma_free_coherent(&pdev->dev, stats->size,
1023 				  stats->dma, stats->dma_map);
1024 
1025 	memset(stats, 0, sizeof(*stats));
1026 	stats->fw_id = -1;
1027 }
1028 
1029 int bnxt_qplib_alloc_stat_mem(struct pci_dev *pdev,
1030 			      struct bnxt_qplib_chip_ctx *cctx,
1031 			      struct bnxt_qplib_stats *stats)
1032 {
1033 	cctx->hw_stats_size = 168;
1034 
1035 	memset(stats, 0, sizeof(*stats));
1036 	stats->fw_id = -1;
1037 	stats->size = cctx->hw_stats_size;
1038 	stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
1039 					&stats->dma_map, GFP_KERNEL);
1040 	if (!stats->dma) {
1041 		dev_err(&pdev->dev, "QPLIB: Stats DMA allocation failed");
1042 		return -ENOMEM;
1043 	}
1044 	return 0;
1045 }
1046 
1047 /* Resource */
1048 int bnxt_qplib_stop_res(struct bnxt_qplib_res *res)
1049 {
1050 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
1051 	struct creq_stop_func_resp resp = {};
1052 	struct bnxt_qplib_cmdqmsg msg = {};
1053 	struct cmdq_stop_func req = {};
1054 	int rc;
1055 
1056 	bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_STOP_FUNC,
1057 				 sizeof(req));
1058 	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
1059 				sizeof(resp), 0);
1060 	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
1061 	return rc;
1062 }
1063 
1064 void bnxt_qplib_clear_tbls(struct bnxt_qplib_res *res)
1065 {
1066 	bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl);
1067 }
1068 
1069 int bnxt_qplib_init_tbls(struct bnxt_qplib_res *res)
1070 {
1071 	bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev);
1072 
1073 	return 0;
1074 }
1075 
1076 void bnxt_qplib_free_tbls(struct bnxt_qplib_res *res)
1077 {
1078 	bnxt_qplib_free_sgid_tbl(res);
1079 	bnxt_qplib_free_pd_tbl(&res->pd_tbl);
1080 	bnxt_qplib_free_dpi_tbl(&res->dpi_tbl);
1081 	bnxt_qplib_free_reftbls(res);
1082 }
1083 
1084 int bnxt_qplib_alloc_tbls(struct bnxt_qplib_res *res, u8 pppp_factor)
1085 {
1086 	struct bnxt_qplib_dev_attr *dev_attr;
1087 	int rc = 0;
1088 
1089 	dev_attr = res->dattr;
1090 
1091 	rc = bnxt_qplib_alloc_reftbls(res, dev_attr);
1092 	if (rc)
1093 		goto fail;
1094 
1095 	rc = bnxt_qplib_alloc_sgid_tbl(res, dev_attr->max_sgid);
1096 	if (rc)
1097 		goto fail;
1098 
1099 	rc = bnxt_qplib_alloc_pd_tbl(res, dev_attr->max_pd);
1100 	if (rc)
1101 		goto fail;
1102 
1103 	rc = bnxt_qplib_alloc_dpi_tbl(res, dev_attr, pppp_factor);
1104 	if (rc)
1105 		goto fail;
1106 
1107 	return 0;
1108 fail:
1109 	bnxt_qplib_free_tbls(res);
1110 	return rc;
1111 }
1112 
1113 void bnxt_qplib_unmap_db_bar(struct bnxt_qplib_res *res)
1114 {
1115 	struct bnxt_qplib_reg_desc *reg;
1116 
1117 	reg = &res->dpi_tbl.ucreg;
1118 	if (reg->bar_reg)
1119 		pci_iounmap(res->pdev, reg->bar_reg);
1120 	reg->bar_reg = NULL;
1121 	reg->bar_base = 0;
1122 	reg->len = 0;
1123 	reg->bar_id = 0; /* Zero? or ff */
1124 }
1125 
1126 int bnxt_qplib_map_db_bar(struct bnxt_qplib_res *res)
1127 {
1128 	struct bnxt_qplib_reg_desc *ucreg;
1129 	struct bnxt_qplib_reg_desc *wcreg;
1130 
1131 	if (!res || !res->pdev)
1132 		return -1;
1133 	wcreg = &res->dpi_tbl.wcreg;
1134 	wcreg->bar_id = RCFW_DBR_PCI_BAR_REGION;
1135 	wcreg->bar_base = pci_resource_start(res->pdev, wcreg->bar_id);
1136 	/* No need to set the wcreg->len here */
1137 
1138 	ucreg = &res->dpi_tbl.ucreg;
1139 	ucreg->bar_id = RCFW_DBR_PCI_BAR_REGION;
1140 	ucreg->bar_base = pci_resource_start(res->pdev, ucreg->bar_id);
1141 
1142 	ucreg->offset = 65536;
1143 
1144 	ucreg->len = ucreg->offset + PAGE_SIZE;
1145 
1146 	if (!ucreg->len || ((ucreg->len & (PAGE_SIZE - 1)) != 0)) {
1147 		dev_err(&res->pdev->dev, "QPLIB: invalid dbr length %d\n",
1148 			(int)ucreg->len);
1149 		return -EINVAL;
1150 	}
1151 	ucreg->bar_reg = ioremap(ucreg->bar_base, ucreg->len);
1152 	if (!ucreg->bar_reg) {
1153 		dev_err(&res->pdev->dev, "privileged dpi map failed!\n");
1154 		return -ENOMEM;
1155 	}
1156 
1157 	return 0;
1158 }
1159 
1160 /**
1161  * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
1162  * @dev: the PCI device
1163  * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
1164  *	PCI_EXP_DEVCAP2_ATOMIC_COMP32
1165  *	PCI_EXP_DEVCAP2_ATOMIC_COMP64
1166  *	PCI_EXP_DEVCAP2_ATOMIC_COMP128
1167  *
1168  * Return 0 if all upstream bridges support AtomicOp routing, egress
1169  * blocking is disabled on all upstream ports, and the root port supports
1170  * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
1171  * AtomicOp completion), or negative otherwise.
1172  */
1173 int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
1174 {
1175 	struct pci_bus *bus = dev->bus;
1176 	struct pci_dev *bridge;
1177 	u32 cap;
1178 
1179 	if (!pci_is_pcie(dev))
1180 		return -EINVAL;
1181 
1182 	/*
1183 	 * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
1184 	 * AtomicOp requesters.  For now, we only support endpoints as
1185 	 * requesters and root ports as completers.  No endpoints as
1186 	 * completers, and no peer-to-peer.
1187 	 */
1188 
1189 	switch (pci_pcie_type(dev)) {
1190 	case PCI_EXP_TYPE_ENDPOINT:
1191 	case PCI_EXP_TYPE_LEG_END:
1192 		break;
1193 	default:
1194 		return -EINVAL;
1195 	}
1196 
1197 	bridge = bus->self;
1198 
1199 	pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
1200 
1201 	switch (pci_pcie_type(bridge)) {
1202 	case PCI_EXP_TYPE_DOWNSTREAM:
1203 		if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
1204 			return -EINVAL;
1205 		break;
1206 
1207 	/* Ensure root port supports all the sizes we care about */
1208 	case PCI_EXP_TYPE_ROOT_PORT:
1209 		if ((cap & cap_mask) != cap_mask)
1210 			return -EINVAL;
1211 		break;
1212 	}
1213 	return 0;
1214 }
1215 
1216 int bnxt_qplib_enable_atomic_ops_to_root(struct pci_dev *dev)
1217 {
1218 	u16 ctl2;
1219 
1220 	if (pci_enable_atomic_ops_to_root(dev, PCI_EXP_DEVCAP2_ATOMIC_COMP32) &&
1221 	   pci_enable_atomic_ops_to_root(dev, PCI_EXP_DEVCAP2_ATOMIC_COMP64))
1222 		return -EOPNOTSUPP;
1223 
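	/*
	 * Succeed (return 0) only if the AtomicOp requester enable bit is
	 * already set in Device Control 2.
	 */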
1224 	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctl2);
1225 	return !(ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
1226 }
1227