/*
 * Copyright (c) 2015-2024, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: QPLib resource manager (header)
 */

#ifndef __BNXT_QPLIB_RES_H__
#define __BNXT_QPLIB_RES_H__

#include "hsi_struct_def.h"

extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero;

#define CHIP_NUM_57508		0x1750
#define CHIP_NUM_57504		0x1751
#define CHIP_NUM_57502		0x1752
#define CHIP_NUM_58818		0xd818
#define CHIP_NUM_57608		0x1760

#define BNXT_QPLIB_MAX_QPC_COUNT	(64 * 1024)
#define BNXT_QPLIB_MAX_SRQC_COUNT	(64 * 1024)
#define BNXT_QPLIB_MAX_CQ_COUNT		(64 * 1024)
#define BNXT_QPLIB_MAX_CQ_COUNT_P5	(128 * 1024)

#define BNXT_QPLIB_DBR_VALID (0x1UL << 26)
#define BNXT_QPLIB_DBR_EPOCH_SHIFT   24
#define BNXT_QPLIB_DBR_TOGGLE_SHIFT  25

#define BNXT_QPLIB_DBR_PF_DB_OFFSET	0x10000
#define BNXT_QPLIB_DBR_VF_DB_OFFSET	0x4000

#define BNXT_QPLIB_DBR_KEY_INVALID	-1

/* chip gen type */
#define BNXT_RE_DEFAULT 0xf

enum bnxt_qplib_wqe_mode {
	BNXT_QPLIB_WQE_MODE_STATIC	= 0x00,
	BNXT_QPLIB_WQE_MODE_VARIABLE	= 0x01,
	BNXT_QPLIB_WQE_MODE_INVALID	= 0x02
};

#define BNXT_RE_PUSH_MODE_NONE	0
#define BNXT_RE_PUSH_MODE_WCB	1
#define BNXT_RE_PUSH_MODE_PPP	2
#define BNXT_RE_PUSH_ENABLED(mode) ((mode) == BNXT_RE_PUSH_MODE_WCB ||\
				    (mode) == BNXT_RE_PUSH_MODE_PPP)
#define BNXT_RE_PPP_ENABLED(cctx) ((cctx)->modes.db_push_mode ==\
				   BNXT_RE_PUSH_MODE_PPP)
#define	PCI_EXP_DEVCAP2_ATOMIC_ROUTE	0x00000040 /* Atomic Op routing */
#define	PCI_EXP_DEVCAP2_ATOMIC_COMP32	0x00000080 /* 32b AtomicOp completion */
#define	PCI_EXP_DEVCAP2_ATOMIC_COMP64	0x00000100 /* 64b AtomicOp completion */
#define	PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK 0x0080 /* Block atomic egress */
#define PCI_EXP_DEVCTL2_ATOMIC_REQ	0x0040	/* Set Atomic requests */

int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask);

struct bnxt_qplib_drv_modes {
	u8	wqe_mode;
	u8	te_bypass;
	u8	db_push;
	/* To control advanced cc params display in configfs */
	u8	cc_pr_mode;
	/* Other modes to follow here e.g. GSI QP mode */
	u8	dbr_pacing;
	u8	dbr_pacing_ext;
	u8	dbr_drop_recov;
	u8	dbr_primary_pf;
	u8	dbr_pacing_v0;
};

struct bnxt_qplib_chip_ctx {
	u16	chip_num;
	u8	chip_rev;
	u8	chip_metal;
	u64	hwrm_intf_ver;
	struct bnxt_qplib_drv_modes	modes;
	u32	dbr_stat_db_fifo;
	u32	dbr_aeq_arm_reg;
	u32	dbr_throttling_reg;
	u16	hw_stats_size;
	u16	hwrm_cmd_max_timeout;
};

static inline bool _is_chip_num_p7(u16 chip_num)
{
	return (chip_num == CHIP_NUM_58818 ||
		chip_num == CHIP_NUM_57608);
}

static inline bool _is_chip_p7(struct bnxt_qplib_chip_ctx *cctx)
{
	return _is_chip_num_p7(cctx->chip_num);
}

/* SR2 is Gen P5 */
static inline bool _is_chip_gen_p5(struct bnxt_qplib_chip_ctx *cctx)
{
	return (cctx->chip_num == CHIP_NUM_57508 ||
		cctx->chip_num == CHIP_NUM_57504 ||
		cctx->chip_num == CHIP_NUM_57502);
}

static inline bool _is_chip_gen_p5_p7(struct bnxt_qplib_chip_ctx *cctx)
{
	return (_is_chip_gen_p5(cctx) || _is_chip_p7(cctx));
}

static inline bool _is_wqe_mode_variable(struct bnxt_qplib_chip_ctx *cctx)
{
	return cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE;
}

struct bnxt_qplib_db_pacing_data {
	u32 do_pacing;
	u32 pacing_th;
	u32 dev_err_state;
	u32 alarm_th;
	u32 grc_reg_offset;
	u32 fifo_max_depth;
	u32 fifo_room_mask;
	u8  fifo_room_shift;
};

static inline u8 bnxt_qplib_dbr_pacing_en(struct bnxt_qplib_chip_ctx *cctx)
{
	return cctx->modes.dbr_pacing;
}

static inline u8 bnxt_qplib_dbr_pacing_ext_en(struct bnxt_qplib_chip_ctx *cctx)
{
	return cctx->modes.dbr_pacing_ext;
}

static inline u8 bnxt_qplib_dbr_pacing_is_primary_pf(struct bnxt_qplib_chip_ctx *cctx)
{
	return cctx->modes.dbr_primary_pf;
}

static inline void bnxt_qplib_dbr_pacing_set_primary_pf
		(struct bnxt_qplib_chip_ctx *cctx, u8 val)
{
	cctx->modes.dbr_primary_pf = val;
}

/* Defines for handling the HWRM version check */
#define HWRM_VERSION_DEV_ATTR_MAX_DPI	0x1000A0000000D
#define HWRM_VERSION_ROCE_STATS_FN_ID	0x1000A00000045

#define PTR_CNT_PER_PG		(PAGE_SIZE / sizeof(void *))
#define PTR_MAX_IDX_PER_PG	(PTR_CNT_PER_PG - 1)
#define PTR_PG(x)		(((x) & ~PTR_MAX_IDX_PER_PG) / PTR_CNT_PER_PG)
#define PTR_IDX(x)		((x) & PTR_MAX_IDX_PER_PG)

#define HWQ_CMP(idx, hwq)	((idx) & ((hwq)->max_elements - 1))
#define HWQ_FREE_SLOTS(hwq)	(hwq->max_elements - \
				((HWQ_CMP(hwq->prod, hwq)\
				- HWQ_CMP(hwq->cons, hwq))\
				& (hwq->max_elements - 1)))
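
/*
 * Usage sketch (illustrative only, with assumed values; not part of the
 * driver): the HWQ_* helpers treat the queue as a power-of-two ring, so
 * the used count is the masked distance between producer and consumer
 * and the free count is what remains.  For example, with
 * max_elements = 1024, prod = 1020 and cons = 4:
 *
 *	used = (HWQ_CMP(1020, hwq) - HWQ_CMP(4, hwq)) & 1023 = 1016
 *	free = HWQ_FREE_SLOTS(hwq) = 1024 - 1016 = 8
 *
 * so a caller would check HWQ_FREE_SLOTS(hwq) >= nslots before posting.
 */
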
enum bnxt_qplib_hwq_type {
	HWQ_TYPE_CTX,
	HWQ_TYPE_QUEUE,
	HWQ_TYPE_L2_CMPL,
	HWQ_TYPE_MR
};

#define MAX_PBL_LVL_0_PGS		1
#define MAX_PBL_LVL_1_PGS		512
#define MAX_PBL_LVL_1_PGS_SHIFT		9
#define MAX_PDL_LVL_SHIFT		9

enum bnxt_qplib_pbl_lvl {
	PBL_LVL_0,
	PBL_LVL_1,
	PBL_LVL_2,
	PBL_LVL_MAX
};

#define ROCE_PG_SIZE_4K		(4 * 1024)
#define ROCE_PG_SIZE_8K		(8 * 1024)
#define ROCE_PG_SIZE_64K	(64 * 1024)
#define ROCE_PG_SIZE_2M		(2 * 1024 * 1024)
#define ROCE_PG_SIZE_8M		(8 * 1024 * 1024)
#define ROCE_PG_SIZE_1G		(1024 * 1024 * 1024)
enum bnxt_qplib_hwrm_pg_size {
	BNXT_QPLIB_HWRM_PG_SIZE_4K	= 0,
	BNXT_QPLIB_HWRM_PG_SIZE_8K	= 1,
	BNXT_QPLIB_HWRM_PG_SIZE_64K	= 2,
	BNXT_QPLIB_HWRM_PG_SIZE_2M	= 3,
	BNXT_QPLIB_HWRM_PG_SIZE_8M	= 4,
	BNXT_QPLIB_HWRM_PG_SIZE_1G	= 5,
};

struct bnxt_qplib_reg_desc {
	u8		bar_id;
	resource_size_t	bar_base;
	unsigned long	offset;
	void __iomem	*bar_reg;
	size_t		len;
};

struct bnxt_qplib_pbl {
	u32				pg_count;
	u32				pg_size;
	void				**pg_arr;
	dma_addr_t			*pg_map_arr;
};

struct bnxt_qplib_sg_info {
	struct scatterlist		*sghead;
	u32				nmap;
	u32				npages;
	u32				pgshft;
	u32				pgsize;
	bool				nopte;
};

struct bnxt_qplib_hwq_attr {
	struct bnxt_qplib_res		*res;
	struct bnxt_qplib_sg_info	*sginfo;
	enum bnxt_qplib_hwq_type	type;
	u32				depth;
	u32				stride;
	u32				aux_stride;
	u32				aux_depth;
};

struct bnxt_qplib_hwq {
	struct pci_dev			*pdev;
	spinlock_t			lock;
	struct bnxt_qplib_pbl		pbl[PBL_LVL_MAX];
	enum bnxt_qplib_pbl_lvl		level;		/* 0, 1, or 2 */
	void				**pbl_ptr;	/* ptr for easy access
							   to the PBL entries */
	dma_addr_t			*pbl_dma_ptr;	/* ptr for easy access
							   to the dma_addr */
	u32				max_elements;
	u32				depth;	/* original requested depth */
	u16				element_size;	/* Size of each entry */
	u16				qe_ppg;		/* queue entry per page */

	u32				prod;		/* raw */
	u32				cons;		/* raw */
	u8				cp_bit;
	u8				is_user;
	u64				*pad_pg;
	u32				pad_stride;
	u32				pad_pgofft;
};

struct bnxt_qplib_db_info {
	void __iomem		*db;
	void __iomem		*priv_db;
	struct bnxt_qplib_hwq	*hwq;
	struct bnxt_qplib_res	*res;
	u32			xid;
	u32			max_slot;
	u32			flags;
	u8			toggle;
	spinlock_t		lock;
	u64			shadow_key;
	u64			shadow_key_arm_ena;
	u32			seed; /* For DB pacing */
};

enum bnxt_qplib_db_info_flags_mask {
	BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT	= 0x0UL,
	BNXT_QPLIB_FLAG_EPOCH_PROD_SHIFT	= 0x1UL,
	BNXT_QPLIB_FLAG_EPOCH_CONS_MASK		= 0x1UL,
	BNXT_QPLIB_FLAG_EPOCH_PROD_MASK		= 0x2UL,
};

enum bnxt_qplib_db_epoch_flag_shift {
	BNXT_QPLIB_DB_EPOCH_CONS_SHIFT	= BNXT_QPLIB_DBR_EPOCH_SHIFT,
	BNXT_QPLIB_DB_EPOCH_PROD_SHIFT	= (BNXT_QPLIB_DBR_EPOCH_SHIFT - 1)
};

/* Tables */
struct bnxt_qplib_pd_tbl {
	unsigned long			*tbl;
	u32				max;
};

struct bnxt_qplib_sgid_tbl {
	struct bnxt_qplib_gid_info	*tbl;
	u16				*hw_id;
	u16				max;
	u16				active;
	void				*ctx;
	bool				*vlan;
};

enum {
	BNXT_QPLIB_DPI_TYPE_KERNEL	= 0,
	BNXT_QPLIB_DPI_TYPE_UC		= 1,
	BNXT_QPLIB_DPI_TYPE_WC		= 2
};

struct bnxt_qplib_dpi {
	u32				dpi;
	u32				bit;
	void __iomem			*dbr;
	u64				umdbr;
	u8				type;
};

#define BNXT_QPLIB_MAX_EXTENDED_PPP_PAGES	512
struct bnxt_qplib_dpi_tbl {
	void				**app_tbl;
	unsigned long			*tbl;
	u16				max;
	u16				avail_ppp;
	struct bnxt_qplib_reg_desc	ucreg; /* Hold entire DB bar. */
	struct bnxt_qplib_reg_desc	wcreg;
	void __iomem			*priv_db;
};

struct bnxt_qplib_stats {
	dma_addr_t			dma_map;
	void				*dma;
	u32				size;
	u32				fw_id;
};

struct bnxt_qplib_vf_res {
	u32 max_qp;
	u32 max_mrw;
	u32 max_srq;
	u32 max_cq;
	u32 max_gid;
};

#define BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE	448
#define BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE	64
#define BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE	64
#define BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE	128

#define MAX_TQM_ALLOC_REQ		48
#define MAX_TQM_ALLOC_BLK_SIZE		8
struct bnxt_qplib_tqm_ctx {
	struct bnxt_qplib_hwq		pde;
	enum bnxt_qplib_pbl_lvl		pde_level; /* Original level */
	struct bnxt_qplib_hwq		qtbl[MAX_TQM_ALLOC_REQ];
	u8				qcount[MAX_TQM_ALLOC_REQ];
};

struct bnxt_qplib_hctx {
	struct bnxt_qplib_hwq	hwq;
	u32			max;
};

struct bnxt_qplib_refrec {
	void *handle;
	u32 xid;
};

struct bnxt_qplib_reftbl {
	struct bnxt_qplib_refrec *rec;
	u32 max;
	spinlock_t lock; /* reftbl lock */
};

struct bnxt_qplib_reftbls {
	struct bnxt_qplib_reftbl qpref;
	struct bnxt_qplib_reftbl cqref;
	struct bnxt_qplib_reftbl srqref;
};

#define GET_TBL_INDEX(id, tbl) ((id) % (((tbl)->max) - 1))
static inline u32 map_qp_id_to_tbl_indx(u32 qid, struct bnxt_qplib_reftbl *tbl)
{
	return (qid == 1) ? tbl->max : GET_TBL_INDEX(qid, tbl);
}

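/*
 * Usage sketch (illustrative only, with assumed values; not part of the
 * driver): GET_TBL_INDEX() hashes an id modulo (max - 1), while QP id 1
 * is special-cased to the fixed slot tbl->max, presumably so it never
 * lands among the hashed entries.  For a table with max = 64:
 *
 *	map_qp_id_to_tbl_indx(1, tbl)   -> 64 (fixed slot for QP1)
 *	map_qp_id_to_tbl_indx(5, tbl)   -> 5 % 63 = 5
 *	map_qp_id_to_tbl_indx(130, tbl) -> 130 % 63 = 4
 */
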
/*
 * This structure includes the number of various roce resource table sizes
 * actually allocated by the driver. May be less than the maximums the firmware
 * allows if the driver imposes lower limits than the firmware.
 */
struct bnxt_qplib_ctx {
	struct bnxt_qplib_hctx		qp_ctx;
	struct bnxt_qplib_hctx		mrw_ctx;
	struct bnxt_qplib_hctx		srq_ctx;
	struct bnxt_qplib_hctx		cq_ctx;
	struct bnxt_qplib_hctx		tim_ctx;
	struct bnxt_qplib_tqm_ctx	tqm_ctx;

	struct bnxt_qplib_stats		stats;
	struct bnxt_qplib_stats		stats2;
	struct bnxt_qplib_vf_res	vf_res;
};

struct bnxt_qplib_res {
	struct pci_dev			*pdev;
	struct bnxt_qplib_chip_ctx	*cctx;
	struct bnxt_qplib_dev_attr	*dattr;
	struct bnxt_qplib_ctx		*hctx;
	struct ifnet			*netdev;
	struct bnxt_en_dev		*en_dev;

	struct bnxt_qplib_rcfw		*rcfw;

	struct bnxt_qplib_pd_tbl	pd_tbl;
	struct mutex			pd_tbl_lock;
	struct bnxt_qplib_sgid_tbl	sgid_tbl;
	struct bnxt_qplib_dpi_tbl	dpi_tbl;
	struct mutex			dpi_tbl_lock;
	struct bnxt_qplib_reftbls	reftbl;
	bool				prio;
	bool				is_vf;
	struct bnxt_qplib_db_pacing_data *pacing_data;
};

struct bnxt_qplib_query_stats_info {
	u32 function_id;
	u8 collection_id;
	bool vf_valid;
};

struct bnxt_qplib_query_qp_info {
	u32 function_id;
	u32 num_qps;
	u32 start_index;
	bool vf_valid;
};

struct bnxt_qplib_query_fn_info {
	bool vf_valid;
	u32 host;
	u32 filter;
};


#define to_bnxt_qplib(ptr, type, member)	\
	container_of(ptr, type, member)

struct bnxt_qplib_pd;
struct bnxt_qplib_dev_attr;

bool _is_chip_gen_p5(struct bnxt_qplib_chip_ctx *cctx);
bool _is_chip_gen_p5_p7(struct bnxt_qplib_chip_ctx *cctx);
bool _is_chip_a0(struct bnxt_qplib_chip_ctx *cctx);
bool _is_chip_p7(struct bnxt_qplib_chip_ctx *cctx);
bool _is_alloc_mr_unified(struct bnxt_qplib_dev_attr *dattr);
void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_hwq *hwq);
int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
			      struct bnxt_qplib_hwq_attr *hwq_attr);
void bnxt_qplib_get_guid(const u8 *dev_addr, u8 *guid);
int bnxt_qplib_alloc_pd(struct bnxt_qplib_res *res,
			struct bnxt_qplib_pd *pd);
int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_pd_tbl *pd_tbl,
			  struct bnxt_qplib_pd *pd);
int bnxt_qplib_alloc_dpi(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_dpi *dpi,
			 void *app, u8 type);
int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
			   struct bnxt_qplib_dpi *dpi);
int bnxt_qplib_stop_res(struct bnxt_qplib_res *res);
void bnxt_qplib_clear_tbls(struct bnxt_qplib_res *res);
int bnxt_qplib_init_tbls(struct bnxt_qplib_res *res);
void bnxt_qplib_free_tbls(struct bnxt_qplib_res *res);
int bnxt_qplib_alloc_tbls(struct bnxt_qplib_res *res, u8 pppp_factor);
void bnxt_qplib_free_hwctx(struct bnxt_qplib_res *res);
int bnxt_qplib_alloc_hwctx(struct bnxt_qplib_res *res);
int bnxt_qplib_alloc_stat_mem(struct pci_dev *pdev,
			      struct bnxt_qplib_chip_ctx *cctx,
			      struct bnxt_qplib_stats *stats);
void bnxt_qplib_free_stat_mem(struct bnxt_qplib_res *res,
			      struct bnxt_qplib_stats *stats);

int bnxt_qplib_map_db_bar(struct bnxt_qplib_res *res);
void bnxt_qplib_unmap_db_bar(struct bnxt_qplib_res *res);
int bnxt_qplib_enable_atomic_ops_to_root(struct pci_dev *dev);
u8 _get_chip_gen_p5_type(struct bnxt_qplib_chip_ctx *cctx);

static inline void *bnxt_qplib_get_qe(struct bnxt_qplib_hwq *hwq,
				      u32 indx, u64 *pg)
{
	u32 pg_num, pg_idx;

	pg_num = (indx / hwq->qe_ppg);
	pg_idx = (indx % hwq->qe_ppg);
	if (pg)
		*pg = (u64)&hwq->pbl_ptr[pg_num];
	return (void *)((u8 *)hwq->pbl_ptr[pg_num] + hwq->element_size * pg_idx);
}

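/*
 * Usage sketch (illustrative only, with assumed geometry; not part of the
 * driver): bnxt_qplib_get_qe() splits a flat queue index into a page
 * number and an offset within that page.  With qe_ppg = 256 entries per
 * page and element_size = 16 bytes, index 600 resolves as:
 *
 *	u64 pg;
 *	void *qe = bnxt_qplib_get_qe(hwq, 600, &pg);
 *	// pg_num = 600 / 256 = 2, pg_idx = 600 % 256 = 88
 *	// qe points 88 * 16 = 1408 bytes into the third PBL page
 */
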
static inline void bnxt_qplib_hwq_incr_prod(struct bnxt_qplib_db_info *dbinfo,
					    struct bnxt_qplib_hwq *hwq, u32 cnt)
{
	/* move prod and update toggle/epoch if wrap around */
	hwq->prod += cnt;
	if (hwq->prod >= hwq->depth) {
		hwq->prod %= hwq->depth;
		dbinfo->flags ^= 1UL << BNXT_QPLIB_FLAG_EPOCH_PROD_SHIFT;
	}
}

static inline void bnxt_qplib_hwq_incr_cons(u32 max_elements, u32 *cons,
					    u32 cnt, u32 *dbinfo_flags)
{
	/* move cons and update toggle/epoch if wrap around */
	*cons += cnt;
	if (*cons >= max_elements) {
		*cons %= max_elements;
		*dbinfo_flags ^= 1UL << BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT;
	}
}

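/*
 * Usage sketch (illustrative only, with assumed values; not part of the
 * driver): the increment helpers above flip the matching epoch flag each
 * time an index wraps, and the doorbell encoders below shift that flag
 * into the DBR epoch bit.  With depth = 256, prod = 250 and cnt = 10:
 *
 *	bnxt_qplib_hwq_incr_prod(dbinfo, hwq, 10);
 *	// prod becomes 260 % 256 = 4 and bit
 *	// BNXT_QPLIB_FLAG_EPOCH_PROD_SHIFT of dbinfo->flags is toggled
 */
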
static inline u8 _get_pte_pg_size(struct bnxt_qplib_hwq *hwq)
{
	u8 pg_size = BNXT_QPLIB_HWRM_PG_SIZE_4K;
	struct bnxt_qplib_pbl *pbl;

	pbl = &hwq->pbl[hwq->level];
	switch (pbl->pg_size) {
		case ROCE_PG_SIZE_4K: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_4K;
		break;
		case ROCE_PG_SIZE_8K: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_8K;
		break;
		case ROCE_PG_SIZE_64K: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_64K;
		break;
		case ROCE_PG_SIZE_2M: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_2M;
		break;
		case ROCE_PG_SIZE_8M: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_8M;
		break;
		case ROCE_PG_SIZE_1G: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_1G;
		break;
		default:
		break;
	}
	return pg_size;
}

static inline u64 _get_base_addr(struct bnxt_qplib_hwq *hwq)
{
	return hwq->pbl[PBL_LVL_0].pg_map_arr[0];
}

static inline u8 _get_base_pg_size(struct bnxt_qplib_hwq *hwq)
{
	u8 pg_size = BNXT_QPLIB_HWRM_PG_SIZE_4K;
	struct bnxt_qplib_pbl *pbl;

	pbl = &hwq->pbl[PBL_LVL_0];
	switch (pbl->pg_size) {
		case ROCE_PG_SIZE_4K: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_4K;
		break;
		case ROCE_PG_SIZE_8K: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_8K;
		break;
		case ROCE_PG_SIZE_64K: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_64K;
		break;
		case ROCE_PG_SIZE_2M: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_2M;
		break;
		case ROCE_PG_SIZE_8M: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_8M;
		break;
		case ROCE_PG_SIZE_1G: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_1G;
		break;
		default:
		break;
	}
	return pg_size;
}

static inline enum bnxt_qplib_hwq_type _get_hwq_type(struct bnxt_qplib_res *res)
{
	return _is_chip_gen_p5_p7(res->cctx) ? HWQ_TYPE_QUEUE : HWQ_TYPE_L2_CMPL;
}

static inline bool _is_ext_stats_supported(u16 dev_cap_flags)
{
	return dev_cap_flags &
		CREQ_QUERY_FUNC_RESP_SB_EXT_STATS;
}

static inline int bnxt_ext_stats_supported(struct bnxt_qplib_chip_ctx *ctx,
					   u16 flags, bool virtfn)
{
	return (_is_ext_stats_supported(flags) &&
		((virtfn && _is_chip_p7(ctx)) || (!virtfn)));
}

static inline bool _is_hw_retx_supported(u16 dev_cap_flags)
{
	return dev_cap_flags &
		(CREQ_QUERY_FUNC_RESP_SB_HW_REQUESTER_RETX_ENABLED |
		 CREQ_QUERY_FUNC_RESP_SB_HW_RESPONDER_RETX_ENABLED);
}

/* Disable HW_RETX */
#define BNXT_RE_HW_RETX(a) _is_hw_retx_supported((a))

static inline bool _is_cqe_v2_supported(u16 dev_cap_flags)
{
	return dev_cap_flags &
		CREQ_QUERY_FUNC_RESP_SB_CQE_V2;
}

#define BNXT_DB_FIFO_ROOM_MASK      0x1fff8000
#define BNXT_DB_FIFO_ROOM_SHIFT     15
#define BNXT_MAX_FIFO_DEPTH         0x2c00

#define BNXT_DB_PACING_ALGO_THRESHOLD	250
#define BNXT_DEFAULT_PACING_PROBABILITY 0xFFFF

#define BNXT_DBR_PACING_WIN_BASE	0x2000
#define BNXT_DBR_PACING_WIN_MAP_OFF	4
#define BNXT_DBR_PACING_WIN_OFF(reg)	(BNXT_DBR_PACING_WIN_BASE +	\

static inline void bnxt_qplib_ring_db32(struct bnxt_qplib_db_info *info,
					bool arm)
{
	u32 key = 0;

	key = info->hwq->cons | (CMPL_DOORBELL_IDX_VALID |
		(CMPL_DOORBELL_KEY_CMPL & CMPL_DOORBELL_KEY_MASK));
	if (!arm)
		key |= CMPL_DOORBELL_MASK;
	/* memory barrier */
	wmb();
	writel(key, info->db);
}

#define BNXT_QPLIB_INIT_DBHDR(xid, type, indx, toggle)			\
	(((u64)(((xid) & DBC_DBC_XID_MASK) | DBC_DBC_PATH_ROCE |	\
	    (type) | BNXT_QPLIB_DBR_VALID) << 32) | (indx) |		\
	    ((toggle) << (BNXT_QPLIB_DBR_TOGGLE_SHIFT)))

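/*
 * Layout sketch (illustrative only, with assumed field values; not part
 * of the driver): BNXT_QPLIB_INIT_DBHDR() packs the xid, RoCE path, type
 * and valid bit into the upper 32 bits of the doorbell word and the ring
 * index (plus the optional toggle at bit 25) into the lower 32 bits:
 *
 *	u64 key = BNXT_QPLIB_INIT_DBHDR(0x12, DBC_DBC_TYPE_SQ, 0x40, 0);
 *	// bits 63..32: (0x12 & DBC_DBC_XID_MASK) | DBC_DBC_PATH_ROCE |
 *	//              DBC_DBC_TYPE_SQ | BNXT_QPLIB_DBR_VALID
 *	// bits 31..0:  0x40 (index); toggle bit 25 stays clear
 */
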
static inline void bnxt_qplib_write_db(struct bnxt_qplib_db_info *info,
				       u64 key, void __iomem *db,
				       u64 *shadow_key)
{
	unsigned long flags;

	spin_lock_irqsave(&info->lock, flags);
	*shadow_key = key;
	writeq(key, db);
	spin_unlock_irqrestore(&info->lock, flags);
}

static inline void __replay_writeq(u64 key, void __iomem *db)
{
	/* No need to replay uninitialised shadow_keys */
	if (key != BNXT_QPLIB_DBR_KEY_INVALID)
		writeq(key, db);
}

static inline void bnxt_qplib_replay_db(struct bnxt_qplib_db_info *info,
					bool is_arm_ena)
{
	if (!spin_trylock_irq(&info->lock))
		return;

	if (is_arm_ena)
		__replay_writeq(info->shadow_key_arm_ena, info->priv_db);
	else
		__replay_writeq(info->shadow_key, info->db);

	spin_unlock_irq(&info->lock);
}

static inline void bnxt_qplib_ring_db(struct bnxt_qplib_db_info *info,
				      u32 type)
{
	u64 key = 0;
	u32 indx;
	u8 toggle = 0;

	if (type == DBC_DBC_TYPE_CQ_ARMALL ||
	    type == DBC_DBC_TYPE_CQ_ARMSE)
		toggle = info->toggle;

	indx = ((info->hwq->cons & DBC_DBC_INDEX_MASK) |
		((info->flags & BNXT_QPLIB_FLAG_EPOCH_CONS_MASK) <<
		 BNXT_QPLIB_DB_EPOCH_CONS_SHIFT));

	key = BNXT_QPLIB_INIT_DBHDR(info->xid, type, indx, toggle);
	bnxt_qplib_write_db(info, key, info->db, &info->shadow_key);
}

static inline void bnxt_qplib_ring_prod_db(struct bnxt_qplib_db_info *info,
					   u32 type)
{
	u64 key = 0;
	u32 indx;

	indx = (((info->hwq->prod / info->max_slot) & DBC_DBC_INDEX_MASK) |
		((info->flags & BNXT_QPLIB_FLAG_EPOCH_PROD_MASK) <<
		 BNXT_QPLIB_DB_EPOCH_PROD_SHIFT));
	key = BNXT_QPLIB_INIT_DBHDR(info->xid, type, indx, 0);
	bnxt_qplib_write_db(info, key, info->db, &info->shadow_key);
}

static inline void bnxt_qplib_armen_db(struct bnxt_qplib_db_info *info,
				       u32 type)
{
	u64 key = 0;
	u8 toggle = 0;

	if (type == DBC_DBC_TYPE_CQ_ARMENA)
		toggle = info->toggle;
	/* Index always at 0 */
	key = BNXT_QPLIB_INIT_DBHDR(info->xid, type, 0, toggle);
	bnxt_qplib_write_db(info, key, info->priv_db,
			    &info->shadow_key_arm_ena);
}

static inline void bnxt_qplib_cq_coffack_db(struct bnxt_qplib_db_info *info)
{
	u64 key = 0;

	/* Index always at 0 */
	key = BNXT_QPLIB_INIT_DBHDR(info->xid, DBC_DBC_TYPE_CQ_CUTOFF_ACK, 0, 0);
	bnxt_qplib_write_db(info, key, info->priv_db, &info->shadow_key);
}

static inline void bnxt_qplib_srq_arm_db(struct bnxt_qplib_db_info *info)
{
	u64 key = 0;

	/* Index always at 0 */
	key = BNXT_QPLIB_INIT_DBHDR(info->xid, DBC_DBC_TYPE_SRQ_ARM, 0, 0);
	bnxt_qplib_write_db(info, key, info->priv_db, &info->shadow_key);
}

static inline void bnxt_qplib_ring_nq_db(struct bnxt_qplib_db_info *info,
					 struct bnxt_qplib_chip_ctx *cctx,
					 bool arm)
{
	u32 type;

	type = arm ? DBC_DBC_TYPE_NQ_ARM : DBC_DBC_TYPE_NQ;
	if (_is_chip_gen_p5_p7(cctx))
		bnxt_qplib_ring_db(info, type);
	else
		bnxt_qplib_ring_db32(info, arm);
}

struct bnxt_qplib_max_res {
	u32 max_qp;
	u32 max_mr;
	u32 max_cq;
	u32 max_srq;
	u32 max_ah;
	u32 max_pd;
};

/*
 * Defines for maximum resources supported for chip revisions
 * Maximum PDs supported are restricted to Max QPs
 * GENP4 - Wh+
 * DEFAULT - Thor
 */
#define BNXT_QPLIB_GENP4_PF_MAX_QP	(16 * 1024)
#define BNXT_QPLIB_GENP4_PF_MAX_MRW	(16 * 1024)
#define BNXT_QPLIB_GENP4_PF_MAX_CQ	(16 * 1024)
#define BNXT_QPLIB_GENP4_PF_MAX_SRQ	(1 * 1024)
#define BNXT_QPLIB_GENP4_PF_MAX_AH	(16 * 1024)
#define BNXT_QPLIB_GENP4_PF_MAX_PD	BNXT_QPLIB_GENP4_PF_MAX_QP

#define BNXT_QPLIB_DEFAULT_PF_MAX_QP	(64 * 1024)
#define BNXT_QPLIB_DEFAULT_PF_MAX_MRW	(256 * 1024)
#define BNXT_QPLIB_DEFAULT_PF_MAX_CQ	(64 * 1024)
#define BNXT_QPLIB_DEFAULT_PF_MAX_SRQ	(4 * 1024)
#define BNXT_QPLIB_DEFAULT_PF_MAX_AH	(64 * 1024)
#define BNXT_QPLIB_DEFAULT_PF_MAX_PD	BNXT_QPLIB_DEFAULT_PF_MAX_QP

#define BNXT_QPLIB_DEFAULT_VF_MAX_QP	(6 * 1024)
#define BNXT_QPLIB_DEFAULT_VF_MAX_MRW	(6 * 1024)
#define BNXT_QPLIB_DEFAULT_VF_MAX_CQ	(6 * 1024)
#define BNXT_QPLIB_DEFAULT_VF_MAX_SRQ	(4 * 1024)
#define BNXT_QPLIB_DEFAULT_VF_MAX_AH	(6 * 1024)
#define BNXT_QPLIB_DEFAULT_VF_MAX_PD	BNXT_QPLIB_DEFAULT_VF_MAX_QP

static inline void bnxt_qplib_max_res_supported(struct bnxt_qplib_chip_ctx *cctx,
						struct bnxt_qplib_res *qpl_res,
						struct bnxt_qplib_max_res *max_res,
						bool vf_res_limit)
{
	switch (cctx->chip_num) {
	case CHIP_NUM_57608:
	case CHIP_NUM_58818:
	case CHIP_NUM_57504:
	case CHIP_NUM_57502:
	case CHIP_NUM_57508:
		if (!qpl_res->is_vf) {
			max_res->max_qp = BNXT_QPLIB_DEFAULT_PF_MAX_QP;
			max_res->max_mr = BNXT_QPLIB_DEFAULT_PF_MAX_MRW;
			max_res->max_cq = BNXT_QPLIB_DEFAULT_PF_MAX_CQ;
			max_res->max_srq = BNXT_QPLIB_DEFAULT_PF_MAX_SRQ;
			max_res->max_ah = BNXT_QPLIB_DEFAULT_PF_MAX_AH;
			max_res->max_pd = BNXT_QPLIB_DEFAULT_PF_MAX_PD;
		} else {
			max_res->max_qp = BNXT_QPLIB_DEFAULT_VF_MAX_QP;
			max_res->max_mr = BNXT_QPLIB_DEFAULT_VF_MAX_MRW;
			max_res->max_cq = BNXT_QPLIB_DEFAULT_VF_MAX_CQ;
			max_res->max_srq = BNXT_QPLIB_DEFAULT_VF_MAX_SRQ;
			max_res->max_ah = BNXT_QPLIB_DEFAULT_VF_MAX_AH;
			max_res->max_pd = BNXT_QPLIB_DEFAULT_VF_MAX_PD;
		}
		break;
	default:
		/* Wh+/Stratus max resources */
		max_res->max_qp = BNXT_QPLIB_GENP4_PF_MAX_QP;
		max_res->max_mr = BNXT_QPLIB_GENP4_PF_MAX_MRW;
		max_res->max_cq = BNXT_QPLIB_GENP4_PF_MAX_CQ;
		max_res->max_srq = BNXT_QPLIB_GENP4_PF_MAX_SRQ;
		max_res->max_ah = BNXT_QPLIB_GENP4_PF_MAX_AH;
		max_res->max_pd = BNXT_QPLIB_GENP4_PF_MAX_PD;
		break;
	}
}
#endif