/*
 * Copyright (c) 2018-2019 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * File: qlnxr_def.h
 * Author: David C Somayajulu
 */

#ifndef __QLNXR_DEF_H_
#define __QLNXR_DEF_H_

#include <sys/ktr.h>

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/completion.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <sys/vmem.h>

#include <asm/byteorder.h>

#include <netinet/in.h>
#include <net/ipv6.h>
#include <netinet/toecore.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_sa.h>

#if __FreeBSD_version < 1100000
#undef MODULE_VERSION
#endif

#include "qlnx_os.h"
#include "bcm_osal.h"

#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore.h"
#include "ecore_chain.h"
#include "ecore_status.h"
#include "ecore_hw.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_int.h"
#include "ecore_cxt.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
#include "ecore_dev_api.h"
#include "ecore_l2_api.h"
#ifdef CONFIG_ECORE_SRIOV
#include "ecore_sriov.h"
#include "ecore_vf.h"
#endif
#ifdef CONFIG_ECORE_LL2
#include "ecore_ll2.h"
#endif
#ifdef CONFIG_ECORE_FCOE
#include "ecore_fcoe.h"
#endif
#ifdef CONFIG_ECORE_ISCSI
#include "ecore_iscsi.h"
#endif
#include "ecore_mcp.h"
#include "ecore_hw_defs.h"
#include "mcp_public.h"

#ifdef CONFIG_ECORE_RDMA
#include "ecore_rdma.h"
#include "ecore_rdma_api.h"
#endif

#ifdef CONFIG_ECORE_ROCE
#include "ecore_roce.h"
#endif

#ifdef CONFIG_ECORE_IWARP
#include "ecore_iwarp.h"
#endif

#include "ecore_iro.h"
#include "nvm_cfg.h"

#include "ecore_dbg_fw_funcs.h"
#include "rdma_common.h"

#include "qlnx_ioctl.h"
#include "qlnx_def.h"
#include "qlnx_rdma.h"
#include "qlnxr_verbs.h"
#include "qlnxr_user.h"
#include "qlnx_ver.h"
#include <sys/smp.h>

#define QLNXR_ROCE_INTERFACE_VERSION     1801

#define QLNXR_MODULE_VERSION     "8.18.1.0"
#define QLNXR_NODE_DESC "QLogic 579xx RoCE HCA"

#define OC_SKH_DEVICE_PF 0x720
#define OC_SKH_DEVICE_VF 0x728
#define QLNXR_MAX_AH 512
/* QLNXR Limitations */

/* SQ/RQ Limitations
 * An S/RQ PBL contains a list of pointers to pages. Each page contains S/RQE
 * elements. Several S/RQE elements make up a single S/RQ WQE, up to a certain
 * maximum that differs between the SQ and the RQ. The size of the PBL was
 * chosen so as not to limit the MAX_WR supported by ECORE, and was rounded up
 * to a power of two.
 */
/* SQ */
#define QLNXR_MAX_SQ_PBL (0x8000) /* 2^15 bytes */
#define QLNXR_MAX_SQ_PBL_ENTRIES (0x10000 / sizeof(void *)) /* number */
#define QLNXR_SQE_ELEMENT_SIZE (sizeof(struct rdma_sq_sge)) /* bytes */
#define QLNXR_MAX_SQE_ELEMENTS_PER_SQE (ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE / \
                QLNXR_SQE_ELEMENT_SIZE) /* number */
#define QLNXR_MAX_SQE_ELEMENTS_PER_PAGE ((RDMA_RING_PAGE_SIZE) / \
                QLNXR_SQE_ELEMENT_SIZE) /* number */
#define QLNXR_MAX_SQE ((QLNXR_MAX_SQ_PBL_ENTRIES) * (RDMA_RING_PAGE_SIZE) / \
                (QLNXR_SQE_ELEMENT_SIZE) / (QLNXR_MAX_SQE_ELEMENTS_PER_SQE))
/* RQ */
#define QLNXR_MAX_RQ_PBL (0x2000) /* 2^13 bytes */
#define QLNXR_MAX_RQ_PBL_ENTRIES (0x10000 / sizeof(void *)) /* number */
#define QLNXR_RQE_ELEMENT_SIZE (sizeof(struct rdma_rq_sge)) /* bytes */
#define QLNXR_MAX_RQE_ELEMENTS_PER_RQE (RDMA_MAX_SGE_PER_RQ_WQE) /* number */
#define QLNXR_MAX_RQE_ELEMENTS_PER_PAGE ((RDMA_RING_PAGE_SIZE) / \
                QLNXR_RQE_ELEMENT_SIZE) /* number */
#define QLNXR_MAX_RQE ((QLNXR_MAX_RQ_PBL_ENTRIES) * (RDMA_RING_PAGE_SIZE) / \
                (QLNXR_RQE_ELEMENT_SIZE) / (QLNXR_MAX_RQE_ELEMENTS_PER_RQE))
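
/* Worked example for the SQ bound above, under illustrative assumptions only
 * (64-bit pointers, RDMA_RING_PAGE_SIZE == 4096, sizeof(struct rdma_sq_sge)
 * == 16; the real values come from the ecore/FW headers):
 *
 *   QLNXR_MAX_SQ_PBL_ENTRIES = 0x10000 / 8 = 8192 page pointers
 *   SQ elements              = 8192 * 4096 / 16 = 2,097,152
 *   QLNXR_MAX_SQE            = 2,097,152 / QLNXR_MAX_SQE_ELEMENTS_PER_SQE
 *
 * QLNXR_MAX_RQE follows the same pattern using the RQ element size and
 * RDMA_MAX_SGE_PER_RQ_WQE.
 */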

/* CQE Limitation
 * Although the FW supports a two-layer PBL, we use a single layer since it is
 * more than enough. For that layer we use a maximum size of 512 kB, again
 * because that reaches the maximum number of page pointers. Note the '-1' in
 * the calculation: it comes from having a u16 for the number of pages, i.e.
 * 0xffff is the maximum number of pages (in a single layer).
 */
#define QLNXR_CQE_SIZE   (sizeof(union rdma_cqe))
#define QLNXR_MAX_CQE_PBL_SIZE (512*1024) /* 512kB */
#define QLNXR_MAX_CQE_PBL_ENTRIES (((QLNXR_MAX_CQE_PBL_SIZE) / \
                                  sizeof(u64)) - 1) /* 64k - 1 */
#define QLNXR_MAX_CQES ((u32)((QLNXR_MAX_CQE_PBL_ENTRIES) * (ECORE_CHAIN_PAGE_SIZE)\
                             / QLNXR_CQE_SIZE)) /* 8M - 4096/32 = 8,388,480 */

/* CNQ size Limitation
 * The maximum CNQ size is not reachable because the FW supports a chain of u16
 * (specifically 64k-1). The FW can buffer CNQ elements to avoid an overflow,
 * at the expense of performance. Hence we set it to an arbitrarily smaller
 * value than the maximum.
 */
#define QLNXR_ROCE_MAX_CNQ_SIZE          (0x4000) /* 2^14 */

#define QLNXR_MAX_PORT                   (1)
#define QLNXR_PORT                       (1)

#define QLNXR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)

#define convert_to_64bit(lo, hi) ((u64)hi << 32 | (u64)lo)
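
/* Usage sketch: FW structures typically carry 64-bit addresses as two 32-bit
 * words, e.g. (field names illustrative, not from this file):
 *
 *   u64 addr = convert_to_64bit(sge->addr.lo, sge->addr.hi);
 *
 * Note the macro relies on '<<' binding tighter than '|'.
 */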

/* The following numbers are used to determine if a handle received from the
 * FW actually points to a CQ/QP.
 */
#define QLNXR_CQ_MAGIC_NUMBER    (0x11223344)
#define QLNXR_QP_MAGIC_NUMBER    (0x77889900)
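
/* Presumably these are stored in the 'sig' members of struct qlnxr_cq and
 * struct qlnxr_qp (below) so that completion handlers can sanity-check a
 * handle before dereferencing it, e.g.:
 *
 *   if (cq->sig != QLNXR_CQ_MAGIC_NUMBER)
 *           return;  // stale or corrupt handle
 */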

/* Fast path debug prints */
#define FP_DP_VERBOSE(...)
/* #define FP_DP_VERBOSE(...)   DP_VERBOSE(__VA_ARGS__) */

#define FW_PAGE_SIZE    (RDMA_RING_PAGE_SIZE)

#define QLNXR_MSG_INIT		0x10000
#define QLNXR_MSG_FAIL		0x10000
#define QLNXR_MSG_CQ		0x20000
#define QLNXR_MSG_RQ		0x40000
#define QLNXR_MSG_SQ		0x80000
#define QLNXR_MSG_QP		(QLNXR_MSG_SQ | QLNXR_MSG_RQ)
#define QLNXR_MSG_MR		0x100000
#define QLNXR_MSG_GSI		0x200000
#define QLNXR_MSG_MISC		0x400000
#define QLNXR_MSG_SRQ		0x800000
#define QLNXR_MSG_IWARP		0x1000000

#define QLNXR_ROCE_PKEY_MAX		1
#define QLNXR_ROCE_PKEY_TABLE_LEN	1
#define QLNXR_ROCE_PKEY_DEFAULT		0xffff

#define QLNXR_MAX_SGID			128 /* TBD - add more source gids... */

#define QLNXR_ENET_STATE_BIT     (0)

#define QLNXR_MAX_MSIX		(16)

struct qlnxr_cnq {
        struct qlnxr_dev	*dev;
        struct ecore_chain	pbl;
        struct ecore_sb_info	*sb;
        char			name[32];
        u64			n_comp;
        __le16			*hw_cons_ptr;
        u8			index;
        int			irq_rid;
        struct resource		*irq;
        void			*irq_handle;
};

struct qlnxr_device_attr {
        /* Vendor specific information */
        u32     vendor_id;
        u32     vendor_part_id;
        u32     hw_ver;
        u64     fw_ver;

        u64     node_guid;      /* node GUID */
        u64     sys_image_guid; /* System image GUID */

        u8      max_cnq;
        u8      max_sge;        /* Maximum # of scatter/gather entries
                                 * per Work Request supported
                                 */
        u16     max_inline;
        u32     max_sqe;        /* Maximum number of outstanding send work
                                 * requests on any Work Queue supported
                                 */
        u32     max_rqe;        /* Maximum number of outstanding receive work
                                 * requests on any Work Queue supported
                                 */
        u8      max_qp_resp_rd_atomic_resc;     /* Maximum number of RDMA Reads
                                                 * & atomic operations that can
                                                 * be outstanding per QP
                                                 */

        u8      max_qp_req_rd_atomic_resc;      /* The maximum depth per QP for
                                                 * initiation of RDMA Read
                                                 * & atomic operations
                                                 */
        u64     max_dev_resp_rd_atomic_resc;
        u32     max_cq;
        u32     max_qp;
        u32     max_mr;         /* Maximum # of MRs supported */
        u64     max_mr_size;    /* Size (in bytes) of largest contiguous memory
                                 * block that can be registered by this device
                                 */
        u32     max_cqe;
        u32     max_mw;         /* Maximum # of memory windows supported */
        u32     max_fmr;
        u32     max_mr_mw_fmr_pbl;
        u64     max_mr_mw_fmr_size;
        u32     max_pd;         /* Maximum # of protection domains supported */
        u32     max_ah;
        u8      max_pkey;
        u32     max_srq;        /* Maximum number of SRQs */
        u32     max_srq_wr;     /* Maximum number of WRs per SRQ */
        u8      max_srq_sge;    /* Maximum number of SGE per WQE */
        u8      max_stats_queues; /* Maximum number of statistics queues */
        u32     dev_caps;

        /* Ability to support RNR-NAK generation */
#define QLNXR_ROCE_DEV_CAP_RNR_NAK_MASK                           0x1
#define QLNXR_ROCE_DEV_CAP_RNR_NAK_SHIFT                          0
        /* Ability to support shutdown port */
#define QLNXR_ROCE_DEV_CAP_SHUTDOWN_PORT_MASK                     0x1
#define QLNXR_ROCE_DEV_CAP_SHUTDOWN_PORT_SHIFT                    1
        /* Ability to support port active event */
#define QLNXR_ROCE_DEV_CAP_PORT_ACTIVE_EVENT_MASK                 0x1
#define QLNXR_ROCE_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT                2
        /* Ability to support port change event */
#define QLNXR_ROCE_DEV_CAP_PORT_CHANGE_EVENT_MASK                 0x1
#define QLNXR_ROCE_DEV_CAP_PORT_CHANGE_EVENT_SHIFT                3
        /* Ability to support system image GUID */
#define QLNXR_ROCE_DEV_CAP_SYS_IMAGE_MASK                         0x1
#define QLNXR_ROCE_DEV_CAP_SYS_IMAGE_SHIFT                        4
        /* Ability to support a bad P_Key counter */
#define QLNXR_ROCE_DEV_CAP_BAD_PKEY_CNT_MASK                      0x1
#define QLNXR_ROCE_DEV_CAP_BAD_PKEY_CNT_SHIFT                     5
        /* Ability to support atomic operations */
#define QLNXR_ROCE_DEV_CAP_ATOMIC_OP_MASK                         0x1
#define QLNXR_ROCE_DEV_CAP_ATOMIC_OP_SHIFT                        6
#define QLNXR_ROCE_DEV_CAP_RESIZE_CQ_MASK                         0x1
#define QLNXR_ROCE_DEV_CAP_RESIZE_CQ_SHIFT                        7
        /* Ability to support modifying the maximum number of
         * outstanding work requests per QP
         */
#define QLNXR_ROCE_DEV_CAP_RESIZE_MAX_WR_MASK                     0x1
#define QLNXR_ROCE_DEV_CAP_RESIZE_MAX_WR_SHIFT                    8
        /* Ability to support automatic path migration */
#define QLNXR_ROCE_DEV_CAP_AUTO_PATH_MIG_MASK                     0x1
#define QLNXR_ROCE_DEV_CAP_AUTO_PATH_MIG_SHIFT                    9
        /* Ability to support the base memory management extensions */
#define QLNXR_ROCE_DEV_CAP_BASE_MEMORY_EXT_MASK                   0x1
#define QLNXR_ROCE_DEV_CAP_BASE_MEMORY_EXT_SHIFT                  10
#define QLNXR_ROCE_DEV_CAP_BASE_QUEUE_EXT_MASK                    0x1
#define QLNXR_ROCE_DEV_CAP_BASE_QUEUE_EXT_SHIFT                   11
        /* Ability to support multiple page sizes per memory region */
#define QLNXR_ROCE_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK             0x1
#define QLNXR_ROCE_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT            12
        /* Ability to support block list physical buffer lists */
#define QLNXR_ROCE_DEV_CAP_BLOCK_MODE_MASK                        0x1
#define QLNXR_ROCE_DEV_CAP_BLOCK_MODE_SHIFT                       13
        /* Ability to support zero based virtual addresses */
#define QLNXR_ROCE_DEV_CAP_ZBVA_MASK                              0x1
#define QLNXR_ROCE_DEV_CAP_ZBVA_SHIFT                             14
        /* Ability to support local invalidate fencing */
#define QLNXR_ROCE_DEV_CAP_LOCAL_INV_FENCE_MASK                   0x1
#define QLNXR_ROCE_DEV_CAP_LOCAL_INV_FENCE_SHIFT                  15
        /* Ability to support loopback on QP */
#define QLNXR_ROCE_DEV_CAP_LB_INDICATOR_MASK                      0x1
#define QLNXR_ROCE_DEV_CAP_LB_INDICATOR_SHIFT                     16
        u64                     page_size_caps;
        u8                      dev_ack_delay;
        u32                     reserved_lkey;   /* Value of reserved L_key */
        u32                     bad_pkey_counter;/* Bad P_key counter support
                                                  * indicator
                                                  */
        struct ecore_rdma_events  events;
};
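
/* A capability bit in dev_caps is read with its MASK/SHIFT pair, e.g.:
 *
 *   if ((attr->dev_caps >> QLNXR_ROCE_DEV_CAP_ATOMIC_OP_SHIFT) &
 *       QLNXR_ROCE_DEV_CAP_ATOMIC_OP_MASK) {
 *           // atomic operations supported
 *   }
 */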

struct qlnxr_dev {
	struct ib_device	ibdev;
	qlnx_host_t		*ha;
	struct ecore_dev	*cdev;

	/* Added to extend Applications Support */
	struct pci_dev		*pdev;
	uint32_t		dp_module;
	uint8_t			dp_level;

	void			*rdma_ctx;

	struct mtx		idr_lock;
	struct idr		qpidr;

	uint32_t		wq_multiplier;
	int			num_cnq;

	struct ecore_sb_info	sb_array[QLNXR_MAX_MSIX];
	struct qlnxr_cnq	cnq_array[QLNXR_MAX_MSIX];

	int			sb_start;

	int			gsi_qp_created;
	struct qlnxr_cq		*gsi_sqcq;
	struct qlnxr_cq		*gsi_rqcq;
	struct qlnxr_qp		*gsi_qp;

	/* TBD: we'll probably need an array of these per DPI... */
	void __iomem		*db_addr;
	uint64_t		db_phys_addr;
	uint32_t		db_size;
	uint16_t		dpi;

	uint64_t		guid;
	enum ib_atomic_cap	atomic_cap;

	union ib_gid		sgid_tbl[QLNXR_MAX_SGID];
	struct mtx		sgid_lock;
	struct notifier_block	nb_inet;
	struct notifier_block	nb_inet6;

	uint8_t			mr_key;
	struct list_head	entry;

	struct dentry		*dbgfs;

	uint8_t			gsi_ll2_mac_address[ETH_ALEN];
	uint8_t			gsi_ll2_handle;

	unsigned long		enet_state;

	struct workqueue_struct *iwarp_wq;

	volatile uint32_t	pd_count;
	struct qlnxr_device_attr attr;
	uint8_t			user_dpm_enabled;
};

typedef struct qlnxr_dev qlnxr_dev_t;

struct qlnxr_pd {
        struct ib_pd ibpd;
        u32 pd_id;
        struct qlnxr_ucontext *uctx;
};

struct qlnxr_ucontext {
        struct ib_ucontext ibucontext;
        struct qlnxr_dev *dev;
        struct qlnxr_pd *pd;
        u64 dpi_addr;
        u64 dpi_phys_addr;
        u32 dpi_size;
        u16 dpi;

        struct list_head mm_head;
        struct mutex mm_list_lock;
};

struct qlnxr_dev_attr {
        struct ib_device_attr ib_attr;
};

struct qlnxr_dma_mem {
        void *va;
        dma_addr_t pa;
        u32 size;
};

struct qlnxr_pbl {
        struct list_head list_entry;
        void *va;
        dma_addr_t pa;
};

struct qlnxr_queue_info {
        void *va;
        dma_addr_t dma;
        u32 size;
        u16 len;
        u16 entry_size;         /* Size of an element in the queue */
        u16 id;                 /* qid, where to ring the doorbell. */
        u16 head, tail;
        bool created;
};

struct qlnxr_eq {
        struct qlnxr_queue_info q;
        u32 vector;
        int cq_cnt;
        struct qlnxr_dev *dev;
        char irq_name[32];
};

struct qlnxr_mq {
        struct qlnxr_queue_info sq;
        struct qlnxr_queue_info cq;
        bool rearm_cq;
};

struct phy_info {
        u16 auto_speeds_supported;
        u16 fixed_speeds_supported;
        u16 phy_type;
        u16 interface_type;
};

union db_prod64 {
        struct rdma_pwm_val32_data data;
        u64 raw;
};

enum qlnxr_cq_type {
        QLNXR_CQ_TYPE_GSI,
        QLNXR_CQ_TYPE_KERNEL,
        QLNXR_CQ_TYPE_USER
};

struct qlnxr_pbl_info {
        u32 num_pbls;
        u32 num_pbes;
        u32 pbl_size;
        u32 pbe_size;
        bool two_layered;
};

struct qlnxr_userq {
        struct ib_umem *umem;
        struct qlnxr_pbl_info pbl_info;
        struct qlnxr_pbl *pbl_tbl;
        u64 buf_addr;
        size_t buf_len;
};

struct qlnxr_cq {
        struct ib_cq		ibcq; /* must be first */

        enum qlnxr_cq_type	cq_type;
        uint32_t		sig;
        uint16_t		icid;

        /* relevant to cqs created from kernel space only (ULPs) */
        spinlock_t		cq_lock;
        uint8_t			arm_flags;
        struct ecore_chain	pbl;

        void __iomem		*db_addr; /* db address for cons update */
        union db_prod64		db;

        uint8_t			pbl_toggle;
        union rdma_cqe		*latest_cqe;
        union rdma_cqe		*toggle_cqe;

        /* TODO: remove since it is redundant with 32 bit chains */
        uint32_t		cq_cons;

        /* relevant to cqs created from user space only (applications) */
        struct qlnxr_userq	q;

        /* destroy-IRQ handler race prevention */
        uint8_t			destroyed;
        uint16_t		cnq_notif;
};

struct qlnxr_ah {
        struct ib_ah		ibah;
        struct ib_ah_attr	attr;
};

union db_prod32 {
        struct rdma_pwm_val16_data data;
        u32 raw;
};

struct qlnxr_qp_hwq_info {
        /* WQE Elements */
        struct ecore_chain      pbl;
        u64                     p_phys_addr_tbl;
        u32                     max_sges;

        /* WQE */
        u16                     prod;     /* WQE prod index for SW ring */
        u16                     cons;     /* WQE cons index for SW ring */
        u16                     wqe_cons;
        u16                     gsi_cons; /* filled in by GSI implementation */
        u16                     max_wr;

        /* DB */
        void __iomem            *db;      /* Doorbell address */
        union db_prod32         db_data;  /* Doorbell data */

        /* Required for iwarp_only */
        void __iomem            *iwarp_db2;      /* Doorbell address */
        union db_prod32         iwarp_db2_data;  /* Doorbell data */
};

#define QLNXR_INC_SW_IDX(p_info, index)                          \
        do {                                                    \
                p_info->index = (p_info->index + 1) &           \
                        ecore_chain_get_capacity(p_info->pbl);  \
        } while (0)
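
/* NB: masking with ecore_chain_get_capacity() only behaves like a modulo when
 * the capacity happens to be of the form 2^n - 1; compare qlnxr_inc_sw_cons()
 * and qlnxr_inc_sw_prod() below, which wrap with '%'.
 */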

struct qlnxr_srq_hwq_info {
        u32 max_sges;
        u32 max_wr;
        struct ecore_chain pbl;
        u64 p_phys_addr_tbl;
        u32 wqe_prod;     /* WQE prod index in HW ring */
        u32 sge_prod;     /* SGE prod index in HW ring */
        u32 wr_prod_cnt;  /* wr producer count */
        u32 wr_cons_cnt;  /* wr consumer count */
        u32 num_elems;

        u32 *virt_prod_pair_addr; /* producer pair virtual address */
        dma_addr_t phy_prod_pair_addr; /* producer pair physical address */
};

struct qlnxr_srq {
        struct ib_srq ibsrq;
        struct qlnxr_dev *dev;
        /* relevant to srqs created from user space only (applications) */
        struct qlnxr_userq       usrq;
        struct qlnxr_srq_hwq_info hw_srq;
        struct ib_umem *prod_umem;
        u16 srq_id;
        /* lock to protect srq recv post */
        spinlock_t lock;
};

enum qlnxr_qp_err_bitmap {
        QLNXR_QP_ERR_SQ_FULL     = 1 << 0,
        QLNXR_QP_ERR_RQ_FULL     = 1 << 1,
        QLNXR_QP_ERR_BAD_SR      = 1 << 2,
        QLNXR_QP_ERR_BAD_RR      = 1 << 3,
        QLNXR_QP_ERR_SQ_PBL_FULL = 1 << 4,
        QLNXR_QP_ERR_RQ_PBL_FULL = 1 << 5,
};

struct mr_info {
        struct qlnxr_pbl *pbl_table;
        struct qlnxr_pbl_info pbl_info;
        struct list_head free_pbl_list;
        struct list_head inuse_pbl_list;
        u32 completed;
        u32 completed_handled;
};

#if __FreeBSD_version < 1102000
#define DEFINE_IB_FAST_REG
#else
#define DEFINE_ALLOC_MR
#endif

#ifdef DEFINE_IB_FAST_REG
struct qlnxr_fast_reg_page_list {
        struct ib_fast_reg_page_list ibfrpl;
        struct qlnxr_dev *dev;
        struct mr_info info;
};
#endif

struct qlnxr_qp {
        struct ib_qp ibqp;              /* must be first */
        struct qlnxr_dev *dev;
        struct qlnxr_iw_ep *ep;
        struct qlnxr_qp_hwq_info sq;
        struct qlnxr_qp_hwq_info rq;

        u32 max_inline_data;

#if __FreeBSD_version >= 1100000
        spinlock_t q_lock ____cacheline_aligned;
#else
        spinlock_t q_lock;
#endif

        struct qlnxr_cq *sq_cq;
        struct qlnxr_cq *rq_cq;
        struct qlnxr_srq *srq;
        enum ecore_roce_qp_state state;   /* QP state */
        u32 id;
        struct qlnxr_pd *pd;
        enum ib_qp_type qp_type;
        struct ecore_rdma_qp *ecore_qp;
        u32 qp_id;
        u16 icid;
        u16 mtu;
        int sgid_idx;
        u32 rq_psn;
        u32 sq_psn;
        u32 qkey;
        u32 dest_qp_num;
        u32 sig;                /* unique signature to identify valid QP */

        /* relevant to qps created from kernel space only (ULPs) */
        u8 prev_wqe_size;
        u16 wqe_cons;
        u32 err_bitmap;
        bool signaled;
        /* SQ shadow */
        struct {
                u64 wr_id;
                enum ib_wc_opcode opcode;
                u32 bytes_len;
                u8 wqe_size;
                bool signaled;
                dma_addr_t icrc_mapping;
                u32 *icrc;
#ifdef DEFINE_IB_FAST_REG
                struct qlnxr_fast_reg_page_list *frmr;
#endif
                struct qlnxr_mr *mr;
        } *wqe_wr_id;

        /* RQ shadow */
        struct {
                u64 wr_id;
                struct ib_sge sg_list[RDMA_MAX_SGE_PER_RQ_WQE];
                uint8_t wqe_size;

                /* for GSI only */
                u8 smac[ETH_ALEN];
                u16 vlan_id;
                int rc;
        } *rqe_wr_id;

        /* relevant to qps created from user space only (applications) */
        struct qlnxr_userq usq;
        struct qlnxr_userq urq;
        atomic_t refcnt;
        bool destroyed;
};

enum qlnxr_mr_type {
        QLNXR_MR_USER,
        QLNXR_MR_KERNEL,
        QLNXR_MR_DMA,
        QLNXR_MR_FRMR
};

struct qlnxr_mr {
        struct ib_mr    ibmr;
        struct ib_umem  *umem;

        struct ecore_rdma_register_tid_in_params hw_mr;
        enum qlnxr_mr_type type;

        struct qlnxr_dev *dev;
        struct mr_info info;

        u64 *pages;
        u32 npages;

        u64 *iova_start; /* valid only for kernel_mr */
};

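/* Presumably a (phy_addr, len) key describing a range handed out to user
 * space (e.g. the DPI doorbell window), kept on qlnxr_ucontext.mm_head so a
 * later mmap() request can be validated against it.
 */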
struct qlnxr_mm {
        struct {
                u64 phy_addr;
                unsigned long len;
        } key;
        struct list_head entry;
};

struct qlnxr_iw_listener {
        struct qlnxr_dev *dev;
        struct iw_cm_id *cm_id;
        int backlog;
        void *ecore_handle;
};

struct qlnxr_iw_ep {
        struct qlnxr_dev *dev;
        struct iw_cm_id *cm_id;
        struct qlnxr_qp *qp;
        void *ecore_context;
        u8 during_connect;
};

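/* Software ring index helpers: prod/cons wrap at max_wr, while wqe_cons is a
 * free-running count of consumed WQEs.
 */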
static inline void
qlnxr_inc_sw_cons(struct qlnxr_qp_hwq_info *info)
{
        info->cons = (info->cons + 1) % info->max_wr;
        info->wqe_cons++;
}

static inline void
qlnxr_inc_sw_prod(struct qlnxr_qp_hwq_info *info)
{
        info->prod = (info->prod + 1) % info->max_wr;
}

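/* container_of() down-casts from the embedded ib_* object to the qlnxr
 * wrapper that contains it; the wrappers embed the ib object as their first
 * member (see the "must be first" comments on the structs).
 */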
static inline struct qlnxr_dev *
get_qlnxr_dev(struct ib_device *ibdev)
{
        return container_of(ibdev, struct qlnxr_dev, ibdev);
}

static inline struct qlnxr_ucontext *
get_qlnxr_ucontext(struct ib_ucontext *ibucontext)
{
        return container_of(ibucontext, struct qlnxr_ucontext, ibucontext);
}

static inline struct qlnxr_pd *
get_qlnxr_pd(struct ib_pd *ibpd)
{
        return container_of(ibpd, struct qlnxr_pd, ibpd);
}

static inline struct qlnxr_cq *
get_qlnxr_cq(struct ib_cq *ibcq)
{
        return container_of(ibcq, struct qlnxr_cq, ibcq);
}

static inline struct qlnxr_qp *
get_qlnxr_qp(struct ib_qp *ibqp)
{
        return container_of(ibqp, struct qlnxr_qp, ibqp);
}

static inline struct qlnxr_mr *
get_qlnxr_mr(struct ib_mr *ibmr)
{
        return container_of(ibmr, struct qlnxr_mr, ibmr);
}

static inline struct qlnxr_ah *
get_qlnxr_ah(struct ib_ah *ibah)
{
        return container_of(ibah, struct qlnxr_ah, ibah);
}

static inline struct qlnxr_srq *
get_qlnxr_srq(struct ib_srq *ibsrq)
{
        return container_of(ibsrq, struct qlnxr_srq, ibsrq);
}

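/* QP shape predicates: a GSI (QP1) QP is handled through the driver's
 * dedicated GSI path rather than the generic SQ/RQ rings, and a QP attached
 * to an SRQ has no private RQ of its own.
 */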
static inline bool qlnxr_qp_has_srq(struct qlnxr_qp *qp)
{
        return !!qp->srq;
}

static inline bool qlnxr_qp_has_sq(struct qlnxr_qp *qp)
{
        if (qp->qp_type == IB_QPT_GSI)
                return 0;

        return 1;
}

static inline bool qlnxr_qp_has_rq(struct qlnxr_qp *qp)
{
        if (qp->qp_type == IB_QPT_GSI || qlnxr_qp_has_srq(qp))
                return 0;

        return 1;
}

#ifdef DEFINE_IB_FAST_REG
static inline struct qlnxr_fast_reg_page_list *get_qlnxr_frmr_list(
        struct ib_fast_reg_page_list *ifrpl)
{
        return container_of(ifrpl, struct qlnxr_fast_reg_page_list, ibfrpl);
}
#endif

#define SET_FIELD2(value, name, flag)                          \
        do {                                                   \
                (value) |= ((flag) << (name ## _SHIFT));       \
        } while (0)
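
/* Usage sketch: SET_FIELD2 pairs with the *_SHIFT definitions in this file,
 * e.g.
 *
 *   u32 caps = 0;
 *   SET_FIELD2(caps, QLNXR_ROCE_DEV_CAP_ATOMIC_OP, 1);
 *
 * sets bit 6 of 'caps'. Note it ORs the flag in without clearing the field
 * first, so it is only suitable for building values up from zero.
 */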

#define QLNXR_RESP_IMM	(RDMA_CQE_RESPONDER_IMM_FLG_MASK << \
                         RDMA_CQE_RESPONDER_IMM_FLG_SHIFT)
#define QLNXR_RESP_RDMA	(RDMA_CQE_RESPONDER_RDMA_FLG_MASK << \
                         RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT)
#define QLNXR_RESP_INV  (RDMA_CQE_RESPONDER_INV_FLG_MASK << \
                         RDMA_CQE_RESPONDER_INV_FLG_SHIFT)

#define QLNXR_RESP_RDMA_IMM (QLNXR_RESP_IMM | QLNXR_RESP_RDMA)

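/* qlnxr_get_dmac() resolves the destination MAC for an address handle. When
 * IP-based GIDs are not available, the MAC is recovered from the EUI-64
 * interface ID embedded in the GID, e.g. (illustrative):
 *
 *   interface ID 02:11:22:ff:fe:33:44:55  ->  MAC 00:11:22:33:44:55
 *
 * (the ff:fe filler bytes are dropped and the universal/local bit flipped).
 */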
static inline int
qlnxr_get_dmac(struct qlnxr_dev *dev, struct ib_ah_attr *ah_attr, u8 *mac_addr)
{
#ifdef DEFINE_NO_IP_BASED_GIDS
        u8 *guid = &ah_attr->grh.dgid.raw[8]; /* the GID's lower 64 bits hold the GUID */
#endif
        union ib_gid zero_sgid = { { 0 } };
        struct in6_addr in6;

        if (!memcmp(&ah_attr->grh.dgid, &zero_sgid, sizeof(union ib_gid))) {
                memset(mac_addr, 0x00, ETH_ALEN);
                return -EINVAL;
        }

        memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));

#ifdef DEFINE_NO_IP_BASED_GIDS
        /* get the MAC address from the GUID, i.e. EUI-64 to MAC address */
        mac_addr[0] = guid[0] ^ 2; /* toggle the universal/local bit */
        mac_addr[1] = guid[1];
        mac_addr[2] = guid[2];
        mac_addr[3] = guid[5];
        mac_addr[4] = guid[6];
        mac_addr[5] = guid[7];
#else
        memcpy(mac_addr, ah_attr->dmac, ETH_ALEN);
#endif
        return 0;
}

extern int qlnx_rdma_ll2_set_mac_filter(void *rdma_ctx, uint8_t *old_mac_address,
                uint8_t *new_mac_address);

#if __FreeBSD_version < 1100000
#define DEFINE_IB_AH_ATTR_WITH_DMAC     (0)
#define DEFINE_IB_UMEM_WITH_CHUNK	(1)
#else
#define DEFINE_IB_AH_ATTR_WITH_DMAC     (1)
#endif

#define QLNX_IS_IWARP(rdev)	IS_IWARP(ECORE_LEADING_HWFN(rdev->cdev))
#define QLNX_IS_ROCE(rdev)	IS_ROCE(ECORE_LEADING_HWFN(rdev->cdev))

#define MAX_RXMIT_CONNS		16

#endif /* #ifndef __QLNXR_DEF_H_ */