/*
 * Copyright (c) 2018-2019 Cavium, Inc.
 * All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __ECORE_RDMA_API_H__
#define __ECORE_RDMA_API_H__

#ifndef LINUX_REMOVE
#ifndef ETH_ALEN
#define ETH_ALEN 6
#endif
#endif

#ifndef __EXTRACT__LINUX__

enum ecore_roce_ll2_tx_dest
{
	ECORE_ROCE_LL2_TX_DEST_NW /* Light L2 TX Destination to the Network */,
	ECORE_ROCE_LL2_TX_DEST_LB /* Light L2 TX Destination to the Loopback */,
	ECORE_ROCE_LL2_TX_DEST_MAX
};

/* HW/FW RoCE Limitations (external; for internal limits see ecore_roce.h) */
/* CNQ size limitation:
 * The CNQ size should be set to twice the number of CQs, since for each CQ
 * one element may be inserted into the CNQ and another element is used per
 * CQ to accommodate a possible race in the arm mechanism.
 * The FW supports a CNQ of at most 64k-1 elements, which can be an issue -
 * note that the number of QPs can reach 32k, giving 64k CQs and 128k CNQ
 * elements. Luckily the FW can buffer CNQ elements, avoiding an overflow at
 * the expense of performance.
 */
#define ECORE_RDMA_MAX_CNQ_SIZE               (0xFFFF) /* 2^16 - 1 */
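
/* Illustrative sketch (not part of the API): deriving a CNQ size from the
 * number of CQs per the rule above, clamped to the FW limit. The helper
 * name is hypothetical.
 *
 *	static u32 example_cnq_size(u32 num_cqs)
 *	{
 *		u32 size = 2 * num_cqs; // one CNQ element per CQ event plus
 *					// one per CQ for the arm race
 *
 *		return (size > ECORE_RDMA_MAX_CNQ_SIZE) ?
 *			ECORE_RDMA_MAX_CNQ_SIZE : size;
 *	}
 */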

/* rdma interface */

enum ecore_roce_qp_state {
	ECORE_ROCE_QP_STATE_RESET, /* Reset */
	ECORE_ROCE_QP_STATE_INIT,  /* Initialized */
	ECORE_ROCE_QP_STATE_RTR,   /* Ready to Receive */
	ECORE_ROCE_QP_STATE_RTS,   /* Ready to Send */
	ECORE_ROCE_QP_STATE_SQD,   /* Send Queue Draining */
	ECORE_ROCE_QP_STATE_ERR,   /* Error */
	ECORE_ROCE_QP_STATE_SQE    /* Send Queue Error */
};

enum ecore_rdma_qp_type {
	ECORE_RDMA_QP_TYPE_RC,
	ECORE_RDMA_QP_TYPE_XRC_INI,
	ECORE_RDMA_QP_TYPE_XRC_TGT,
	ECORE_RDMA_QP_TYPE_INVAL = 0xffff,
};

enum ecore_rdma_tid_type
{
	ECORE_RDMA_TID_REGISTERED_MR,
	ECORE_RDMA_TID_FMR,
	ECORE_RDMA_TID_MW_TYPE1,
	ECORE_RDMA_TID_MW_TYPE2A
};

typedef
void (*affiliated_event_t)(void	*context,
			   u8	fw_event_code,
			   void	*fw_handle);

typedef
void (*unaffiliated_event_t)(void *context,
			     u8   event_code);

struct ecore_rdma_events {
	void			*context;
	affiliated_event_t	affiliated_event;
	unaffiliated_event_t	unaffiliated_event;
};

struct ecore_rdma_device {
	/* Vendor specific information */
	u32	vendor_id;
	u32	vendor_part_id;
	u32	hw_ver;
	u64	fw_ver;

	u64	node_guid; /* node GUID */
	u64	sys_image_guid; /* System image GUID */

	u8	max_cnq;
	u8	max_sge; /* The maximum number of scatter/gather entries
			  * per Work Request supported
			  */
	u8	max_srq_sge; /* The maximum number of scatter/gather entries
			      * per Work Request supported for SRQ
			      */
	u16	max_inline;
	u32	max_wqe; /* The maximum number of outstanding work
			  * requests on any Work Queue supported
			  */
	u32	max_srq_wqe; /* The maximum number of outstanding work
			      * requests on any Work Queue supported for SRQ
			      */
	u8	max_qp_resp_rd_atomic_resc; /* The maximum number of RDMA Reads
					     * & atomic operations that can be
					     * outstanding per QP
					     */

	u8	max_qp_req_rd_atomic_resc; /* The maximum depth per QP for
					    * initiation of RDMA Read
					    * & atomic operations
					    */
	u64	max_dev_resp_rd_atomic_resc;
	u32	max_cq;
	u32	max_qp;
	u32	max_srq; /* Maximum number of SRQs */
	u32	max_mr; /* Maximum number of MRs supported by this device */
	u64	max_mr_size; /* Size (in bytes) of the largest contiguous memory
			      * block that can be registered by this device
			      */
	u32	max_cqe;
	u32	max_mw; /* The maximum number of memory windows supported */
	u32	max_fmr;
	u32	max_mr_mw_fmr_pbl;
	u64	max_mr_mw_fmr_size;
	u32	max_pd; /* The maximum number of protection domains supported */
	u32	max_ah;
	u8	max_pkey;
	u16	max_srq_wr; /* Maximum number of WRs per SRQ */
	u8	max_stats_queues; /* Maximum number of statistics queues */
	u32	dev_caps;

	/* Ability to support RNR-NAK generation */
#define ECORE_RDMA_DEV_CAP_RNR_NAK_MASK				0x1
#define ECORE_RDMA_DEV_CAP_RNR_NAK_SHIFT			0
	/* Ability to support shutdown port */
#define ECORE_RDMA_DEV_CAP_SHUTDOWN_PORT_MASK			0x1
#define ECORE_RDMA_DEV_CAP_SHUTDOWN_PORT_SHIFT			1
	/* Ability to support port active event */
#define ECORE_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_MASK		0x1
#define ECORE_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT		2
	/* Ability to support port change event */
#define ECORE_RDMA_DEV_CAP_PORT_CHANGE_EVENT_MASK		0x1
#define ECORE_RDMA_DEV_CAP_PORT_CHANGE_EVENT_SHIFT		3
	/* Ability to support system image GUID */
#define ECORE_RDMA_DEV_CAP_SYS_IMAGE_MASK			0x1
#define ECORE_RDMA_DEV_CAP_SYS_IMAGE_SHIFT			4
	/* Ability to support a bad P_Key counter */
#define ECORE_RDMA_DEV_CAP_BAD_PKEY_CNT_MASK			0x1
#define ECORE_RDMA_DEV_CAP_BAD_PKEY_CNT_SHIFT			5
	/* Ability to support atomic operations */
#define ECORE_RDMA_DEV_CAP_ATOMIC_OP_MASK			0x1
#define ECORE_RDMA_DEV_CAP_ATOMIC_OP_SHIFT			6
	/* Ability to support CQ resizing */
#define ECORE_RDMA_DEV_CAP_RESIZE_CQ_MASK			0x1
#define ECORE_RDMA_DEV_CAP_RESIZE_CQ_SHIFT			7
	/* Ability to support modifying the maximum number of
	 * outstanding work requests per QP
	 */
#define ECORE_RDMA_DEV_CAP_RESIZE_MAX_WR_MASK			0x1
#define ECORE_RDMA_DEV_CAP_RESIZE_MAX_WR_SHIFT			8
	/* Ability to support automatic path migration */
#define ECORE_RDMA_DEV_CAP_AUTO_PATH_MIG_MASK			0x1
#define ECORE_RDMA_DEV_CAP_AUTO_PATH_MIG_SHIFT			9
	/* Ability to support the base memory management extensions */
#define ECORE_RDMA_DEV_CAP_BASE_MEMORY_EXT_MASK			0x1
#define ECORE_RDMA_DEV_CAP_BASE_MEMORY_EXT_SHIFT		10
#define ECORE_RDMA_DEV_CAP_BASE_QUEUE_EXT_MASK			0x1
#define ECORE_RDMA_DEV_CAP_BASE_QUEUE_EXT_SHIFT			11
	/* Ability to support multiple page sizes per memory region */
#define ECORE_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK		0x1
#define ECORE_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT		12
	/* Ability to support block list physical buffer lists */
#define ECORE_RDMA_DEV_CAP_BLOCK_MODE_MASK			0x1
#define ECORE_RDMA_DEV_CAP_BLOCK_MODE_SHIFT			13
	/* Ability to support zero based virtual addresses */
#define ECORE_RDMA_DEV_CAP_ZBVA_MASK				0x1
#define ECORE_RDMA_DEV_CAP_ZBVA_SHIFT				14
	/* Ability to support local invalidate fencing */
#define ECORE_RDMA_DEV_CAP_LOCAL_INV_FENCE_MASK			0x1
#define ECORE_RDMA_DEV_CAP_LOCAL_INV_FENCE_SHIFT		15
	/* Ability to support loopback on QP */
#define ECORE_RDMA_DEV_CAP_LB_INDICATOR_MASK			0x1
#define ECORE_RDMA_DEV_CAP_LB_INDICATOR_SHIFT			16
	u64	page_size_caps;
	u8	dev_ack_delay;
	u32	reserved_lkey; /* Value of reserved L_key */
	u32	bad_pkey_counter; /* Bad P_key counter support indicator */
	struct ecore_rdma_events events;
};
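
/* Usage sketch (illustrative, not part of the API): testing a dev_caps bit
 * with the MASK/SHIFT pairs above. The helper name is hypothetical.
 *
 *	static bool example_dev_supports_atomics(struct ecore_rdma_device *dev)
 *	{
 *		return (dev->dev_caps >> ECORE_RDMA_DEV_CAP_ATOMIC_OP_SHIFT) &
 *			ECORE_RDMA_DEV_CAP_ATOMIC_OP_MASK;
 *	}
 */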

enum ecore_port_state {
	ECORE_RDMA_PORT_UP,
	ECORE_RDMA_PORT_DOWN,
};

enum ecore_roce_capability {
	ECORE_ROCE_V1	= 1 << 0,
	ECORE_ROCE_V2	= 1 << 1,
};

struct ecore_rdma_port {
	enum ecore_port_state port_state;
	int	link_speed;
	u64	max_msg_size;
	u8	source_gid_table_len;
	void	*source_gid_table_ptr;
	u8	pkey_table_len;
	void	*pkey_table_ptr;
	u32	pkey_bad_counter;
	enum ecore_roce_capability capability;
};

struct ecore_rdma_cnq_params
{
	u8  num_pbl_pages; /* Number of pages in the PBL allocated
			    * for this queue
			    */
	u64 pbl_ptr; /* Address of the first entry of the queue PBL */
};

/* The CQ Mode affects the CQ doorbell transaction size.
 * 64/32 bit machines should configure to 32/16 bits respectively.
 */
enum ecore_rdma_cq_mode {
	ECORE_RDMA_CQ_MODE_16_BITS,
	ECORE_RDMA_CQ_MODE_32_BITS,
};
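
/* Illustrative sketch (not part of the API): picking the CQ mode from the
 * host word size, per the note above. The helper name is hypothetical.
 *
 *	static enum ecore_rdma_cq_mode example_pick_cq_mode(void)
 *	{
 *		return (sizeof(void *) == 8) ? ECORE_RDMA_CQ_MODE_32_BITS :
 *					       ECORE_RDMA_CQ_MODE_16_BITS;
 *	}
 */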

struct ecore_roce_dcqcn_params {
	u8	notification_point;
	u8	reaction_point;

	/* fields for notification point */
	u32	cnp_send_timeout;
	u8	cnp_dscp;
	u8	cnp_vlan_priority;

	/* fields for reaction point */
	u32	rl_bc_rate;  /* Byte Counter Limit. */
	u32	rl_max_rate; /* Maximum rate in Mbps resolution */
	u32	rl_r_ai;     /* Active increase rate */
	u32	rl_r_hai;    /* Hyper active increase rate */
	u32	dcqcn_gd;    /* Alpha denominator */
	u32	dcqcn_k_us;  /* Alpha update interval */
	u32	dcqcn_timeout_us;
};

struct ecore_rdma_glob_cfg {
	/* global tunables affecting all QPs created after they are
	 * set.
	 */
	u8 vlan_pri_en;
	u8 vlan_pri;
	u8 ecn_en;
	u8 ecn;
	u8 dscp_en;
	u8 dscp;
};

#ifndef LINUX_REMOVE
#define ECORE_RDMA_DCSP_BIT_MASK			0x01
#define ECORE_RDMA_DCSP_EN_BIT_MASK			0x02
#define ECORE_RDMA_ECN_BIT_MASK				0x04
#define ECORE_RDMA_ECN_EN_BIT_MASK			0x08
#define ECORE_RDMA_VLAN_PRIO_BIT_MASK		0x10
#define ECORE_RDMA_VLAN_PRIO_EN_BIT_MASK	0x20

enum _ecore_status_t
ecore_rdma_set_glob_cfg(struct ecore_hwfn *p_hwfn,
			struct ecore_rdma_glob_cfg *in_params,
			u32 glob_cfg_bits);

enum _ecore_status_t
ecore_rdma_get_glob_cfg(struct ecore_hwfn *p_hwfn,
			struct ecore_rdma_glob_cfg *out_params);
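
/* Usage sketch (illustrative): enabling DSCP marking globally, assuming
 * glob_cfg_bits selects which ecore_rdma_glob_cfg fields to apply. Note the
 * historical "DCSP" spelling in the mask names. The DSCP value is a
 * placeholder.
 *
 *	struct ecore_rdma_glob_cfg cfg = { 0 };
 *
 *	cfg.dscp_en = 1;
 *	cfg.dscp = 26; // hypothetical DSCP value
 *	rc = ecore_rdma_set_glob_cfg(p_hwfn, &cfg,
 *				     ECORE_RDMA_DCSP_BIT_MASK |
 *				     ECORE_RDMA_DCSP_EN_BIT_MASK);
 */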
#endif /* LINUX_REMOVE */

#ifdef CONFIG_ECORE_IWARP

#define ECORE_IWARP_MAX_LIS_BACKLOG		(256)

#define ECORE_MPA_RTR_TYPE_NONE		0 /* No RTR type */
#define ECORE_MPA_RTR_TYPE_ZERO_SEND	(1 << 0)
#define ECORE_MPA_RTR_TYPE_ZERO_WRITE	(1 << 1)
#define ECORE_MPA_RTR_TYPE_ZERO_READ	(1 << 2)

enum ecore_mpa_rev {
	ECORE_MPA_REV1,
	ECORE_MPA_REV2,
};

struct ecore_iwarp_params {
	u32				rcv_wnd_size;
	u16				ooo_num_rx_bufs;
#define ECORE_IWARP_TS_EN (1 << 0)
#define ECORE_IWARP_DA_EN (1 << 1)
	u8				flags;
	u8				crc_needed;
	enum ecore_mpa_rev		mpa_rev;
	u8				mpa_rtr;
	u8				mpa_peer2peer;
};

#endif

struct ecore_roce_params {
	enum ecore_rdma_cq_mode		cq_mode;
	struct ecore_roce_dcqcn_params	dcqcn_params;
	u8				ll2_handle; /* required for UD QPs */
};

struct ecore_rdma_start_in_params {
	struct ecore_rdma_events	*events;
	struct ecore_rdma_cnq_params	cnq_pbl_list[128];
	u8				desired_cnq;
	u16				max_mtu;
	u8				mac_addr[ETH_ALEN];
#ifdef CONFIG_ECORE_IWARP
	struct ecore_iwarp_params	iwarp;
#endif
	struct ecore_roce_params	roce;
};

struct ecore_rdma_add_user_out_params {
	/* output variables (given to miniport) */
	u16	dpi;
	u64	dpi_addr;
	u64	dpi_phys_addr;
	u32	dpi_size;
	u16	wid_count;
};

enum roce_mode
{
	ROCE_V1,
	ROCE_V2_IPV4,
	ROCE_V2_IPV6,
	MAX_ROCE_MODE
};

/* An ECORE GID can be used as an IPv4/6 address in RoCE v2 */
union ecore_gid {
	u8 bytes[16];
	u16 words[8];
	u32 dwords[4];
	u64 qwords[2];
	u32 ipv4_addr;
};

struct ecore_rdma_register_tid_in_params {
	/* input variables (given by miniport) */
	u32	itid; /* index only, 18 bits long; lkey = itid << 8 | key */
	enum ecore_rdma_tid_type tid_type;
	u8	key;
	u16	pd;
	bool	local_read;
	bool	local_write;
	bool	remote_read;
	bool	remote_write;
	bool	remote_atomic;
	bool	mw_bind;
	u64	pbl_ptr;
	bool	pbl_two_level;
	u8	pbl_page_size_log; /* for the pages that contain the pointers
				    * to the MR pages
				    */
	u8	page_size_log; /* for the MR pages */
	u32	fbo;
	u64	length; /* only the lower 40 bits are valid */
	u64	vaddr;
	bool	zbva;
	bool	phy_mr;
	bool	dma_mr;

	/* DIF related fields */
	bool	dif_enabled;
	u64	dif_error_addr;
	u64	dif_runt_addr;
};
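
/* Illustrative sketch (not part of the API): composing an lkey from the TID
 * index and key, following the "lkey = itid << 8 | key" rule noted above.
 * The helper name is hypothetical.
 *
 *	static u32 example_make_lkey(u32 itid, u8 key)
 *	{
 *		return (itid << 8) | key; // itid occupies the upper bits
 *	}
 */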

/* Returns the CQ CID or zero in case of failure */
struct ecore_rdma_create_cq_in_params {
	/* input variables (given by miniport) */
	u32	cq_handle_lo; /* CQ handle to be written in CNQ */
	u32	cq_handle_hi;
	u32	cq_size;
	u16	dpi;
	bool	pbl_two_level;
	u64	pbl_ptr;
	u16	pbl_num_pages;
	u8	pbl_page_size_log; /* for the pages that contain the
				    * pointers to the CQ pages
				    */
	u8	cnq_id;
	u16	int_timeout;
};
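
/* Illustrative sketch: splitting a 64-bit driver cookie across the
 * cq_handle_lo/hi pair so it can be delivered back through the CNQ. The
 * cookie and params variables are hypothetical.
 *
 *	params.cq_handle_lo = (u32)(cookie & 0xFFFFFFFF);
 *	params.cq_handle_hi = (u32)(cookie >> 32);
 */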

struct ecore_rdma_create_srq_in_params	{
	u64 pbl_base_addr;
	u64 prod_pair_addr;
	u16 num_pages;
	u16 pd_id;
	u16 page_size;

	/* XRC related only */
	bool is_xrc;
	u16 xrcd_id;
	u32 cq_cid;
	bool reserved_key_en;
};

struct ecore_rdma_destroy_cq_in_params {
	/* input variables (given by miniport) */
	u16 icid;
};

struct ecore_rdma_destroy_cq_out_params {
	/* output variables, provided to the upper layer */

	/* Sequence number of completion notification sent for the CQ on
	 * the associated CNQ
	 */
	u16	num_cq_notif;
};
#endif

struct ecore_rdma_resize_cq_in_params {
	/* input variables (given by miniport) */

	u16	icid;
	u32	cq_size;
	bool	pbl_two_level;
	u64	pbl_ptr;
	u16	pbl_num_pages;
	u8	pbl_page_size_log; /* for the pages that contain the
				    * pointers to the CQ pages
				    */
};

#ifndef __EXTRACT__LINUX__

struct ecore_rdma_create_qp_in_params {
	/* input variables (given by miniport) */
	u32	qp_handle_lo; /* QP handle to be written in CQE */
	u32	qp_handle_hi;
	u32	qp_handle_async_lo; /* QP handle to be written in async event */
	u32	qp_handle_async_hi;
	bool	use_srq;
	bool	signal_all;
	bool	fmr_and_reserved_lkey;
	u16	pd;
	u16	dpi;
	u16	sq_cq_id;
	u16	sq_num_pages;
	u64	sq_pbl_ptr;	/* Not relevant for iWARP */
	u8	max_sq_sges;
	u16	rq_cq_id;
	u16	rq_num_pages;
	u64	rq_pbl_ptr;	/* Not relevant for iWARP */
	u16	srq_id;
	u8	stats_queue;
	enum ecore_rdma_qp_type qp_type;
	u16	xrcd_id;
};

struct ecore_rdma_create_qp_out_params {
	/* output variables (given to miniport) */
	u32		qp_id;
	u16		icid;
	void		*rq_pbl_virt;
	dma_addr_t	rq_pbl_phys;
	void		*sq_pbl_virt;
	dma_addr_t	sq_pbl_phys;
};

struct ecore_rdma_modify_qp_in_params {
	/* input variables (given by miniport) */
	u32		modify_flags;
#define ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK               0x1
#define ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT              0
#define ECORE_ROCE_MODIFY_QP_VALID_PKEY_MASK                    0x1
#define ECORE_ROCE_MODIFY_QP_VALID_PKEY_SHIFT                   1
#define ECORE_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_MASK             0x1
#define ECORE_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_SHIFT            2
#define ECORE_ROCE_MODIFY_QP_VALID_DEST_QP_MASK                 0x1
#define ECORE_ROCE_MODIFY_QP_VALID_DEST_QP_SHIFT                3
#define ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_MASK          0x1
#define ECORE_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_SHIFT         4
#define ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK                  0x1
#define ECORE_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT                 5
#define ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN_MASK                  0x1
#define ECORE_ROCE_MODIFY_QP_VALID_SQ_PSN_SHIFT                 6
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_MASK       0x1
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_SHIFT      7
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_MASK      0x1
#define ECORE_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_SHIFT     8
#define ECORE_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_MASK             0x1
#define ECORE_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_SHIFT            9
#define ECORE_ROCE_MODIFY_QP_VALID_RETRY_CNT_MASK               0x1
#define ECORE_ROCE_MODIFY_QP_VALID_RETRY_CNT_SHIFT              10
#define ECORE_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_MASK           0x1
#define ECORE_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_SHIFT          11
#define ECORE_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_MASK       0x1
#define ECORE_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_SHIFT      12
#define ECORE_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_MASK     0x1
#define ECORE_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_SHIFT    13
#define ECORE_ROCE_MODIFY_QP_VALID_ROCE_MODE_MASK               0x1
#define ECORE_ROCE_MODIFY_QP_VALID_ROCE_MODE_SHIFT              14

	enum ecore_roce_qp_state	new_state;
	u16		pkey;
	bool		incoming_rdma_read_en;
	bool		incoming_rdma_write_en;
	bool		incoming_atomic_en;
	bool		e2e_flow_control_en;
	u32		dest_qp;
	u16		mtu;
	u8		traffic_class_tos; /* IPv6/GRH tc; IPv4 TOS */
	u8		hop_limit_ttl; /* IPv6/GRH hop limit; IPv4 TTL */
	u32		flow_label; /* ignored in IPv4 */
	union ecore_gid	sgid; /* GRH SGID; IPv4/6 Source IP */
	union ecore_gid	dgid; /* GRH DGID; IPv4/6 Destination IP */
	u16		udp_src_port; /* RoCEv2 only */

	u16		vlan_id;

	u32		rq_psn;
	u32		sq_psn;
	u8		max_rd_atomic_resp;
	u8		max_rd_atomic_req;
	u32		ack_timeout;
	u8		retry_cnt;
	u8		rnr_retry_cnt;
	u8		min_rnr_nak_timer;
	bool		sqd_async;
	u8		remote_mac_addr[6];
	u8		local_mac_addr[6];
	bool		use_local_mac;
	enum roce_mode	roce_mode;
};
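
/* Usage sketch (illustrative): driving a RESET->INIT transition. Each
 * modify_flags bit marks which of the fields above are valid, using the
 * MASK/SHIFT pairs.
 *
 *	struct ecore_rdma_modify_qp_in_params mparams = { 0 };
 *
 *	mparams.new_state = ECORE_ROCE_QP_STATE_INIT;
 *	mparams.modify_flags =
 *		ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK <<
 *		ECORE_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT;
 *	rc = ecore_rdma_modify_qp(rdma_cxt, qp, &mparams);
 */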

struct ecore_rdma_query_qp_out_params {
	/* output variables (given to miniport) */
	enum ecore_roce_qp_state	state;
	u32		rq_psn; /* responder */
	u32		sq_psn; /* requester */
	bool		draining; /* send queue is draining */
	u16		mtu;
	u32		dest_qp;
	bool		incoming_rdma_read_en;
	bool		incoming_rdma_write_en;
	bool		incoming_atomic_en;
	bool		e2e_flow_control_en;
	union ecore_gid sgid; /* GRH SGID; IPv4/6 Source IP */
	union ecore_gid dgid; /* GRH DGID; IPv4/6 Destination IP */
	u32		flow_label; /* ignored in IPv4 */
	u8		hop_limit_ttl; /* IPv6/GRH hop limit; IPv4 TTL */
	u8		traffic_class_tos; /* IPv6/GRH tc; IPv4 TOS */
	u32		timeout;
	u8		rnr_retry;
	u8		retry_cnt;
	u8		min_rnr_nak_timer;
	u16		pkey_index;
	u8		max_rd_atomic;
	u8		max_dest_rd_atomic;
	bool		sqd_async;
};

struct ecore_rdma_destroy_qp_out_params {
	u32		sq_cq_prod;
	u32		rq_cq_prod;
};

struct ecore_rdma_create_srq_out_params {
	u16 srq_id;
};

struct ecore_rdma_destroy_srq_in_params {
	u16 srq_id;
	bool is_xrc;
};

struct ecore_rdma_modify_srq_in_params {
	u32 wqe_limit;
	u16 srq_id;
	bool is_xrc;
};
#endif

struct ecore_rdma_resize_cq_out_params {
	/* output variables, provided to the upper layer */
	u32 prod; /* CQ producer value on old PBL */
	u32 cons; /* CQ consumer value on old PBL */
};

struct ecore_rdma_resize_cnq_in_params {
	/* input variables (given by miniport) */
	u32	cnq_id;
	u32	pbl_page_size_log; /* for the pages that contain the
				    * pointers to the CNQ pages
				    */
	u64	pbl_ptr;
};

#ifndef __EXTRACT__LINUX__
struct ecore_rdma_stats_out_params {
	u64	sent_bytes;
	u64	sent_pkts;
	u64	rcv_bytes;
	u64	rcv_pkts;

	/* RoCE only */
	u64	icrc_errors;		/* wraps at 32 bits */
	u64	retransmit_events;	/* wraps at 32 bits */
	u64	silent_drops;		/* wraps at 16 bits */
	u64	rnr_nacks_sent;		/* wraps at 16 bits */

	/* RoCE DCQCN */
	u64	ecn_pkt_rcv;
	u64	cnp_pkt_rcv;
	u64	cnp_pkt_sent;

	/* iWARP only */
	u64	iwarp_tx_fast_rxmit_cnt;
	u64	iwarp_tx_slow_start_cnt;
	u64	unalign_rx_comp;
};

struct ecore_rdma_counters_out_params {
	u64	pd_count;
	u64	max_pd;
	u64	dpi_count;
	u64	max_dpi;
	u64	cq_count;
	u64	max_cq;
	u64	qp_count;
	u64	max_qp;
	u64	tid_count;
	u64	max_tid;
	u64	srq_count;
	u64	max_srq;
	u64	xrc_srq_count;
	u64	max_xrc_srq;
	u64	xrcd_count;
	u64	max_xrcd;
};
#endif

enum _ecore_status_t
ecore_rdma_add_user(void *rdma_cxt,
		    struct ecore_rdma_add_user_out_params *out_params);

enum _ecore_status_t
ecore_rdma_alloc_pd(void *rdma_cxt,
		    u16	*pd);

enum _ecore_status_t
ecore_rdma_alloc_tid(void *rdma_cxt,
		     u32 *tid);

enum _ecore_status_t
ecore_rdma_create_cq(void *rdma_cxt,
		     struct ecore_rdma_create_cq_in_params *params,
		     u16 *icid);

/* Returns a pointer to the responder's CID, which is also a pointer to the
 * ecore_qp_params struct. Returns NULL in case of failure.
 */
struct ecore_rdma_qp*
ecore_rdma_create_qp(void *rdma_cxt,
		     struct ecore_rdma_create_qp_in_params  *in_params,
		     struct ecore_rdma_create_qp_out_params *out_params);
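
/* Usage sketch (illustrative): a minimal QP lifecycle built from the calls
 * declared in this header. Parameter setup is elided and the error
 * convention shown is an assumption.
 *
 *	struct ecore_rdma_create_qp_out_params qp_out = { 0 };
 *	struct ecore_rdma_destroy_qp_out_params d_out = { 0 };
 *	struct ecore_rdma_qp *qp;
 *
 *	qp = ecore_rdma_create_qp(rdma_cxt, &in_params, &qp_out);
 *	if (qp == NULL)
 *		return; // creation failed
 *	// ... step through INIT/RTR/RTS via ecore_rdma_modify_qp() ...
 *	ecore_rdma_destroy_qp(rdma_cxt, qp, &d_out);
 */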

enum _ecore_status_t
ecore_roce_create_ud_qp(void *rdma_cxt,
			struct ecore_rdma_create_qp_out_params *out_params);

enum _ecore_status_t
ecore_rdma_deregister_tid(void *rdma_cxt,
			  u32		tid);

enum _ecore_status_t
ecore_rdma_destroy_cq(void *rdma_cxt,
		      struct ecore_rdma_destroy_cq_in_params  *in_params,
		      struct ecore_rdma_destroy_cq_out_params *out_params);

enum _ecore_status_t
ecore_rdma_destroy_qp(void *rdma_cxt,
		      struct ecore_rdma_qp *qp,
		      struct ecore_rdma_destroy_qp_out_params *out_params);

enum _ecore_status_t
ecore_roce_destroy_ud_qp(void *rdma_cxt, u16 cid);

void
ecore_rdma_free_pd(void *rdma_cxt,
		   u16	pd);

enum _ecore_status_t
ecore_rdma_alloc_xrcd(void *rdma_cxt, u16 *xrcd_id);

void
ecore_rdma_free_xrcd(void *rdma_cxt, u16 xrcd_id);

void
ecore_rdma_free_tid(void *rdma_cxt,
		    u32	tid);

enum _ecore_status_t
ecore_rdma_modify_qp(void *rdma_cxt,
		     struct ecore_rdma_qp *qp,
		     struct ecore_rdma_modify_qp_in_params *params);

struct ecore_rdma_device*
ecore_rdma_query_device(void *rdma_cxt);

struct ecore_rdma_port*
ecore_rdma_query_port(void *rdma_cxt);

enum _ecore_status_t
ecore_rdma_query_qp(void *rdma_cxt,
		    struct ecore_rdma_qp		  *qp,
		    struct ecore_rdma_query_qp_out_params *out_params);

enum _ecore_status_t
ecore_rdma_register_tid(void *rdma_cxt,
			struct ecore_rdma_register_tid_in_params *params);

void ecore_rdma_remove_user(void *rdma_cxt,
			    u16		dpi);

enum _ecore_status_t
ecore_rdma_resize_cnq(void *rdma_cxt,
		      struct ecore_rdma_resize_cnq_in_params *in_params);

enum _ecore_status_t
ecore_rdma_resize_cq(void *rdma_cxt,
		     struct ecore_rdma_resize_cq_in_params  *in_params,
		     struct ecore_rdma_resize_cq_out_params *out_params);

/* Before calling rdma_start, the upper layer (VBD/qed) should fill in the
 * page size and MTU in the hwfn context.
 */
enum _ecore_status_t
ecore_rdma_start(void *p_hwfn,
		 struct ecore_rdma_start_in_params *params);
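
/* Usage sketch (illustrative): minimal start-up with one CNQ. Field values
 * here are placeholders; a real driver fills the PBL entries from DMA
 * allocations.
 *
 *	struct ecore_rdma_start_in_params sparams = { 0 };
 *
 *	sparams.events = &my_events; // hypothetical callback struct
 *	sparams.desired_cnq = 1;
 *	sparams.max_mtu = 1500;
 *	memcpy(sparams.mac_addr, port_mac, ETH_ALEN);
 *	sparams.cnq_pbl_list[0].num_pbl_pages = 1;
 *	sparams.cnq_pbl_list[0].pbl_ptr = cnq_pbl_dma_addr;
 *	rc = ecore_rdma_start(p_hwfn, &sparams);
 */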

enum _ecore_status_t
ecore_rdma_stop(void *rdma_cxt);

enum _ecore_status_t
ecore_rdma_query_stats(void *rdma_cxt, u8 stats_queue,
		       struct ecore_rdma_stats_out_params *out_parms);

enum _ecore_status_t
ecore_rdma_query_counters(void *rdma_cxt,
			  struct ecore_rdma_counters_out_params *out_parms);

u32 ecore_rdma_get_sb_id(struct ecore_hwfn *p_hwfn, u32 rel_sb_id);

#ifndef LINUX_REMOVE
u32 ecore_rdma_query_cau_timer_res(void);
#endif

void ecore_rdma_cnq_prod_update(void *rdma_cxt, u8 cnq_index, u16 prod);

void ecore_rdma_resc_free(struct ecore_hwfn *p_hwfn);

enum _ecore_status_t
ecore_rdma_create_srq(void *rdma_cxt,
		      struct ecore_rdma_create_srq_in_params *in_params,
		      struct ecore_rdma_create_srq_out_params *out_params);

enum _ecore_status_t
ecore_rdma_destroy_srq(void *rdma_cxt,
		       struct ecore_rdma_destroy_srq_in_params *in_params);

enum _ecore_status_t
ecore_rdma_modify_srq(void *rdma_cxt,
		      struct ecore_rdma_modify_srq_in_params *in_params);

#ifdef CONFIG_ECORE_IWARP

/* iWARP API */

#ifndef __EXTRACT__LINUX__

enum ecore_iwarp_event_type {
	ECORE_IWARP_EVENT_MPA_REQUEST, /* Passive side request received */
	ECORE_IWARP_EVENT_PASSIVE_COMPLETE, /* Passive side established
					     * (ack on MPA response)
					     */
	ECORE_IWARP_EVENT_LISTEN_PAUSE_COMP, /* Passive side will drop
					      * MPA requests
					      */
	ECORE_IWARP_EVENT_ACTIVE_COMPLETE, /* Active side reply received */
	ECORE_IWARP_EVENT_DISCONNECT,
	ECORE_IWARP_EVENT_CLOSE,
	/* Slow/Error path events start from here */
	ECORE_IWARP_EVENT_IRQ_FULL,
	ECORE_IWARP_ERROR_EVENTS_START = ECORE_IWARP_EVENT_IRQ_FULL,
	ECORE_IWARP_EVENT_RQ_EMPTY,
	ECORE_IWARP_EVENT_LLP_TIMEOUT,
	ECORE_IWARP_EVENT_REMOTE_PROTECTION_ERROR,
	ECORE_IWARP_EVENT_CQ_OVERFLOW,
	ECORE_IWARP_EVENT_QP_CATASTROPHIC,
	ECORE_IWARP_EVENT_ACTIVE_MPA_REPLY,
	ECORE_IWARP_EVENT_LOCAL_ACCESS_ERROR,
	ECORE_IWARP_EVENT_REMOTE_OPERATION_ERROR,
	ECORE_IWARP_EVENT_TERMINATE_RECEIVED
};

enum ecore_tcp_ip_version
{
	ECORE_TCP_IPV4,
	ECORE_TCP_IPV6,
};

struct ecore_iwarp_cm_info {
	enum ecore_tcp_ip_version ip_version;
	u32 remote_ip[4];
	u32 local_ip[4];
	u16 remote_port;
	u16 local_port;
	u16 vlan;
	const void *private_data;
	u16 private_data_len;
	u8 ord;
	u8 ird;
};

struct ecore_iwarp_cm_event_params {
	enum ecore_iwarp_event_type event;
	const struct ecore_iwarp_cm_info *cm_info;
	void *ep_context; /* To be passed to accept call */
	int status;
};

typedef int (*iwarp_event_handler)(void *context,
				   struct ecore_iwarp_cm_event_params *event);

/* Active Side Connect Flow:
 * The upper layer driver calls ecore_iwarp_connect. The function is
 * blocking, i.e. it returns after the TCP connection is established.
 * After the MPA connection is established, the
 * ECORE_IWARP_EVENT_ACTIVE_COMPLETE event is passed to the upper layer
 * driver using the event_cb passed in ecore_iwarp_connect_in. Information
 * about the established connection is initialized in the event data. A
 * usage sketch follows the structures below.
 */
struct ecore_iwarp_connect_in {
	iwarp_event_handler event_cb;
	void *cb_context;
	struct ecore_rdma_qp *qp;
	struct ecore_iwarp_cm_info cm_info;
	u16 mss;
	u8 remote_mac_addr[6];
	u8 local_mac_addr[6];
};

struct ecore_iwarp_connect_out {
	void *ep_context;
};
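
/* Usage sketch (illustrative): active-side connect. cm_info setup is
 * abbreviated; my_event_cb and my_ctx are hypothetical.
 *
 *	struct ecore_iwarp_connect_in iparams = { 0 };
 *	struct ecore_iwarp_connect_out oparams = { 0 };
 *
 *	iparams.event_cb = my_event_cb; // later receives ACTIVE_COMPLETE
 *	iparams.cb_context = my_ctx;
 *	iparams.qp = qp;
 *	iparams.cm_info.ip_version = ECORE_TCP_IPV4;
 *	// ... fill cm_info addresses/ports, mss, MAC addresses ...
 *	rc = ecore_iwarp_connect(rdma_cxt, &iparams, &oparams);
 */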

/* Passive side connect flow:
 * The upper layer driver calls ecore_iwarp_create_listen. Once a SYN packet
 * arrives that matches an IP/port being listened on, ecore offloads the TCP
 * connection. After an MPA request is received on the offloaded connection,
 * the ECORE_IWARP_EVENT_MPA_REQUEST event is sent to the upper layer driver
 * using the event_cb passed below, with the event data placed in the event
 * parameter. After the upper layer driver processes the event,
 * ecore_iwarp_accept or ecore_iwarp_reject should be called to continue MPA
 * negotiation. Once negotiation is complete, the
 * ECORE_IWARP_EVENT_PASSIVE_COMPLETE event is passed to the event_cb passed
 * originally in the ecore_iwarp_listen_in structure. A handler sketch
 * follows the structures below.
 */
struct ecore_iwarp_listen_in {
	iwarp_event_handler event_cb; /* Callback func for delivering events */
	void *cb_context; /* passed to event_cb */
	u32 max_backlog; /* Max num of pending incoming connection requests */
	enum ecore_tcp_ip_version ip_version;
	u32 ip_addr[4];
	u16 port;
	u16 vlan;
};

struct ecore_iwarp_listen_out {
	void *handle; /* to be sent to destroy */
};

struct ecore_iwarp_accept_in {
	void *ep_context; /* From event data of ECORE_IWARP_EVENT_MPA_REQUEST */
	void *cb_context; /* context to be passed to event_cb */
	struct ecore_rdma_qp *qp;
	const void *private_data;
	u16 private_data_len;
	u8 ord;
	u8 ird;
};

struct ecore_iwarp_reject_in {
	void *ep_context; /* From event data of ECORE_IWARP_EVENT_MPA_REQUEST */
	void *cb_context; /* context to be passed to event_cb */
	const void *private_data;
	u16 private_data_len;
};
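
/* Handler sketch (illustrative) for the passive flow described above: on
 * ECORE_IWARP_EVENT_MPA_REQUEST, accept the connection using the ep_context
 * from the event data. QP setup and the rdma_cxt plumbing are elided.
 *
 *	static int example_event_cb(void *ctx,
 *				    struct ecore_iwarp_cm_event_params *ev)
 *	{
 *		if (ev->event == ECORE_IWARP_EVENT_MPA_REQUEST) {
 *			struct ecore_iwarp_accept_in acc = { 0 };
 *
 *			acc.ep_context = ev->ep_context;
 *			acc.cb_context = ctx;
 *			// ... set qp, ord/ird, optional private data ...
 *			return ecore_iwarp_accept(rdma_cxt, &acc);
 *		}
 *		return 0;
 *	}
 */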

struct ecore_iwarp_send_rtr_in {
	void *ep_context;
};

struct ecore_iwarp_tcp_abort_in {
	void *ep_context;
};

#endif

enum _ecore_status_t
ecore_iwarp_connect(void *rdma_cxt,
		    struct ecore_iwarp_connect_in *iparams,
		    struct ecore_iwarp_connect_out *oparams);

enum _ecore_status_t
ecore_iwarp_create_listen(void *rdma_cxt,
			  struct ecore_iwarp_listen_in *iparams,
			  struct ecore_iwarp_listen_out *oparams);

enum _ecore_status_t
ecore_iwarp_accept(void *rdma_cxt,
		   struct ecore_iwarp_accept_in *iparams);

enum _ecore_status_t
ecore_iwarp_reject(void *rdma_cxt,
		   struct ecore_iwarp_reject_in *iparams);

enum _ecore_status_t
ecore_iwarp_destroy_listen(void *rdma_cxt, void *handle);

enum _ecore_status_t
ecore_iwarp_send_rtr(void *rdma_cxt, struct ecore_iwarp_send_rtr_in *iparams);

enum _ecore_status_t
ecore_iwarp_pause_listen(void *rdma_cxt, void *handle, bool pause, bool comp);

#endif /* CONFIG_ECORE_IWARP */

#endif /* __ECORE_RDMA_API_H__ */