xref: /freebsd/sys/dev/qlnx/qlnxe/rdma_common.h (revision 9768746b)
1 /*
2  * Copyright (c) 2017-2018 Cavium, Inc.
3  * All rights reserved.
4  *
5  *  Redistribution and use in source and binary forms, with or without
6  *  modification, are permitted provided that the following conditions
7  *  are met:
8  *
9  *  1. Redistributions of source code must retain the above copyright
10  *     notice, this list of conditions and the following disclaimer.
11  *  2. Redistributions in binary form must reproduce the above copyright
12  *     notice, this list of conditions and the following disclaimer in the
13  *     documentation and/or other materials provided with the distribution.
14  *
15  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
19  *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  *  POSSIBILITY OF SUCH DAMAGE.
26  *
27  * $FreeBSD$
28  *
29  */
30 
31 #ifndef __RDMA_COMMON__
32 #define __RDMA_COMMON__
33 /************************************************************************/
34 /* Add include to common rdma target for both eCore and protocol rdma driver */
35 /************************************************************************/
36 
/* Protocol-wide RDMA constants. These are firmware-interface values; do not
 * change them without a matching firmware update. */
#define RDMA_RESERVED_LKEY                      (0)                     //Reserved lkey
#define RDMA_RING_PAGE_SIZE                     (0x1000)        //4KB pages

#define RDMA_MAX_SGE_PER_SQ_WQE         (4)             //max number of SGEs in a single request
#define RDMA_MAX_SGE_PER_RQ_WQE         (4)             //max number of SGEs in a single request

#define RDMA_MAX_DATA_SIZE_IN_WQE       (0x80000000)    //max size of data in single request

/* Per-element sizes (bytes) of the RD/atomic tracking structures. */
#define RDMA_REQ_RD_ATOMIC_ELM_SIZE             (0x50)
#define RDMA_RESP_RD_ATOMIC_ELM_SIZE    (0x20)

/* Device-wide resource limits. */
#define RDMA_MAX_CQS                            (64*1024)
#define RDMA_MAX_TIDS                           (128*1024-1)
#define RDMA_MAX_PDS                            (64*1024)
#define RDMA_MAX_XRC_SRQS                       (1024)
#define RDMA_MAX_SRQS                           (32*1024)

/* Statistic counter counts per ASIC variant; MAX_NUM_VPORTS* are defined in
 * the common HSI headers (K2/BB are chip generations). */
#define RDMA_NUM_STATISTIC_COUNTERS                     MAX_NUM_VPORTS
#define RDMA_NUM_STATISTIC_COUNTERS_K2                  MAX_NUM_VPORTS_K2
#define RDMA_NUM_STATISTIC_COUNTERS_BB                  MAX_NUM_VPORTS_BB

/* Task type used for RDMA context allocation (RoCE protocol ID). */
#define RDMA_TASK_TYPE (PROTOCOLID_ROCE)
59 
/*
 * SRQ identifier: SRQ index plus the opaque FID of the owning function.
 */
struct rdma_srq_id
{
	__le16 srq_idx /* SRQ index */;
	__le16 opaque_fid /* opaque FID of the function owning the SRQ */;
};
65 
/*
 * SRQ producer counters, written by the driver for firmware consumption.
 */
struct rdma_srq_producers
{
	__le32 sge_prod /* Current produced sge in SRQ */;
	__le32 wqe_prod /* Current produced WQE to SRQ */;
};
71 
72 /*
73  * rdma completion notification queue element
74  */
75 struct rdma_cnqe
76 {
77 	struct regpair cq_handle;
78 };
79 
/*
 * Responder-side CQE format (completion of an RQ or SRQ work request).
 * Layout must match firmware exactly; see union rdma_cqe.
 */
struct rdma_cqe_responder
{
	struct regpair srq_wr_id /* WR id of the completed SRQ WQE (SRQ completions only) */;
	struct regpair qp_handle /* handle of the QP this completion belongs to */;
	__le32 imm_data_or_inv_r_Key /* immediate data in case imm_flg is set, or invalidated r_key in case inv_flg is set */;
	__le32 length /* number of bytes received */;
	__le32 imm_data_hi /* High bytes of immediate data in case imm_flg is set in iWARP only */;
	__le16 rq_cons /* Valid only when status is WORK_REQUEST_FLUSHED_ERR. Indicates an aggregative flush on all posted RQ WQEs until the reported rq_cons. */;
	u8 flags;
#define RDMA_CQE_RESPONDER_TOGGLE_BIT_MASK  0x1 /* indicates a valid completion written by FW. FW toggle this bit each time it finishes producing all PBL entries */
#define RDMA_CQE_RESPONDER_TOGGLE_BIT_SHIFT 0
#define RDMA_CQE_RESPONDER_TYPE_MASK        0x3 /*  (use enum rdma_cqe_type) */
#define RDMA_CQE_RESPONDER_TYPE_SHIFT       1
#define RDMA_CQE_RESPONDER_INV_FLG_MASK     0x1 /* r_key invalidated indicator */
#define RDMA_CQE_RESPONDER_INV_FLG_SHIFT    3
#define RDMA_CQE_RESPONDER_IMM_FLG_MASK     0x1 /* immediate data indicator */
#define RDMA_CQE_RESPONDER_IMM_FLG_SHIFT    4
#define RDMA_CQE_RESPONDER_RDMA_FLG_MASK    0x1 /* 1=this CQE relates to an RDMA Write. 0=Send. */
#define RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT   5
#define RDMA_CQE_RESPONDER_RESERVED2_MASK   0x3
#define RDMA_CQE_RESPONDER_RESERVED2_SHIFT  6
	u8 status /* completion status (use enum rdma_cqe_responder_status_enum) */;
};
103 
/*
 * Requester-side CQE format (completion of an SQ work request).
 * Layout must match firmware exactly; see union rdma_cqe.
 */
struct rdma_cqe_requester
{
	__le16 sq_cons /* SQ consumer index after this completion */;
	__le16 reserved0;
	__le32 reserved1;
	struct regpair qp_handle /* handle of the QP this completion belongs to */;
	struct regpair reserved2;
	__le32 reserved3;
	__le16 reserved4;
	u8 flags;
#define RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK  0x1 /* indicates a valid completion written by FW. FW toggle this bit each time it finishes producing all PBL entries */
#define RDMA_CQE_REQUESTER_TOGGLE_BIT_SHIFT 0
#define RDMA_CQE_REQUESTER_TYPE_MASK        0x3 /*  (use enum rdma_cqe_type) */
#define RDMA_CQE_REQUESTER_TYPE_SHIFT       1
#define RDMA_CQE_REQUESTER_RESERVED5_MASK   0x1F
#define RDMA_CQE_REQUESTER_RESERVED5_SHIFT  3
	u8 status /* completion status (use enum rdma_cqe_requester_status_enum) */;
};
122 
/*
 * Fields shared by all CQE variants (toggle/type/status live at the same
 * offsets in every CQE); used to classify a CQE before decoding it.
 */
struct rdma_cqe_common
{
	struct regpair reserved0;
	struct regpair qp_handle /* handle of the QP this completion belongs to */;
	__le16 reserved1[7];
	u8 flags;
#define RDMA_CQE_COMMON_TOGGLE_BIT_MASK  0x1 /* indicates a valid completion written by FW. FW toggle this bit each time it finishes producing all PBL entries */
#define RDMA_CQE_COMMON_TOGGLE_BIT_SHIFT 0
#define RDMA_CQE_COMMON_TYPE_MASK        0x3 /*  (use enum rdma_cqe_type) */
#define RDMA_CQE_COMMON_TYPE_SHIFT       1
#define RDMA_CQE_COMMON_RESERVED2_MASK   0x1F
#define RDMA_CQE_COMMON_RESERVED2_SHIFT  3
	u8 status /* completion status; interpretation depends on the CQE type */;
};
137 
138 /*
139  * rdma completion queue element
140  */
141 union rdma_cqe
142 {
143 	struct rdma_cqe_responder resp;
144 	struct rdma_cqe_requester req;
145 	struct rdma_cqe_common cmn;
146 };
147 
148 /*
149  * CQE requester status enumeration
150  */
151 enum rdma_cqe_requester_status_enum
152 {
153 	RDMA_CQE_REQ_STS_OK,
154 	RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR,
155 	RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR,
156 	RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR,
157 	RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR,
158 	RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR,
159 	RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR,
160 	RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR,
161 	RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR,
162 	RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR,
163 	RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR,
164 	RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR,
165 	MAX_RDMA_CQE_REQUESTER_STATUS_ENUM
166 };
167 
168 /*
169  * CQE responder status enumeration
170  */
171 enum rdma_cqe_responder_status_enum
172 {
173 	RDMA_CQE_RESP_STS_OK,
174 	RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR,
175 	RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR,
176 	RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR,
177 	RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR,
178 	RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR,
179 	RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR,
180 	RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR,
181 	MAX_RDMA_CQE_RESPONDER_STATUS_ENUM
182 };
183 
184 /*
185  * CQE type enumeration
186  */
187 enum rdma_cqe_type
188 {
189 	RDMA_CQE_TYPE_REQUESTER,
190 	RDMA_CQE_TYPE_RESPONDER_RQ,
191 	RDMA_CQE_TYPE_RESPONDER_SRQ,
192 	RDMA_CQE_TYPE_INVALID,
193 	MAX_RDMA_CQE_TYPE
194 };
195 
196 /*
197  * DIF Block size options
198  */
199 enum rdma_dif_block_size
200 {
201 	RDMA_DIF_BLOCK_512=0,
202 	RDMA_DIF_BLOCK_4096=1,
203 	MAX_RDMA_DIF_BLOCK_SIZE
204 };
205 
206 /*
207  * DIF CRC initial value
208  */
209 enum rdma_dif_crc_seed
210 {
211 	RDMA_DIF_CRC_SEED_0000=0,
212 	RDMA_DIF_CRC_SEED_FFFF=1,
213 	MAX_RDMA_DIF_CRC_SEED
214 };
215 
216 /*
217  * RDMA DIF Error Result Structure
218  */
219 struct rdma_dif_error_result
220 {
221 	__le32 error_intervals /* Total number of error intervals in the IO. */;
222 	__le32 dif_error_1st_interval /* Number of the first interval that contained error. Set to 0xFFFFFFFF if error occurred in the Runt Block. */;
223 	u8 flags;
224 #define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC_MASK      0x1 /* CRC error occurred. */
225 #define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_CRC_SHIFT     0
226 #define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_APP_TAG_MASK  0x1 /* App Tag error occurred. */
227 #define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_APP_TAG_SHIFT 1
228 #define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_REF_TAG_MASK  0x1 /* Ref Tag error occurred. */
229 #define RDMA_DIF_ERROR_RESULT_DIF_ERROR_TYPE_REF_TAG_SHIFT 2
230 #define RDMA_DIF_ERROR_RESULT_RESERVED0_MASK               0xF
231 #define RDMA_DIF_ERROR_RESULT_RESERVED0_SHIFT              3
232 #define RDMA_DIF_ERROR_RESULT_TOGGLE_BIT_MASK              0x1 /* Used to indicate the structure is valid. Toggles each time an invalidate region is performed. */
233 #define RDMA_DIF_ERROR_RESULT_TOGGLE_BIT_SHIFT             7
234 	u8 reserved1[55] /* Pad to 64 bytes to ensure efficient word line writing. */;
235 };
236 
237 /*
238  * DIF IO direction
239  */
240 enum rdma_dif_io_direction_flg
241 {
242 	RDMA_DIF_DIR_RX=0,
243 	RDMA_DIF_DIR_TX=1,
244 	MAX_RDMA_DIF_IO_DIRECTION_FLG
245 };
246 
247 /*
248  * RDMA DIF Runt Result Structure
249  */
250 struct rdma_dif_runt_result
251 {
252 	__le16 guard_tag /* CRC result of received IO. */;
253 	__le16 reserved[3];
254 };
255 
256 /*
257  * memory window type enumeration
258  */
259 enum rdma_mw_type
260 {
261 	RDMA_MW_TYPE_1,
262 	RDMA_MW_TYPE_2A,
263 	MAX_RDMA_MW_TYPE
264 };
265 
/*
 * Receive-queue scatter/gather element: buffer address, length, and an l_key
 * plus SGE count packed into the flags word.
 */
struct rdma_rq_sge
{
	struct regpair addr /* buffer address (64-bit) */;
	__le32 length /* buffer length in bytes */;
	__le32 flags;
#define RDMA_RQ_SGE_L_KEY_MASK      0x3FFFFFF /* key of memory relating to this RQ */
#define RDMA_RQ_SGE_L_KEY_SHIFT     0
#define RDMA_RQ_SGE_NUM_SGES_MASK   0x7 /* first SGE - number of SGEs in this RQ WQE. Other SGEs - should be set to 0 */
#define RDMA_RQ_SGE_NUM_SGES_SHIFT  26
#define RDMA_RQ_SGE_RESERVED0_MASK  0x7
#define RDMA_RQ_SGE_RESERVED0_SHIFT 29
};
278 
/*
 * SQ atomic WQE (full 48-byte view). The same bytes can also be read as the
 * three 16-byte elements rdma_sq_atomic_wqe_1st/2nd/3rd below.
 */
struct rdma_sq_atomic_wqe
{
	__le32 reserved1;
	__le32 length /* Total data length (8 bytes for Atomic) */;
	__le32 xrc_srq /* Valid only when XRC is set for the QP */;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_ATOMIC_WQE_COMP_FLG_MASK         0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_ATOMIC_WQE_COMP_FLG_SHIFT        0
#define RDMA_SQ_ATOMIC_WQE_RD_FENCE_FLG_MASK     0x1 /* If set, all pending RDMA read or Atomic operations will be completed before start processing this WQE */
#define RDMA_SQ_ATOMIC_WQE_RD_FENCE_FLG_SHIFT    1
#define RDMA_SQ_ATOMIC_WQE_INV_FENCE_FLG_MASK    0x1 /* If set, all pending operations will be completed before start processing this WQE */
#define RDMA_SQ_ATOMIC_WQE_INV_FENCE_FLG_SHIFT   2
#define RDMA_SQ_ATOMIC_WQE_SE_FLG_MASK           0x1 /* Dont care for atomic wqe */
#define RDMA_SQ_ATOMIC_WQE_SE_FLG_SHIFT          3
#define RDMA_SQ_ATOMIC_WQE_INLINE_FLG_MASK       0x1 /* Should be 0 for atomic wqe */
#define RDMA_SQ_ATOMIC_WQE_INLINE_FLG_SHIFT      4
#define RDMA_SQ_ATOMIC_WQE_DIF_ON_HOST_FLG_MASK  0x1 /* Should be 0 for atomic wqe */
#define RDMA_SQ_ATOMIC_WQE_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_ATOMIC_WQE_RESERVED0_MASK        0x3
#define RDMA_SQ_ATOMIC_WQE_RESERVED0_SHIFT       6
	u8 wqe_size /* Size of WQE in 16B chunks including SGE */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
	struct regpair remote_va /* remote virtual address */;
	__le32 r_key /* Remote key */;
	__le32 reserved2;
	struct regpair cmp_data /* Data to compare in case of ATOMIC_CMP_AND_SWAP */;
	struct regpair swap_data /* Swap or add data */;
};
308 
309 /*
310  * First element (16 bytes) of atomic wqe
311  */
312 struct rdma_sq_atomic_wqe_1st
313 {
314 	__le32 reserved1;
315 	__le32 length /* Total data length (8 bytes for Atomic) */;
316 	__le32 xrc_srq /* Valid only when XRC is set for the QP */;
317 	u8 req_type /* Type of WQE */;
318 	u8 flags;
319 #define RDMA_SQ_ATOMIC_WQE_1ST_COMP_FLG_MASK       0x1 /* If set, completion will be generated when the WQE is completed */
320 #define RDMA_SQ_ATOMIC_WQE_1ST_COMP_FLG_SHIFT      0
321 #define RDMA_SQ_ATOMIC_WQE_1ST_RD_FENCE_FLG_MASK   0x1 /* If set, all pending RDMA read or Atomic operations will be completed before start processing this WQE */
322 #define RDMA_SQ_ATOMIC_WQE_1ST_RD_FENCE_FLG_SHIFT  1
323 #define RDMA_SQ_ATOMIC_WQE_1ST_INV_FENCE_FLG_MASK  0x1 /* If set, all pending operations will be completed before start processing this WQE */
324 #define RDMA_SQ_ATOMIC_WQE_1ST_INV_FENCE_FLG_SHIFT 2
325 #define RDMA_SQ_ATOMIC_WQE_1ST_SE_FLG_MASK         0x1 /* Dont care for atomic wqe */
326 #define RDMA_SQ_ATOMIC_WQE_1ST_SE_FLG_SHIFT        3
327 #define RDMA_SQ_ATOMIC_WQE_1ST_INLINE_FLG_MASK     0x1 /* Should be 0 for atomic wqe */
328 #define RDMA_SQ_ATOMIC_WQE_1ST_INLINE_FLG_SHIFT    4
329 #define RDMA_SQ_ATOMIC_WQE_1ST_RESERVED0_MASK      0x7
330 #define RDMA_SQ_ATOMIC_WQE_1ST_RESERVED0_SHIFT     5
331 	u8 wqe_size /* Size of WQE in 16B chunks including all SGEs. Set to number of SGEs + 1. */;
332 	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
333 };
334 
335 /*
336  * Second element (16 bytes) of atomic wqe
337  */
338 struct rdma_sq_atomic_wqe_2nd
339 {
340 	struct regpair remote_va /* remote virtual address */;
341 	__le32 r_key /* Remote key */;
342 	__le32 reserved2;
343 };
344 
345 /*
346  * Third element (16 bytes) of atomic wqe
347  */
348 struct rdma_sq_atomic_wqe_3rd
349 {
350 	struct regpair cmp_data /* Data to compare in case of ATOMIC_CMP_AND_SWAP */;
351 	struct regpair swap_data /* Swap or add data */;
352 };
353 
/*
 * SQ memory-window bind WQE (full view). The same bytes can also be read as
 * the two 16-byte elements rdma_sq_bind_wqe_1st/2nd below.
 */
struct rdma_sq_bind_wqe
{
	struct regpair addr /* start virtual address of the window */;
	__le32 l_key;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_BIND_WQE_COMP_FLG_MASK       0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_BIND_WQE_COMP_FLG_SHIFT      0
#define RDMA_SQ_BIND_WQE_RD_FENCE_FLG_MASK   0x1 /* If set, all pending RDMA read or Atomic operations will be completed before start processing this WQE */
#define RDMA_SQ_BIND_WQE_RD_FENCE_FLG_SHIFT  1
#define RDMA_SQ_BIND_WQE_INV_FENCE_FLG_MASK  0x1 /* If set, all pending operations will be completed before start processing this WQE */
#define RDMA_SQ_BIND_WQE_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_BIND_WQE_SE_FLG_MASK         0x1 /* Dont care for bind wqe */
#define RDMA_SQ_BIND_WQE_SE_FLG_SHIFT        3
#define RDMA_SQ_BIND_WQE_INLINE_FLG_MASK     0x1 /* Should be 0 for bind wqe */
#define RDMA_SQ_BIND_WQE_INLINE_FLG_SHIFT    4
#define RDMA_SQ_BIND_WQE_RESERVED0_MASK      0x7
#define RDMA_SQ_BIND_WQE_RESERVED0_SHIFT     5
	u8 wqe_size /* Size of WQE in 16B chunks */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
	u8 bind_ctrl;
#define RDMA_SQ_BIND_WQE_ZERO_BASED_MASK     0x1 /* zero based indication */
#define RDMA_SQ_BIND_WQE_ZERO_BASED_SHIFT    0
#define RDMA_SQ_BIND_WQE_MW_TYPE_MASK        0x1 /*  (use enum rdma_mw_type) */
#define RDMA_SQ_BIND_WQE_MW_TYPE_SHIFT       1
#define RDMA_SQ_BIND_WQE_RESERVED1_MASK      0x3F
#define RDMA_SQ_BIND_WQE_RESERVED1_SHIFT     2
	u8 access_ctrl /* access-permission bits granted to the window */;
#define RDMA_SQ_BIND_WQE_REMOTE_READ_MASK    0x1
#define RDMA_SQ_BIND_WQE_REMOTE_READ_SHIFT   0
#define RDMA_SQ_BIND_WQE_REMOTE_WRITE_MASK   0x1
#define RDMA_SQ_BIND_WQE_REMOTE_WRITE_SHIFT  1
#define RDMA_SQ_BIND_WQE_ENABLE_ATOMIC_MASK  0x1
#define RDMA_SQ_BIND_WQE_ENABLE_ATOMIC_SHIFT 2
#define RDMA_SQ_BIND_WQE_LOCAL_READ_MASK     0x1
#define RDMA_SQ_BIND_WQE_LOCAL_READ_SHIFT    3
#define RDMA_SQ_BIND_WQE_LOCAL_WRITE_MASK    0x1
#define RDMA_SQ_BIND_WQE_LOCAL_WRITE_SHIFT   4
#define RDMA_SQ_BIND_WQE_RESERVED2_MASK      0x7
#define RDMA_SQ_BIND_WQE_RESERVED2_SHIFT     5
	u8 reserved3;
	u8 length_hi /* upper 8 bits of the registered MW length */;
	__le32 length_lo /* lower 32 bits of the registered MW length */;
	__le32 parent_l_key /* l_key of the parent MR */;
	__le32 reserved4;
};
400 
401 /*
402  * First element (16 bytes) of bind wqe
403  */
404 struct rdma_sq_bind_wqe_1st
405 {
406 	struct regpair addr;
407 	__le32 l_key;
408 	u8 req_type /* Type of WQE */;
409 	u8 flags;
410 #define RDMA_SQ_BIND_WQE_1ST_COMP_FLG_MASK       0x1 /* If set, completion will be generated when the WQE is completed */
411 #define RDMA_SQ_BIND_WQE_1ST_COMP_FLG_SHIFT      0
412 #define RDMA_SQ_BIND_WQE_1ST_RD_FENCE_FLG_MASK   0x1 /* If set, all pending RDMA read or Atomic operations will be completed before start processing this WQE */
413 #define RDMA_SQ_BIND_WQE_1ST_RD_FENCE_FLG_SHIFT  1
414 #define RDMA_SQ_BIND_WQE_1ST_INV_FENCE_FLG_MASK  0x1 /* If set, all pending operations will be completed before start processing this WQE */
415 #define RDMA_SQ_BIND_WQE_1ST_INV_FENCE_FLG_SHIFT 2
416 #define RDMA_SQ_BIND_WQE_1ST_SE_FLG_MASK         0x1 /* Dont care for bind wqe */
417 #define RDMA_SQ_BIND_WQE_1ST_SE_FLG_SHIFT        3
418 #define RDMA_SQ_BIND_WQE_1ST_INLINE_FLG_MASK     0x1 /* Should be 0 for bind wqe */
419 #define RDMA_SQ_BIND_WQE_1ST_INLINE_FLG_SHIFT    4
420 #define RDMA_SQ_BIND_WQE_1ST_RESERVED0_MASK      0x7
421 #define RDMA_SQ_BIND_WQE_1ST_RESERVED0_SHIFT     5
422 	u8 wqe_size /* Size of WQE in 16B chunks */;
423 	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
424 };
425 
426 /*
427  * Second element (16 bytes) of bind wqe
428  */
429 struct rdma_sq_bind_wqe_2nd
430 {
431 	u8 bind_ctrl;
432 #define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_MASK     0x1 /* zero based indication */
433 #define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_SHIFT    0
434 #define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_MASK        0x1 /*  (use enum rdma_mw_type) */
435 #define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_SHIFT       1
436 #define RDMA_SQ_BIND_WQE_2ND_RESERVED1_MASK      0x3F
437 #define RDMA_SQ_BIND_WQE_2ND_RESERVED1_SHIFT     2
438 	u8 access_ctrl;
439 #define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_MASK    0x1
440 #define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_SHIFT   0
441 #define RDMA_SQ_BIND_WQE_2ND_REMOTE_WRITE_MASK   0x1
442 #define RDMA_SQ_BIND_WQE_2ND_REMOTE_WRITE_SHIFT  1
443 #define RDMA_SQ_BIND_WQE_2ND_ENABLE_ATOMIC_MASK  0x1
444 #define RDMA_SQ_BIND_WQE_2ND_ENABLE_ATOMIC_SHIFT 2
445 #define RDMA_SQ_BIND_WQE_2ND_LOCAL_READ_MASK     0x1
446 #define RDMA_SQ_BIND_WQE_2ND_LOCAL_READ_SHIFT    3
447 #define RDMA_SQ_BIND_WQE_2ND_LOCAL_WRITE_MASK    0x1
448 #define RDMA_SQ_BIND_WQE_2ND_LOCAL_WRITE_SHIFT   4
449 #define RDMA_SQ_BIND_WQE_2ND_RESERVED2_MASK      0x7
450 #define RDMA_SQ_BIND_WQE_2ND_RESERVED2_SHIFT     5
451 	u8 reserved3;
452 	u8 length_hi /* upper 8 bits of the registered MW length */;
453 	__le32 length_lo /* lower 32 bits of the registered MW length */;
454 	__le32 parent_l_key /* l_key of the parent MR */;
455 	__le32 reserved4;
456 };
457 
458 /*
459  * Structure with only the SQ WQE common fields. Size is of one SQ element (16B)
460  */
461 struct rdma_sq_common_wqe
462 {
463 	__le32 reserved1[3];
464 	u8 req_type /* Type of WQE */;
465 	u8 flags;
466 #define RDMA_SQ_COMMON_WQE_COMP_FLG_MASK       0x1 /* If set, completion will be generated when the WQE is completed */
467 #define RDMA_SQ_COMMON_WQE_COMP_FLG_SHIFT      0
468 #define RDMA_SQ_COMMON_WQE_RD_FENCE_FLG_MASK   0x1 /* If set, all pending RDMA read or Atomic operations will be completed before start processing this WQE */
469 #define RDMA_SQ_COMMON_WQE_RD_FENCE_FLG_SHIFT  1
470 #define RDMA_SQ_COMMON_WQE_INV_FENCE_FLG_MASK  0x1 /* If set, all pending operations will be completed before start processing this WQE */
471 #define RDMA_SQ_COMMON_WQE_INV_FENCE_FLG_SHIFT 2
472 #define RDMA_SQ_COMMON_WQE_SE_FLG_MASK         0x1 /* If set, signal the responder to generate a solicited event on this WQE (only relevant in SENDs and RDMA write with Imm) */
473 #define RDMA_SQ_COMMON_WQE_SE_FLG_SHIFT        3
474 #define RDMA_SQ_COMMON_WQE_INLINE_FLG_MASK     0x1 /* if set, indicates inline data is following this WQE instead of SGEs (only relevant in SENDs and RDMA writes) */
475 #define RDMA_SQ_COMMON_WQE_INLINE_FLG_SHIFT    4
476 #define RDMA_SQ_COMMON_WQE_RESERVED0_MASK      0x7
477 #define RDMA_SQ_COMMON_WQE_RESERVED0_SHIFT     5
478 	u8 wqe_size /* Size of WQE in 16B chunks including all SGEs or inline data. In case there are SGEs: set to number of SGEs + 1. In case of inline data: set to the whole number of 16B which contain the inline data + 1. */;
479 	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
480 };
481 
/*
 * SQ fast-memory-registration (FMR) WQE (full view). The same bytes can also
 * be read as the three 16-byte elements rdma_sq_fmr_wqe_1st/2nd/3rd below.
 */
struct rdma_sq_fmr_wqe
{
	struct regpair addr /* start virtual address of the registered region */;
	__le32 l_key;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_FMR_WQE_COMP_FLG_MASK                0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_FMR_WQE_COMP_FLG_SHIFT               0
#define RDMA_SQ_FMR_WQE_RD_FENCE_FLG_MASK            0x1 /* If set, all pending RDMA read or Atomic operations will be completed before start processing this WQE */
#define RDMA_SQ_FMR_WQE_RD_FENCE_FLG_SHIFT           1
#define RDMA_SQ_FMR_WQE_INV_FENCE_FLG_MASK           0x1 /* If set, all pending operations will be completed before start processing this WQE */
#define RDMA_SQ_FMR_WQE_INV_FENCE_FLG_SHIFT          2
#define RDMA_SQ_FMR_WQE_SE_FLG_MASK                  0x1 /* Dont care for FMR wqe */
#define RDMA_SQ_FMR_WQE_SE_FLG_SHIFT                 3
#define RDMA_SQ_FMR_WQE_INLINE_FLG_MASK              0x1 /* Should be 0 for FMR wqe */
#define RDMA_SQ_FMR_WQE_INLINE_FLG_SHIFT             4
#define RDMA_SQ_FMR_WQE_DIF_ON_HOST_FLG_MASK         0x1 /* If set, indicated host memory of this WQE is DIF protected. */
#define RDMA_SQ_FMR_WQE_DIF_ON_HOST_FLG_SHIFT        5
#define RDMA_SQ_FMR_WQE_RESERVED0_MASK               0x3
#define RDMA_SQ_FMR_WQE_RESERVED0_SHIFT              6
	u8 wqe_size /* Size of WQE in 16B chunks */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
	u8 fmr_ctrl;
#define RDMA_SQ_FMR_WQE_PAGE_SIZE_LOG_MASK           0x1F /* 0 is 4k, 1 is 8k... */
#define RDMA_SQ_FMR_WQE_PAGE_SIZE_LOG_SHIFT          0
#define RDMA_SQ_FMR_WQE_ZERO_BASED_MASK              0x1 /* zero based indication */
#define RDMA_SQ_FMR_WQE_ZERO_BASED_SHIFT             5
#define RDMA_SQ_FMR_WQE_BIND_EN_MASK                 0x1 /* indication whether bind is enabled for this MR */
#define RDMA_SQ_FMR_WQE_BIND_EN_SHIFT                6
#define RDMA_SQ_FMR_WQE_RESERVED1_MASK               0x1
#define RDMA_SQ_FMR_WQE_RESERVED1_SHIFT              7
	u8 access_ctrl /* access-permission bits granted to the MR */;
#define RDMA_SQ_FMR_WQE_REMOTE_READ_MASK             0x1
#define RDMA_SQ_FMR_WQE_REMOTE_READ_SHIFT            0
#define RDMA_SQ_FMR_WQE_REMOTE_WRITE_MASK            0x1
#define RDMA_SQ_FMR_WQE_REMOTE_WRITE_SHIFT           1
#define RDMA_SQ_FMR_WQE_ENABLE_ATOMIC_MASK           0x1
#define RDMA_SQ_FMR_WQE_ENABLE_ATOMIC_SHIFT          2
#define RDMA_SQ_FMR_WQE_LOCAL_READ_MASK              0x1
#define RDMA_SQ_FMR_WQE_LOCAL_READ_SHIFT             3
#define RDMA_SQ_FMR_WQE_LOCAL_WRITE_MASK             0x1
#define RDMA_SQ_FMR_WQE_LOCAL_WRITE_SHIFT            4
#define RDMA_SQ_FMR_WQE_RESERVED2_MASK               0x7
#define RDMA_SQ_FMR_WQE_RESERVED2_SHIFT              5
	u8 reserved3;
	u8 length_hi /* upper 8 bits of the registered MR length */;
	__le32 length_lo /* lower 32 bits of the registered MR length. In case of DIF the length is specified including the DIF guards. */;
	struct regpair pbl_addr /* Address of PBL */;
	__le32 dif_base_ref_tag /* Ref tag of the first DIF Block. */;
	__le16 dif_app_tag /* App tag of all DIF Blocks. */;
	__le16 dif_app_tag_mask /* Bitmask for verifying dif_app_tag. */;
	__le16 dif_runt_crc_value /* In TX IO, in case the runt_valid_flg is set, this value is used to validate the last Block in the IO. */;
	__le16 dif_flags;
#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK    0x1 /* 0=RX, 1=TX (use enum rdma_dif_io_direction_flg) */
#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT   0
#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK          0x1 /* DIF block size. 0=512B 1=4096B (use enum rdma_dif_block_size) */
#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT         1
#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK      0x1 /* In TX IO, indicates the runt_value field is valid. In RX IO, indicates the calculated runt value is to be placed on host buffer. */
#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT     2
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK  0x1 /* In TX IO, indicates CRC of each DIF guard tag is checked. */
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT 3
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK    0x1 /* In TX IO, indicates Ref tag of each DIF guard tag is checked. */
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT   4
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK    0x1 /* In TX IO, indicates App tag of each DIF guard tag is checked. */
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT   5
#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK            0x1 /* DIF CRC Seed to use. 0=0x000 1=0xFFFF (use enum rdma_dif_crc_seed) */
#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT           6
#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_MASK    0x1 /* In RX IO, Ref Tag will remain at constant value of dif_base_ref_tag */
#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_SHIFT   7
#define RDMA_SQ_FMR_WQE_RESERVED4_MASK               0xFF
#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT              8
	__le32 Reserved5; /* capitalized name kept as-is: firmware-interface field */
};
555 
556 /*
557  * First element (16 bytes) of fmr wqe
558  */
559 struct rdma_sq_fmr_wqe_1st
560 {
561 	struct regpair addr;
562 	__le32 l_key;
563 	u8 req_type /* Type of WQE */;
564 	u8 flags;
565 #define RDMA_SQ_FMR_WQE_1ST_COMP_FLG_MASK         0x1 /* If set, completion will be generated when the WQE is completed */
566 #define RDMA_SQ_FMR_WQE_1ST_COMP_FLG_SHIFT        0
567 #define RDMA_SQ_FMR_WQE_1ST_RD_FENCE_FLG_MASK     0x1 /* If set, all pending RDMA read or Atomic operations will be completed before start processing this WQE */
568 #define RDMA_SQ_FMR_WQE_1ST_RD_FENCE_FLG_SHIFT    1
569 #define RDMA_SQ_FMR_WQE_1ST_INV_FENCE_FLG_MASK    0x1 /* If set, all pending operations will be completed before start processing this WQE */
570 #define RDMA_SQ_FMR_WQE_1ST_INV_FENCE_FLG_SHIFT   2
571 #define RDMA_SQ_FMR_WQE_1ST_SE_FLG_MASK           0x1 /* Dont care for FMR wqe */
572 #define RDMA_SQ_FMR_WQE_1ST_SE_FLG_SHIFT          3
573 #define RDMA_SQ_FMR_WQE_1ST_INLINE_FLG_MASK       0x1 /* Should be 0 for FMR wqe */
574 #define RDMA_SQ_FMR_WQE_1ST_INLINE_FLG_SHIFT      4
575 #define RDMA_SQ_FMR_WQE_1ST_DIF_ON_HOST_FLG_MASK  0x1 /* If set, indicated host memory of this WQE is DIF protected. */
576 #define RDMA_SQ_FMR_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5
577 #define RDMA_SQ_FMR_WQE_1ST_RESERVED0_MASK        0x3
578 #define RDMA_SQ_FMR_WQE_1ST_RESERVED0_SHIFT       6
579 	u8 wqe_size /* Size of WQE in 16B chunks */;
580 	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
581 };
582 
583 /*
584  * Second element (16 bytes) of fmr wqe
585  */
586 struct rdma_sq_fmr_wqe_2nd
587 {
588 	u8 fmr_ctrl;
589 #define RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG_MASK  0x1F /* 0 is 4k, 1 is 8k... */
590 #define RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG_SHIFT 0
591 #define RDMA_SQ_FMR_WQE_2ND_ZERO_BASED_MASK     0x1 /* zero based indication */
592 #define RDMA_SQ_FMR_WQE_2ND_ZERO_BASED_SHIFT    5
593 #define RDMA_SQ_FMR_WQE_2ND_BIND_EN_MASK        0x1 /* indication whether bind is enabled for this MR */
594 #define RDMA_SQ_FMR_WQE_2ND_BIND_EN_SHIFT       6
595 #define RDMA_SQ_FMR_WQE_2ND_RESERVED1_MASK      0x1
596 #define RDMA_SQ_FMR_WQE_2ND_RESERVED1_SHIFT     7
597 	u8 access_ctrl;
598 #define RDMA_SQ_FMR_WQE_2ND_REMOTE_READ_MASK    0x1
599 #define RDMA_SQ_FMR_WQE_2ND_REMOTE_READ_SHIFT   0
600 #define RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE_MASK   0x1
601 #define RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE_SHIFT  1
602 #define RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC_MASK  0x1
603 #define RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC_SHIFT 2
604 #define RDMA_SQ_FMR_WQE_2ND_LOCAL_READ_MASK     0x1
605 #define RDMA_SQ_FMR_WQE_2ND_LOCAL_READ_SHIFT    3
606 #define RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE_MASK    0x1
607 #define RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE_SHIFT   4
608 #define RDMA_SQ_FMR_WQE_2ND_RESERVED2_MASK      0x7
609 #define RDMA_SQ_FMR_WQE_2ND_RESERVED2_SHIFT     5
610 	u8 reserved3;
611 	u8 length_hi /* upper 8 bits of the registered MR length */;
612 	__le32 length_lo /* lower 32 bits of the registered MR length. */;
613 	struct regpair pbl_addr /* Address of PBL */;
614 };
615 
616 /*
617  * Third element (16 bytes) of fmr wqe
618  */
619 struct rdma_sq_fmr_wqe_3rd
620 {
621 	__le32 dif_base_ref_tag /* Ref tag of the first DIF Block. */;
622 	__le16 dif_app_tag /* App tag of all DIF Blocks. */;
623 	__le16 dif_app_tag_mask /* Bitmask for verifying dif_app_tag. */;
624 	__le16 dif_runt_crc_value /* In TX IO, in case the runt_valid_flg is set, this value is used to validate the last Block in the IO. */;
625 	__le16 dif_flags;
626 #define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK    0x1 /* 0=RX, 1=TX (use enum rdma_dif_io_direction_flg) */
627 #define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT   0
628 #define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK          0x1 /* DIF block size. 0=512B 1=4096B (use enum rdma_dif_block_size) */
629 #define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT         1
630 #define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK      0x1 /* In TX IO, indicates the runt_value field is valid. In RX IO, indicates the calculated runt value is to be placed on host buffer. */
631 #define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT     2
632 #define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK  0x1 /* In TX IO, indicates CRC of each DIF guard tag is checked. */
633 #define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT 3
634 #define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK    0x1 /* In TX IO, indicates Ref tag of each DIF guard tag is checked. */
635 #define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT   4
636 #define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK    0x1 /* In TX IO, indicates App tag of each DIF guard tag is checked. */
637 #define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT   5
638 #define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK            0x1 /* DIF CRC Seed to use. 0=0x000 1=0xFFFF (use enum rdma_dif_crc_seed) */
639 #define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT           6
640 #define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK               0x1FF
641 #define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT              7
642 	__le32 Reserved5;
643 };
644 
/*
 * SQ local-invalidate WQE: invalidates a local memory key (one 16B element).
 */
struct rdma_sq_local_inv_wqe
{
	struct regpair reserved;
	__le32 inv_l_key /* The invalidate local key */;
	u8 req_type /* Type of WQE */;
	u8 flags;
#define RDMA_SQ_LOCAL_INV_WQE_COMP_FLG_MASK         0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_LOCAL_INV_WQE_COMP_FLG_SHIFT        0
#define RDMA_SQ_LOCAL_INV_WQE_RD_FENCE_FLG_MASK     0x1 /* If set, all pending RDMA read or Atomic operations will be completed before start processing this WQE */
#define RDMA_SQ_LOCAL_INV_WQE_RD_FENCE_FLG_SHIFT    1
#define RDMA_SQ_LOCAL_INV_WQE_INV_FENCE_FLG_MASK    0x1 /* If set, all pending operations will be completed before start processing this WQE */
#define RDMA_SQ_LOCAL_INV_WQE_INV_FENCE_FLG_SHIFT   2
#define RDMA_SQ_LOCAL_INV_WQE_SE_FLG_MASK           0x1 /* Dont care for local invalidate wqe */
#define RDMA_SQ_LOCAL_INV_WQE_SE_FLG_SHIFT          3
#define RDMA_SQ_LOCAL_INV_WQE_INLINE_FLG_MASK       0x1 /* Should be 0 for local invalidate wqe */
#define RDMA_SQ_LOCAL_INV_WQE_INLINE_FLG_SHIFT      4
#define RDMA_SQ_LOCAL_INV_WQE_DIF_ON_HOST_FLG_MASK  0x1 /* If set, indicated host memory of this WQE is DIF protected. */
#define RDMA_SQ_LOCAL_INV_WQE_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_LOCAL_INV_WQE_RESERVED0_MASK        0x3
#define RDMA_SQ_LOCAL_INV_WQE_RESERVED0_SHIFT       6
	u8 wqe_size /* Size of WQE in 16B chunks */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};
668 
/*
 * SQ WQE format for RDMA read/write requests. Covers the same layout as
 * rdma_sq_rdma_wqe_1st followed by rdma_sq_rdma_wqe_2nd below.
 */
struct rdma_sq_rdma_wqe
{
	__le32 imm_data /* The immediate data in case of RDMA_WITH_IMM */;
	__le32 length /* Total data length. If DIF on host is enabled, length does NOT include DIF guards. */;
	__le32 xrc_srq /* Valid only when XRC is set for the QP */;
	u8 req_type /* Type of WQE (see enum rdma_sq_req_type) */;
	u8 flags;
#define RDMA_SQ_RDMA_WQE_COMP_FLG_MASK                  0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_RDMA_WQE_COMP_FLG_SHIFT                 0
#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_MASK              0x1 /* If set, all pending RDMA read or Atomic operations will be completed before start processing this WQE */
#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_SHIFT             1
#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_MASK             0x1 /* If set, all pending operations will be completed before start processing this WQE */
#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_SHIFT            2
#define RDMA_SQ_RDMA_WQE_SE_FLG_MASK                    0x1 /* If set, signal the responder to generate a solicited event on this WQE */
#define RDMA_SQ_RDMA_WQE_SE_FLG_SHIFT                   3
#define RDMA_SQ_RDMA_WQE_INLINE_FLG_MASK                0x1 /* if set, indicates inline data is following this WQE instead of SGEs. Applicable for RDMA_WR or RDMA_WR_WITH_IMM. Should be 0 for RDMA_RD */
#define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT               4
#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK           0x1 /* If set, indicates host memory of this WQE is DIF protected. */
#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT          5
#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_MASK              0x1 /* If set, indicates read with invalidate WQE. iWARP only */
#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_SHIFT             6
#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK                 0x1
#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT                7
	u8 wqe_size /* Size of WQE in 16B chunks including all SGEs or inline data. In case there are SGEs: set to number of SGEs + 1. In case of inline data: set to the whole number of 16B which contain the inline data + 1. */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
	struct regpair remote_va /* Remote virtual address */;
	__le32 r_key /* Remote key */;
	u8 dif_flags;
#define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_MASK            0x1 /* if dif_on_host_flg set: DIF block size. 0=512B 1=4096B (use enum rdma_dif_block_size) */
#define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_SHIFT           0
#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_MASK  0x1 /* if dif_on_host_flg set: WQE executes first RDMA on related IO. */
#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_SHIFT 1
#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_MASK   0x1 /* if dif_on_host_flg set: WQE executes last RDMA on related IO. */
#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_SHIFT  2
#define RDMA_SQ_RDMA_WQE_RESERVED1_MASK                 0x1F
#define RDMA_SQ_RDMA_WQE_RESERVED1_SHIFT                3
	u8 reserved2[3];
};
707 
708 /*
709  * First element (16 bytes) of rdma wqe
710  */
struct rdma_sq_rdma_wqe_1st
{
	__le32 imm_data /* The immediate data in case of RDMA_WITH_IMM */;
	__le32 length /* Total data length */;
	__le32 xrc_srq /* Valid only when XRC is set for the QP */;
	u8 req_type /* Type of WQE (see enum rdma_sq_req_type) */;
	u8 flags;
#define RDMA_SQ_RDMA_WQE_1ST_COMP_FLG_MASK         0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_RDMA_WQE_1ST_COMP_FLG_SHIFT        0
#define RDMA_SQ_RDMA_WQE_1ST_RD_FENCE_FLG_MASK     0x1 /* If set, all pending RDMA read or Atomic operations will be completed before start processing this WQE */
#define RDMA_SQ_RDMA_WQE_1ST_RD_FENCE_FLG_SHIFT    1
#define RDMA_SQ_RDMA_WQE_1ST_INV_FENCE_FLG_MASK    0x1 /* If set, all pending operations will be completed before start processing this WQE */
#define RDMA_SQ_RDMA_WQE_1ST_INV_FENCE_FLG_SHIFT   2
#define RDMA_SQ_RDMA_WQE_1ST_SE_FLG_MASK           0x1 /* If set, signal the responder to generate a solicited event on this WQE */
#define RDMA_SQ_RDMA_WQE_1ST_SE_FLG_SHIFT          3
#define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_MASK       0x1 /* if set, indicates inline data is following this WQE instead of SGEs. Applicable for RDMA_WR or RDMA_WR_WITH_IMM. Should be 0 for RDMA_RD */
#define RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG_SHIFT      4
#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_MASK  0x1 /* If set, indicates host memory of this WQE is DIF protected. */
#define RDMA_SQ_RDMA_WQE_1ST_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG_MASK     0x1 /* If set, indicates read with invalidate WQE. iWARP only */
#define RDMA_SQ_RDMA_WQE_1ST_READ_INV_FLG_SHIFT    6
#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_MASK        0x1
#define RDMA_SQ_RDMA_WQE_1ST_RESERVED0_SHIFT       7
	u8 wqe_size /* Size of WQE in 16B chunks including all SGEs or inline data. In case there are SGEs: set to number of SGEs + 1. In case of inline data: set to the whole number of 16B which contain the inline data + 1. */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};
737 
738 /*
739  * Second element (16 bytes) of rdma wqe
740  */
struct rdma_sq_rdma_wqe_2nd
{
	struct regpair remote_va /* Remote virtual address */;
	__le32 r_key /* Remote key */;
	u8 dif_flags /* DIF-related control bits; meaningful only when dif_on_host_flg is set in the 1st element */;
#define RDMA_SQ_RDMA_WQE_2ND_DIF_BLOCK_SIZE_MASK         0x1 /* if dif_on_host_flg set: DIF block size. 0=512B 1=4096B (use enum rdma_dif_block_size) */
#define RDMA_SQ_RDMA_WQE_2ND_DIF_BLOCK_SIZE_SHIFT        0
#define RDMA_SQ_RDMA_WQE_2ND_DIF_FIRST_SEGMENT_FLG_MASK  0x1 /* if dif_on_host_flg set: WQE executes first DIF on related MR. */
#define RDMA_SQ_RDMA_WQE_2ND_DIF_FIRST_SEGMENT_FLG_SHIFT 1
#define RDMA_SQ_RDMA_WQE_2ND_DIF_LAST_SEGMENT_FLG_MASK   0x1 /* if dif_on_host_flg set: WQE executes last DIF on related MR. */
#define RDMA_SQ_RDMA_WQE_2ND_DIF_LAST_SEGMENT_FLG_SHIFT  2
#define RDMA_SQ_RDMA_WQE_2ND_RESERVED1_MASK              0x1F
#define RDMA_SQ_RDMA_WQE_2ND_RESERVED1_SHIFT             3
	u8 reserved2[3];
};
756 
757 /*
758  * SQ WQE req type enumeration
759  */
enum rdma_sq_req_type
{
	RDMA_SQ_REQ_TYPE_SEND,
	RDMA_SQ_REQ_TYPE_SEND_WITH_IMM,
	RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE,
	RDMA_SQ_REQ_TYPE_RDMA_WR,
	RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM,
	RDMA_SQ_REQ_TYPE_RDMA_RD,
	RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP,
	RDMA_SQ_REQ_TYPE_ATOMIC_ADD,
	RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE,
	RDMA_SQ_REQ_TYPE_FAST_MR,
	RDMA_SQ_REQ_TYPE_BIND,
	RDMA_SQ_REQ_TYPE_INVALID /* Sentinel; not a valid request type */,
	MAX_RDMA_SQ_REQ_TYPE /* Sentinel; must remain last */
};
776 
/*
 * SQ WQE format for send requests. Covers the same layout as
 * rdma_sq_send_wqe_1st followed by the reserved second element below.
 */
struct rdma_sq_send_wqe
{
	__le32 inv_key_or_imm_data /* the r_key to invalidate in case of SEND_WITH_INVALIDATE, or the immediate data in case of SEND_WITH_IMM */;
	__le32 length /* Total data length */;
	__le32 xrc_srq /* Valid only when XRC is set for the QP */;
	u8 req_type /* Type of WQE (see enum rdma_sq_req_type) */;
	u8 flags;
#define RDMA_SQ_SEND_WQE_COMP_FLG_MASK         0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_SEND_WQE_COMP_FLG_SHIFT        0
#define RDMA_SQ_SEND_WQE_RD_FENCE_FLG_MASK     0x1 /* If set, all pending RDMA read or Atomic operations will be completed before start processing this WQE */
#define RDMA_SQ_SEND_WQE_RD_FENCE_FLG_SHIFT    1
#define RDMA_SQ_SEND_WQE_INV_FENCE_FLG_MASK    0x1 /* If set, all pending operations will be completed before start processing this WQE */
#define RDMA_SQ_SEND_WQE_INV_FENCE_FLG_SHIFT   2
#define RDMA_SQ_SEND_WQE_SE_FLG_MASK           0x1 /* If set, signal the responder to generate a solicited event on this WQE */
#define RDMA_SQ_SEND_WQE_SE_FLG_SHIFT          3
#define RDMA_SQ_SEND_WQE_INLINE_FLG_MASK       0x1 /* if set, indicates inline data is following this WQE instead of SGEs */
#define RDMA_SQ_SEND_WQE_INLINE_FLG_SHIFT      4
#define RDMA_SQ_SEND_WQE_DIF_ON_HOST_FLG_MASK  0x1 /* Should be 0 for send wqe */
#define RDMA_SQ_SEND_WQE_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_SEND_WQE_RESERVED0_MASK        0x3
#define RDMA_SQ_SEND_WQE_RESERVED0_SHIFT       6
	u8 wqe_size /* Size of WQE in 16B chunks including all SGEs or inline data. In case there are SGEs: set to number of SGEs + 1. In case of inline data: set to the whole number of 16B which contain the inline data + 1. */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
	__le32 reserved1[4];
};
802 
/*
 * First element (16 bytes) of send wqe
 */
struct rdma_sq_send_wqe_1st
{
	__le32 inv_key_or_imm_data /* the r_key to invalidate in case of SEND_WITH_INVALIDATE, or the immediate data in case of SEND_WITH_IMM */;
	__le32 length /* Total data length */;
	__le32 xrc_srq /* Valid only when XRC is set for the QP */;
	u8 req_type /* Type of WQE (see enum rdma_sq_req_type) */;
	u8 flags;
#define RDMA_SQ_SEND_WQE_1ST_COMP_FLG_MASK       0x1 /* If set, completion will be generated when the WQE is completed */
#define RDMA_SQ_SEND_WQE_1ST_COMP_FLG_SHIFT      0
#define RDMA_SQ_SEND_WQE_1ST_RD_FENCE_FLG_MASK   0x1 /* If set, all pending RDMA read or Atomic operations will be completed before start processing this WQE */
#define RDMA_SQ_SEND_WQE_1ST_RD_FENCE_FLG_SHIFT  1
#define RDMA_SQ_SEND_WQE_1ST_INV_FENCE_FLG_MASK  0x1 /* If set, all pending operations will be completed before start processing this WQE */
#define RDMA_SQ_SEND_WQE_1ST_INV_FENCE_FLG_SHIFT 2
#define RDMA_SQ_SEND_WQE_1ST_SE_FLG_MASK         0x1 /* If set, signal the responder to generate a solicited event on this WQE */
#define RDMA_SQ_SEND_WQE_1ST_SE_FLG_SHIFT        3
#define RDMA_SQ_SEND_WQE_1ST_INLINE_FLG_MASK     0x1 /* if set, indicates inline data is following this WQE instead of SGEs */
#define RDMA_SQ_SEND_WQE_1ST_INLINE_FLG_SHIFT    4
#define RDMA_SQ_SEND_WQE_1ST_RESERVED0_MASK      0x7
#define RDMA_SQ_SEND_WQE_1ST_RESERVED0_SHIFT     5
	u8 wqe_size /* Size of WQE in 16B chunks including all SGEs or inline data. In case there are SGEs: set to number of SGEs + 1. In case of inline data: set to the whole number of 16B which contain the inline data + 1. */;
	u8 prev_wqe_size /* Previous WQE size in 16B chunks */;
};
825 
/*
 * Second element (16 bytes) of send wqe; all reserved.
 * NOTE(review): "2st" (rather than "2nd") appears to be a typo in the
 * original firmware interface name, kept for source compatibility.
 */
struct rdma_sq_send_wqe_2st
{
	__le32 reserved1[4];
};
830 
/*
 * SQ scatter/gather element.
 */
struct rdma_sq_sge
{
	__le32 length /* Total length of the send. If DIF on host is enabled, SGE length includes the DIF guards. */;
	struct regpair addr /* Host buffer address */;
	__le32 l_key /* Local key of the buffer */;
};
837 
/*
 * SRQ WQE header element: work-request id plus SGE count.
 */
struct rdma_srq_wqe_header
{
	struct regpair wr_id /* Work request id, returned on completion */;
	u8 num_sges /* number of SGEs in WQE */;
	u8 reserved2[7];
};
844 
/*
 * SRQ scatter/gather element.
 */
struct rdma_srq_sge
{
	struct regpair addr /* Host buffer address */;
	__le32 length /* Buffer length */;
	__le32 l_key /* Local key of the buffer */;
};
851 
/*
 * rdma srq element: either a WQE header or an SGE
 */
union rdma_srq_elm
{
	struct rdma_srq_wqe_header header /* Header element of an SRQ WQE */;
	struct rdma_srq_sge sge /* Scatter/gather element of an SRQ WQE */;
};
860 
861 /*
862  * Rdma doorbell data for flags update
863  */
struct rdma_pwm_flags_data
{
	__le16 icid /* internal CID */;
	u8 agg_flags /* aggregative flags */;
	u8 reserved;
};
870 
871 /*
872  * Rdma doorbell data for SQ and RQ
873  */
struct rdma_pwm_val16_data
{
	__le16 icid /* internal CID */;
	__le16 value /* aggregated value to update */;
};
879 
/*
 * Rdma doorbell data for SQ and RQ, accessible either as a structured
 * value or as a raw dword for a single doorbell write.
 */
union rdma_pwm_val16_data_union
{
	struct rdma_pwm_val16_data as_struct /* Parameters field */;
	__le32 as_dword /* Raw dword representation of the same data */;
};
885 
886 /*
887  * Rdma doorbell data for CQ
888  */
struct rdma_pwm_val32_data
{
	__le16 icid /* internal CID */;
	u8 agg_flags /* bit for every DQ counter flags in CM context that DQ can increment */;
	u8 params;
#define RDMA_PWM_VAL32_DATA_AGG_CMD_MASK             0x3 /* aggregative command to CM (use enum db_agg_cmd_sel) */
#define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT            0
#define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK           0x1 /* enable QM bypass */
#define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT          2
#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_MASK  0x1 /* Connection type is iWARP */
#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_SHIFT 3
#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_MASK         0x1 /* Flag indicating 16b variable should be updated. Should be used when conn_type_is_iwarp is used */
#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_SHIFT        4
#define RDMA_PWM_VAL32_DATA_RESERVED_MASK            0x7
#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT           5
	__le32 value /* aggregated value to update */;
};
906 
/*
 * Rdma doorbell data for CQ, accessible either as a structured value or
 * as a raw regpair for a single doorbell write.
 */
union rdma_pwm_val32_data_union
{
	struct rdma_pwm_val32_data as_struct /* Parameters field */;
	struct regpair as_repair /* Raw representation of the same data (field name likely intended as "as_regpair") */;
};
912 
913 #endif /* __RDMA_COMMON__ */
914