/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifndef	_SYS_IB_IBTL_IBTL_TYPES_H
#define	_SYS_IB_IBTL_IBTL_TYPES_H

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * ibtl_types.h
 *
 * All common IBTL defined types.  These are common data types shared
 * by the IBTI and IBCI interfaces; this file is included only by
 * ibti.h and ibci.h.
 */
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/ib/ib_types.h>
#include <sys/ib/ibtl/ibtl_status.h>
#include <sys/socket.h>


#ifdef	__cplusplus
extern "C" {
#endif

/*
 * Endian Macros
 *    h2b - host endian to big endian protocol
 *    b2h - big endian protocol to host endian
 *    h2l - host endian to little endian protocol
 *    l2h - little endian protocol to host endian
 */
#if defined(_LITTLE_ENDIAN)
#define	h2b16(x)	(htons(x))
#define	h2b32(x)	(htonl(x))
#define	h2b64(x)	(ddi_swap64(x))
#define	b2h16(x)	(ntohs(x))
#define	b2h32(x)	(ntohl(x))
#define	b2h64(x)	(ddi_swap64(x))

#define	h2l16(x)	(x)
#define	h2l32(x)	(x)
#define	h2l64(x)	(x)
#define	l2h16(x)	(x)
#define	l2h32(x)	(x)
#define	l2h64(x)	(x)

#elif defined(_BIG_ENDIAN)
#define	h2b16(x)	(x)
#define	h2b32(x)	(x)
#define	h2b64(x)	(x)
#define	b2h16(x)	(x)
#define	b2h32(x)	(x)
#define	b2h64(x)	(x)

#define	h2l16(x)	(ddi_swap16(x))
#define	h2l32(x)	(ddi_swap32(x))
#define	h2l64(x)	(ddi_swap64(x))
#define	l2h16(x)	(ddi_swap16(x))
#define	l2h32(x)	(ddi_swap32(x))
#define	l2h64(x)	(ddi_swap64(x))

#else
#error	"what endian is this machine?"
#endif
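
/*
 * Usage sketch (illustrative; the variable names below are hypothetical):
 * convert a host-order value to big endian wire order before copying it
 * into a wire-format structure, and back to host order on receipt.  On a
 * big endian host the h2b/b2h macros are no-ops; on a little endian host
 * they expand to the appropriate byte swaps.
 *
 *	uint32_t host_qkey = 0x1234;
 *	uint32_t wire_qkey = h2b32(host_qkey);
 *	uint32_t same_qkey = b2h32(wire_qkey);		(equals host_qkey)
 */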

/*
 * Define Internal IBTL handles
 */
typedef	struct	ibtl_clnt_s	*ibt_clnt_hdl_t;    /* ibt_attach() */
typedef	struct	ibtl_hca_s	*ibt_hca_hdl_t;	    /* ibt_open_hca() */
typedef	struct	ibtl_channel_s	*ibt_channel_hdl_t; /* alloc_rc|ud_channel() */
typedef	struct	ibtl_srq_s	*ibt_srq_hdl_t;	    /* ibt_alloc_srq() */
typedef	struct	ibtl_cq_s	*ibt_cq_hdl_t;	    /* ibt_alloc_cq() */
typedef	struct	ibcm_svc_info_s	*ibt_srv_hdl_t;	    /* ibt_register_service() */
typedef	struct	ibcm_svc_bind_s	*ibt_sbind_hdl_t;   /* ibt_bind_service() */

typedef	struct	ibc_fmr_pool_s	*ibt_fmr_pool_hdl_t; /* ibt_create_fmr_pool() */
typedef	struct	ibc_ma_s	*ibt_ma_hdl_t;	    /* ibt_map_mem_area() */
typedef	struct	ibc_pd_s	*ibt_pd_hdl_t;	    /* ibt_alloc_pd() */
typedef	struct	ibc_sched_s	*ibt_sched_hdl_t;   /* ibt_alloc_cq_sched() */
typedef	struct	ibc_mr_s	*ibt_mr_hdl_t;	    /* ibt_register_mr() */
typedef	struct	ibc_mw_s	*ibt_mw_hdl_t;	    /* ibt_alloc_mw() */
typedef	struct	ibt_ud_dest_s	*ibt_ud_dest_hdl_t; /* UD dest handle */
typedef	struct	ibc_ah_s	*ibt_ah_hdl_t;	    /* ibt_alloc_ah() */
typedef struct	ibtl_eec_s	*ibt_eec_hdl_t;
typedef	struct	ibt_rd_dest_s	*ibt_rd_dest_hdl_t;	/* Reserved for */
							/* Future use */

/*
 * Some General Types.
 */
typedef uint32_t	ibt_lkey_t;		/* L_Key */
typedef uint32_t	ibt_rkey_t;		/* R_Key */
typedef uint64_t	ibt_wrid_t;		/* Client assigned WR ID */
typedef uint32_t	ibt_immed_t;		/* WR Immediate Data */
typedef uint64_t	ibt_atom_arg_t;		/* WR Atomic Operation arg */
typedef	uint_t		ibt_cq_handler_id_t;	/* Event handler ID */

/*
 * IBT selector type, used when looking up/requesting either an
 * MTU, Pkt lifetime, or Static rate.
 * The interpretation of IBT_BEST depends on the attribute being selected.
 */
typedef enum ibt_selector_e {
	IBT_GT		= 0,	/* Greater than */
	IBT_LT		= 1,	/* Less than */
	IBT_EQU		= 2,	/* Equal to */
	IBT_BEST	= 3	/* Best */
} ibt_selector_t;


/*
 * Static rate definitions.
 */
typedef enum ibt_srate_e {
	IBT_SRATE_NOT_SPECIFIED	= 0,
	IBT_SRATE_2		= 2,	/*  1X SDR i.e. 2.5 Gbps */
	IBT_SRATE_10		= 3,	/*  4X SDR or 1X QDR i.e. 10 Gbps */
	IBT_SRATE_30		= 4,	/* 12X SDR i.e. 30 Gbps */

	IBT_SRATE_5		= 5,	/*  1X DDR i.e.  5 Gbps */
	IBT_SRATE_20		= 6,	/*  4X DDR or 8X SDR i.e. 20 Gbps */
	IBT_SRATE_40		= 7,	/*  8X DDR or 4X QDR i.e. 40 Gbps */

	IBT_SRATE_60		= 8,	/* 12X DDR i.e. 60 Gbps */
	IBT_SRATE_80		= 9,	/*  8X QDR i.e. 80 Gbps */
	IBT_SRATE_120		= 10	/* 12X QDR i.e. 120 Gbps */
} ibt_srate_t;

/* retain old definition to be compatible with older bits. */
#define	IBT_SRATE_1X	IBT_SRATE_2
#define	IBT_SRATE_4X	IBT_SRATE_10
#define	IBT_SRATE_12X	IBT_SRATE_30

/*
 * Static rate request type.
 */
typedef struct ibt_srate_req_s {
	ibt_srate_t	r_srate;	/* Requested srate */
	ibt_selector_t	r_selector;	/* Qualifier for r_srate */
} ibt_srate_req_t;
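
/*
 * Example (illustrative): requesting a path whose static rate is exactly
 * 10 Gbps (4X SDR).  In practice this structure is usually embedded in a
 * larger path or channel attribute structure rather than used on its own.
 *
 *	ibt_srate_req_t	rate_req;
 *
 *	rate_req.r_srate    = IBT_SRATE_10;
 *	rate_req.r_selector = IBT_EQU;
 */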

/*
 * Packet Life Time Request Type.
 */
typedef struct ibt_pkt_lt_req_s {
	clock_t		p_pkt_lt;	/* Requested Packet Life Time */
	ibt_selector_t	p_selector;	/* Qualifier for p_pkt_lt */
} ibt_pkt_lt_req_t;

/*
 * Queue size struct.
 */
typedef struct ibt_queue_sizes_s {
	uint_t	qs_sq;		/* SendQ size. */
	uint_t	qs_rq;		/* RecvQ size. */
} ibt_queue_sizes_t;

/*
 * Channel sizes struct, used by functions that allocate/query RC or UD
 * channels.
 */
typedef struct ibt_chan_sizes_s {
	uint_t	cs_sq;		/* SendQ size. */
	uint_t	cs_rq;		/* ReceiveQ size. */
	uint_t	cs_sq_sgl;	/* Max SGL elements in a SQ WR. */
	uint_t	cs_rq_sgl;	/* Max SGL elements in a RQ Wr. */
} ibt_chan_sizes_t;

/*
 * Shared Queue size struct.
 */
typedef struct ibt_srq_sizes_s {
	uint_t	srq_wr_sz;
	uint_t	srq_sgl_sz;
} ibt_srq_sizes_t;

/*
 * SRQ Modify Flags
 */
typedef enum ibt_srq_modify_flags_e {
	IBT_SRQ_SET_NOTHING		= 0,
	IBT_SRQ_SET_SIZE		= (1 << 1),
	IBT_SRQ_SET_LIMIT		= (1 << 2)
} ibt_srq_modify_flags_t;


/*
 * Execution flags, indicates if the function should block or not.
 * Note: in some cases, e.g., a NULL rc_cm_handler, IBT_NONBLOCKING
 * will not have an effect, and the thread will block.
 * IBT_NOCALLBACKS is valid for ibt_close_rc_channel only.
 */
typedef enum ibt_execution_mode_e {
	IBT_BLOCKING	= 0,	/* Block */
	IBT_NONBLOCKING	= 1,	/* Return as soon as possible */
	IBT_NOCALLBACKS	= 2	/* cm_handler is not invoked after */
				/* ibt_close_rc_channel returns */
} ibt_execution_mode_t;

/*
 * Memory window alloc flags
 */
typedef enum ibt_mw_flags_e {
	IBT_MW_SLEEP		= 0,		/* Can block */
	IBT_MW_NOSLEEP		= (1 << 0),	/* Can't block */
	IBT_MW_USER_MAP		= (1 << 1),
	IBT_MW_DEFER_ALLOC	= (1 << 2),
	IBT_MW_TYPE_1		= (1 << 3),
	IBT_MW_TYPE_2		= (1 << 4)
} ibt_mw_flags_t;

/*
 * PD alloc flags
 */
typedef enum ibt_pd_flags_e {
	IBT_PD_NO_FLAGS		= 0,
	IBT_PD_USER_MAP		= (1 << 0),
	IBT_PD_DEFER_ALLOC	= (1 << 1)
} ibt_pd_flags_t;

/*
 * UD Dest alloc flags
 */
typedef enum ibt_ud_dest_flags_e {
	IBT_UD_DEST_NO_FLAGS	= 0,
	IBT_UD_DEST_USER_MAP	= (1 << 0),
	IBT_UD_DEST_DEFER_ALLOC	= (1 << 1)
} ibt_ud_dest_flags_t;

/*
 * SRQ alloc flags
 */
typedef enum ibt_srq_flags_e {
	IBT_SRQ_NO_FLAGS	= 0,
	IBT_SRQ_USER_MAP	= (1 << 0),
	IBT_SRQ_DEFER_ALLOC	= (1 << 1)
} ibt_srq_flags_t;

/*
 * ibt_alloc_lkey() alloc flags
 */
typedef enum ibt_lkey_flags_e {
	IBT_KEY_NO_FLAGS	= 0,
	IBT_KEY_REMOTE		= (1 << 0)
} ibt_lkey_flags_t;

/*
 *  RNR NAK retry counts.
 */
typedef enum ibt_rnr_retry_cnt_e {
	IBT_RNR_NO_RETRY	= 0x0,	/* Don't retry, fail on first timeout */
	IBT_RNR_RETRY_1		= 0x1,	/* Retry once */
	IBT_RNR_RETRY_2		= 0x2,	/* Retry twice */
	IBT_RNR_RETRY_3		= 0x3,	/* Retry three times */
	IBT_RNR_RETRY_4		= 0x4,	/* Retry four times */
	IBT_RNR_RETRY_5		= 0x5,	/* Retry five times */
	IBT_RNR_RETRY_6		= 0x6,	/* Retry six times */
	IBT_RNR_INFINITE_RETRY	= 0x7	/* Retry forever */
} ibt_rnr_retry_cnt_t;

/*
 * Valid values for RNR NAK timer fields, part of a channel's context.
 */
typedef enum ibt_rnr_nak_time_e {
	IBT_RNR_NAK_655ms	= 0x0,
	IBT_RNR_NAK_10us	= 0x1,
	IBT_RNR_NAK_20us	= 0x2,
	IBT_RNR_NAK_30us	= 0x3,
	IBT_RNR_NAK_40us	= 0x4,
	IBT_RNR_NAK_60us	= 0x5,
	IBT_RNR_NAK_80us	= 0x6,
	IBT_RNR_NAK_120us	= 0x7,
	IBT_RNR_NAK_160us	= 0x8,
	IBT_RNR_NAK_240us	= 0x9,
	IBT_RNR_NAK_320us	= 0xA,
	IBT_RNR_NAK_480us	= 0xB,
	IBT_RNR_NAK_640us	= 0xC,
	IBT_RNR_NAK_960us	= 0xD,
	IBT_RNR_NAK_1280us	= 0xE,
	IBT_RNR_NAK_1920us	= 0xF,
	IBT_RNR_NAK_2560us	= 0x10,
	IBT_RNR_NAK_3840us	= 0x11,
	IBT_RNR_NAK_5120us	= 0x12,
	IBT_RNR_NAK_7680us	= 0x13,
	IBT_RNR_NAK_10ms	= 0x14,
	IBT_RNR_NAK_15ms	= 0x15,
	IBT_RNR_NAK_20ms	= 0x16,
	IBT_RNR_NAK_31ms	= 0x17,
	IBT_RNR_NAK_41ms	= 0x18,
	IBT_RNR_NAK_61ms	= 0x19,
	IBT_RNR_NAK_82ms	= 0x1A,
	IBT_RNR_NAK_123ms	= 0x1B,
	IBT_RNR_NAK_164ms	= 0x1C,
	IBT_RNR_NAK_246ms	= 0x1D,
	IBT_RNR_NAK_328ms	= 0x1E,
	IBT_RNR_NAK_492ms	= 0x1F
} ibt_rnr_nak_time_t;

/*
 * The definition of HCA capabilities etc as a bitfield.
 */
typedef enum ibt_hca_flags_e {
	IBT_HCA_NO_FLAGS	= 0,

	IBT_HCA_RD		= 1 << 0,
	IBT_HCA_UD_MULTICAST	= 1 << 1,
	IBT_HCA_RAW_MULTICAST	= 1 << 2,

	IBT_HCA_ATOMICS_HCA	= 1 << 3,
	IBT_HCA_ATOMICS_GLOBAL	= 1 << 4,

	IBT_HCA_RESIZE_CHAN	= 1 << 5,	/* Is resize supported? */
	IBT_HCA_AUTO_PATH_MIG	= 1 << 6,	/* Is APM supported? */
	IBT_HCA_SQD_SQD_PORT	= 1 << 7,	/* Can change physical port */
						/* on transit from SQD to SQD */
	IBT_HCA_PKEY_CNTR	= 1 << 8,
	IBT_HCA_QKEY_CNTR	= 1 << 9,
	IBT_HCA_AH_PORT_CHECK	= 1 << 10,	/* HCA checks AH port match */
						/* in UD WRs */
	IBT_HCA_PORT_UP		= 1 << 11,	/* PortActive event supported */
	IBT_HCA_INIT_TYPE	= 1 << 12,	/* InitType supported */
	IBT_HCA_SI_GUID		= 1 << 13,	/* System Image GUID */
						/* supported */
	IBT_HCA_SHUTDOWN_PORT	= 1 << 14,	/* ShutdownPort supported */
	IBT_HCA_RNR_NAK		= 1 << 15,	/* RNR-NAK supported for RC */
	IBT_HCA_CURRENT_QP_STATE = 1 << 16,	/* Does modify_qp support */
						/* checking of current state? */
	IBT_HCA_SRQ 		= 1 << 17,	/* Shared Receive Queue */
	IBT_HCA_RESIZE_SRQ	= 1 << 18,	/* Is resize SRQ supported? */
	IBT_HCA_BASE_MEM_MGT	= 1 << 19,	/* Base memory mgt supported? */
	IBT_HCA_MULT_PAGE_SZ_MR	= 1 << 20,	/* Support of multiple page */
						/* sizes per memory region? */
	IBT_HCA_BLOCK_LIST	= 1 << 21,	/* Block list physical buffer */
						/* lists supported? */
	IBT_HCA_ZERO_BASED_VA	= 1 << 22,	/* Zero Based Virtual */
						/* Addresses supported? */
	IBT_HCA_LOCAL_INVAL_FENCE = 1 << 23,	/* Local invalidate fencing? */
	IBT_HCA_BASE_QUEUE_MGT	= 1 << 24,	/* Base Queue Mgt supported? */
	IBT_HCA_CKSUM_FULL	= 1 << 25,	/* Checksum offload supported */
	IBT_HCA_MEM_WIN_TYPE_2B	= 1 << 26,	/* Type 2B memory windows */
	IBT_HCA_PHYS_BUF_BLOCK	= 1 << 27,	/* Block mode phys buf lists */
	IBT_HCA_FMR		= 1 << 28	/* FMR Support */
} ibt_hca_flags_t;

/*
 * The definition of HCA page size capabilities as a bitfield
 */
typedef enum ibt_page_sizes_e {
	IBT_PAGE_4K		= 0x1 << 2,
	IBT_PAGE_8K		= 0x1 << 3,
	IBT_PAGE_16K		= 0x1 << 4,
	IBT_PAGE_32K		= 0x1 << 5,
	IBT_PAGE_64K		= 0x1 << 6,
	IBT_PAGE_128K		= 0x1 << 7,
	IBT_PAGE_256K		= 0x1 << 8,
	IBT_PAGE_512K		= 0x1 << 9,
	IBT_PAGE_1M		= 0x1 << 10,
	IBT_PAGE_2M		= 0x1 << 11,
	IBT_PAGE_4M		= 0x1 << 12,
	IBT_PAGE_8M		= 0x1 << 13,
	IBT_PAGE_16M		= 0x1 << 14,
	IBT_PAGE_32M		= 0x1 << 15,
	IBT_PAGE_64M		= 0x1 << 16,
	IBT_PAGE_128M		= 0x1 << 17,
	IBT_PAGE_256M		= 0x1 << 18,
	IBT_PAGE_512M		= 0x1 << 19,
	IBT_PAGE_1G		= 0x1 << 20,
	IBT_PAGE_2G		= 0x1 << 21,
	IBT_PAGE_4G		= 0x1 << 22,
	IBT_PAGE_8G		= 0x1 << 23,
	IBT_PAGE_16G		= 0x1 << 24
} ibt_page_sizes_t;

/*
 * Memory Window Type.
 */
typedef enum ibt_mem_win_type_e {
	IBT_MEM_WIN_TYPE_NOT_DEFINED	= 0,
	IBT_MEM_WIN_TYPE_1		= (1 << 0),
	IBT_MEM_WIN_TYPE_2		= (1 << 1)
} ibt_mem_win_type_t;

/*
 * HCA attributes.
 * Contains all HCA static attributes.
 */
typedef struct ibt_hca_attr_s {
	ibt_hca_flags_t	hca_flags;		/* HCA capabilities etc */

	/* device/version inconsistency w/ NodeInfo and IOControllerProfile */
	uint32_t	hca_vendor_id:24;	/* 24 bit Vendor ID */
	uint16_t	hca_device_id;
	uint32_t	hca_version_id;

	uint_t		hca_max_chans;		/* Max Chans supported */
	uint_t		hca_max_chan_sz;	/* Max outstanding WRs on any */
						/* channel */

	uint_t		hca_max_sgl;		/* Max SGL entries per WR */

	uint_t		hca_max_cq;		/* Max num of CQs supported */
	uint_t		hca_max_cq_sz;		/* Max capacity of each CQ */

	ibt_page_sizes_t	hca_page_sz;	/* Bit mask of page sizes */

	uint_t		hca_max_memr;		/* Max num of HCA mem regions */
	ib_memlen_t	hca_max_memr_len;	/* Largest block, in bytes of */
						/* mem that can be registered */
	uint_t		hca_max_mem_win;	/* Max Memory windows in HCA */

	uint_t		hca_max_rsc;		/* Max Responder Resources of */
						/* this HCA for RDMAR/Atomics */
						/* with this HCA as target. */
	uint8_t		hca_max_rdma_in_chan;	/* Max incoming RDMA Reads/ */
						/* Atomics per chan with this */
						/* HCA as target. */
	uint8_t		hca_max_rdma_out_chan;	/* Max RDMA Reads/Atomics out */
						/* per channel by this HCA */
	uint_t		hca_max_ipv6_chan;	/* Max IPV6 channels in HCA */
	uint_t		hca_max_ether_chan;	/* Max Ether channels in HCA */

	uint_t		hca_max_mcg_chans;	/* Max number of channels */
						/* that can join multicast */
						/* groups */
	uint_t		hca_max_mcg;		/* Max multicast groups */
	uint_t		hca_max_chan_per_mcg;	/* Max number of channels per */
						/* Multicast group in HCA */

	uint16_t	hca_max_partitions;	/* Max partitions in HCA */
	uint8_t		hca_nports;		/* Number of physical ports */
	ib_guid_t	hca_node_guid;		/* Node GUID */

	ib_time_t	hca_local_ack_delay;

	uint_t		hca_max_port_sgid_tbl_sz;
	uint16_t	hca_max_port_pkey_tbl_sz;
	uint_t		hca_max_pd;		/* Max# of Protection Domains */
	ib_guid_t	hca_si_guid;		/* Optional System Image GUID */
	uint_t		hca_hca_max_ci_priv_sz;
	uint_t		hca_chan_max_ci_priv_sz;
	uint_t		hca_cq_max_ci_priv_sz;
	uint_t		hca_pd_max_ci_priv_sz;
	uint_t		hca_mr_max_ci_priv_sz;
	uint_t		hca_mw_max_ci_priv_sz;
	uint_t		hca_ud_dest_max_ci_priv_sz;
	uint_t		hca_cq_sched_max_ci_priv_sz;
	uint_t		hca_max_ud_dest;
	uint_t		hca_opaque2;
	uint_t		hca_opaque3;
	uint_t		hca_opaque4;
	uint8_t		hca_opaque5;
	uint8_t		hca_opaque6;
	uint_t		hca_opaque7;
	uint_t		hca_opaque8;
	uint_t		hca_max_srqs;		/* Max SRQs supported */
	uint_t		hca_max_srqs_sz;	/* Max outstanding WRs on any */
						/* SRQ */
	uint_t		hca_max_srq_sgl;	/* Max SGL entries per SRQ WR */
	uint_t		hca_max_phys_buf_list_sz;
	size_t		hca_block_sz_lo;	/* Range of block sizes */
	size_t		hca_block_sz_hi;	/* supported by the HCA */
	uint_t		hca_max_cq_handlers;
	ibt_lkey_t	hca_reserved_lkey;
	uint_t		hca_max_fmrs;		/* Max FMR Supported */
	uint_t		hca_opaque9;
} ibt_hca_attr_t;
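
/*
 * Example (illustrative): testing an HCA capability bit.  This sketch
 * assumes the attributes were obtained with ibt_query_hca(), which is
 * part of the IBTI interface and not defined in this file; hca_hdl is a
 * hypothetical ibt_hca_hdl_t.
 *
 *	ibt_hca_attr_t	hca_attrs;
 *
 *	if (ibt_query_hca(hca_hdl, &hca_attrs) == IBT_SUCCESS &&
 *	    (hca_attrs.hca_flags & IBT_HCA_SRQ) != 0) {
 *		... the HCA supports Shared Receive Queues ...
 *	}
 */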

/*
 * HCA Port link states.
 */
typedef enum ibt_port_state_e {
	IBT_PORT_DOWN	= 1,
	IBT_PORT_INIT,
	IBT_PORT_ARM,
	IBT_PORT_ACTIVE
} ibt_port_state_t;

/*
 * HCA Port capabilities as a bitfield.
 */
typedef enum ibt_port_caps_e {
	IBT_PORT_CAP_NO_FLAGS		= 0,
	IBT_PORT_CAP_SM			= 1 << 0,	/* SM port */
	IBT_PORT_CAP_SM_DISABLED	= 1 << 1,
	IBT_PORT_CAP_SNMP_TUNNEL	= 1 << 2,	/* SNMP Tunneling */
	IBT_PORT_CAP_DM			= 1 << 3,	/* DM supported */
	IBT_PORT_CAP_VENDOR		= 1 << 4	/* Vendor Class */
} ibt_port_caps_t;


/*
 * HCA port attributes structure definition. The number of ports per HCA
 * can be found from the "ibt_hca_attr_t" structure.
 *
 * p_pkey_tbl is a pointer to an array of ib_pkey_t; members are
 * accessed as:
 *		hca_portinfo->p_pkey_tbl[i]
 *
 * Where 0 <= i < hca_portinfo->p_pkey_tbl_sz
 *
 * Similarly p_sgid_tbl is a pointer to an array of ib_gid_t.
 *
 * The Query Port function - ibt_query_hca_ports() allocates the memory
 * required for the ibt_hca_portinfo_t struct as well as the memory
 * required for the SGID and P_Key tables. The memory is freed by calling
 * ibt_free_portinfo().
 */
typedef struct ibt_hca_portinfo_s {
	ib_lid_t		p_opaque1;	/* Base LID of port */
	ib_qkey_cntr_t		p_qkey_violations; /* Bad Q_Key cnt */
	ib_pkey_cntr_t		p_pkey_violations; /* Optional bad P_Key cnt */
	uint8_t			p_sm_sl:4;	/* SM Service level */
	ib_lid_t		p_sm_lid;	/* SM LID */
	ibt_port_state_t	p_linkstate;	/* Port state */
	uint8_t			p_port_num;
	ib_mtu_t		p_mtu;		/* Max transfer unit - pkt */
	uint8_t			p_lmc:3;	/* Local mask control */
	ib_gid_t		*p_sgid_tbl;	/* SGID Table */
	uint_t			p_sgid_tbl_sz;	/* Size of SGID table */
	uint16_t		p_pkey_tbl_sz;	/* Size of P_Key table */
	uint16_t		p_def_pkey_ix;	/* default pkey index for TI */
	ib_pkey_t		*p_pkey_tbl;	/* P_Key table */
	uint8_t			p_max_vl;	/* Max num of virtual lanes */
	uint8_t			p_init_type_reply; /* Optional InitTypeReply */
	ib_time_t		p_subnet_timeout; /* Max Subnet Timeout */
	ibt_port_caps_t		p_capabilities;	/* Port Capabilities */
	uint32_t		p_msg_sz;	/* Max message size */
} ibt_hca_portinfo_t;
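
/*
 * Example (illustrative) of the query/free pattern described above.  The
 * exact ibt_query_hca_ports()/ibt_free_portinfo() signatures belong to
 * the IBTI interface (ibti.h) and are assumed here; port number 0 is
 * assumed to mean "all ports".
 *
 *	ibt_hca_portinfo_t	*port_info;
 *	uint_t			nports, size;
 *
 *	if (ibt_query_hca_ports(hca_hdl, 0, &port_info, &nports,
 *	    &size) == IBT_SUCCESS) {
 *		... examine port_info[0].p_linkstate, p_pkey_tbl[], etc. ...
 *		ibt_free_portinfo(port_info, size);
 *	}
 */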

/*
 * Modify HCA port attributes flags; these specify which HCA port
 * attributes to modify.
 */
typedef enum ibt_port_modify_flags_e {
	IBT_PORT_NO_FLAGS	= 0,

	IBT_PORT_RESET_QKEY	= 1 << 0,	/* Reset Q_Key violation */
						/* counter */
	IBT_PORT_RESET_SM	= 1 << 1,	/* SM */
	IBT_PORT_SET_SM		= 1 << 2,
	IBT_PORT_RESET_SNMP	= 1 << 3,	/* SNMP Tunneling */
	IBT_PORT_SET_SNMP	= 1 << 4,
	IBT_PORT_RESET_DEVMGT	= 1 << 5,	/* Device Management */
	IBT_PORT_SET_DEVMGT	= 1 << 6,
	IBT_PORT_RESET_VENDOR	= 1 << 7,	/* Vendor Class */
	IBT_PORT_SET_VENDOR	= 1 << 8,
	IBT_PORT_SHUTDOWN	= 1 << 9,	/* Shut down the port */
	IBT_PORT_SET_INIT_TYPE	= 1 << 10	/* InitTypeReply value */
} ibt_port_modify_flags_t;

/*
 * Modify HCA port InitType bit definitions, applicable only if
 * IBT_PORT_SET_INIT_TYPE modify flag (ibt_port_modify_flags_t) is set.
 */
#define	IBT_PINIT_NO_LOAD		0x1
#define	IBT_PINIT_PRESERVE_CONTENT	0x2
#define	IBT_PINIT_PRESERVE_PRESENCE	0x4
#define	IBT_PINIT_NO_RESUSCITATE	0x8


/*
 * Address vector definition.
 */
typedef struct ibt_adds_vect_s {
	ib_gid_t	av_dgid;	/* IPV6 dest GID in GRH */
	ib_gid_t	av_sgid;	/* SGID */
	ibt_srate_t	av_srate;	/* Max static rate */
	uint8_t		av_srvl:4;	/* Service level in LRH */
	uint_t		av_flow:20;	/* 20 bit Flow Label */
	uint8_t		av_tclass;	/* Traffic Class */
	uint8_t		av_hop;		/* Hop Limit */
	uint8_t		av_port_num;	/* Port number for UD */
	boolean_t	av_opaque1;
	ib_lid_t	av_opaque2;
	ib_path_bits_t	av_opaque3;
	uint32_t	av_opaque4;
} ibt_adds_vect_t;

typedef struct ibt_cep_path_s {
	ibt_adds_vect_t	cep_adds_vect;		/* Address Vector */
	uint16_t	cep_pkey_ix;		/* P_Key Index */
	uint8_t		cep_hca_port_num;	/* Port number for connected */
						/* channels.  A value of 0 */
						/* indicates an invalid path */
	ib_time_t	cep_cm_opaque1;
} ibt_cep_path_t;

/*
 * Channel Migration State.
 */
typedef enum ibt_cep_cmstate_e {
	IBT_STATE_NOT_SUPPORTED	= 0,
	IBT_STATE_MIGRATED	= 1,
	IBT_STATE_REARMED	= 2,
	IBT_STATE_ARMED		= 3
} ibt_cep_cmstate_t;

/*
 * Transport service type
 *
 * NOTE: this was converted from an enum to a uint8_t to save space.
 */
typedef uint8_t ibt_tran_srv_t;

#define	IBT_RC_SRV		0
#define	IBT_UC_SRV		1
#define	IBT_RD_SRV		2
#define	IBT_UD_SRV		3
#define	IBT_RAWIP_SRV		4
#define	IBT_RAWETHER_SRV	5

/*
 * Channel (QP/EEC) state definitions.
 */
typedef enum ibt_cep_state_e {
	IBT_STATE_RESET	= 0,		/* Reset */
	IBT_STATE_INIT,			/* Initialized */
	IBT_STATE_RTR,			/* Ready to Receive */
	IBT_STATE_RTS,			/* Ready to Send */
	IBT_STATE_SQD,			/* Send Queue Drained */
	IBT_STATE_SQE,			/* Send Queue Error */
	IBT_STATE_ERROR,		/* Error */
	IBT_STATE_SQDRAIN,		/* Send Queue Draining */
	IBT_STATE_NUM			/* Number of states */
} ibt_cep_state_t;


/*
 * Channel Attribute flags.
 */
typedef enum ibt_attr_flags_e {
	IBT_ALL_SIGNALED	= 0,	/* All sends signaled */
	IBT_WR_SIGNALED		= 1,	/* Signaled on a WR basis */
	IBT_FAST_REG_RES_LKEY	= (1 << 1)
} ibt_attr_flags_t;

/*
 * Channel End Point (CEP) Control Flags.
 */
typedef enum ibt_cep_flags_e {
	IBT_CEP_NO_FLAGS	= 0,		/* Enable Nothing */
	IBT_CEP_RDMA_RD		= (1 << 0),	/* Enable incoming RDMA RD's */
						/* RC & RD only */
	IBT_CEP_RDMA_WR		= (1 << 1),	/* Enable incoming RDMA WR's */
						/* RC & RD only */
	IBT_CEP_ATOMIC		= (1 << 2)	/* Enable incoming Atomics, */
						/* RC & RD only */
} ibt_cep_flags_t;

/*
 * Channel Modify Flags
 */
typedef enum ibt_cep_modify_flags_e {
	IBT_CEP_SET_NOTHING		= 0,
	IBT_CEP_SET_SQ_SIZE		= (1 << 1),
	IBT_CEP_SET_RQ_SIZE		= (1 << 2),

	IBT_CEP_SET_RDMA_R		= (1 << 3),
	IBT_CEP_SET_RDMA_W		= (1 << 4),
	IBT_CEP_SET_ATOMIC		= (1 << 5),

	IBT_CEP_SET_ALT_PATH		= (1 << 6),	/* Alternate Path */

	IBT_CEP_SET_ADDS_VECT		= (1 << 7),
	IBT_CEP_SET_PORT		= (1 << 8),
	IBT_CEP_SET_OPAQUE5		= (1 << 9),
	IBT_CEP_SET_RETRY		= (1 << 10),
	IBT_CEP_SET_RNR_NAK_RETRY 	= (1 << 11),
	IBT_CEP_SET_MIN_RNR_NAK		= (1 << 12),

	IBT_CEP_SET_QKEY		= (1 << 13),
	IBT_CEP_SET_RDMARA_OUT		= (1 << 14),
	IBT_CEP_SET_RDMARA_IN		= (1 << 15),

	IBT_CEP_SET_OPAQUE1		= (1 << 16),
	IBT_CEP_SET_OPAQUE2		= (1 << 17),
	IBT_CEP_SET_OPAQUE3		= (1 << 18),
	IBT_CEP_SET_OPAQUE4		= (1 << 19),
	IBT_CEP_SET_SQD_EVENT		= (1 << 20),
	IBT_CEP_SET_OPAQUE6		= (1 << 21),
	IBT_CEP_SET_OPAQUE7		= (1 << 22),
	IBT_CEP_SET_OPAQUE8		= (1 << 23)
} ibt_cep_modify_flags_t;

/*
 * CQ notify types.
 */
typedef enum ibt_cq_notify_flags_e {
	IBT_NEXT_COMPLETION	= 1,
	IBT_NEXT_SOLICITED	= 2
} ibt_cq_notify_flags_t;

/*
 * CQ types shared across TI and CI.
 */
typedef enum ibt_cq_flags_e {
	IBT_CQ_NO_FLAGS			= 0,
	IBT_CQ_HANDLER_IN_THREAD	= 1 << 0,	/* A thread calls the */
							/* CQ handler */
	IBT_CQ_USER_MAP			= 1 << 1,
	IBT_CQ_DEFER_ALLOC		= 1 << 2
} ibt_cq_flags_t;

/*
 * CQ scheduling hint flags, shared across TI and CI.
 */
typedef enum ibt_cq_sched_flags_e {
	IBT_CQS_NO_FLAGS	= 0,
	IBT_CQS_WARM_CACHE	= 1 << 0, /* run on same CPU */
	IBT_CQS_AFFINITY	= 1 << 1,
	IBT_CQS_SCHED_GROUP	= 1 << 2,
	IBT_CQS_USER_MAP	= 1 << 3,
	IBT_CQS_DEFER_ALLOC	= 1 << 4
} ibt_cq_sched_flags_t;

/*
 * Attributes when creating a Completion Queue.
 *
 * Note:
 *	The IBT_CQ_HANDLER_IN_THREAD cq_flags bit should be ignored by the CI.
 */
typedef struct ibt_cq_attr_s {
	uint_t			cq_size;
	ibt_sched_hdl_t		cq_sched;	/* 0 = no hint, */
						/* other = cq_sched value */
	ibt_cq_flags_t		cq_flags;
} ibt_cq_attr_t;
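
/*
 * Example (illustrative): filling in ibt_cq_attr_t before allocating a
 * completion queue.  ibt_alloc_cq() (see the handle typedefs above) is
 * part of the IBTI interface; its exact signature is assumed here.
 *
 *	ibt_cq_attr_t	cq_attr;
 *	ibt_cq_hdl_t	cq_hdl;
 *	uint_t		real_size;
 *
 *	cq_attr.cq_size  = 128;
 *	cq_attr.cq_sched = NULL;		(no scheduling hint)
 *	cq_attr.cq_flags = IBT_CQ_NO_FLAGS;
 *	(void) ibt_alloc_cq(hca_hdl, &cq_attr, &cq_hdl, &real_size);
 */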

/*
 * Memory Management
 */

/* Memory management flags */
typedef enum ibt_mr_flags_e {
	IBT_MR_SLEEP			= 0,
	IBT_MR_NOSLEEP			= (1 << 1),
	IBT_MR_NONCOHERENT		= (1 << 2),
	IBT_MR_PHYS_IOVA		= (1 << 3),  /* ibt_(re)register_buf */

	/* Access control flags */
	IBT_MR_ENABLE_WINDOW_BIND	= (1 << 4),
	IBT_MR_ENABLE_LOCAL_WRITE	= (1 << 5),
	IBT_MR_ENABLE_REMOTE_READ	= (1 << 6),
	IBT_MR_ENABLE_REMOTE_WRITE	= (1 << 7),
	IBT_MR_ENABLE_REMOTE_ATOMIC	= (1 << 8),

	/* Reregister flags */
	IBT_MR_CHANGE_TRANSLATION	= (1 << 9),
	IBT_MR_CHANGE_ACCESS		= (1 << 10),
	IBT_MR_CHANGE_PD		= (1 << 11),

	/* Additional registration flags */
	IBT_MR_ZBVA			= (1 << 12),

	/* Additional physical registration flags */
	IBT_MR_CONSUMER_KEY		= (1 << 13)	/* Consumer owns key */
							/* portion of keys */
} ibt_mr_flags_t;


/* Memory Region attribute flags */
typedef enum ibt_mr_attr_flags_e {
	/* Access control flags */
	IBT_MR_WINDOW_BIND		= (1 << 0),
	IBT_MR_LOCAL_WRITE		= (1 << 1),
	IBT_MR_REMOTE_READ		= (1 << 2),
	IBT_MR_REMOTE_WRITE		= (1 << 3),
	IBT_MR_REMOTE_ATOMIC		= (1 << 4),
	IBT_MR_ZERO_BASED_VA		= (1 << 5),
	IBT_MR_CONSUMER_OWNED_KEY	= (1 << 6),
	IBT_MR_SHARED			= (1 << 7),
	IBT_MR_FMR			= (1 << 8)
} ibt_mr_attr_flags_t;

/* Memory region physical descriptor. */
typedef struct ibt_phys_buf_s {
	union {
		uint64_t	_p_ll;		/* 64 bit DMA address */
		uint32_t	_p_la[2];	/* 2 x 32 bit address */
	} _phys_buf;
	size_t	p_size;
} ibt_phys_buf_t;

#define	p_laddr		_phys_buf._p_ll
#ifdef	_LONG_LONG_HTOL
#define	p_notused	_phys_buf._p_la[0]
#define	p_addr		_phys_buf._p_la[1]
#else
#define	p_addr		_phys_buf._p_la[0]
#define	p_notused	_phys_buf._p_la[1]
#endif


/* Memory region descriptor. */
typedef struct ibt_mr_desc_s {
	ib_vaddr_t	md_vaddr;	/* IB virtual addr of memory */
	ibt_lkey_t	md_lkey;
	ibt_rkey_t	md_rkey;
	boolean_t	md_sync_required;
} ibt_mr_desc_t;

/* Physical Memory region descriptor. */
typedef struct ibt_pmr_desc_s {
	ib_vaddr_t	pmd_iova;	/* Returned I/O Virtual Address */
	ibt_lkey_t	pmd_lkey;
	ibt_rkey_t	pmd_rkey;
	uint_t 		pmd_phys_buf_list_sz;	/* Allocated Phys buf sz */
	boolean_t	pmd_sync_required;
} ibt_pmr_desc_t;

/* Memory region protection bounds. */
typedef struct ibt_mr_prot_bounds_s {
	ib_vaddr_t	pb_addr;	/* Beginning address */
	size_t		pb_len;		/* Length of protected region */
} ibt_mr_prot_bounds_t;

/* Memory Region (Re)Register attributes */
typedef struct ibt_mr_attr_s {
	ib_vaddr_t	mr_vaddr;	/* Virtual address to register */
	ib_memlen_t	mr_len;		/* Length of region to register */
	struct as	*mr_as;		/* A pointer to an address space */
					/* structure. This parameter should */
					/* be set to NULL, which implies */
					/* kernel address space. */
	ibt_mr_flags_t	mr_flags;
} ibt_mr_attr_t;
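
/*
 * Example (illustrative): registering a kernel virtual buffer.  Only
 * ibt_mr_attr_t and ibt_mr_desc_t are defined in this file; the
 * ibt_register_mr() call and the buf/buflen/pd_hdl names are assumed.
 *
 *	ibt_mr_attr_t	mr_attr;
 *	ibt_mr_hdl_t	mr_hdl;
 *	ibt_mr_desc_t	mr_desc;
 *
 *	mr_attr.mr_vaddr = (ib_vaddr_t)(uintptr_t)buf;
 *	mr_attr.mr_len   = buflen;
 *	mr_attr.mr_as    = NULL;		(kernel address space)
 *	mr_attr.mr_flags = IBT_MR_SLEEP | IBT_MR_ENABLE_LOCAL_WRITE;
 *	(void) ibt_register_mr(hca_hdl, pd_hdl, &mr_attr, &mr_hdl, &mr_desc);
 */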

/* Physical Memory Region (Re)Register */
typedef struct ibt_pmr_attr_s {
	ib_vaddr_t	pmr_iova;	/* I/O virtual address requested by */
					/* client for the first byte of the */
					/* region */
	ib_memlen_t	pmr_len;	/* Length of region to register */
	ib_memlen_t	pmr_offset;	/* Offset of the region's starting */
					/* IOVA within the 1st physical */
					/* buffer */
	ibt_mr_flags_t	pmr_flags;
	ibt_lkey_t	pmr_lkey;	/* Reregister only */
	ibt_rkey_t	pmr_rkey;	/* Reregister only */
	uint8_t		pmr_key;	/* Key to use on new Lkey & Rkey */
	uint_t		pmr_num_buf;	/* Num of entries in the pmr_buf_list */
	size_t		pmr_buf_sz;
	ibt_phys_buf_t	*pmr_buf_list;	/* List of physical buffers accessed */
					/* as an array */
	ibt_ma_hdl_t	pmr_ma;		/* Memory handle used to obtain the */
					/* pmr_buf_list */
} ibt_pmr_attr_t;


/*
 * Memory Region (Re)Register attributes - used by ibt_register_shared_mr(),
 * ibt_register_buf() and ibt_reregister_buf().
 */
typedef struct ibt_smr_attr_s {
	ib_vaddr_t		mr_vaddr;
	ibt_mr_flags_t		mr_flags;
	uint8_t			mr_key;		/* Only for physical */
						/* ibt_(re)register_buf() */
	ibt_lkey_t		mr_lkey;	/* Only for physical */
	ibt_rkey_t		mr_rkey;	/* ibt_(re)register_buf() */
} ibt_smr_attr_t;

/*
 * key states.
 */
typedef enum ibt_key_state_e {
	IBT_KEY_INVALID	= 0,
	IBT_KEY_FREE,
	IBT_KEY_VALID
} ibt_key_state_t;

/* Memory region query attributes */
typedef struct ibt_mr_query_attr_s {
	ibt_lkey_t		mr_lkey;
	ibt_rkey_t		mr_rkey;
	ibt_mr_prot_bounds_t	mr_lbounds;	/* Actual local CI protection */
						/* bounds */
	ibt_mr_prot_bounds_t	mr_rbounds;	/* Actual remote CI */
						/* protection bounds */
	ibt_mr_attr_flags_t	mr_attr_flags;	/* Access rights etc. */
	ibt_pd_hdl_t		mr_pd;		/* Protection domain */
	boolean_t		mr_sync_required;
	ibt_key_state_t		mr_lkey_state;
	uint_t			mr_phys_buf_list_sz;
} ibt_mr_query_attr_t;

/* Memory window query attributes */
typedef struct ibt_mw_query_attr_s {
	ibt_pd_hdl_t		mw_pd;
	ibt_mem_win_type_t	mw_type;
	ibt_rkey_t		mw_rkey;
	ibt_key_state_t		mw_state;
} ibt_mw_query_attr_t;


/* Memory Region Sync Flags. */
#define	IBT_SYNC_READ	0x1	/* Make memory changes visible to incoming */
				/* RDMA reads */

#define	IBT_SYNC_WRITE	0x2	/* Make the effects of an incoming RDMA write */
				/* visible to the consumer */

/* Memory region sync args */
typedef struct ibt_mr_sync_s {
	ibt_mr_hdl_t	ms_handle;
	ib_vaddr_t	ms_vaddr;
	ib_memlen_t	ms_len;
	uint32_t	ms_flags;	/* IBT_SYNC_READ or IBT_SYNC_WRITE */
} ibt_mr_sync_t;
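
/*
 * Example (illustrative): making CPU stores to a registered region
 * visible to a subsequent incoming RDMA read.  ibt_sync_mr() is an IBTI
 * call assumed here; mr_hdl and mr_attr are the hypothetical results of
 * the registration sketch above.
 *
 *	ibt_mr_sync_t	sync;
 *
 *	sync.ms_handle = mr_hdl;
 *	sync.ms_vaddr  = mr_attr.mr_vaddr;
 *	sync.ms_len    = mr_attr.mr_len;
 *	sync.ms_flags  = IBT_SYNC_READ;
 *	(void) ibt_sync_mr(hca_hdl, &sync, 1);
 */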

/*
 * Flags for Virtual Address to HCA Physical Address translation.
 */
typedef enum ibt_va_flags_e {
	IBT_VA_SLEEP		= 0,
	IBT_VA_NOSLEEP		= (1 << 0),
	IBT_VA_NONCOHERENT	= (1 << 1),
	IBT_VA_FMR		= (1 << 2),
	IBT_VA_BLOCK_MODE	= (1 << 3),
	IBT_VA_BUF		= (1 << 4)
} ibt_va_flags_t;


/* Address Translation parameters */
typedef struct ibt_va_attr_s {
	ib_vaddr_t	va_vaddr;	/* Virtual address to register */
	ib_memlen_t	va_len;		/* Length of region to register */
	struct as	*va_as;		/* A pointer to an address space */
					/* structure. */
	size_t		va_phys_buf_min;
	size_t		va_phys_buf_max;
	ibt_va_flags_t	va_flags;
	struct buf	*va_buf;
} ibt_va_attr_t;


/*
 * Fast Memory Registration (FMR) support.
 */

/* FMR flush function handler. */
typedef void (*ibt_fmr_flush_handler_t)(ibt_fmr_pool_hdl_t fmr_pool,
    void *fmr_func_arg);

/* FMR Pool create attributes. */
typedef struct ibt_fmr_pool_attr_s {
	uint_t			fmr_max_pages_per_fmr;
	uint_t			fmr_pool_size;
	uint_t			fmr_dirty_watermark;
	size_t			fmr_page_sz;
	boolean_t		fmr_cache;
	ibt_mr_flags_t		fmr_flags;
	ibt_fmr_flush_handler_t	fmr_func_hdlr;
	void			*fmr_func_arg;
} ibt_fmr_pool_attr_t;
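
/*
 * Example (illustrative): a flush handler matching the typedef above and
 * the pool attributes that reference it.  The handler name, the numeric
 * values and the my_softstate argument are hypothetical.
 *
 *	static void
 *	my_fmr_flush(ibt_fmr_pool_hdl_t pool, void *arg)
 *	{
 *		... dirty FMRs belonging to this pool were just flushed ...
 *	}
 *
 *	ibt_fmr_pool_attr_t	fmr_attr;
 *
 *	fmr_attr.fmr_max_pages_per_fmr = 64;
 *	fmr_attr.fmr_pool_size         = 1024;
 *	fmr_attr.fmr_dirty_watermark   = 32;
 *	fmr_attr.fmr_page_sz           = PAGESIZE;
 *	fmr_attr.fmr_cache             = B_FALSE;
 *	fmr_attr.fmr_flags             = IBT_MR_SLEEP;
 *	fmr_attr.fmr_func_hdlr         = my_fmr_flush;
 *	fmr_attr.fmr_func_arg          = my_softstate;
 */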


/*
 * WORK REQUEST AND WORK REQUEST COMPLETION DEFINITIONS.
 */

/*
 * Work Request and Work Request Completion types - These types are used
 *   to indicate the type of work requests posted to a work queue
 *   or the type of completion received.  Immediate Data is indicated via
 *   ibt_wr_flags_t or ibt_wc_flags_t.
 *
 *   IBT_WRC_RECV and IBT_WRC_RECV_RDMAWI are only used as opcodes in the
 *   work completions.
 *
 * NOTE: this was converted from an enum to a uint8_t to save space.
 */
typedef uint8_t ibt_wrc_opcode_t;

#define	IBT_WRC_SEND		1	/* Send */
#define	IBT_WRC_RDMAR		2	/* RDMA Read */
#define	IBT_WRC_RDMAW		3	/* RDMA Write */
#define	IBT_WRC_CSWAP		4	/* Compare & Swap Atomic */
#define	IBT_WRC_FADD		5	/* Fetch & Add Atomic */
#define	IBT_WRC_BIND		6	/* Bind Memory Window */
#define	IBT_WRC_RECV		7	/* Receive */
#define	IBT_WRC_RECV_RDMAWI	8	/* Received RDMA Write w/ Immediate */
#define	IBT_WRC_FAST_REG_PMR	9	/* Fast Register Physical mem region */
#define	IBT_WRC_LOCAL_INVALIDATE 10


/*
 * Work Request Completion flags - These flags indicate what type
 *   of data is present in the Work Request Completion structure
 */
typedef uint8_t ibt_wc_flags_t;

#define	IBT_WC_NO_FLAGS			0
#define	IBT_WC_GRH_PRESENT		(1 << 0)
#define	IBT_WC_IMMED_DATA_PRESENT	(1 << 1)
#define	IBT_WC_RKEY_INVALIDATED		(1 << 2)
#define	IBT_WC_CKSUM_OK			(1 << 3)


/*
 * Work Request Completion - This structure encapsulates the information
 *   necessary to define a work request completion.
 */
typedef struct ibt_wc_s {
	ibt_wrid_t		wc_id;		/* Work Request Id */
	uint64_t		wc_fma_ena;	/* fault management err data */
	ib_msglen_t		wc_bytes_xfer;	/* Number of Bytes */
						/* Transferred */
	ibt_wc_flags_t		wc_flags;	/* WR Completion Flags */
	ibt_wrc_opcode_t	wc_type;	/* Operation Type */
	uint16_t		wc_cksum;	/* payload checksum */
	ibt_immed_t		wc_immed_data;	/* Immediate Data */
	uint32_t		wc_freed_rc;	/* Freed Resource Count */
	ibt_wc_status_t		wc_status;	/* Completion Status */
	uint8_t			wc_sl:4;	/* Remote SL */
	uint16_t		wc_ethertype;	/* Ethertype Field - RE */
	ib_lid_t		wc_opaque1;
	uint16_t		wc_opaque2;
	ib_qpn_t		wc_qpn;		/* Source QPN Datagram only */
	ib_eecn_t		wc_opaque3;
	ib_qpn_t		wc_local_qpn;
	ibt_rkey_t		wc_rkey;
	ib_path_bits_t		wc_opaque4;
} ibt_wc_t;


/*
 * WR Flags. Common for both RC and UD
 *
 * NOTE: this was converted from an enum to a uint8_t to save space.
 */
typedef uint8_t ibt_wr_flags_t;

#define	IBT_WR_NO_FLAGS		0
#define	IBT_WR_SEND_IMMED	(1 << 0)	/* Immediate Data Indicator */
#define	IBT_WR_SEND_SIGNAL	(1 << 1)	/* Signaled, if set */
#define	IBT_WR_SEND_FENCE	(1 << 2)	/* Fence Indicator */
#define	IBT_WR_SEND_SOLICIT	(1 << 3)	/* Solicited Event Indicator */
#define	IBT_WR_SEND_REMOTE_INVAL	(1 << 4) /* Remote Invalidate */
#define	IBT_WR_SEND_CKSUM	(1 << 5)	/* Checksum offload Indicator */

/*
 * Access control flags for the Bind Memory Window operation,
 * applicable to RC/UC/RD only.
 *
 * If IBT_WR_BIND_WRITE or IBT_WR_BIND_ATOMIC is desired, then the
 * Memory Region must have Local Write access enabled.
 */
typedef enum ibt_bind_flags_e {
	IBT_WR_BIND_READ	= (1 << 0),	/* enable remote read */
	IBT_WR_BIND_WRITE	= (1 << 1),	/* enable remote write */
	IBT_WR_BIND_ATOMIC	= (1 << 2),	/* enable remote atomics */
	IBT_WR_BIND_ZBVA	= (1 << 3)	/* Zero Based Virtual Address */
} ibt_bind_flags_t;

/*
 * Data Segment for scatter-gather list
 *
 * SGL consists of an array of data segments and the length of the SGL.
 */
typedef struct ibt_wr_ds_s {
	ib_vaddr_t	ds_va;		/* Virtual Address */
	ibt_lkey_t	ds_key;		/* L_Key */
	ib_msglen_t	ds_len;		/* Length of DS */
} ibt_wr_ds_t;

/*
 * Bind Memory Window WR
 *
 * WR ID from ibt_send_wr_t applies here too, SWG_0038 errata.
 */
typedef struct ibt_wr_bind_s {
	ibt_bind_flags_t	bind_flags;
	ibt_rkey_t		bind_rkey;		/* Mem Window's R_key */
	ibt_lkey_t		bind_lkey;		/* Mem Region's L_Key */
	ibt_rkey_t		bind_rkey_out;		/* OUT: new R_Key */
	ibt_mr_hdl_t		bind_ibt_mr_hdl;	/* Mem Region handle */
	ibt_mw_hdl_t		bind_ibt_mw_hdl;	/* Mem Window handle */
	ib_vaddr_t		bind_va;		/* Virtual Address */
	ib_memlen_t		bind_len;		/* Length of Window */
} ibt_wr_bind_t;

/*
 * Atomic WR
 *
 * Operation type (compare & swap or fetch & add) in ibt_wrc_opcode_t.
 *
 * A copy of the original contents of the remote memory will be stored
 * in the local data segment described by wr_sgl within ibt_send_wr_t,
 * and wr_nds should be set to 1.
 *
 * Atomic operation operands:
 *   Compare & Swap Operation:
 *	atom_arg1 - Compare Operand
 *	atom_arg2 - Swap Operand
 *
 *   Fetch & Add Operation:
 *	atom_arg1 - Add Operand
 *	atom_arg2 - ignored
 */
typedef struct ibt_wr_atomic_s {
	ib_vaddr_t	atom_raddr;	/* Remote address. */
	ibt_atom_arg_t	atom_arg1;	/* operand #1 */
	ibt_atom_arg_t	atom_arg2;	/* operand #2 */
	ibt_rkey_t	atom_rkey;	/* R_Key. */
} ibt_wr_atomic_t;
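
/*
 * Example (illustrative): operand setup for a Compare & Swap.  As noted
 * above, the enclosing send WR must carry a single SGL entry describing
 * an 8-byte local buffer that receives the original remote contents; the
 * remote_va/remote_rkey/expected/new_value names are hypothetical.
 *
 *	ibt_wr_atomic_t	atom;
 *
 *	atom.atom_raddr = remote_va;
 *	atom.atom_rkey  = remote_rkey;
 *	atom.atom_arg1  = expected;		(compare operand)
 *	atom.atom_arg2  = new_value;		(swap operand)
 */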

/*
 * RDMA WR
 * Immediate Data indicator in ibt_wr_flags_t.
 */
typedef struct ibt_wr_rdma_s {
	ib_vaddr_t	rdma_raddr;	/* Remote address. */
	ibt_rkey_t	rdma_rkey;	/* R_Key. */
	ibt_immed_t	rdma_immed;	/* Immediate Data */
} ibt_wr_rdma_t;

/*
 * Fast Register Physical Memory Region Work Request.
 */
typedef struct ibt_wr_reg_pmr_s {
	ib_vaddr_t	pmr_iova;	/* I/O virtual address requested by */
					/* client for the first byte of the */
					/* region */
	ib_memlen_t	pmr_len;	/* Length of region to register */
	ib_memlen_t	pmr_offset;	/* Offset of the region's starting */
					/* IOVA within the 1st physical */
					/* buffer */
	ibt_mr_hdl_t	pmr_mr_hdl;
	ibt_phys_buf_t	*pmr_buf_list;	/* List of physical buffers accessed */
					/* as an array */
	uint_t		pmr_num_buf;	/* Num of entries in the pmr_buf_list */
	ibt_lkey_t	pmr_lkey;
	ibt_rkey_t	pmr_rkey;
	ibt_mr_flags_t	pmr_flags;
	uint8_t		pmr_key;	/* Key to use on new Lkey & Rkey */
} ibt_wr_reg_pmr_t;

/*
 * Local Invalidate.
 */
typedef struct ibt_wr_li_s {
	ibt_mr_hdl_t	li_mr_hdl;	/* Null for MW invalidates */
	ibt_mw_hdl_t	li_mw_hdl;	/* Null for MR invalidates */
	ibt_lkey_t	li_lkey;	/* Ignore for MW invalidates */
	ibt_rkey_t	li_rkey;
} ibt_wr_li_t;

/*
 * Reserved For Future Use.
 * Raw IPv6 Send WR
 */
typedef struct ibt_wr_ripv6_s {
	ib_lid_t	rip_dlid;	/* DLID */
	ib_path_bits_t  rip_slid_bits;	/* SLID path bits, SWG_0033 errata */
	uint8_t		rip_sl:4;	/* SL */
	ibt_srate_t	rip_rate;	/* Max Static Rate, SWG_0007 errata */
} ibt_wr_ripv6_t;

/*
 * Reserved For Future Use.
 * Raw Ethertype Send WR
 */
typedef struct ibt_wr_reth_s {
	ib_ethertype_t  reth_type;	/* Ethertype */
	ib_lid_t	reth_dlid;	/* DLID */
	ib_path_bits_t	reth_slid_bits;	/* SLID path bits, SWG_0033 errata */
	uint8_t		reth_sl:4;	/* SL */
	ibt_srate_t	reth_rate;	/* Max Static Rate, SWG_0007 errata */
} ibt_wr_reth_t;

/*
 * Reserved For Future Use.
 * RD Send WR, Operation type in ibt_wrc_opcode_t.
 */
typedef struct ibt_wr_rd_s {
	ibt_rd_dest_hdl_t	rdwr_dest_hdl;
	union {
	    ibt_immed_t		send_immed;	/* IBT_WRC_SEND */
	    ibt_wr_rdma_t	rdma;		/* IBT_WRC_RDMAR */
						/* IBT_WRC_RDMAW */
	    ibt_wr_li_t		*li;		/* IBT_WRC_LOCAL_INVALIDATE */
	    ibt_wr_atomic_t	*atomic;	/* IBT_WRC_FADD */
						/* IBT_WRC_CSWAP */
	    ibt_wr_bind_t	*bind;		/* IBT_WRC_BIND */
	    ibt_wr_reg_pmr_t	*reg_pmr;	/* IBT_WRC_FAST_REG_PMR */
	} rdwr;
} ibt_wr_rd_t;

/*
 * Reserved For Future Use.
 * UC Send WR, Operation type in ibt_wrc_opcode_t, the only valid
 * ones are:
 *		IBT_WRC_SEND
 *		IBT_WRC_RDMAW
 *		IBT_WRC_BIND
 */
typedef struct ibt_wr_uc_s {
	union {
	    ibt_immed_t		send_immed;	/* IBT_WRC_SEND */
	    ibt_wr_rdma_t	rdma;		/* IBT_WRC_RDMAW */
	    ibt_wr_li_t		*li;		/* IBT_WRC_LOCAL_INVALIDATE */
	    ibt_wr_bind_t	*bind;		/* IBT_WRC_BIND */
	    ibt_wr_reg_pmr_t	*reg_pmr;	/* IBT_WRC_FAST_REG_PMR */
	} ucwr;
} ibt_wr_uc_t;

/*
 * RC Send WR, Operation type in ibt_wrc_opcode_t.
 */
typedef struct ibt_wr_rc_s {
	union {
	    ibt_immed_t		send_immed;	/* IBT_WRC_SEND w/ immediate */
	    ibt_rkey_t		send_inval;	/* IBT_WRC_SEND w/ invalidate */
	    ibt_wr_rdma_t	rdma;		/* IBT_WRC_RDMAR */
						/* IBT_WRC_RDMAW */
	    ibt_wr_li_t		*li;		/* IBT_WRC_LOCAL_INVALIDATE */
	    ibt_wr_atomic_t	*atomic;	/* IBT_WRC_CSWAP */
						/* IBT_WRC_FADD */
	    ibt_wr_bind_t	*bind;		/* IBT_WRC_BIND */
	    ibt_wr_reg_pmr_t	*reg_pmr;	/* IBT_WRC_FAST_REG_PMR */
	} rcwr;
} ibt_wr_rc_t;

/*
 * UD Send WR, the only valid Operation is IBT_WRC_SEND.
 */
typedef struct ibt_wr_ud_s {
	ibt_immed_t		udwr_immed;
	ibt_ud_dest_hdl_t	udwr_dest;
} ibt_wr_ud_t;

/*
 * Send Work Request (WR) attributes structure.
 *
 * Operation type in ibt_wrc_opcode_t.
 * Immediate Data indicator in ibt_wr_flags_t.
 */
typedef struct ibt_send_wr_s {
	ibt_wrid_t		wr_id;		/* WR ID */
	ibt_wr_flags_t		wr_flags;	/* Work Request Flags. */
	ibt_tran_srv_t		wr_trans;	/* Transport Type. */
	ibt_wrc_opcode_t	wr_opcode;	/* Operation Type. */
	uint8_t			wr_rsvd;	/* maybe later */
	uint32_t		wr_nds;		/* Number of data segments */
						/* pointed to by wr_sgl */
	ibt_wr_ds_t		*wr_sgl;	/* SGL */
	union {
		ibt_wr_ud_t	ud;
		ibt_wr_rc_t	rc;
		ibt_wr_rd_t	rd;	/* Reserved For Future Use */
		ibt_wr_uc_t	uc;	/* Reserved For Future Use */
		ibt_wr_reth_t	reth;	/* Reserved For Future Use */
		ibt_wr_ripv6_t	ripv6;	/* Reserved For Future Use */
	} wr;				/* operation specific */
} ibt_send_wr_t;
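
/*
 * Example (illustrative): a signaled UD send of a single buffer.  The
 * ibt_post_send() call is part of the IBTI interface and is assumed
 * here; buf_va/buf_len, the lkey from a prior registration, chan_hdl and
 * ud_dest (an ibt_ud_dest_hdl_t) are hypothetical.
 *
 *	ibt_wr_ds_t	sgl;
 *	ibt_send_wr_t	wr;
 *
 *	sgl.ds_va  = buf_va;
 *	sgl.ds_key = lkey;
 *	sgl.ds_len = buf_len;
 *
 *	wr.wr_id     = my_wrid;
 *	wr.wr_flags  = IBT_WR_SEND_SIGNAL;
 *	wr.wr_trans  = IBT_UD_SRV;
 *	wr.wr_opcode = IBT_WRC_SEND;
 *	wr.wr_nds    = 1;
 *	wr.wr_sgl    = &sgl;
 *	wr.wr.ud.udwr_immed = 0;
 *	wr.wr.ud.udwr_dest  = ud_dest;
 *	(void) ibt_post_send(chan_hdl, &wr, 1, NULL);
 */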

/*
 * Receive Work Request (WR) attributes structure.
 */
typedef struct ibt_recv_wr_s {
	ibt_wrid_t		wr_id;		/* WR ID */
	uint32_t		wr_nds;		/* number of data segments */
						/* pointed to by wr_sgl */
	ibt_wr_ds_t		*wr_sgl;	/* SGL */
} ibt_recv_wr_t;


/*
 * Asynchronous Events and Errors.
 *
 * The following codes are not used in calls to ibc_async_handler, but
 * are used by IBTL to inform IBT clients of a significant event.
 *
 *  IBT_HCA_ATTACH_EVENT	- New HCA available.
 *  IBT_HCA_DETACH_EVENT	- HCA is requesting not to be used.
 *
 * ERRORs on a channel indicate that the channel has entered error state.
 * EVENTs on a channel indicate that the channel has not changed state.
 *
 */
typedef enum ibt_async_code_e {
	IBT_EVENT_PATH_MIGRATED			= 0x000001,
	IBT_EVENT_SQD				= 0x000002,
	IBT_EVENT_COM_EST			= 0x000004,
	IBT_ERROR_CATASTROPHIC_CHAN		= 0x000008,
	IBT_ERROR_INVALID_REQUEST_CHAN		= 0x000010,
	IBT_ERROR_ACCESS_VIOLATION_CHAN		= 0x000020,
	IBT_ERROR_PATH_MIGRATE_REQ		= 0x000040,

	IBT_ERROR_CQ				= 0x000080,

	IBT_EVENT_PORT_UP			= 0x000100,
	IBT_ERROR_PORT_DOWN			= 0x000200,
	IBT_ERROR_LOCAL_CATASTROPHIC		= 0x000400,

	IBT_HCA_ATTACH_EVENT			= 0x000800,
	IBT_HCA_DETACH_EVENT			= 0x001000,
	IBT_ASYNC_OPAQUE1			= 0x002000,
	IBT_ASYNC_OPAQUE2			= 0x004000,
	IBT_ASYNC_OPAQUE3			= 0x008000,
	IBT_ASYNC_OPAQUE4			= 0x010000,
	IBT_EVENT_LIMIT_REACHED_SRQ		= 0x020000,
	IBT_EVENT_EMPTY_CHAN			= 0x040000,
	IBT_ERROR_CATASTROPHIC_SRQ		= 0x080000
} ibt_async_code_t;


/*
 * ibt_ci_data_in() and ibt_ci_data_out() flags.
 */
typedef enum ibt_ci_data_flags_e {
	IBT_CI_NO_FLAGS		= 0,
	IBT_CI_COMPLETE_ALLOC	= (1 << 0)
} ibt_ci_data_flags_t;

/*
 * Used by ibt_ci_data_in() and ibt_ci_data_out() to identify the type of
 * handle for which mapping data is being obtained.
 */
typedef enum ibt_object_type_e {
	IBT_HDL_HCA	=	1,
	IBT_HDL_CHANNEL,
	IBT_HDL_CQ,
	IBT_HDL_PD,
	IBT_HDL_MR,
	IBT_HDL_MW,
	IBT_HDL_UD_DEST,
	IBT_HDL_SCHED,
	IBT_HDL_OPAQUE1,
	IBT_HDL_OPAQUE2,
	IBT_HDL_SRQ
} ibt_object_type_t;

/*
 * Standard information for ibt_ci_data_in() for memory regions.
 *
 * IBT_MR_DATA_IN_IF_VERSION is the value used in the mr_rev member.
 * mr_func is the callback handler.  mr_arg1 and mr_arg2 are its arguments.
 */
#define	IBT_MR_DATA_IN_IF_VERSION	1
typedef struct ibt_mr_data_in_s {
	uint_t	mr_rev;
	void	(*mr_func)(void *, void *);
	void	*mr_arg1;
	void	*mr_arg2;
} ibt_mr_data_in_t;

/*
 * Memory error handler data structures; code, and payload data.
 */
typedef enum ibt_mem_code_s {
	IBT_MEM_AREA	= 0x1,
	IBT_MEM_REGION	= 0x2
} ibt_mem_code_t;

typedef struct ibt_mem_data_s {
	uint64_t	ev_fma_ena;	/* FMA Error data */
	ibt_mr_hdl_t	ev_mr_hdl;	/* MR handle */
	ibt_ma_hdl_t	ev_ma_hdl;	/* MA handle */
} ibt_mem_data_t;

/*
 * Special case failure type.
 */
typedef enum ibt_failure_type_e {
	IBT_FAILURE_STANDARD	= 0,
	IBT_FAILURE_CI,
	IBT_FAILURE_IBMF,
	IBT_FAILURE_IBTL,
	IBT_FAILURE_IBCM,
	IBT_FAILURE_IBDM,
	IBT_FAILURE_IBSM
} ibt_failure_type_t;

/*
 * RDMA IP CM service Annex definitions
 */
typedef struct ibt_ip_addr_s {
	sa_family_t family;		/* AF_INET or AF_INET6 */
	union {
		in_addr_t	ip4addr;
		in6_addr_t	ip6addr;
	} un;
} ibt_ip_addr_t;

#ifdef __cplusplus
}
#endif

#endif /* _SYS_IB_IBTL_IBTL_TYPES_H */