/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_DEVICE_H
#define MLX5_DEVICE_H

#include <linux/types.h>
#include <rdma/ib_verbs.h>
#include <linux/mlx5/mlx5_ifc.h>

#if defined(__LITTLE_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS	0
#elif defined(__BIG_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS	0x80
#else
#error Host endianness not defined
#endif

/* helper macros */
#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
#define __mlx5_bit_off(typ, fld) (offsetof(struct mlx5_ifc_##typ##_bits, fld))
#define __mlx5_16_off(typ, fld) (__mlx5_bit_off(typ, fld) / 16)
#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
#define __mlx5_16_bit_off(typ, fld) (16 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0xf))
#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f))
#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld))
#define __mlx5_mask16(typ, fld) ((u16)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
#define __mlx5_16_mask(typ, fld) (__mlx5_mask16(typ, fld) << __mlx5_16_bit_off(typ, fld))
#define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits)

#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
#define MLX5_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64)
#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
#define MLX5_ADDR_OF(typ, p, fld) ((void *)((uint8_t *)(p) + MLX5_BYTE_OFF(typ, fld)))

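/*
 * Illustrative arithmetic (added note, not from the device spec): the
 * mlx5_ifc structs describe big-endian layouts, so for a hypothetical
 * 8-bit field at bit offset 0x48 the helpers above yield
 *	__mlx5_dw_off     = 0x48 / 32              = 2 (third dword)
 *	__mlx5_dw_bit_off = 32 - 8 - (0x48 & 0x1f) = 16
 *	__mlx5_mask       = (1ull << 8) - 1        = 0xff
 * i.e. the field sits in bits [23:16] of dword 2.
 */
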
/* insert a value into a struct */
#define MLX5_SET(typ, p, fld, v) do { \
	u32 _v = v; \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32);             \
	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
		     (~__mlx5_dw_mask(typ, fld))) | (((_v) & __mlx5_mask(typ, fld)) \
		     << __mlx5_dw_bit_off(typ, fld))); \
} while (0)

#define MLX5_ARRAY_SET(typ, p, fld, idx, v) do { \
	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 32); \
	MLX5_SET(typ, p, fld[idx], v); \
} while (0)

#define MLX5_SET_TO_ONES(typ, p, fld) do { \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32);             \
	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
		     (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \
		     << __mlx5_dw_bit_off(typ, fld))); \
} while (0)

#define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
__mlx5_mask(typ, fld))

#define MLX5_GET_PR(typ, p, fld) ({ \
	u32 ___t = MLX5_GET(typ, p, fld); \
	pr_debug(#fld " = 0x%x\n", ___t); \
	___t; \
})

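/*
 * Usage sketch (illustrative; enable_hca is just one example command):
 * command buffers are sized with MLX5_ST_SZ_* and accessed by field
 * name, e.g.
 *
 *	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
 *
 *	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
 *
 * and a field is read back from an output mailbox with
 * MLX5_GET(enable_hca_out, out, syndrome).
 */
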
#define __MLX5_SET64(typ, p, fld, v) do { \
	BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \
	*((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \
} while (0)

#define MLX5_SET64(typ, p, fld, v) do { \
	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
	__MLX5_SET64(typ, p, fld, v); \
} while (0)

#define MLX5_ARRAY_SET64(typ, p, fld, idx, v) do { \
	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
	__MLX5_SET64(typ, p, fld[idx], v); \
} while (0)

#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))

#define MLX5_GET64_PR(typ, p, fld) ({ \
	u64 ___t = MLX5_GET64(typ, p, fld); \
	pr_debug(#fld " = 0x%llx\n", ___t); \
	___t; \
})

#define MLX5_GET16(typ, p, fld) ((be16_to_cpu(*((__be16 *)(p) +\
__mlx5_16_off(typ, fld))) >> __mlx5_16_bit_off(typ, fld)) & \
__mlx5_mask16(typ, fld))

#define MLX5_SET16(typ, p, fld, v) do { \
	u16 _v = v; \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 16);             \
	*((__be16 *)(p) + __mlx5_16_off(typ, fld)) = \
	cpu_to_be16((be16_to_cpu(*((__be16 *)(p) + __mlx5_16_off(typ, fld))) & \
		     (~__mlx5_16_mask(typ, fld))) | (((_v) & __mlx5_mask16(typ, fld)) \
		     << __mlx5_16_bit_off(typ, fld))); \
} while (0)

/* Big endian getters */
#define MLX5_GET64_BE(typ, p, fld) (*((__be64 *)(p) +\
	__mlx5_64_off(typ, fld)))

#define MLX5_GET_BE(type_t, typ, p, fld) ({				  \
		type_t tmp;						  \
		switch (sizeof(tmp)) {					  \
		case sizeof(u8):					  \
			tmp = (__force type_t)MLX5_GET(typ, p, fld);	  \
			break;						  \
		case sizeof(u16):					  \
			tmp = (__force type_t)cpu_to_be16(MLX5_GET(typ, p, fld)); \
			break;						  \
		case sizeof(u32):					  \
			tmp = (__force type_t)cpu_to_be32(MLX5_GET(typ, p, fld)); \
			break;						  \
		case sizeof(u64):					  \
			tmp = (__force type_t)MLX5_GET64_BE(typ, p, fld); \
			break;						  \
			}						  \
		tmp;							  \
		})

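/*
 * Note (illustrative; some_struct/some_field are placeholders): unlike
 * MLX5_GET, which returns CPU byte order, MLX5_GET_BE re-encodes the
 * result in wire (big-endian) order based on sizeof(type_t), e.g.
 *
 *	__be16 v = MLX5_GET_BE(__be16, some_struct, p, some_field);
 */
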
enum mlx5_inline_modes {
	MLX5_INLINE_MODE_NONE,
	MLX5_INLINE_MODE_L2,
	MLX5_INLINE_MODE_IP,
	MLX5_INLINE_MODE_TCP_UDP,
};

enum {
	MLX5_MAX_COMMANDS		= 32,
	MLX5_CMD_DATA_BLOCK_SIZE	= 512,
	MLX5_PCI_CMD_XPORT		= 7,
	MLX5_MKEY_BSF_OCTO_SIZE		= 4,
	MLX5_MAX_PSVS			= 4,
};

enum {
	MLX5_EXTENDED_UD_AV		= 0x80000000,
};

enum {
	MLX5_CQ_STATE_ARMED		= 9,
	MLX5_CQ_STATE_ALWAYS_ARMED	= 0xb,
	MLX5_CQ_STATE_FIRED		= 0xa,
};

enum {
	MLX5_STAT_RATE_OFFSET	= 5,
};

enum {
	MLX5_INLINE_SEG = 0x80000000,
};

enum {
	MLX5_HW_START_PADDING = MLX5_INLINE_SEG,
};

enum {
	MLX5_MIN_PKEY_TABLE_SIZE = 128,
	MLX5_MAX_LOG_PKEY_TABLE  = 5,
};

enum {
	MLX5_MKEY_INBOX_PG_ACCESS = 1 << 31
};

enum {
	MLX5_PFAULT_SUBTYPE_WQE = 0,
	MLX5_PFAULT_SUBTYPE_RDMA = 1,
};

enum wqe_page_fault_type {
	MLX5_WQE_PF_TYPE_RMP = 0,
	MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE = 1,
	MLX5_WQE_PF_TYPE_RESP = 2,
	MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC = 3,
};

enum {
	MLX5_PERM_LOCAL_READ	= 1 << 2,
	MLX5_PERM_LOCAL_WRITE	= 1 << 3,
	MLX5_PERM_REMOTE_READ	= 1 << 4,
	MLX5_PERM_REMOTE_WRITE	= 1 << 5,
	MLX5_PERM_ATOMIC	= 1 << 6,
	MLX5_PERM_UMR_EN	= 1 << 7,
};

enum {
	MLX5_PCIE_CTRL_SMALL_FENCE	= 1 << 0,
	MLX5_PCIE_CTRL_RELAXED_ORDERING	= 1 << 2,
	MLX5_PCIE_CTRL_NO_SNOOP		= 1 << 3,
	MLX5_PCIE_CTRL_TLP_PROCE_EN	= 1 << 6,
	MLX5_PCIE_CTRL_TPH_MASK		= 3 << 4,
};

enum {
	MLX5_EN_RD	= (u64)1,
	MLX5_EN_WR	= (u64)2
};

enum {
	MLX5_ADAPTER_PAGE_SHIFT		= 12,
	MLX5_ADAPTER_PAGE_SIZE		= 1 << MLX5_ADAPTER_PAGE_SHIFT,
};

enum {
	MLX5_BFREGS_PER_UAR		= 4,
	MLX5_MAX_UARS			= 1 << 8,
	MLX5_NON_FP_BFREGS_PER_UAR	= 2,
	MLX5_FP_BFREGS_PER_UAR		= MLX5_BFREGS_PER_UAR -
					  MLX5_NON_FP_BFREGS_PER_UAR,
	MLX5_MAX_BFREGS			= MLX5_MAX_UARS *
					  MLX5_NON_FP_BFREGS_PER_UAR,
	MLX5_UARS_IN_PAGE		= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
	MLX5_NON_FP_BFREGS_IN_PAGE	= MLX5_NON_FP_BFREGS_PER_UAR * MLX5_UARS_IN_PAGE,
	MLX5_MIN_DYN_BFREGS		= 512,
	MLX5_MAX_DYN_BFREGS		= 1024,
};

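/*
 * Worked numbers (illustrative): each 4KB UAR page carries
 * MLX5_BFREGS_PER_UAR = 4 blue-flame registers, two of them
 * non-fast-path, so with a 4KB host PAGE_SIZE MLX5_UARS_IN_PAGE = 1 and
 * MLX5_NON_FP_BFREGS_IN_PAGE = 2; with 64KB pages they become 16 and 32.
 */
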
enum {
	MLX5_MKEY_MASK_LEN		= 1ull << 0,
	MLX5_MKEY_MASK_PAGE_SIZE	= 1ull << 1,
	MLX5_MKEY_MASK_START_ADDR	= 1ull << 6,
	MLX5_MKEY_MASK_PD		= 1ull << 7,
	MLX5_MKEY_MASK_EN_RINVAL	= 1ull << 8,
	MLX5_MKEY_MASK_EN_SIGERR	= 1ull << 9,
	MLX5_MKEY_MASK_BSF_EN		= 1ull << 12,
	MLX5_MKEY_MASK_KEY		= 1ull << 13,
	MLX5_MKEY_MASK_QPN		= 1ull << 14,
	MLX5_MKEY_MASK_LR		= 1ull << 17,
	MLX5_MKEY_MASK_LW		= 1ull << 18,
	MLX5_MKEY_MASK_RR		= 1ull << 19,
	MLX5_MKEY_MASK_RW		= 1ull << 20,
	MLX5_MKEY_MASK_A		= 1ull << 21,
	MLX5_MKEY_MASK_SMALL_FENCE	= 1ull << 23,
	MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE	= 1ull << 25,
	MLX5_MKEY_MASK_FREE			= 1ull << 29,
	MLX5_MKEY_MASK_RELAXED_ORDERING_READ	= 1ull << 47,
};

enum {
	MLX5_UMR_TRANSLATION_OFFSET_EN	= (1 << 4),

	MLX5_UMR_CHECK_NOT_FREE		= (1 << 5),
	MLX5_UMR_CHECK_FREE		= (2 << 5),

	MLX5_UMR_INLINE			= (1 << 7),
};

#define MLX5_UMR_MTT_ALIGNMENT 0x40
#define MLX5_UMR_MTT_MASK      (MLX5_UMR_MTT_ALIGNMENT - 1)
#define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT

#define MLX5_USER_INDEX_LEN (MLX5_FLD_SZ_BYTES(qpc, user_index) * 8)

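/*
 * Note: qpc.user_index is declared as a 24-bit field in mlx5_ifc, so
 * MLX5_USER_INDEX_LEN works out to (24 / 8) * 8 = 24 bits.
 */
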
enum {
	MLX5_EVENT_QUEUE_TYPE_QP = 0,
	MLX5_EVENT_QUEUE_TYPE_RQ = 1,
	MLX5_EVENT_QUEUE_TYPE_SQ = 2,
	MLX5_EVENT_QUEUE_TYPE_DCT = 6,
};

/* mlx5 components can subscribe to any one of these events via the
 * mlx5_eq_notifier_register API.
 */
enum mlx5_event {
	/* Special value to subscribe to any event */
	MLX5_EVENT_TYPE_NOTIFY_ANY	   = 0x0,
	/* HW events enum start: comp events are not subscribable */
	MLX5_EVENT_TYPE_COMP		   = 0x0,
	/* HW Async events enum start: subscribable events */
	MLX5_EVENT_TYPE_PATH_MIG	   = 0x01,
	MLX5_EVENT_TYPE_COMM_EST	   = 0x02,
	MLX5_EVENT_TYPE_SQ_DRAINED	   = 0x03,
	MLX5_EVENT_TYPE_SRQ_LAST_WQE	   = 0x13,
	MLX5_EVENT_TYPE_SRQ_RQ_LIMIT	   = 0x14,

	MLX5_EVENT_TYPE_CQ_ERROR	   = 0x04,
	MLX5_EVENT_TYPE_WQ_CATAS_ERROR	   = 0x05,
	MLX5_EVENT_TYPE_PATH_MIG_FAILED	   = 0x07,
	MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
	MLX5_EVENT_TYPE_WQ_ACCESS_ERROR	   = 0x11,
	MLX5_EVENT_TYPE_SRQ_CATAS_ERROR	   = 0x12,

	MLX5_EVENT_TYPE_INTERNAL_ERROR	   = 0x08,
	MLX5_EVENT_TYPE_PORT_CHANGE	   = 0x09,
	MLX5_EVENT_TYPE_GPIO_EVENT	   = 0x15,
	MLX5_EVENT_TYPE_PORT_MODULE_EVENT  = 0x16,
	MLX5_EVENT_TYPE_TEMP_WARN_EVENT    = 0x17,
	MLX5_EVENT_TYPE_XRQ_ERROR	   = 0x18,
	MLX5_EVENT_TYPE_REMOTE_CONFIG	   = 0x19,
	MLX5_EVENT_TYPE_GENERAL_EVENT	   = 0x22,
	MLX5_EVENT_TYPE_MONITOR_COUNTER    = 0x24,
	MLX5_EVENT_TYPE_PPS_EVENT          = 0x25,

	MLX5_EVENT_TYPE_DB_BF_CONGESTION   = 0x1a,
	MLX5_EVENT_TYPE_STALL_EVENT	   = 0x1b,

	MLX5_EVENT_TYPE_CMD		   = 0x0a,
	MLX5_EVENT_TYPE_PAGE_REQUEST	   = 0xb,

	MLX5_EVENT_TYPE_PAGE_FAULT	   = 0xc,
	MLX5_EVENT_TYPE_NIC_VPORT_CHANGE   = 0xd,

	MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED = 0xe,
	MLX5_EVENT_TYPE_VHCA_STATE_CHANGE = 0xf,

	MLX5_EVENT_TYPE_DCT_DRAINED        = 0x1c,
	MLX5_EVENT_TYPE_DCT_KEY_VIOLATION  = 0x1d,

	MLX5_EVENT_TYPE_FPGA_ERROR         = 0x20,
	MLX5_EVENT_TYPE_FPGA_QP_ERROR      = 0x21,

	MLX5_EVENT_TYPE_DEVICE_TRACER      = 0x26,

	MLX5_EVENT_TYPE_MAX                = 0x100,
};

enum mlx5_driver_event {
	MLX5_DRIVER_EVENT_TYPE_TRAP = 0,
};

enum {
	MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE = 0x0,
	MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE = 0x1,
};

enum {
	MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT = 0x1,
	MLX5_GENERAL_SUBTYPE_PCI_POWER_CHANGE_EVENT = 0x5,
	MLX5_GENERAL_SUBTYPE_FW_LIVE_PATCH_EVENT = 0x7,
	MLX5_GENERAL_SUBTYPE_PCI_SYNC_FOR_FW_UPDATE_EVENT = 0x8,
};

enum {
	MLX5_PORT_CHANGE_SUBTYPE_DOWN		= 1,
	MLX5_PORT_CHANGE_SUBTYPE_ACTIVE		= 4,
	MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED	= 5,
	MLX5_PORT_CHANGE_SUBTYPE_LID		= 6,
	MLX5_PORT_CHANGE_SUBTYPE_PKEY		= 7,
	MLX5_PORT_CHANGE_SUBTYPE_GUID		= 8,
	MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG	= 9,
};

enum {
	MLX5_DEV_CAP_FLAG_XRC		= 1LL <<  3,
	MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR	= 1LL <<  8,
	MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR	= 1LL <<  9,
	MLX5_DEV_CAP_FLAG_APM		= 1LL << 17,
	MLX5_DEV_CAP_FLAG_ATOMIC	= 1LL << 18,
	MLX5_DEV_CAP_FLAG_BLOCK_MCAST	= 1LL << 23,
	MLX5_DEV_CAP_FLAG_ON_DMND_PG	= 1LL << 24,
	MLX5_DEV_CAP_FLAG_CQ_MODER	= 1LL << 29,
	MLX5_DEV_CAP_FLAG_RESIZE_CQ	= 1LL << 30,
	MLX5_DEV_CAP_FLAG_DCT		= 1LL << 37,
	MLX5_DEV_CAP_FLAG_SIG_HAND_OVER	= 1LL << 40,
	MLX5_DEV_CAP_FLAG_CMDIF_CSUM	= 3LL << 46,
};

enum {
	MLX5_ROCE_VERSION_1		= 0,
	MLX5_ROCE_VERSION_2		= 2,
};

enum {
	MLX5_ROCE_VERSION_1_CAP		= 1 << MLX5_ROCE_VERSION_1,
	MLX5_ROCE_VERSION_2_CAP		= 1 << MLX5_ROCE_VERSION_2,
};

enum {
	MLX5_ROCE_L3_TYPE_IPV4		= 0,
	MLX5_ROCE_L3_TYPE_IPV6		= 1,
};

enum {
	MLX5_ROCE_L3_TYPE_IPV4_CAP	= 1 << 1,
	MLX5_ROCE_L3_TYPE_IPV6_CAP	= 1 << 2,
};

enum {
	MLX5_OPCODE_NOP			= 0x00,
	MLX5_OPCODE_SEND_INVAL		= 0x01,
	MLX5_OPCODE_RDMA_WRITE		= 0x08,
	MLX5_OPCODE_RDMA_WRITE_IMM	= 0x09,
	MLX5_OPCODE_SEND		= 0x0a,
	MLX5_OPCODE_SEND_IMM		= 0x0b,
	MLX5_OPCODE_LSO			= 0x0e,
	MLX5_OPCODE_RDMA_READ		= 0x10,
	MLX5_OPCODE_ATOMIC_CS		= 0x11,
	MLX5_OPCODE_ATOMIC_FA		= 0x12,
	MLX5_OPCODE_ATOMIC_MASKED_CS	= 0x14,
	MLX5_OPCODE_ATOMIC_MASKED_FA	= 0x15,
	MLX5_OPCODE_BIND_MW		= 0x18,
	MLX5_OPCODE_CONFIG_CMD		= 0x1f,
	MLX5_OPCODE_ENHANCED_MPSW	= 0x29,

	MLX5_RECV_OPCODE_RDMA_WRITE_IMM	= 0x00,
	MLX5_RECV_OPCODE_SEND		= 0x01,
	MLX5_RECV_OPCODE_SEND_IMM	= 0x02,
	MLX5_RECV_OPCODE_SEND_INVAL	= 0x03,

	MLX5_CQE_OPCODE_ERROR		= 0x1e,
	MLX5_CQE_OPCODE_RESIZE		= 0x16,

	MLX5_OPCODE_SET_PSV		= 0x20,
	MLX5_OPCODE_GET_PSV		= 0x21,
	MLX5_OPCODE_CHECK_PSV		= 0x22,
	MLX5_OPCODE_DUMP		= 0x23,
	MLX5_OPCODE_RGET_PSV		= 0x26,
	MLX5_OPCODE_RCHECK_PSV		= 0x27,

	MLX5_OPCODE_UMR			= 0x25,

};

enum {
	MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x1,
	MLX5_OPC_MOD_TLS_TIR_STATIC_PARAMS = 0x2,
};

enum {
	MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x1,
	MLX5_OPC_MOD_TLS_TIR_PROGRESS_PARAMS = 0x2,
};

struct mlx5_wqe_tls_static_params_seg {
	u8     ctx[MLX5_ST_SZ_BYTES(tls_static_params)];
};

struct mlx5_wqe_tls_progress_params_seg {
	__be32 tis_tir_num;
	u8     ctx[MLX5_ST_SZ_BYTES(tls_progress_params)];
};

enum {
	MLX5_SET_PORT_RESET_QKEY	= 0,
	MLX5_SET_PORT_GUID0		= 16,
	MLX5_SET_PORT_NODE_GUID		= 17,
	MLX5_SET_PORT_SYS_GUID		= 18,
	MLX5_SET_PORT_GID_TABLE		= 19,
	MLX5_SET_PORT_PKEY_TABLE	= 20,
};

enum {
	MLX5_BW_NO_LIMIT   = 0,
	MLX5_100_MBPS_UNIT = 3,
	MLX5_GBPS_UNIT	   = 4,
};

enum {
	MLX5_MAX_PAGE_SHIFT		= 31
};

enum {
	MLX5_CAP_OFF_CMDIF_CSUM		= 46,
};

enum {
	/*
	 * Max wqe size for rdma read is 512 bytes, so this
	 * limits our max_sge_rd as the wqe needs to fit:
	 * - ctrl segment (16 bytes)
	 * - rdma segment (16 bytes)
	 * - scatter elements (16 bytes each)
	 */
	MLX5_MAX_SGE_RD	= (512 - 16 - 16) / 16
};

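/* With the sizes above, this evaluates to (512 - 16 - 16) / 16 = 30. */
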
enum mlx5_odp_transport_cap_bits {
	MLX5_ODP_SUPPORT_SEND	 = 1 << 31,
	MLX5_ODP_SUPPORT_RECV	 = 1 << 30,
	MLX5_ODP_SUPPORT_WRITE	 = 1 << 29,
	MLX5_ODP_SUPPORT_READ	 = 1 << 28,
};

struct mlx5_odp_caps {
	char reserved[0x10];
	struct {
		__be32			rc_odp_caps;
		__be32			uc_odp_caps;
		__be32			ud_odp_caps;
	} per_transport_caps;
	char reserved2[0xe4];
};

struct mlx5_cmd_layout {
	u8		type;
	u8		rsvd0[3];
	__be32		inlen;
	__be64		in_ptr;
	__be32		in[4];
	__be32		out[4];
	__be64		out_ptr;
	__be32		outlen;
	u8		token;
	u8		sig;
	u8		rsvd1;
	u8		status_own;
};

enum mlx5_fatal_assert_bit_offsets {
	MLX5_RFR_OFFSET = 31,
};

struct health_buffer {
	__be32		assert_var[5];
	__be32		rsvd0[3];
	__be32		assert_exit_ptr;
	__be32		assert_callra;
	__be32		rsvd1[2];
	__be32		fw_ver;
	__be32		hw_id;
	__be32		rfr;
	u8		irisc_index;
	u8		synd;
	__be16		ext_synd;
};

enum mlx5_initializing_bit_offsets {
	MLX5_FW_RESET_SUPPORTED_OFFSET = 30,
};

enum mlx5_cmd_addr_l_sz_offset {
	MLX5_NIC_IFC_OFFSET = 8,
};

struct mlx5_init_seg {
	__be32			fw_rev;
	__be32			cmdif_rev_fw_sub;
	__be32			rsvd0[2];
	__be32			cmdq_addr_h;
	__be32			cmdq_addr_l_sz;
	__be32			cmd_dbell;
	__be32			rsvd1[120];
	__be32			initializing;
	struct health_buffer	health;
	__be32			rsvd2[880];
	__be32			internal_timer_h;
	__be32			internal_timer_l;
	__be32			rsvd3[2];
	__be32			health_counter;
	__be32			rsvd4[11];
	__be32			real_time_h;
	__be32			real_time_l;
	__be32			rsvd5[1006];
	__be64			ieee1588_clk;
	__be32			ieee1588_clk_type;
	__be32			clr_intx;
};

struct mlx5_eqe_comp {
	__be32	reserved[6];
	__be32	cqn;
};

struct mlx5_eqe_qp_srq {
	__be32	reserved1[5];
	u8	type;
	u8	reserved2[3];
	__be32	qp_srq_n;
};

struct mlx5_eqe_cq_err {
	__be32	cqn;
	u8	reserved1[7];
	u8	syndrome;
};

struct mlx5_eqe_xrq_err {
	__be32	reserved1[5];
	__be32	type_xrqn;
	__be32	reserved2;
};

struct mlx5_eqe_port_state {
	u8	reserved0[8];
	u8	port;
};

struct mlx5_eqe_gpio {
	__be32	reserved0[2];
	__be64	gpio_event;
};

struct mlx5_eqe_congestion {
	u8	type;
	u8	rsvd0;
	u8	congestion_level;
};

struct mlx5_eqe_stall_vl {
	u8	rsvd0[3];
	u8	port_vl;
};

struct mlx5_eqe_cmd {
	__be32	vector;
	__be32	rsvd[6];
};

struct mlx5_eqe_page_req {
	__be16		ec_function;
	__be16		func_id;
	__be32		num_pages;
	__be32		rsvd1[5];
};

struct mlx5_eqe_page_fault {
	__be32 bytes_committed;
	union {
		struct {
			u16     reserved1;
			__be16  wqe_index;
			u16	reserved2;
			__be16  packet_length;
			__be32  token;
			u8	reserved4[8];
			__be32  pftype_wq;
		} __packed wqe;
		struct {
			__be32  r_key;
			u16	reserved1;
			__be16  packet_length;
			__be32  rdma_op_len;
			__be64  rdma_va;
			__be32  pftype_token;
		} __packed rdma;
	} __packed;
} __packed;

struct mlx5_eqe_vport_change {
	u8		rsvd0[2];
	__be16		vport_num;
	__be32		rsvd1[6];
} __packed;

struct mlx5_eqe_port_module {
	u8        reserved_at_0[1];
	u8        module;
	u8        reserved_at_2[1];
	u8        module_status;
	u8        reserved_at_4[2];
	u8        error_type;
} __packed;

struct mlx5_eqe_pps {
	u8		rsvd0[3];
	u8		pin;
	u8		rsvd1[4];
	union {
		struct {
			__be32		time_sec;
			__be32		time_nsec;
		};
		struct {
			__be64		time_stamp;
		};
	};
	u8		rsvd2[12];
} __packed;

struct mlx5_eqe_dct {
	__be32  reserved[6];
	__be32  dctn;
};

struct mlx5_eqe_temp_warning {
	__be64 sensor_warning_msb;
	__be64 sensor_warning_lsb;
} __packed;

#define SYNC_RST_STATE_MASK    0xf

enum sync_rst_state_type {
	MLX5_SYNC_RST_STATE_RESET_REQUEST	= 0x0,
	MLX5_SYNC_RST_STATE_RESET_NOW		= 0x1,
	MLX5_SYNC_RST_STATE_RESET_ABORT		= 0x2,
};

struct mlx5_eqe_sync_fw_update {
	u8 reserved_at_0[3];
	u8 sync_rst_state;
};

struct mlx5_eqe_vhca_state {
	__be16 ec_function;
	__be16 function_id;
} __packed;

union ev_data {
	__be32				raw[7];
	struct mlx5_eqe_cmd		cmd;
	struct mlx5_eqe_comp		comp;
	struct mlx5_eqe_qp_srq		qp_srq;
	struct mlx5_eqe_cq_err		cq_err;
	struct mlx5_eqe_port_state	port;
	struct mlx5_eqe_gpio		gpio;
	struct mlx5_eqe_congestion	cong;
	struct mlx5_eqe_stall_vl	stall_vl;
	struct mlx5_eqe_page_req	req_pages;
	struct mlx5_eqe_page_fault	page_fault;
	struct mlx5_eqe_vport_change	vport_change;
	struct mlx5_eqe_port_module	port_module;
	struct mlx5_eqe_pps		pps;
	struct mlx5_eqe_dct             dct;
	struct mlx5_eqe_temp_warning	temp_warning;
	struct mlx5_eqe_xrq_err		xrq_err;
	struct mlx5_eqe_sync_fw_update	sync_fw_update;
	struct mlx5_eqe_vhca_state	vhca_state;
} __packed;

struct mlx5_eqe {
	u8		rsvd0;
	u8		type;
	u8		rsvd1;
	u8		sub_type;
	__be32		rsvd2[7];
	union ev_data	data;
	__be16		rsvd3;
	u8		signature;
	u8		owner;
} __packed;

struct mlx5_cmd_prot_block {
	u8		data[MLX5_CMD_DATA_BLOCK_SIZE];
	u8		rsvd0[48];
	__be64		next;
	__be32		block_num;
	u8		rsvd1;
	u8		token;
	u8		ctrl_sig;
	u8		sig;
};

enum {
	MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5,
};

struct mlx5_err_cqe {
	u8	rsvd0[32];
	__be32	srqn;
	u8	rsvd1[18];
	u8	vendor_err_synd;
	u8	syndrome;
	__be32	s_wqe_opcode_qpn;
	__be16	wqe_counter;
	u8	signature;
	u8	op_own;
};

struct mlx5_cqe64 {
	u8		tls_outer_l3_tunneled;
	u8		rsvd0;
	__be16		wqe_id;
	u8		lro_tcppsh_abort_dupack;
	u8		lro_min_ttl;
	__be16		lro_tcp_win;
	__be32		lro_ack_seq_num;
	__be32		rss_hash_result;
	u8		rss_hash_type;
	u8		ml_path;
	u8		rsvd20[2];
	__be16		check_sum;
	__be16		slid;
	__be32		flags_rqpn;
	u8		hds_ip_ext;
	u8		l4_l3_hdr_type;
	__be16		vlan_info;
	__be32		srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
	union {
		__be32 immediate;
		__be32 inval_rkey;
		__be32 pkey;
		__be32 ft_metadata;
	};
	u8		rsvd40[4];
	__be32		byte_cnt;
	__be32		timestamp_h;
	__be32		timestamp_l;
	__be32		sop_drop_qpn;
	__be16		wqe_counter;
	u8		signature;
	u8		op_own;
};

struct mlx5_mini_cqe8 {
	union {
		__be32 rx_hash_result;
		struct {
			__be16 checksum;
			__be16 stridx;
		};
		struct {
			__be16 wqe_counter;
			u8  s_wqe_opcode;
			u8  reserved;
		} s_wqe_info;
	};
	__be32 byte_cnt;
};

enum {
	MLX5_NO_INLINE_DATA,
	MLX5_INLINE_DATA32_SEG,
	MLX5_INLINE_DATA64_SEG,
	MLX5_COMPRESSED,
};

enum {
	MLX5_CQE_FORMAT_CSUM = 0x1,
	MLX5_CQE_FORMAT_CSUM_STRIDX = 0x3,
};

#define MLX5_MINI_CQE_ARRAY_SIZE 8

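/*
 * Layout of cqe->op_own as implied by the accessors below (descriptive
 * note added here, not from the original source): bits [7:4] carry the
 * CQE opcode, bits [3:2] the mini-CQE format, and bit 0 the ownership
 * bit consulted while polling the CQ.
 */
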
static inline u8 mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
{
	return (cqe->op_own >> 2) & 0x3;
}

static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe)
{
	return cqe->op_own >> 4;
}

static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
{
	return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
}

static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
{
	return (cqe->l4_l3_hdr_type >> 4) & 0x7;
}

static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe)
{
	return (cqe->l4_l3_hdr_type >> 2) & 0x3;
}

static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe)
{
	return cqe->tls_outer_l3_tunneled & 0x1;
}

static inline u8 get_cqe_tls_offload(struct mlx5_cqe64 *cqe)
{
	return (cqe->tls_outer_l3_tunneled >> 3) & 0x3;
}

static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe)
{
	return cqe->l4_l3_hdr_type & 0x1;
}

static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
{
	u32 hi, lo;

	hi = be32_to_cpu(cqe->timestamp_h);
	lo = be32_to_cpu(cqe->timestamp_l);

	return (u64)lo | ((u64)hi << 32);
}

static inline u16 get_cqe_flow_tag(struct mlx5_cqe64 *cqe)
{
	return be32_to_cpu(cqe->sop_drop_qpn) & 0xFFF;
}

#define MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE	3
#define MLX5_MPWQE_LOG_NUM_STRIDES_BASE		9
#define MLX5_MPWQE_LOG_NUM_STRIDES_MAX		16
#define MLX5_MPWQE_LOG_STRIDE_SZ_BASE		6
#define MLX5_MPWQE_LOG_STRIDE_SZ_MAX		13

struct mpwrq_cqe_bc {
	__be16	filler_consumed_strides;
	__be16	byte_cnt;
};

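/*
 * filler_consumed_strides packs two values, as the helpers below show:
 * bit 15 marks a filler CQE and bits [14:0] count the strides consumed
 * from the multi-packet WQE.
 */
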
static inline u16 mpwrq_get_cqe_byte_cnt(struct mlx5_cqe64 *cqe)
{
	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;

	return be16_to_cpu(bc->byte_cnt);
}

static inline u16 mpwrq_get_cqe_bc_consumed_strides(struct mpwrq_cqe_bc *bc)
{
	return 0x7fff & be16_to_cpu(bc->filler_consumed_strides);
}

static inline u16 mpwrq_get_cqe_consumed_strides(struct mlx5_cqe64 *cqe)
{
	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;

	return mpwrq_get_cqe_bc_consumed_strides(bc);
}

static inline bool mpwrq_is_filler_cqe(struct mlx5_cqe64 *cqe)
{
	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;

	return 0x8000 & be16_to_cpu(bc->filler_consumed_strides);
}

static inline u16 mpwrq_get_cqe_stride_index(struct mlx5_cqe64 *cqe)
{
	return be16_to_cpu(cqe->wqe_counter);
}

enum {
	CQE_L4_HDR_TYPE_NONE			= 0x0,
	CQE_L4_HDR_TYPE_TCP_NO_ACK		= 0x1,
	CQE_L4_HDR_TYPE_UDP			= 0x2,
	CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA		= 0x3,
	CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA	= 0x4,
};

enum {
	CQE_RSS_HTYPE_IP	= 0x3 << 2,
	/* cqe->rss_hash_type[3:2] - IP destination selected for hash
	 * (00 = none, 01 = IPv4, 10 = IPv6, 11 = Reserved)
	 */
	CQE_RSS_HTYPE_L4	= 0x3 << 6,
	/* cqe->rss_hash_type[7:6] - L4 destination selected for hash
	 * (00 = none, 01 = TCP, 10 = UDP, 11 = IPSEC.SPI)
	 */
};

enum {
	MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH	= 0x0,
	MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6	= 0x1,
	MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4	= 0x2,
};

enum {
	CQE_L2_OK	= 1 << 0,
	CQE_L3_OK	= 1 << 1,
	CQE_L4_OK	= 1 << 2,
};

enum {
	CQE_TLS_OFFLOAD_NOT_DECRYPTED		= 0x0,
	CQE_TLS_OFFLOAD_DECRYPTED		= 0x1,
	CQE_TLS_OFFLOAD_RESYNC			= 0x2,
	CQE_TLS_OFFLOAD_ERROR			= 0x3,
};

struct mlx5_sig_err_cqe {
	u8		rsvd0[16];
	__be32		expected_trans_sig;
	__be32		actual_trans_sig;
	__be32		expected_reftag;
	__be32		actual_reftag;
	__be16		syndrome;
	u8		rsvd22[2];
	__be32		mkey;
	__be64		err_offset;
	u8		rsvd30[8];
	__be32		qpn;
	u8		rsvd38[2];
	u8		signature;
	u8		op_own;
};

struct mlx5_wqe_srq_next_seg {
	u8			rsvd0[2];
	__be16			next_wqe_index;
	u8			signature;
	u8			rsvd1[11];
};

union mlx5_ext_cqe {
	struct ib_grh	grh;
	u8		inl[64];
};

struct mlx5_cqe128 {
	union mlx5_ext_cqe	inl_grh;
	struct mlx5_cqe64	cqe64;
};

enum {
	MLX5_MKEY_STATUS_FREE = 1 << 6,
};

enum {
	MLX5_MKEY_REMOTE_INVAL	= 1 << 24,
	MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
	MLX5_MKEY_BSF_EN	= 1 << 30,
};

struct mlx5_mkey_seg {
	/* This is a two-bit field occupying bits 31-30.
	 * Bit 31 is always 0;
	 * bit 30 is zero for regular MRs and 1 (i.e. free) for UMRs that
	 * do not have translation.
	 */
	u8		status;
	u8		pcie_control;
	u8		flags;
	u8		version;
	__be32		qpn_mkey7_0;
	u8		rsvd1[4];
	__be32		flags_pd;
	__be64		start_addr;
	__be64		len;
	__be32		bsfs_octo_size;
	u8		rsvd2[16];
	__be32		xlt_oct_size;
	u8		rsvd3[3];
	u8		log2_page_size;
	u8		rsvd4[4];
};

#define MLX5_ATTR_EXTENDED_PORT_INFO	cpu_to_be16(0xff90)

enum {
	MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO	= 1 <<  0
};

enum {
	VPORT_STATE_DOWN		= 0x0,
	VPORT_STATE_UP			= 0x1,
};

enum {
	MLX5_VPORT_ADMIN_STATE_DOWN  = 0x0,
	MLX5_VPORT_ADMIN_STATE_UP    = 0x1,
	MLX5_VPORT_ADMIN_STATE_AUTO  = 0x2,
};

enum {
	MLX5_L3_PROT_TYPE_IPV4		= 0,
	MLX5_L3_PROT_TYPE_IPV6		= 1,
};

enum {
	MLX5_L4_PROT_TYPE_TCP		= 0,
	MLX5_L4_PROT_TYPE_UDP		= 1,
};

enum {
	MLX5_HASH_FIELD_SEL_SRC_IP	= 1 << 0,
	MLX5_HASH_FIELD_SEL_DST_IP	= 1 << 1,
	MLX5_HASH_FIELD_SEL_L4_SPORT	= 1 << 2,
	MLX5_HASH_FIELD_SEL_L4_DPORT	= 1 << 3,
	MLX5_HASH_FIELD_SEL_IPSEC_SPI	= 1 << 4,
};

enum {
	MLX5_MATCH_OUTER_HEADERS	= 1 << 0,
	MLX5_MATCH_MISC_PARAMETERS	= 1 << 1,
	MLX5_MATCH_INNER_HEADERS	= 1 << 2,
	MLX5_MATCH_MISC_PARAMETERS_2	= 1 << 3,
	MLX5_MATCH_MISC_PARAMETERS_3	= 1 << 4,
	MLX5_MATCH_MISC_PARAMETERS_4	= 1 << 5,
};

enum {
	MLX5_FLOW_TABLE_TYPE_NIC_RCV	= 0,
	MLX5_FLOW_TABLE_TYPE_ESWITCH	= 4,
};

enum {
	MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT	= 0,
	MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE	= 1,
	MLX5_FLOW_CONTEXT_DEST_TYPE_TIR		= 2,
};

enum mlx5_list_type {
	MLX5_NVPRT_LIST_TYPE_UC   = 0x0,
	MLX5_NVPRT_LIST_TYPE_MC   = 0x1,
	MLX5_NVPRT_LIST_TYPE_VLAN = 0x2,
};

enum {
	MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
	MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM    = 0x1,
};

enum mlx5_wol_mode {
	MLX5_WOL_DISABLE        = 0,
	MLX5_WOL_SECURED_MAGIC  = 1 << 1,
	MLX5_WOL_MAGIC          = 1 << 2,
	MLX5_WOL_ARP            = 1 << 3,
	MLX5_WOL_BROADCAST      = 1 << 4,
	MLX5_WOL_MULTICAST      = 1 << 5,
	MLX5_WOL_UNICAST        = 1 << 6,
	MLX5_WOL_PHY_ACTIVITY   = 1 << 7,
};

enum mlx5_mpls_supported_fields {
	MLX5_FIELD_SUPPORT_MPLS_LABEL = 1 << 0,
	MLX5_FIELD_SUPPORT_MPLS_EXP   = 1 << 1,
	MLX5_FIELD_SUPPORT_MPLS_S_BOS = 1 << 2,
	MLX5_FIELD_SUPPORT_MPLS_TTL   = 1 << 3
};

enum mlx5_flex_parser_protos {
	MLX5_FLEX_PROTO_GENEVE	      = 1 << 3,
	MLX5_FLEX_PROTO_CW_MPLS_GRE   = 1 << 4,
	MLX5_FLEX_PROTO_CW_MPLS_UDP   = 1 << 5,
	MLX5_FLEX_PROTO_ICMP	      = 1 << 8,
	MLX5_FLEX_PROTO_ICMPV6	      = 1 << 9,
};

/* MLX5 DEV CAPs */

/* TODO: EAT.ME */
enum mlx5_cap_mode {
	HCA_CAP_OPMOD_GET_MAX	= 0,
	HCA_CAP_OPMOD_GET_CUR	= 1,
};

enum mlx5_cap_type {
	MLX5_CAP_GENERAL = 0,
	MLX5_CAP_ETHERNET_OFFLOADS,
	MLX5_CAP_ODP,
	MLX5_CAP_ATOMIC,
	MLX5_CAP_ROCE,
	MLX5_CAP_IPOIB_OFFLOADS,
	MLX5_CAP_IPOIB_ENHANCED_OFFLOADS,
	MLX5_CAP_FLOW_TABLE,
	MLX5_CAP_ESWITCH_FLOW_TABLE,
	MLX5_CAP_ESWITCH,
	MLX5_CAP_RESERVED,
	MLX5_CAP_VECTOR_CALC,
	MLX5_CAP_QOS,
	MLX5_CAP_DEBUG,
	MLX5_CAP_RESERVED_14,
	MLX5_CAP_DEV_MEM,
	MLX5_CAP_RESERVED_16,
	MLX5_CAP_TLS,
	MLX5_CAP_VDPA_EMULATION = 0x13,
	MLX5_CAP_DEV_EVENT = 0x14,
	MLX5_CAP_IPSEC,
	/* NUM OF CAP Types */
	MLX5_CAP_NUM
};

enum mlx5_pcam_reg_groups {
	MLX5_PCAM_REGS_5000_TO_507F                 = 0x0,
};

enum mlx5_pcam_feature_groups {
	MLX5_PCAM_FEATURE_ENHANCED_FEATURES         = 0x0,
};

enum mlx5_mcam_reg_groups {
	MLX5_MCAM_REGS_FIRST_128                    = 0x0,
	MLX5_MCAM_REGS_0x9080_0x90FF                = 0x1,
	MLX5_MCAM_REGS_0x9100_0x917F                = 0x2,
	MLX5_MCAM_REGS_NUM                          = 0x3,
};

enum mlx5_mcam_feature_groups {
	MLX5_MCAM_FEATURE_ENHANCED_FEATURES         = 0x0,
};

enum mlx5_qcam_reg_groups {
	MLX5_QCAM_REGS_FIRST_128                    = 0x0,
};

enum mlx5_qcam_feature_groups {
	MLX5_QCAM_FEATURE_ENHANCED_FEATURES         = 0x0,
};

/* GET Dev Caps macros */
#define MLX5_CAP_GEN(mdev, cap) \
	MLX5_GET(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)

#define MLX5_CAP_GEN_64(mdev, cap) \
	MLX5_GET64(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)

#define MLX5_CAP_GEN_MAX(mdev, cap) \
	MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap)

#define MLX5_CAP_ETH(mdev, cap) \
	MLX5_GET(per_protocol_networking_offload_caps,\
		 mdev->caps.hca_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)

#define MLX5_CAP_ETH_MAX(mdev, cap) \
	MLX5_GET(per_protocol_networking_offload_caps,\
		 mdev->caps.hca_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)

#define MLX5_CAP_IPOIB_ENHANCED(mdev, cap) \
	MLX5_GET(per_protocol_networking_offload_caps,\
		 mdev->caps.hca_cur[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS], cap)

#define MLX5_CAP_ROCE(mdev, cap) \
	MLX5_GET(roce_cap, mdev->caps.hca_cur[MLX5_CAP_ROCE], cap)

#define MLX5_CAP_ROCE_MAX(mdev, cap) \
	MLX5_GET(roce_cap, mdev->caps.hca_max[MLX5_CAP_ROCE], cap)

#define MLX5_CAP_ATOMIC(mdev, cap) \
	MLX5_GET(atomic_caps, mdev->caps.hca_cur[MLX5_CAP_ATOMIC], cap)

#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
	MLX5_GET(atomic_caps, mdev->caps.hca_max[MLX5_CAP_ATOMIC], cap)

#define MLX5_CAP_FLOWTABLE(mdev, cap) \
	MLX5_GET(flow_table_nic_cap, mdev->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap)

#define MLX5_CAP64_FLOWTABLE(mdev, cap) \
	MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap)

#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
	MLX5_GET(flow_table_nic_cap, mdev->caps.hca_max[MLX5_CAP_FLOW_TABLE], cap)

#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)

#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)

#define MLX5_CAP_FLOWTABLE_NIC_TX(mdev, cap) \
		MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit.cap)

#define MLX5_CAP_FLOWTABLE_NIC_TX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit.cap)

#define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap)

#define MLX5_CAP_FLOWTABLE_SNIFFER_RX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_sniffer.cap)

#define MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_sniffer.cap)

#define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap)

#define MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_rdma.cap)

#define MLX5_CAP_FLOWTABLE_RDMA_RX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_rdma.cap)

#define MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_rdma.cap)

#define MLX5_CAP_FLOWTABLE_RDMA_TX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_rdma.cap)

#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
	MLX5_GET(flow_table_eswitch_cap, \
		 mdev->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)

#define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
	MLX5_GET(flow_table_eswitch_cap, \
		 mdev->caps.hca_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)

#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)

#define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)

#define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap)

#define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap)

#define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap)

#define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap)

#define MLX5_CAP_ESW(mdev, cap) \
	MLX5_GET(e_switch_cap, \
		 mdev->caps.hca_cur[MLX5_CAP_ESWITCH], cap)

#define MLX5_CAP64_ESW_FLOWTABLE(mdev, cap) \
	MLX5_GET64(flow_table_eswitch_cap, \
		(mdev)->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)

#define MLX5_CAP_ESW_MAX(mdev, cap) \
	MLX5_GET(e_switch_cap, \
		 mdev->caps.hca_max[MLX5_CAP_ESWITCH], cap)

#define MLX5_CAP_ODP(mdev, cap)\
	MLX5_GET(odp_cap, mdev->caps.hca_cur[MLX5_CAP_ODP], cap)

#define MLX5_CAP_ODP_MAX(mdev, cap)\
	MLX5_GET(odp_cap, mdev->caps.hca_max[MLX5_CAP_ODP], cap)

#define MLX5_CAP_VECTOR_CALC(mdev, cap) \
	MLX5_GET(vector_calc_cap, \
		 mdev->caps.hca_cur[MLX5_CAP_VECTOR_CALC], cap)

#define MLX5_CAP_QOS(mdev, cap)\
	MLX5_GET(qos_cap, mdev->caps.hca_cur[MLX5_CAP_QOS], cap)

#define MLX5_CAP_DEBUG(mdev, cap)\
	MLX5_GET(debug_cap, mdev->caps.hca_cur[MLX5_CAP_DEBUG], cap)

#define MLX5_CAP_PCAM_FEATURE(mdev, fld) \
	MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld)

#define MLX5_CAP_PCAM_REG(mdev, reg) \
	MLX5_GET(pcam_reg, (mdev)->caps.pcam, port_access_reg_cap_mask.regs_5000_to_507f.reg)

#define MLX5_CAP_MCAM_REG(mdev, reg) \
	MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_FIRST_128], \
		 mng_access_reg_cap_mask.access_regs.reg)

#define MLX5_CAP_MCAM_REG1(mdev, reg) \
	MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9080_0x90FF], \
		 mng_access_reg_cap_mask.access_regs1.reg)

#define MLX5_CAP_MCAM_REG2(mdev, reg) \
	MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9100_0x917F], \
		 mng_access_reg_cap_mask.access_regs2.reg)

#define MLX5_CAP_MCAM_FEATURE(mdev, fld) \
	MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)

#define MLX5_CAP_QCAM_REG(mdev, fld) \
	MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_access_reg_cap_mask.reg_cap.fld)

#define MLX5_CAP_QCAM_FEATURE(mdev, fld) \
	MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_feature_cap_mask.feature_cap.fld)

#define MLX5_CAP_FPGA(mdev, cap) \
	MLX5_GET(fpga_cap, (mdev)->caps.fpga, cap)

#define MLX5_CAP64_FPGA(mdev, cap) \
	MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap)

#define MLX5_CAP_DEV_MEM(mdev, cap)\
	MLX5_GET(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap)

#define MLX5_CAP64_DEV_MEM(mdev, cap)\
	MLX5_GET64(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap)

#define MLX5_CAP_TLS(mdev, cap) \
	MLX5_GET(tls_cap, (mdev)->caps.hca_cur[MLX5_CAP_TLS], cap)

#define MLX5_CAP_DEV_EVENT(mdev, cap)\
	MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca_cur[MLX5_CAP_DEV_EVENT], cap)

#define MLX5_CAP_DEV_VDPA_EMULATION(mdev, cap)\
	MLX5_GET(virtio_emulation_cap, \
		(mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)

#define MLX5_CAP64_DEV_VDPA_EMULATION(mdev, cap)\
	MLX5_GET64(virtio_emulation_cap, \
		(mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)

#define MLX5_CAP_IPSEC(mdev, cap)\
	MLX5_GET(ipsec_cap, (mdev)->caps.hca_cur[MLX5_CAP_IPSEC], cap)

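/*
 * Usage sketch (illustrative): capability queries read directly from
 * the cached capability mailboxes, e.g.
 *
 *	if (MLX5_CAP_GEN(mdev, cqe_compression))
 *		... CQE compression is supported ...
 *
 * where cqe_compression is a bit in mlx5_ifc_cmd_hca_cap_bits.
 */
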
enum {
	MLX5_CMD_STAT_OK			= 0x0,
	MLX5_CMD_STAT_INT_ERR			= 0x1,
	MLX5_CMD_STAT_BAD_OP_ERR		= 0x2,
	MLX5_CMD_STAT_BAD_PARAM_ERR		= 0x3,
	MLX5_CMD_STAT_BAD_SYS_STATE_ERR		= 0x4,
	MLX5_CMD_STAT_BAD_RES_ERR		= 0x5,
	MLX5_CMD_STAT_RES_BUSY			= 0x6,
	MLX5_CMD_STAT_LIM_ERR			= 0x8,
	MLX5_CMD_STAT_BAD_RES_STATE_ERR		= 0x9,
	MLX5_CMD_STAT_IX_ERR			= 0xa,
	MLX5_CMD_STAT_NO_RES_ERR		= 0xf,
	MLX5_CMD_STAT_BAD_INP_LEN_ERR		= 0x50,
	MLX5_CMD_STAT_BAD_OUTP_LEN_ERR		= 0x51,
	MLX5_CMD_STAT_BAD_QP_STATE_ERR		= 0x10,
	MLX5_CMD_STAT_BAD_PKT_ERR		= 0x30,
	MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR	= 0x40,
};

enum {
	MLX5_IEEE_802_3_COUNTERS_GROUP	      = 0x0,
	MLX5_RFC_2863_COUNTERS_GROUP	      = 0x1,
	MLX5_RFC_2819_COUNTERS_GROUP	      = 0x2,
	MLX5_RFC_3635_COUNTERS_GROUP	      = 0x3,
	MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
	MLX5_PER_PRIORITY_COUNTERS_GROUP      = 0x10,
	MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
	MLX5_PHYSICAL_LAYER_COUNTERS_GROUP    = 0x12,
	MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP = 0x13,
	MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP = 0x16,
	MLX5_INFINIBAND_PORT_COUNTERS_GROUP   = 0x20,
};

enum {
	MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP       = 0x0,
};

static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
{
	if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
		return 0;
	return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
}

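/*
 * Worked example: a device pkey_sz of 0 maps to the minimum 128-entry
 * table, and the maximum of MLX5_MAX_LOG_PKEY_TABLE (5) maps to
 * 128 << 5 = 4096 entries.
 */
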
#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 16
#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 16
#define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
#define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\
				MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\
				MLX5_BY_PASS_NUM_MULTICAST_PRIOS)

#endif /* MLX5_DEVICE_H */