/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_DEVICE_H
#define MLX5_DEVICE_H

#include <linux/types.h>
#include <rdma/ib_verbs.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/bitfield.h>

#if defined(__LITTLE_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS	0
#elif defined(__BIG_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS	0x80
#else
#error Host endianness not defined
#endif

/* helper macros */
#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
#define __mlx5_bit_off(typ, fld) (offsetof(struct mlx5_ifc_##typ##_bits, fld))
#define __mlx5_16_off(typ, fld) (__mlx5_bit_off(typ, fld) / 16)
#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
#define __mlx5_16_bit_off(typ, fld) (16 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0xf))
#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f))
#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld))
#define __mlx5_mask16(typ, fld) ((u16)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
#define __mlx5_16_mask(typ, fld) (__mlx5_mask16(typ, fld) << __mlx5_16_bit_off(typ, fld))
#define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits)

#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
#define MLX5_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64)
#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
#define MLX5_ADDR_OF(typ, p, fld) ((void *)((u8 *)(p) + MLX5_BYTE_OFF(typ, fld)))

/* insert a value into a struct */
#define MLX5_SET(typ, p, fld, v) do { \
	u32 _v = v; \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
		     (~__mlx5_dw_mask(typ, fld))) | (((_v) & __mlx5_mask(typ, fld)) \
		     << __mlx5_dw_bit_off(typ, fld))); \
} while (0)

#define MLX5_ARRAY_SET(typ, p, fld, idx, v) do { \
	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 32); \
	MLX5_SET(typ, p, fld[idx], v); \
} while (0)

#define MLX5_SET_TO_ONES(typ, p, fld) do { \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
		     (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \
		     << __mlx5_dw_bit_off(typ, fld))); \
} while (0)

#define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) + \
	__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
	__mlx5_mask(typ, fld))

#define MLX5_GET_PR(typ, p, fld) ({ \
	u32 ___t = MLX5_GET(typ, p, fld); \
	pr_debug(#fld " = 0x%x\n", ___t); \
	___t; \
})
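
/* Usage sketch for the 32-bit accessors above (a hypothetical caller; the
 * structure and field names come from mlx5_ifc.h):
 *
 *	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
 *	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {};
 *
 *	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
 *	MLX5_SET(enable_hca_in, in, function_id, 0);
 *	...
 *	status = MLX5_GET(enable_hca_out, out, status);
 *
 * MLX5_SET() read-modify-writes the big-endian dword that holds the field,
 * so callers never byte-swap by hand; MLX5_GET() returns the field already
 * shifted, masked and converted to host byte order.
 */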

#define __MLX5_SET64(typ, p, fld, v) do { \
	BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \
	*((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \
} while (0)

#define MLX5_SET64(typ, p, fld, v) do { \
	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
	__MLX5_SET64(typ, p, fld, v); \
} while (0)

#define MLX5_ARRAY_SET64(typ, p, fld, idx, v) do { \
	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
	__MLX5_SET64(typ, p, fld[idx], v); \
} while (0)

#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))

#define MLX5_GET64_PR(typ, p, fld) ({ \
	u64 ___t = MLX5_GET64(typ, p, fld); \
	pr_debug(#fld " = 0x%llx\n", ___t); \
	___t; \
})
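
/* The two BUILD_BUG_ON()s above encode the 64-bit contract: the field must
 * be exactly 64 bits wide and start on a 64-bit boundary, because the whole
 * quadword is written with one cpu_to_be64() rather than a read-modify-write.
 * A sketch for an array of 64-bit fields (hypothetical caller; names from
 * mlx5_ifc.h):
 *
 *	MLX5_ARRAY_SET64(manage_pages_in, in, pas, i, dma_addr);
 */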

#define MLX5_GET16(typ, p, fld) ((be16_to_cpu(*((__be16 *)(p) + \
	__mlx5_16_off(typ, fld))) >> __mlx5_16_bit_off(typ, fld)) & \
	__mlx5_mask16(typ, fld))

#define MLX5_SET16(typ, p, fld, v) do { \
	u16 _v = v; \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 16); \
	*((__be16 *)(p) + __mlx5_16_off(typ, fld)) = \
	cpu_to_be16((be16_to_cpu(*((__be16 *)(p) + __mlx5_16_off(typ, fld))) & \
		     (~__mlx5_16_mask(typ, fld))) | (((_v) & __mlx5_mask16(typ, fld)) \
		     << __mlx5_16_bit_off(typ, fld))); \
} while (0)

/* Big endian getters */
#define MLX5_GET64_BE(typ, p, fld) (*((__be64 *)(p) + \
	__mlx5_64_off(typ, fld)))

#define MLX5_GET_BE(type_t, typ, p, fld) ({				  \
		type_t tmp;						  \
		switch (sizeof(tmp)) {					  \
		case sizeof(u8):					  \
			tmp = (__force type_t)MLX5_GET(typ, p, fld);	  \
			break;						  \
		case sizeof(u16):					  \
			tmp = (__force type_t)cpu_to_be16(MLX5_GET(typ, p, fld)); \
			break;						  \
		case sizeof(u32):					  \
			tmp = (__force type_t)cpu_to_be32(MLX5_GET(typ, p, fld)); \
			break;						  \
		case sizeof(u64):					  \
			tmp = (__force type_t)MLX5_GET64_BE(typ, p, fld); \
			break;						  \
		}							  \
		tmp;							  \
		})
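
/* MLX5_GET_BE() returns the field converted back to (or left in) big endian,
 * typed as type_t, which suits values that feed straight into another
 * wire-format structure. An illustrative sketch (hypothetical caller; the
 * struct and field names come from mlx5_ifc.h):
 *
 *	__be16 vid = MLX5_GET_BE(__be16, fte_match_set_lyr_2_4,
 *				 headers, first_vid);
 */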

enum mlx5_inline_modes {
	MLX5_INLINE_MODE_NONE,
	MLX5_INLINE_MODE_L2,
	MLX5_INLINE_MODE_IP,
	MLX5_INLINE_MODE_TCP_UDP,
};

enum {
	MLX5_MAX_COMMANDS = 32,
	MLX5_CMD_DATA_BLOCK_SIZE = 512,
	MLX5_PCI_CMD_XPORT = 7,
	MLX5_MKEY_BSF_OCTO_SIZE = 4,
	MLX5_MAX_PSVS = 4,
};

enum {
	MLX5_EXTENDED_UD_AV = 0x80000000,
};

enum {
	MLX5_CQ_STATE_ARMED = 9,
	MLX5_CQ_STATE_ALWAYS_ARMED = 0xb,
	MLX5_CQ_STATE_FIRED = 0xa,
};

enum {
	MLX5_STAT_RATE_OFFSET = 5,
};

enum {
	MLX5_INLINE_SEG = 0x80000000,
};

enum {
	MLX5_HW_START_PADDING = MLX5_INLINE_SEG,
};

enum {
	MLX5_MIN_PKEY_TABLE_SIZE = 128,
	MLX5_MAX_LOG_PKEY_TABLE = 5,
};

enum {
	MLX5_MKEY_INBOX_PG_ACCESS = 1 << 31
};

enum {
	MLX5_PFAULT_SUBTYPE_WQE = 0,
	MLX5_PFAULT_SUBTYPE_RDMA = 1,
};

enum wqe_page_fault_type {
	MLX5_WQE_PF_TYPE_RMP = 0,
	MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE = 1,
	MLX5_WQE_PF_TYPE_RESP = 2,
	MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC = 3,
};

enum {
	MLX5_PERM_LOCAL_READ = 1 << 2,
	MLX5_PERM_LOCAL_WRITE = 1 << 3,
	MLX5_PERM_REMOTE_READ = 1 << 4,
	MLX5_PERM_REMOTE_WRITE = 1 << 5,
	MLX5_PERM_ATOMIC = 1 << 6,
	MLX5_PERM_UMR_EN = 1 << 7,
};

enum {
	MLX5_PCIE_CTRL_SMALL_FENCE = 1 << 0,
	MLX5_PCIE_CTRL_RELAXED_ORDERING = 1 << 2,
	MLX5_PCIE_CTRL_NO_SNOOP = 1 << 3,
	MLX5_PCIE_CTRL_TLP_PROCE_EN = 1 << 6,
	MLX5_PCIE_CTRL_TPH_MASK = 3 << 4,
};

enum {
	MLX5_EN_RD = (u64)1,
	MLX5_EN_WR = (u64)2
};

enum {
	MLX5_ADAPTER_PAGE_SHIFT = 12,
	MLX5_ADAPTER_PAGE_SIZE = 1 << MLX5_ADAPTER_PAGE_SHIFT,
};

enum {
	MLX5_BFREGS_PER_UAR = 4,
	MLX5_MAX_UARS = 1 << 8,
	MLX5_NON_FP_BFREGS_PER_UAR = 2,
	MLX5_FP_BFREGS_PER_UAR = MLX5_BFREGS_PER_UAR -
				 MLX5_NON_FP_BFREGS_PER_UAR,
	MLX5_MAX_BFREGS = MLX5_MAX_UARS *
			  MLX5_NON_FP_BFREGS_PER_UAR,
	MLX5_UARS_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
	MLX5_NON_FP_BFREGS_IN_PAGE = MLX5_NON_FP_BFREGS_PER_UAR * MLX5_UARS_IN_PAGE,
	MLX5_MIN_DYN_BFREGS = 512,
	MLX5_MAX_DYN_BFREGS = 1024,
};
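
/* Worked example for the constants above, assuming 4 KiB kernel pages
 * (PAGE_SIZE == MLX5_ADAPTER_PAGE_SIZE): MLX5_UARS_IN_PAGE = 1, each page
 * then holds MLX5_NON_FP_BFREGS_IN_PAGE = 2 * 1 = 2 regular blue-flame
 * registers, and MLX5_MAX_BFREGS = 256 * 2 = 512. With 64 KiB pages,
 * MLX5_UARS_IN_PAGE grows to 16 and the per-page count to 32.
 */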

enum {
	MLX5_MKEY_MASK_LEN = 1ull << 0,
	MLX5_MKEY_MASK_PAGE_SIZE = 1ull << 1,
	MLX5_MKEY_MASK_START_ADDR = 1ull << 6,
	MLX5_MKEY_MASK_PD = 1ull << 7,
	MLX5_MKEY_MASK_EN_RINVAL = 1ull << 8,
	MLX5_MKEY_MASK_EN_SIGERR = 1ull << 9,
	MLX5_MKEY_MASK_BSF_EN = 1ull << 12,
	MLX5_MKEY_MASK_KEY = 1ull << 13,
	MLX5_MKEY_MASK_QPN = 1ull << 14,
	MLX5_MKEY_MASK_LR = 1ull << 17,
	MLX5_MKEY_MASK_LW = 1ull << 18,
	MLX5_MKEY_MASK_RR = 1ull << 19,
	MLX5_MKEY_MASK_RW = 1ull << 20,
	MLX5_MKEY_MASK_A = 1ull << 21,
	MLX5_MKEY_MASK_SMALL_FENCE = 1ull << 23,
	MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE = 1ull << 25,
	MLX5_MKEY_MASK_FREE = 1ull << 29,
	MLX5_MKEY_MASK_RELAXED_ORDERING_READ = 1ull << 47,
};

enum {
	MLX5_UMR_TRANSLATION_OFFSET_EN = (1 << 4),

	MLX5_UMR_CHECK_NOT_FREE = (1 << 5),
	MLX5_UMR_CHECK_FREE = (2 << 5),

	MLX5_UMR_INLINE = (1 << 7),
};

#define MLX5_UMR_FLEX_ALIGNMENT 0x40
#define MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_mtt))
#define MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_klm))
#define MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_ksm))

#define MLX5_USER_INDEX_LEN (MLX5_FLD_SZ_BYTES(qpc, user_index) * 8)

enum {
	MLX5_EVENT_QUEUE_TYPE_QP = 0,
	MLX5_EVENT_QUEUE_TYPE_RQ = 1,
	MLX5_EVENT_QUEUE_TYPE_SQ = 2,
	MLX5_EVENT_QUEUE_TYPE_DCT = 6,
};

/* mlx5 components can subscribe to any one of these events via the
 * mlx5_eq_notifier_register() API.
 */
enum mlx5_event {
	/* Special value to subscribe to any event */
	MLX5_EVENT_TYPE_NOTIFY_ANY = 0x0,
	/* HW events enum start: comp events are not subscribable */
	MLX5_EVENT_TYPE_COMP = 0x0,
	/* HW Async events enum start: subscribable events */
	MLX5_EVENT_TYPE_PATH_MIG = 0x01,
	MLX5_EVENT_TYPE_COMM_EST = 0x02,
	MLX5_EVENT_TYPE_SQ_DRAINED = 0x03,
	MLX5_EVENT_TYPE_SRQ_LAST_WQE = 0x13,
	MLX5_EVENT_TYPE_SRQ_RQ_LIMIT = 0x14,

	MLX5_EVENT_TYPE_CQ_ERROR = 0x04,
	MLX5_EVENT_TYPE_WQ_CATAS_ERROR = 0x05,
	MLX5_EVENT_TYPE_PATH_MIG_FAILED = 0x07,
	MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
	MLX5_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11,
	MLX5_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12,
	MLX5_EVENT_TYPE_OBJECT_CHANGE = 0x27,

	MLX5_EVENT_TYPE_INTERNAL_ERROR = 0x08,
	MLX5_EVENT_TYPE_PORT_CHANGE = 0x09,
	MLX5_EVENT_TYPE_GPIO_EVENT = 0x15,
	MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16,
	MLX5_EVENT_TYPE_TEMP_WARN_EVENT = 0x17,
	MLX5_EVENT_TYPE_XRQ_ERROR = 0x18,
	MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19,
	MLX5_EVENT_TYPE_GENERAL_EVENT = 0x22,
	MLX5_EVENT_TYPE_MONITOR_COUNTER = 0x24,
	MLX5_EVENT_TYPE_PPS_EVENT = 0x25,

	MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a,
	MLX5_EVENT_TYPE_STALL_EVENT = 0x1b,

	MLX5_EVENT_TYPE_CMD = 0x0a,
	MLX5_EVENT_TYPE_PAGE_REQUEST = 0xb,

	MLX5_EVENT_TYPE_PAGE_FAULT = 0xc,
	MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd,

	MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED = 0xe,
	MLX5_EVENT_TYPE_VHCA_STATE_CHANGE = 0xf,

	MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c,
	MLX5_EVENT_TYPE_DCT_KEY_VIOLATION = 0x1d,

	MLX5_EVENT_TYPE_FPGA_ERROR = 0x20,
	MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21,

	MLX5_EVENT_TYPE_DEVICE_TRACER = 0x26,

	MLX5_EVENT_TYPE_MAX = 0x100,
};

enum mlx5_driver_event {
	MLX5_DRIVER_EVENT_TYPE_TRAP = 0,
	MLX5_DRIVER_EVENT_UPLINK_NETDEV,
	MLX5_DRIVER_EVENT_MACSEC_SA_ADDED,
	MLX5_DRIVER_EVENT_MACSEC_SA_DELETED,
	MLX5_DRIVER_EVENT_SF_PEER_DEVLINK,
	MLX5_DRIVER_EVENT_AFFILIATION_DONE,
	MLX5_DRIVER_EVENT_AFFILIATION_REMOVED,
};

enum {
	MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE = 0x0,
	MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE = 0x1,
	MLX5_TRACER_SUBTYPE_STRINGS_DB_UPDATE = 0x2,
};

enum {
	MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT = 0x1,
	MLX5_GENERAL_SUBTYPE_PCI_POWER_CHANGE_EVENT = 0x5,
	MLX5_GENERAL_SUBTYPE_FW_LIVE_PATCH_EVENT = 0x7,
	MLX5_GENERAL_SUBTYPE_PCI_SYNC_FOR_FW_UPDATE_EVENT = 0x8,
};

enum {
	MLX5_PORT_CHANGE_SUBTYPE_DOWN = 1,
	MLX5_PORT_CHANGE_SUBTYPE_ACTIVE = 4,
	MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED = 5,
	MLX5_PORT_CHANGE_SUBTYPE_LID = 6,
	MLX5_PORT_CHANGE_SUBTYPE_PKEY = 7,
	MLX5_PORT_CHANGE_SUBTYPE_GUID = 8,
	MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG = 9,
};

enum {
	MLX5_ROCE_VERSION_1 = 0,
	MLX5_ROCE_VERSION_2 = 2,
};

enum {
	MLX5_ROCE_VERSION_1_CAP = 1 << MLX5_ROCE_VERSION_1,
	MLX5_ROCE_VERSION_2_CAP = 1 << MLX5_ROCE_VERSION_2,
};

enum {
	MLX5_ROCE_L3_TYPE_IPV4 = 0,
	MLX5_ROCE_L3_TYPE_IPV6 = 1,
};

enum {
	MLX5_ROCE_L3_TYPE_IPV4_CAP = 1 << 1,
	MLX5_ROCE_L3_TYPE_IPV6_CAP = 1 << 2,
};

enum {
	MLX5_OPCODE_NOP = 0x00,
	MLX5_OPCODE_SEND_INVAL = 0x01,
	MLX5_OPCODE_RDMA_WRITE = 0x08,
	MLX5_OPCODE_RDMA_WRITE_IMM = 0x09,
	MLX5_OPCODE_SEND = 0x0a,
	MLX5_OPCODE_SEND_IMM = 0x0b,
	MLX5_OPCODE_LSO = 0x0e,
	MLX5_OPCODE_RDMA_READ = 0x10,
	MLX5_OPCODE_ATOMIC_CS = 0x11,
	MLX5_OPCODE_ATOMIC_FA = 0x12,
	MLX5_OPCODE_ATOMIC_MASKED_CS = 0x14,
	MLX5_OPCODE_ATOMIC_MASKED_FA = 0x15,
	MLX5_OPCODE_BIND_MW = 0x18,
	MLX5_OPCODE_CONFIG_CMD = 0x1f,
	MLX5_OPCODE_ENHANCED_MPSW = 0x29,

	MLX5_RECV_OPCODE_RDMA_WRITE_IMM = 0x00,
	MLX5_RECV_OPCODE_SEND = 0x01,
	MLX5_RECV_OPCODE_SEND_IMM = 0x02,
	MLX5_RECV_OPCODE_SEND_INVAL = 0x03,

	MLX5_CQE_OPCODE_ERROR = 0x1e,
	MLX5_CQE_OPCODE_RESIZE = 0x16,

	MLX5_OPCODE_SET_PSV = 0x20,
	MLX5_OPCODE_GET_PSV = 0x21,
	MLX5_OPCODE_CHECK_PSV = 0x22,
	MLX5_OPCODE_DUMP = 0x23,
	MLX5_OPCODE_RGET_PSV = 0x26,
	MLX5_OPCODE_RCHECK_PSV = 0x27,

	MLX5_OPCODE_UMR = 0x25,

	MLX5_OPCODE_FLOW_TBL_ACCESS = 0x2c,

	MLX5_OPCODE_ACCESS_ASO = 0x2d,
};

enum {
	MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x1,
	MLX5_OPC_MOD_TLS_TIR_STATIC_PARAMS = 0x2,
};

enum {
	MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x1,
	MLX5_OPC_MOD_TLS_TIR_PROGRESS_PARAMS = 0x2,
};

struct mlx5_wqe_tls_static_params_seg {
	u8 ctx[MLX5_ST_SZ_BYTES(tls_static_params)];
};

struct mlx5_wqe_tls_progress_params_seg {
	__be32 tis_tir_num;
	u8 ctx[MLX5_ST_SZ_BYTES(tls_progress_params)];
};

enum {
	MLX5_SET_PORT_RESET_QKEY = 0,
	MLX5_SET_PORT_GUID0 = 16,
	MLX5_SET_PORT_NODE_GUID = 17,
	MLX5_SET_PORT_SYS_GUID = 18,
	MLX5_SET_PORT_GID_TABLE = 19,
	MLX5_SET_PORT_PKEY_TABLE = 20,
};

enum {
	MLX5_BW_NO_LIMIT = 0,
	MLX5_100_MBPS_UNIT = 3,
	MLX5_GBPS_UNIT = 4,
};

enum {
	MLX5_MAX_PAGE_SHIFT = 31
};

enum {
	/*
	 * Max wqe size for rdma read is 512 bytes, so this limits our
	 * max_sge_rd as the wqe needs to fit:
	 * - ctrl segment (16 bytes)
	 * - rdma segment (16 bytes)
	 * - scatter elements (16 bytes each)
	 */
	MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16
};
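
/* With the budget above, MLX5_MAX_SGE_RD works out to
 * (512 - 16 - 16) / 16 = 30 scatter entries per RDMA read WQE.
 */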

enum mlx5_odp_transport_cap_bits {
	MLX5_ODP_SUPPORT_SEND = 1 << 31,
	MLX5_ODP_SUPPORT_RECV = 1 << 30,
	MLX5_ODP_SUPPORT_WRITE = 1 << 29,
	MLX5_ODP_SUPPORT_READ = 1 << 28,
};

struct mlx5_odp_caps {
	char reserved[0x10];
	struct {
		__be32 rc_odp_caps;
		__be32 uc_odp_caps;
		__be32 ud_odp_caps;
	} per_transport_caps;
	char reserved2[0xe4];
};

struct mlx5_cmd_layout {
	u8	type;
	u8	rsvd0[3];
	__be32	inlen;
	__be64	in_ptr;
	__be32	in[4];
	__be32	out[4];
	__be64	out_ptr;
	__be32	outlen;
	u8	token;
	u8	sig;
	u8	rsvd1;
	u8	status_own;
};

enum mlx5_rfr_severity_bit_offsets {
	MLX5_RFR_BIT_OFFSET = 0x7,
};

struct health_buffer {
	__be32	assert_var[6];
	__be32	rsvd0[2];
	__be32	assert_exit_ptr;
	__be32	assert_callra;
	__be32	rsvd1[1];
	__be32	time;
	__be32	fw_ver;
	__be32	hw_id;
	u8	rfr_severity;
	u8	rsvd2[3];
	u8	irisc_index;
	u8	synd;
	__be16	ext_synd;
};

enum mlx5_initializing_bit_offsets {
	MLX5_FW_RESET_SUPPORTED_OFFSET = 30,
};

enum mlx5_cmd_addr_l_sz_offset {
	MLX5_NIC_IFC_OFFSET = 8,
};

struct mlx5_init_seg {
	__be32			fw_rev;
	__be32			cmdif_rev_fw_sub;
	__be32			rsvd0[2];
	__be32			cmdq_addr_h;
	__be32			cmdq_addr_l_sz;
	__be32			cmd_dbell;
	__be32			rsvd1[120];
	__be32			initializing;
	struct health_buffer	health;
	__be32			rsvd2[878];
	__be32			cmd_exec_to;
	__be32			cmd_q_init_to;
	__be32			internal_timer_h;
	__be32			internal_timer_l;
	__be32			rsvd3[2];
	__be32			health_counter;
	__be32			rsvd4[11];
	__be32			real_time_h;
	__be32			real_time_l;
	__be32			rsvd5[1006];
	__be64			ieee1588_clk;
	__be32			ieee1588_clk_type;
	__be32			clr_intx;
};

struct mlx5_eqe_comp {
	__be32	reserved[6];
	__be32	cqn;
};

struct mlx5_eqe_qp_srq {
	__be32	reserved1[5];
	u8	type;
	u8	reserved2[3];
	__be32	qp_srq_n;
};

struct mlx5_eqe_cq_err {
	__be32	cqn;
	u8	reserved1[7];
	u8	syndrome;
};

struct mlx5_eqe_xrq_err {
	__be32	reserved1[5];
	__be32	type_xrqn;
	__be32	reserved2;
};

struct mlx5_eqe_port_state {
	u8	reserved0[8];
	u8	port;
};

struct mlx5_eqe_gpio {
	__be32	reserved0[2];
	__be64	gpio_event;
};

struct mlx5_eqe_congestion {
	u8	type;
	u8	rsvd0;
	u8	congestion_level;
};

struct mlx5_eqe_stall_vl {
	u8	rsvd0[3];
	u8	port_vl;
};

struct mlx5_eqe_cmd {
	__be32	vector;
	__be32	rsvd[6];
};

struct mlx5_eqe_page_req {
	__be16	ec_function;
	__be16	func_id;
	__be32	num_pages;
	__be32	rsvd1[5];
};

struct mlx5_eqe_page_fault {
	__be32 bytes_committed;
	union {
		struct {
			u16	reserved1;
			__be16	wqe_index;
			u16	reserved2;
			__be16	packet_length;
			__be32	token;
			u8	reserved4[8];
			__be32	pftype_wq;
		} __packed wqe;
		struct {
			__be32	r_key;
			u16	reserved1;
			__be16	packet_length;
			__be32	rdma_op_len;
			__be64	rdma_va;
			__be32	pftype_token;
		} __packed rdma;
	} __packed;
} __packed;

struct mlx5_eqe_vport_change {
	u8	rsvd0[2];
	__be16	vport_num;
	__be32	rsvd1[6];
} __packed;

struct mlx5_eqe_port_module {
	u8	reserved_at_0[1];
	u8	module;
	u8	reserved_at_2[1];
	u8	module_status;
	u8	reserved_at_4[2];
	u8	error_type;
} __packed;

struct mlx5_eqe_pps {
	u8	rsvd0[3];
	u8	pin;
	u8	rsvd1[4];
	union {
		struct {
			__be32	time_sec;
			__be32	time_nsec;
		};
		struct {
			__be64	time_stamp;
		};
	};
	u8	rsvd2[12];
} __packed;

struct mlx5_eqe_dct {
	__be32	reserved[6];
	__be32	dctn;
};

struct mlx5_eqe_temp_warning {
	__be64 sensor_warning_msb;
	__be64 sensor_warning_lsb;
} __packed;

struct mlx5_eqe_obj_change {
	u8	rsvd0[2];
	__be16	obj_type;
	__be32	obj_id;
} __packed;

#define SYNC_RST_STATE_MASK	0xf

enum sync_rst_state_type {
	MLX5_SYNC_RST_STATE_RESET_REQUEST = 0x0,
	MLX5_SYNC_RST_STATE_RESET_NOW = 0x1,
	MLX5_SYNC_RST_STATE_RESET_ABORT = 0x2,
	MLX5_SYNC_RST_STATE_RESET_UNLOAD = 0x3,
};

struct mlx5_eqe_sync_fw_update {
	u8 reserved_at_0[3];
	u8 sync_rst_state;
};

struct mlx5_eqe_vhca_state {
	__be16 ec_function;
	__be16 function_id;
} __packed;

union ev_data {
	__be32				raw[7];
	struct mlx5_eqe_cmd		cmd;
	struct mlx5_eqe_comp		comp;
	struct mlx5_eqe_qp_srq		qp_srq;
	struct mlx5_eqe_cq_err		cq_err;
	struct mlx5_eqe_port_state	port;
	struct mlx5_eqe_gpio		gpio;
	struct mlx5_eqe_congestion	cong;
	struct mlx5_eqe_stall_vl	stall_vl;
	struct mlx5_eqe_page_req	req_pages;
	struct mlx5_eqe_page_fault	page_fault;
	struct mlx5_eqe_vport_change	vport_change;
	struct mlx5_eqe_port_module	port_module;
	struct mlx5_eqe_pps		pps;
	struct mlx5_eqe_dct		dct;
	struct mlx5_eqe_temp_warning	temp_warning;
	struct mlx5_eqe_xrq_err		xrq_err;
	struct mlx5_eqe_sync_fw_update	sync_fw_update;
	struct mlx5_eqe_vhca_state	vhca_state;
	struct mlx5_eqe_obj_change	obj_change;
} __packed;

struct mlx5_eqe {
	u8		rsvd0;
	u8		type;
	u8		rsvd1;
	u8		sub_type;
	__be32		rsvd2[7];
	union ev_data	data;
	__be16		rsvd3;
	u8		signature;
	u8		owner;
} __packed;

struct mlx5_cmd_prot_block {
	u8	data[MLX5_CMD_DATA_BLOCK_SIZE];
	u8	rsvd0[48];
	__be64	next;
	__be32	block_num;
	u8	rsvd1;
	u8	token;
	u8	ctrl_sig;
	u8	sig;
};

enum {
	MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5,
};

struct mlx5_err_cqe {
	u8	rsvd0[32];
	__be32	srqn;
	u8	rsvd1[18];
	u8	vendor_err_synd;
	u8	syndrome;
	__be32	s_wqe_opcode_qpn;
	__be16	wqe_counter;
	u8	signature;
	u8	op_own;
};

struct mlx5_cqe64 {
	u8	tls_outer_l3_tunneled;
	u8	rsvd0;
	__be16	wqe_id;
	union {
		struct {
			u8	tcppsh_abort_dupack;
			u8	min_ttl;
			__be16	tcp_win;
			__be32	ack_seq_num;
		} lro;
		struct {
			u8	reserved0:1;
			u8	match:1;
			u8	flush:1;
			u8	reserved3:5;
			u8	header_size;
			__be16	header_entry_index;
			__be32	data_offset;
		} shampo;
	};
	__be32	rss_hash_result;
	u8	rss_hash_type;
	u8	ml_path;
	u8	rsvd20[2];
	__be16	check_sum;
	__be16	slid;
	__be32	flags_rqpn;
	u8	hds_ip_ext;
	u8	l4_l3_hdr_type;
	__be16	vlan_info;
	__be32	srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
	union {
		__be32 immediate;
		__be32 inval_rkey;
		__be32 pkey;
		__be32 ft_metadata;
	};
	u8	rsvd40[4];
	__be32	byte_cnt;
	__be32	timestamp_h;
	__be32	timestamp_l;
	__be32	sop_drop_qpn;
	__be16	wqe_counter;
	union {
		u8	signature;
		u8	validity_iteration_count;
	};
	u8	op_own;
};

struct mlx5_mini_cqe8 {
	union {
		__be32 rx_hash_result;
		struct {
			__be16 checksum;
			__be16 stridx;
		};
		struct {
			__be16 wqe_counter;
			u8 s_wqe_opcode;
			u8 reserved;
		} s_wqe_info;
	};
	__be32 byte_cnt;
};

enum {
	MLX5_NO_INLINE_DATA,
	MLX5_INLINE_DATA32_SEG,
	MLX5_INLINE_DATA64_SEG,
	MLX5_COMPRESSED,
};

enum {
	MLX5_CQE_FORMAT_CSUM = 0x1,
	MLX5_CQE_FORMAT_CSUM_STRIDX = 0x3,
};

enum {
	MLX5_CQE_COMPRESS_LAYOUT_BASIC = 0,
	MLX5_CQE_COMPRESS_LAYOUT_ENHANCED = 1,
};

#define MLX5_MINI_CQE_ARRAY_SIZE 8

static inline u8 mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
{
	return (cqe->op_own >> 2) & 0x3;
}

static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe)
{
	return cqe->op_own >> 4;
}

static inline u8 get_cqe_enhanced_num_mini_cqes(struct mlx5_cqe64 *cqe)
{
	/* num_of_mini_cqes is zero based */
	return get_cqe_opcode(cqe) + 1;
}

static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
{
	return (cqe->lro.tcppsh_abort_dupack >> 6) & 1;
}

static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
{
	return (cqe->l4_l3_hdr_type >> 4) & 0x7;
}

static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe)
{
	return cqe->tls_outer_l3_tunneled & 0x1;
}

static inline u8 get_cqe_tls_offload(struct mlx5_cqe64 *cqe)
{
	return (cqe->tls_outer_l3_tunneled >> 3) & 0x3;
}

static inline bool cqe_has_vlan(const struct mlx5_cqe64 *cqe)
{
	return cqe->l4_l3_hdr_type & 0x1;
}

static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
{
	u32 hi, lo;

	hi = be32_to_cpu(cqe->timestamp_h);
	lo = be32_to_cpu(cqe->timestamp_l);

	return (u64)lo | ((u64)hi << 32);
}

static inline u16 get_cqe_flow_tag(struct mlx5_cqe64 *cqe)
{
	return be32_to_cpu(cqe->sop_drop_qpn) & 0xFFF;
}
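
/* A minimal RX completion parse built from the helpers above (a sketch of a
 * hypothetical caller, not driver code from this header; ownership and error
 * checking are elided). Taken together, the accessors imply the op_own
 * layout: owner in bit 0, CQE format in bits 3:2, opcode in bits 7:4.
 *
 *	u32 byte_cnt = be32_to_cpu(cqe->byte_cnt);
 *	u64 ts	     = get_cqe_ts(cqe);
 *	u8 l4_type   = get_cqe_l4_hdr_type(cqe);
 *
 *	if (cqe_has_vlan(cqe))
 *		vlan_tci = be16_to_cpu(cqe->vlan_info);
 *	if (l4_type == CQE_L4_HDR_TYPE_UDP)
 *		...
 */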

#define MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE	3
#define MLX5_MPWQE_LOG_NUM_STRIDES_BASE		9
#define MLX5_MPWQE_LOG_NUM_STRIDES_MAX		16
#define MLX5_MPWQE_LOG_STRIDE_SZ_BASE		6
#define MLX5_MPWQE_LOG_STRIDE_SZ_MAX		13

struct mpwrq_cqe_bc {
	__be16	filler_consumed_strides;
	__be16	byte_cnt;
};

static inline u16 mpwrq_get_cqe_byte_cnt(struct mlx5_cqe64 *cqe)
{
	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;

	return be16_to_cpu(bc->byte_cnt);
}

static inline u16 mpwrq_get_cqe_bc_consumed_strides(struct mpwrq_cqe_bc *bc)
{
	return 0x7fff & be16_to_cpu(bc->filler_consumed_strides);
}

static inline u16 mpwrq_get_cqe_consumed_strides(struct mlx5_cqe64 *cqe)
{
	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;

	return mpwrq_get_cqe_bc_consumed_strides(bc);
}

static inline bool mpwrq_is_filler_cqe(struct mlx5_cqe64 *cqe)
{
	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;

	return 0x8000 & be16_to_cpu(bc->filler_consumed_strides);
}

static inline u16 mpwrq_get_cqe_stride_index(struct mlx5_cqe64 *cqe)
{
	return be16_to_cpu(cqe->wqe_counter);
}

enum {
	CQE_L4_HDR_TYPE_NONE = 0x0,
	CQE_L4_HDR_TYPE_TCP_NO_ACK = 0x1,
	CQE_L4_HDR_TYPE_UDP = 0x2,
	CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA = 0x3,
	CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA = 0x4,
};

enum {
	CQE_RSS_HTYPE_IP = GENMASK(3, 2),
	/* cqe->rss_hash_type[3:2] - IP destination selected for hash
	 * (00 = none, 01 = IPv4, 10 = IPv6, 11 = Reserved)
	 */
	CQE_RSS_IP_NONE = 0x0,
	CQE_RSS_IPV4 = 0x1,
	CQE_RSS_IPV6 = 0x2,
	CQE_RSS_RESERVED = 0x3,

	CQE_RSS_HTYPE_L4 = GENMASK(7, 6),
	/* cqe->rss_hash_type[7:6] - L4 destination selected for hash
	 * (00 = none, 01 = TCP, 10 = UDP, 11 = IPSEC.SPI)
	 */
	CQE_RSS_L4_NONE = 0x0,
	CQE_RSS_L4_TCP = 0x1,
	CQE_RSS_L4_UDP = 0x2,
	CQE_RSS_L4_IPSEC = 0x3,
};

enum {
	MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH = 0x0,
	MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6 = 0x1,
	MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4 = 0x2,
};

enum {
	CQE_L2_OK = 1 << 0,
	CQE_L3_OK = 1 << 1,
	CQE_L4_OK = 1 << 2,
};

enum {
	CQE_TLS_OFFLOAD_NOT_DECRYPTED = 0x0,
	CQE_TLS_OFFLOAD_DECRYPTED = 0x1,
	CQE_TLS_OFFLOAD_RESYNC = 0x2,
	CQE_TLS_OFFLOAD_ERROR = 0x3,
};

struct mlx5_sig_err_cqe {
	u8	rsvd0[16];
	__be32	expected_trans_sig;
	__be32	actual_trans_sig;
	__be32	expected_reftag;
	__be32	actual_reftag;
	__be16	syndrome;
	u8	rsvd22[2];
	__be32	mkey;
	__be64	err_offset;
	u8	rsvd30[8];
	__be32	qpn;
	u8	rsvd38[2];
	u8	signature;
	u8	op_own;
};

struct mlx5_wqe_srq_next_seg {
	u8	rsvd0[2];
	__be16	next_wqe_index;
	u8	signature;
	u8	rsvd1[11];
};

union mlx5_ext_cqe {
	struct ib_grh	grh;
	u8		inl[64];
};

struct mlx5_cqe128 {
	union mlx5_ext_cqe	inl_grh;
	struct mlx5_cqe64	cqe64;
};

enum {
	MLX5_MKEY_STATUS_FREE = 1 << 6,
};

enum {
	MLX5_MKEY_REMOTE_INVAL = 1 << 24,
	MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
	MLX5_MKEY_BSF_EN = 1 << 30,
};

struct mlx5_mkey_seg {
	/* This is a two bit field occupying bits 31-30.
	 * bit 31 is always 0,
	 * bit 30 is zero for regular MRs and 1 (i.e. free) for UMRs that do
	 * not have translation
	 */
	u8	status;
	u8	pcie_control;
	u8	flags;
	u8	version;
	__be32	qpn_mkey7_0;
	u8	rsvd1[4];
	__be32	flags_pd;
	__be64	start_addr;
	__be64	len;
	__be32	bsfs_octo_size;
	u8	rsvd2[16];
	__be32	xlt_oct_size;
	u8	rsvd3[3];
	u8	log2_page_size;
	u8	rsvd4[4];
};

#define MLX5_ATTR_EXTENDED_PORT_INFO	cpu_to_be16(0xff90)

enum {
	MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO = 1 << 0
};

enum {
	VPORT_STATE_DOWN = 0x0,
	VPORT_STATE_UP = 0x1,
};

enum {
	MLX5_VPORT_ADMIN_STATE_DOWN = 0x0,
	MLX5_VPORT_ADMIN_STATE_UP = 0x1,
	MLX5_VPORT_ADMIN_STATE_AUTO = 0x2,
};

enum {
	MLX5_VPORT_CVLAN_INSERT_WHEN_NO_CVLAN = 0x1,
	MLX5_VPORT_CVLAN_INSERT_ALWAYS = 0x3,
};

enum {
	MLX5_L3_PROT_TYPE_IPV4 = 0,
	MLX5_L3_PROT_TYPE_IPV6 = 1,
};

enum {
	MLX5_L4_PROT_TYPE_TCP = 0,
	MLX5_L4_PROT_TYPE_UDP = 1,
};

enum {
	MLX5_HASH_FIELD_SEL_SRC_IP = 1 << 0,
	MLX5_HASH_FIELD_SEL_DST_IP = 1 << 1,
	MLX5_HASH_FIELD_SEL_L4_SPORT = 1 << 2,
	MLX5_HASH_FIELD_SEL_L4_DPORT = 1 << 3,
	MLX5_HASH_FIELD_SEL_IPSEC_SPI = 1 << 4,
};

enum {
	MLX5_MATCH_OUTER_HEADERS = 1 << 0,
	MLX5_MATCH_MISC_PARAMETERS = 1 << 1,
	MLX5_MATCH_INNER_HEADERS = 1 << 2,
	MLX5_MATCH_MISC_PARAMETERS_2 = 1 << 3,
	MLX5_MATCH_MISC_PARAMETERS_3 = 1 << 4,
	MLX5_MATCH_MISC_PARAMETERS_4 = 1 << 5,
	MLX5_MATCH_MISC_PARAMETERS_5 = 1 << 6,
};

enum {
	MLX5_FLOW_TABLE_TYPE_NIC_RCV = 0,
	MLX5_FLOW_TABLE_TYPE_ESWITCH = 4,
};

enum {
	MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT = 0,
	MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE = 1,
	MLX5_FLOW_CONTEXT_DEST_TYPE_TIR = 2,
};

enum mlx5_list_type {
	MLX5_NVPRT_LIST_TYPE_UC = 0x0,
	MLX5_NVPRT_LIST_TYPE_MC = 0x1,
	MLX5_NVPRT_LIST_TYPE_VLAN = 0x2,
};

enum {
	MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
	MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM = 0x1,
};

enum mlx5_wol_mode {
	MLX5_WOL_DISABLE = 0,
	MLX5_WOL_SECURED_MAGIC = 1 << 1,
	MLX5_WOL_MAGIC = 1 << 2,
	MLX5_WOL_ARP = 1 << 3,
	MLX5_WOL_BROADCAST = 1 << 4,
	MLX5_WOL_MULTICAST = 1 << 5,
	MLX5_WOL_UNICAST = 1 << 6,
	MLX5_WOL_PHY_ACTIVITY = 1 << 7,
};

enum mlx5_mpls_supported_fields {
	MLX5_FIELD_SUPPORT_MPLS_LABEL = 1 << 0,
	MLX5_FIELD_SUPPORT_MPLS_EXP = 1 << 1,
	MLX5_FIELD_SUPPORT_MPLS_S_BOS = 1 << 2,
	MLX5_FIELD_SUPPORT_MPLS_TTL = 1 << 3
};

enum mlx5_flex_parser_protos {
	MLX5_FLEX_PROTO_GENEVE = 1 << 3,
	MLX5_FLEX_PROTO_CW_MPLS_GRE = 1 << 4,
	MLX5_FLEX_PROTO_CW_MPLS_UDP = 1 << 5,
	MLX5_FLEX_PROTO_ICMP = 1 << 8,
	MLX5_FLEX_PROTO_ICMPV6 = 1 << 9,
};

/* MLX5 DEV CAPs */

/* TODO: EAT.ME */
enum mlx5_cap_mode {
	HCA_CAP_OPMOD_GET_MAX = 0,
	HCA_CAP_OPMOD_GET_CUR = 1,
};

/* Any new cap addition must update mlx5_hca_caps_alloc() to allocate
 * capability memory.
 */
enum mlx5_cap_type {
	MLX5_CAP_GENERAL = 0,
	MLX5_CAP_ETHERNET_OFFLOADS,
	MLX5_CAP_ODP,
	MLX5_CAP_ATOMIC,
	MLX5_CAP_ROCE,
	MLX5_CAP_IPOIB_OFFLOADS,
	MLX5_CAP_IPOIB_ENHANCED_OFFLOADS,
	MLX5_CAP_FLOW_TABLE,
	MLX5_CAP_ESWITCH_FLOW_TABLE,
	MLX5_CAP_ESWITCH,
	MLX5_CAP_QOS = 0xc,
	MLX5_CAP_DEBUG,
	MLX5_CAP_RESERVED_14,
	MLX5_CAP_DEV_MEM,
	MLX5_CAP_RESERVED_16,
	MLX5_CAP_TLS,
	MLX5_CAP_VDPA_EMULATION = 0x13,
	MLX5_CAP_DEV_EVENT = 0x14,
	MLX5_CAP_IPSEC,
	MLX5_CAP_CRYPTO = 0x1a,
	MLX5_CAP_MACSEC = 0x1f,
	MLX5_CAP_GENERAL_2 = 0x20,
	MLX5_CAP_PORT_SELECTION = 0x25,
	MLX5_CAP_ADV_VIRTUALIZATION = 0x26,
	/* NUM OF CAP Types */
	MLX5_CAP_NUM
};

enum mlx5_pcam_reg_groups {
	MLX5_PCAM_REGS_5000_TO_507F = 0x0,
};

enum mlx5_pcam_feature_groups {
	MLX5_PCAM_FEATURE_ENHANCED_FEATURES = 0x0,
};

enum mlx5_mcam_reg_groups {
	MLX5_MCAM_REGS_FIRST_128 = 0x0,
	MLX5_MCAM_REGS_0x9100_0x917F = 0x2,
	MLX5_MCAM_REGS_NUM = 0x3,
};

enum mlx5_mcam_feature_groups {
	MLX5_MCAM_FEATURE_ENHANCED_FEATURES = 0x0,
};

enum mlx5_qcam_reg_groups {
	MLX5_QCAM_REGS_FIRST_128 = 0x0,
};

enum mlx5_qcam_feature_groups {
	MLX5_QCAM_FEATURE_ENHANCED_FEATURES = 0x0,
};

/* GET Dev Caps macros */
#define MLX5_CAP_GEN(mdev, cap) \
	MLX5_GET(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->cur, cap)

#define MLX5_CAP_GEN_64(mdev, cap) \
	MLX5_GET64(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->cur, cap)

#define MLX5_CAP_GEN_MAX(mdev, cap) \
	MLX5_GET(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->max, cap)

#define MLX5_CAP_GEN_2(mdev, cap) \
	MLX5_GET(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->cur, cap)

#define MLX5_CAP_GEN_2_64(mdev, cap) \
	MLX5_GET64(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->cur, cap)

#define MLX5_CAP_GEN_2_MAX(mdev, cap) \
	MLX5_GET(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->max, cap)

#define MLX5_CAP_ETH(mdev, cap) \
	MLX5_GET(per_protocol_networking_offload_caps,\
		 mdev->caps.hca[MLX5_CAP_ETHERNET_OFFLOADS]->cur, cap)

#define MLX5_CAP_IPOIB_ENHANCED(mdev, cap) \
	MLX5_GET(per_protocol_networking_offload_caps,\
		 mdev->caps.hca[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS]->cur, cap)

#define MLX5_CAP_ROCE(mdev, cap) \
	MLX5_GET(roce_cap, mdev->caps.hca[MLX5_CAP_ROCE]->cur, cap)

#define MLX5_CAP_ROCE_MAX(mdev, cap) \
	MLX5_GET(roce_cap, mdev->caps.hca[MLX5_CAP_ROCE]->max, cap)

#define MLX5_CAP_ATOMIC(mdev, cap) \
	MLX5_GET(atomic_caps, mdev->caps.hca[MLX5_CAP_ATOMIC]->cur, cap)

#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
	MLX5_GET(atomic_caps, mdev->caps.hca[MLX5_CAP_ATOMIC]->max, cap)

#define MLX5_CAP_FLOWTABLE(mdev, cap) \
	MLX5_GET(flow_table_nic_cap, mdev->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap)

#define MLX5_CAP64_FLOWTABLE(mdev, cap) \
	MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap)

#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)

#define MLX5_CAP_FLOWTABLE_NIC_TX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit.cap)

#define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap)

#define MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_sniffer.cap)

#define MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_rdma.cap)

#define MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_rdma.cap)

#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
	MLX5_GET(flow_table_eswitch_cap, \
		 mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap)

#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)

#define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap)

#define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap)

#define MLX5_CAP_ESW_FT_FIELD_SUPPORT_2(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, ft_field_support_2_esw_fdb.cap)

#define MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, ft_field_support_2_nic_receive.cap)

#define MLX5_CAP_ESW(mdev, cap) \
	MLX5_GET(e_switch_cap, \
		 mdev->caps.hca[MLX5_CAP_ESWITCH]->cur, cap)

#define MLX5_CAP64_ESW_FLOWTABLE(mdev, cap) \
	MLX5_GET64(flow_table_eswitch_cap, \
		   (mdev)->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap)

#define MLX5_CAP_PORT_SELECTION(mdev, cap) \
	MLX5_GET(port_selection_cap, \
		 mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->cur, cap)

#define MLX5_CAP_PORT_SELECTION_MAX(mdev, cap) \
	MLX5_GET(port_selection_cap, \
		 mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->max, cap)

#define MLX5_CAP_ADV_VIRTUALIZATION(mdev, cap) \
	MLX5_GET(adv_virtualization_cap, \
		 mdev->caps.hca[MLX5_CAP_ADV_VIRTUALIZATION]->cur, cap)

#define MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) \
	MLX5_CAP_PORT_SELECTION(mdev, flow_table_properties_port_selection.cap)

#define MLX5_CAP_PORT_SELECTION_FT_FIELD_SUPPORT_2(mdev, cap) \
	MLX5_CAP_PORT_SELECTION(mdev, ft_field_support_2_port_selection.cap)

#define MLX5_CAP_ODP(mdev, cap)\
	MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap)

#define MLX5_CAP_ODP_MAX(mdev, cap)\
	MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->max, cap)

#define MLX5_CAP_QOS(mdev, cap)\
	MLX5_GET(qos_cap, mdev->caps.hca[MLX5_CAP_QOS]->cur, cap)

#define MLX5_CAP_DEBUG(mdev, cap)\
	MLX5_GET(debug_cap, mdev->caps.hca[MLX5_CAP_DEBUG]->cur, cap)

#define MLX5_CAP_PCAM_FEATURE(mdev, fld) \
	MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld)

#define MLX5_CAP_PCAM_REG(mdev, reg) \
	MLX5_GET(pcam_reg, (mdev)->caps.pcam, port_access_reg_cap_mask.regs_5000_to_507f.reg)

#define MLX5_CAP_MCAM_REG(mdev, reg) \
	MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_FIRST_128], \
		 mng_access_reg_cap_mask.access_regs.reg)

#define MLX5_CAP_MCAM_REG2(mdev, reg) \
	MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9100_0x917F], \
		 mng_access_reg_cap_mask.access_regs2.reg)

#define MLX5_CAP_MCAM_FEATURE(mdev, fld) \
	MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)

#define MLX5_CAP_QCAM_REG(mdev, fld) \
	MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_access_reg_cap_mask.reg_cap.fld)

#define MLX5_CAP_QCAM_FEATURE(mdev, fld) \
	MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_feature_cap_mask.feature_cap.fld)

#define MLX5_CAP_FPGA(mdev, cap) \
	MLX5_GET(fpga_cap, (mdev)->caps.fpga, cap)

#define MLX5_CAP64_FPGA(mdev, cap) \
	MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap)

#define MLX5_CAP_DEV_MEM(mdev, cap)\
	MLX5_GET(device_mem_cap, mdev->caps.hca[MLX5_CAP_DEV_MEM]->cur, cap)

#define MLX5_CAP64_DEV_MEM(mdev, cap)\
	MLX5_GET64(device_mem_cap, mdev->caps.hca[MLX5_CAP_DEV_MEM]->cur, cap)

#define MLX5_CAP_TLS(mdev, cap) \
	MLX5_GET(tls_cap, (mdev)->caps.hca[MLX5_CAP_TLS]->cur, cap)

#define MLX5_CAP_DEV_EVENT(mdev, cap)\
	MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca[MLX5_CAP_DEV_EVENT]->cur, cap)

#define MLX5_CAP_DEV_VDPA_EMULATION(mdev, cap)\
	MLX5_GET(virtio_emulation_cap, \
		 (mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap)

#define MLX5_CAP64_DEV_VDPA_EMULATION(mdev, cap)\
	MLX5_GET64(virtio_emulation_cap, \
		   (mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap)

#define MLX5_CAP_IPSEC(mdev, cap)\
	MLX5_GET(ipsec_cap, (mdev)->caps.hca[MLX5_CAP_IPSEC]->cur, cap)

#define MLX5_CAP_CRYPTO(mdev, cap)\
	MLX5_GET(crypto_cap, (mdev)->caps.hca[MLX5_CAP_CRYPTO]->cur, cap)

#define MLX5_CAP_MACSEC(mdev, cap)\
	MLX5_GET(macsec_cap, (mdev)->caps.hca[MLX5_CAP_MACSEC]->cur, cap)
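
/* Usage sketch for the capability accessors above (hypothetical caller;
 * the capability field names come from mlx5_ifc.h):
 *
 *	if (MLX5_CAP_GEN(mdev, cqe_compression))
 *		...
 *	max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
 *
 * The *_MAX variants read the device's advertised maximum for a capability,
 * while the plain variants read the currently enabled ("cur") value.
 */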

enum {
	MLX5_CMD_STAT_OK = 0x0,
	MLX5_CMD_STAT_INT_ERR = 0x1,
	MLX5_CMD_STAT_BAD_OP_ERR = 0x2,
	MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3,
	MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4,
	MLX5_CMD_STAT_BAD_RES_ERR = 0x5,
	MLX5_CMD_STAT_RES_BUSY = 0x6,
	MLX5_CMD_STAT_LIM_ERR = 0x8,
	MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9,
	MLX5_CMD_STAT_IX_ERR = 0xa,
	MLX5_CMD_STAT_NO_RES_ERR = 0xf,
	MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50,
	MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51,
	MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10,
	MLX5_CMD_STAT_BAD_PKT_ERR = 0x30,
	MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40,
};

enum {
	MLX5_IEEE_802_3_COUNTERS_GROUP = 0x0,
	MLX5_RFC_2863_COUNTERS_GROUP = 0x1,
	MLX5_RFC_2819_COUNTERS_GROUP = 0x2,
	MLX5_RFC_3635_COUNTERS_GROUP = 0x3,
	MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
	MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10,
	MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
	MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12,
	MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP = 0x13,
	MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP = 0x16,
	MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,
	MLX5_INFINIBAND_EXTENDED_PORT_COUNTERS_GROUP = 0x21,
};

enum {
	MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP = 0x0,
};

static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
{
	if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
		return 0;
	return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
}
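
/* E.g. a device-reported pkey_sz of 3 yields a software table size of
 * 128 << 3 = 1024 entries; anything above MLX5_MAX_LOG_PKEY_TABLE (5,
 * i.e. 4096 entries) is rejected as 0.
 */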

#define MLX5_RDMA_RX_NUM_COUNTERS_PRIOS 2
#define MLX5_RDMA_TX_NUM_COUNTERS_PRIOS 1
#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 16
#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 16
#define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
#define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\
				MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\
				MLX5_BY_PASS_NUM_MULTICAST_PRIOS)

#endif /* MLX5_DEVICE_H */