/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef MLX5_QP_H
#define MLX5_QP_H

#include <dev/mlx5/driver.h>

#define MLX5_INVALID_LKEY	0x100
#define MLX5_SIG_WQE_SIZE	(MLX5_SEND_WQE_BB * 5)
#define MLX5_DIF_SIZE		8
#define MLX5_STRIDE_BLOCK_OP	0x400
#define MLX5_CPY_GRD_MASK	0xc0
#define MLX5_CPY_APP_MASK	0x30
#define MLX5_CPY_REF_MASK	0x0f
#define MLX5_BSF_INC_REFTAG	(1 << 6)
#define MLX5_BSF_INL_VALID	(1 << 15)
#define MLX5_BSF_REFRESH_DIF	(1 << 14)
#define MLX5_BSF_REPEAT_BLOCK	(1 << 7)
#define MLX5_BSF_APPTAG_ESCAPE	0x1
#define MLX5_BSF_APPREF_ESCAPE	0x2
#define MLX5_WQE_DS_UNITS 16

enum mlx5_qp_optpar {
	MLX5_QP_OPTPAR_ALT_ADDR_PATH		= 1 << 0,
	MLX5_QP_OPTPAR_RRE			= 1 << 1,
	MLX5_QP_OPTPAR_RAE			= 1 << 2,
	MLX5_QP_OPTPAR_RWE			= 1 << 3,
	MLX5_QP_OPTPAR_PKEY_INDEX		= 1 << 4,
	MLX5_QP_OPTPAR_Q_KEY			= 1 << 5,
	MLX5_QP_OPTPAR_RNR_TIMEOUT		= 1 << 6,
	MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH	= 1 << 7,
	MLX5_QP_OPTPAR_SRA_MAX			= 1 << 8,
	MLX5_QP_OPTPAR_RRA_MAX			= 1 << 9,
	MLX5_QP_OPTPAR_PM_STATE			= 1 << 10,
	MLX5_QP_OPTPAR_RETRY_COUNT		= 1 << 12,
	MLX5_QP_OPTPAR_RNR_RETRY		= 1 << 13,
	MLX5_QP_OPTPAR_ACK_TIMEOUT		= 1 << 14,
	MLX5_QP_OPTPAR_PRI_PORT			= 1 << 16,
	MLX5_QP_OPTPAR_SRQN			= 1 << 18,
	MLX5_QP_OPTPAR_CQN_RCV			= 1 << 19,
	MLX5_QP_OPTPAR_DC_HS			= 1 << 20,
	MLX5_QP_OPTPAR_DC_KEY			= 1 << 21,
};

enum mlx5_qp_state {
	MLX5_QP_STATE_RST			= 0,
	MLX5_QP_STATE_INIT			= 1,
	MLX5_QP_STATE_RTR			= 2,
	MLX5_QP_STATE_RTS			= 3,
	MLX5_QP_STATE_SQER			= 4,
	MLX5_QP_STATE_SQD			= 5,
	MLX5_QP_STATE_ERR			= 6,
	MLX5_QP_STATE_SQ_DRAINING		= 7,
	MLX5_QP_STATE_SUSPENDED			= 9,
	MLX5_QP_NUM_STATE,
	MLX5_QP_STATE,
	MLX5_QP_STATE_BAD,
};

enum {
	MLX5_SQ_STATE_NA	= MLX5_SQC_STATE_ERR + 1,
	MLX5_SQ_NUM_STATE	= MLX5_SQ_STATE_NA + 1,
	MLX5_RQ_STATE_NA	= MLX5_RQC_STATE_ERR + 1,
	MLX5_RQ_NUM_STATE	= MLX5_RQ_STATE_NA + 1,
};

enum {
	MLX5_QP_ST_RC				= 0x0,
	MLX5_QP_ST_UC				= 0x1,
	MLX5_QP_ST_UD				= 0x2,
	MLX5_QP_ST_XRC				= 0x3,
	MLX5_QP_ST_MLX				= 0x4,
	MLX5_QP_ST_DCI				= 0x5,
	MLX5_QP_ST_DCT				= 0x6,
	MLX5_QP_ST_QP0				= 0x7,
	MLX5_QP_ST_QP1				= 0x8,
	MLX5_QP_ST_RAW_ETHERTYPE		= 0x9,
	MLX5_QP_ST_RAW_IPV6			= 0xa,
	MLX5_QP_ST_SNIFFER			= 0xb,
	MLX5_QP_ST_SYNC_UMR			= 0xe,
	MLX5_QP_ST_PTP_1588			= 0xd,
	MLX5_QP_ST_REG_UMR			= 0xc,
	MLX5_QP_ST_SW_CNAK			= 0x10,
	MLX5_QP_ST_MAX
};

enum {
	MLX5_NON_ZERO_RQ	= 0x0,
	MLX5_SRQ_RQ		= 0x1,
	MLX5_CRQ_RQ		= 0x2,
	MLX5_ZERO_LEN_RQ	= 0x3
};

enum {
	/* params1 */
	MLX5_QP_BIT_SRE				= 1 << 15,
	MLX5_QP_BIT_SWE				= 1 << 14,
	MLX5_QP_BIT_SAE				= 1 << 13,
	/* params2 */
	MLX5_QP_BIT_RRE				= 1 << 15,
	MLX5_QP_BIT_RWE				= 1 << 14,
	MLX5_QP_BIT_RAE				= 1 << 13,
	MLX5_QP_BIT_RIC				= 1 << 4,
	MLX5_QP_BIT_COLL_SYNC_RQ		= 1 << 2,
	MLX5_QP_BIT_COLL_SYNC_SQ		= 1 << 1,
	MLX5_QP_BIT_COLL_MASTER			= 1 << 0
};
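
/*
 * Example (sketch): the responder access bits above are typically folded
 * into the big-endian params2 word of struct mlx5_qp_context (defined
 * later in this file) while preparing a modify-QP command; qpc here
 * stands for such a context being filled in, e.g.
 *
 *	qpc->params2 |= cpu_to_be32(MLX5_QP_BIT_RWE | MLX5_QP_BIT_RRE |
 *	    MLX5_QP_BIT_RAE);
 */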

enum {
	MLX5_DCT_BIT_RRE		= 1 << 19,
	MLX5_DCT_BIT_RWE		= 1 << 18,
	MLX5_DCT_BIT_RAE		= 1 << 17,
};

enum {
	MLX5_WQE_CTRL_CQ_UPDATE		= 2 << 2,
	MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE	= 3 << 2,
	MLX5_WQE_CTRL_SOLICITED		= 1 << 1,
};

#define	MLX5_SEND_WQE_DS	16
#define	MLX5_SEND_WQE_BB	64
#define MLX5_SEND_WQEBB_NUM_DS	(MLX5_SEND_WQE_BB / MLX5_SEND_WQE_DS)
#define MLX5_WQE_CTRL_QPN_SHIFT 8
#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
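
/*
 * Example (sketch): a send WQE is sized in 16-byte data segments (DS) and
 * posted in 64-byte basic blocks (BB), so a WQE of wqe_size bytes typically
 * occupies
 *
 *	ds_cnt = DIV_ROUND_UP(wqe_size, MLX5_SEND_WQE_DS);
 *	wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
 *
 * where DIV_ROUND_UP is the usual round-up division helper.
 */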

enum {
	MLX5_SEND_WQE_MAX_WQEBBS	= 16,
};

enum {
	MLX5_WQE_FMR_PERM_LOCAL_READ	= 1 << 27,
	MLX5_WQE_FMR_PERM_LOCAL_WRITE	= 1 << 28,
	MLX5_WQE_FMR_PERM_REMOTE_READ	= 1 << 29,
	MLX5_WQE_FMR_PERM_REMOTE_WRITE	= 1 << 30,
	MLX5_WQE_FMR_PERM_ATOMIC	= 1U << 31
};

enum {
	MLX5_FENCE_MODE_NONE			= 0 << 5,
	MLX5_FENCE_MODE_INITIATOR_SMALL		= 1 << 5,
	MLX5_FENCE_MODE_FENCE			= 2 << 5,
	MLX5_FENCE_MODE_STRONG_ORDERING		= 3 << 5,
	MLX5_FENCE_MODE_SMALL_AND_FENCE		= 4 << 5,
};

enum {
	MLX5_RCV_DBR	= 0,
	MLX5_SND_DBR	= 1,
};

enum {
	MLX5_FLAGS_INLINE	= 1 << 7,
	MLX5_FLAGS_CHECK_FREE	= 1 << 5,
};

struct mlx5_wqe_fmr_seg {
	__be32			flags;
	__be32			mem_key;
	__be64			buf_list;
	__be64			start_addr;
	__be64			reg_len;
	__be32			offset;
	__be32			page_size;
	u32			reserved[2];
};

struct mlx5_wqe_ctrl_seg {
	__be32			opmod_idx_opcode;
	__be32			qpn_ds;
	u8			signature;
	u8			rsvd[2];
	u8			fm_ce_se;
	union {
		__be32		imm;
		__be32		general_id;
	};
};

#define MLX5_WQE_CTRL_DS_MASK 0x3f
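
/*
 * Example (sketch): the qpn_ds word of the control segment carries the QP
 * (or SQ) number in its upper bits and the WQE size in data segments in its
 * low six bits, so it is typically built as
 *
 *	ctrl->qpn_ds = cpu_to_be32((qpn << MLX5_WQE_CTRL_QPN_SHIFT) |
 *	    (ds_cnt & MLX5_WQE_CTRL_DS_MASK));
 *
 * while opmod_idx_opcode typically holds the opcode in its low byte with
 * the producer index shifted in above it (MLX5_WQE_CTRL_WQE_INDEX_SHIFT).
 */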

enum {
	MLX5_MLX_FLAG_MASK_VL15 = 0x40,
	MLX5_MLX_FLAG_MASK_SLR	= 0x20,
	MLX5_MLX_FLAG_MASK_ICRC = 0x8,
	MLX5_MLX_FLAG_MASK_FL	= 4
};

struct mlx5_mlx_seg {
	__be32		rsvd0;
	u8		flags;
	u8		stat_rate_sl;
	u8		rsvd1[8];
	__be16		dlid;
};

enum {
	MLX5_ETH_WQE_L3_INNER_CSUM	= 1 << 4,
	MLX5_ETH_WQE_L4_INNER_CSUM	= 1 << 5,
	MLX5_ETH_WQE_L3_CSUM		= 1 << 6,
	MLX5_ETH_WQE_L4_CSUM		= 1 << 7,
};

enum {
	MLX5_ETH_WQE_SWP_INNER_L3_TYPE = 1 << 0,
	MLX5_ETH_WQE_SWP_INNER_L4_TYPE = 1 << 1,
	MLX5_ETH_WQE_SWP_OUTER_L3_TYPE = 1 << 4,
	MLX5_ETH_WQE_SWP_OUTER_L4_TYPE = 1 << 5,
};

enum {
	MLX5_ETH_WQE_FT_META_IPSEC = BIT(0),
};

struct mlx5_wqe_eth_seg {
	u8		swp_outer_l4_offset;
	u8		swp_outer_l3_offset;
	u8		swp_inner_l4_offset;
	u8		swp_inner_l3_offset;
	u8		cs_flags;
	u8		swp_flags;
	__be16		mss;
	__be32		flow_table_metadata;
	union {
		struct {
			__be16		inline_hdr_sz;
			u8		inline_hdr_start[2];
		};
		struct {
			__be16		vlan_cmd;
			__be16		vlan_hdr;
		};
	};
};
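
/*
 * Example (sketch): for transmit checksum offload the Ethernet segment's
 * cs_flags field is typically set from the flags above, e.g.
 *
 *	eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
 *
 * with the inner variants added when an encapsulated packet is offloaded,
 * and the swp_flags/swp_*_offset fields filled in when the software parser
 * (SWP) is used.
 */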

struct mlx5_wqe_xrc_seg {
	__be32			xrc_srqn;
	u8			rsvd[12];
};

struct mlx5_wqe_masked_atomic_seg {
	__be64			swap_add;
	__be64			compare;
	__be64			swap_add_mask;
	__be64			compare_mask;
};

struct mlx5_av {
	union {
		struct {
			__be32	qkey;
			__be32	reserved;
		} qkey;
		__be64	dc_key;
	} key;
	__be32	dqp_dct;
	u8	stat_rate_sl;
	u8	fl_mlid;
	union {
		__be16	rlid;
		__be16	udp_sport;
	};
	u8	reserved0[4];
	u8	rmac[6];
	u8	tclass;
	u8	hop_limit;
	__be32	grh_gid_fl;
	u8	rgid[16];
};

struct mlx5_wqe_datagram_seg {
	struct mlx5_av	av;
};

struct mlx5_wqe_raddr_seg {
	__be64			raddr;
	__be32			rkey;
	u32			reserved;
};

struct mlx5_wqe_atomic_seg {
	__be64			swap_add;
	__be64			compare;
};

struct mlx5_wqe_data_seg {
	__be32			byte_count;
	__be32			lkey;
	__be64			addr;
};
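
/*
 * Example (sketch): a local buffer is typically described by byte-swapping
 * its length, lkey and DMA address into the big-endian data segment, e.g.
 *
 *	dseg->byte_count = cpu_to_be32(len);
 *	dseg->lkey       = cpu_to_be32(lkey);
 *	dseg->addr       = cpu_to_be64(dma_addr);
 *
 * where len, lkey and dma_addr describe the buffer being posted.
 */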

struct mlx5_wqe_umr_ctrl_seg {
	u8		flags;
	u8		rsvd0[3];
	__be16		klm_octowords;
	__be16		bsf_octowords;
	__be64		mkey_mask;
	u8		rsvd1[32];
};

struct mlx5_seg_set_psv {
	__be32		psv_num;
	__be16		syndrome;
	__be16		status;
	__be32		transient_sig;
	__be32		ref_tag;
};

struct mlx5_wqe_qos_remap_seg {
	u8		rsvd0[4];
	u8		rsvd1[4];
	__be32		qos_handle;
	__be32		queue_handle;
};

struct mlx5_seg_get_psv {
	u8		rsvd[19];
	u8		num_psv;
	__be32		l_key;
	__be64		va;
	__be32		psv_index[4];
};

struct mlx5_seg_check_psv {
	u8		rsvd0[2];
	__be16		err_coalescing_op;
	u8		rsvd1[2];
	__be16		xport_err_op;
	u8		rsvd2[2];
	__be16		xport_err_mask;
	u8		rsvd3[7];
	u8		num_psv;
	__be32		l_key;
	__be64		va;
	__be32		psv_index[4];
};

struct mlx5_rwqe_sig {
	u8	rsvd0[4];
	u8	signature;
	u8	rsvd1[11];
};

struct mlx5_wqe_signature_seg {
	u8	rsvd0[4];
	u8	signature;
	u8	rsvd1[11];
};

struct mlx5_wqe_inline_seg {
	__be32	byte_count;
};

enum mlx5_sig_type {
	MLX5_DIF_CRC = 0x1,
	MLX5_DIF_IPCS = 0x2,
};

struct mlx5_bsf_inl {
	__be16		vld_refresh;
	__be16		dif_apptag;
	__be32		dif_reftag;
	u8		sig_type;
	u8		rp_inv_seed;
	u8		rsvd[3];
	u8		dif_inc_ref_guard_check;
	__be16		dif_app_bitmask_check;
};

struct mlx5_bsf {
	struct mlx5_bsf_basic {
		u8		bsf_size_sbs;
		u8		check_byte_mask;
		union {
			u8	copy_byte_mask;
			u8	bs_selector;
			u8	rsvd_wflags;
		} wire;
		union {
			u8	bs_selector;
			u8	rsvd_mflags;
		} mem;
		__be32		raw_data_size;
		__be32		w_bfs_psv;
		__be32		m_bfs_psv;
	} basic;
	struct mlx5_bsf_ext {
		__be32		t_init_gen_pro_size;
		__be32		rsvd_epi_size;
		__be32		w_tfs_psv;
		__be32		m_tfs_psv;
	} ext;
	struct mlx5_bsf_inl	w_inl;
	struct mlx5_bsf_inl	m_inl;
};

struct mlx5_klm {
	__be32		bcount;
	__be32		key;
	__be64		va;
};

struct mlx5_stride_block_entry {
	__be16		stride;
	__be16		bcount;
	__be32		key;
	__be64		va;
};

struct mlx5_stride_block_ctrl_seg {
	__be32		bcount_per_cycle;
	__be32		op;
	__be32		repeat_count;
	u16		rsvd;
	__be16		num_entries;
};

enum mlx5_pagefault_flags {
	MLX5_PFAULT_REQUESTOR = 1 << 0,
	MLX5_PFAULT_WRITE     = 1 << 1,
	MLX5_PFAULT_RDMA      = 1 << 2,
};

/* Contains the details of a pagefault. */
struct mlx5_pagefault {
	u32			bytes_committed;
	u8			event_subtype;
	enum mlx5_pagefault_flags flags;
	union {
		/* Initiator or send message responder pagefault details. */
		struct {
			/* Received packet size, only valid for responders. */
			u32	packet_size;
			/*
			 * WQE index. Refers to either the send queue or
			 * receive queue, according to event_subtype.
			 */
			u16	wqe_index;
		} wqe;
		/* RDMA responder pagefault details */
		struct {
			u32	r_key;
			/*
			 * Received packet size, minimal size page fault
			 * resolution required for forward progress.
			 */
			u32	packet_size;
			u32	rdma_op_len;
			u64	rdma_va;
		} rdma;
	};
};

struct mlx5_core_qp {
	struct mlx5_core_rsc_common	common; /* must be first */
	void (*event)		(struct mlx5_core_qp *, int);
	int			qpn;
	struct mlx5_rsc_debug	*dbg;
	int			pid;
	u16			uid;
};

struct mlx5_qp_path {
	u8			fl_free_ar;
	u8			rsvd3;
	__be16			pkey_index;
	u8			rsvd0;
	u8			grh_mlid;
	__be16			rlid;
	u8			ackto_lt;
	u8			mgid_index;
	u8			static_rate;
	u8			hop_limit;
	__be32			tclass_flowlabel;
	union {
		u8		rgid[16];
		u8		rip[16];
	};
	u8			f_dscp_ecn_prio;
	u8			ecn_dscp;
	__be16			udp_sport;
	u8			dci_cfi_prio_sl;
	u8			port;
	u8			rmac[6];
};

struct mlx5_qp_context {
	__be32			flags;
	__be32			flags_pd;
	u8			mtu_msgmax;
	u8			rq_size_stride;
	__be16			sq_crq_size;
	__be32			qp_counter_set_usr_page;
	__be32			wire_qpn;
	__be32			log_pg_sz_remote_qpn;
	struct			mlx5_qp_path pri_path;
	struct			mlx5_qp_path alt_path;
	__be32			params1;
	u8			reserved2[4];
	__be32			next_send_psn;
	__be32			cqn_send;
	__be32			deth_sqpn;
	u8			reserved3[4];
	__be32			last_acked_psn;
	__be32			ssn;
	__be32			params2;
	__be32			rnr_nextrecvpsn;
	__be32			xrcd;
	__be32			cqn_recv;
	__be64			db_rec_addr;
	__be32			qkey;
	__be32			rq_type_srqn;
	__be32			rmsn;
	__be16			hw_sq_wqe_counter;
	__be16			sw_sq_wqe_counter;
	__be16			hw_rcyclic_byte_counter;
	__be16			hw_rq_counter;
	__be16			sw_rcyclic_byte_counter;
	__be16			sw_rq_counter;
	u8			rsvd0[5];
	u8			cgs;
	u8			cs_req;
	u8			cs_res;
	__be64			dc_access_key;
	u8			rsvd1[24];
};

struct mlx5_dct_context {
	u8			state;
	u8			rsvd0[7];
	__be32			cqn;
	__be32			flags;
	u8			rsvd1;
	u8			cs_res;
	u8			min_rnr;
	u8			rsvd2;
	__be32			srqn;
	__be32			pdn;
	__be32			tclass_flow_label;
	__be64			access_key;
	u8			mtu;
	u8			port;
	__be16			pkey_index;
	u8			rsvd4;
	u8			mgid_index;
	u8			rsvd5;
	u8			hop_limit;
	__be32			access_violations;
	u8			rsvd[12];
};

static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn)
{
	return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
}

static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key)
{
	return radix_tree_lookup(&dev->priv.mr_table.tree, key);
}
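
/*
 * Note: these helpers only walk the radix trees in dev->priv and take no
 * lock themselves; callers are expected to provide whatever synchronization
 * the tables require (typically the corresponding table lock).
 */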

int mlx5_core_create_qp(struct mlx5_core_dev *dev,
			struct mlx5_core_qp *qp,
			u32 *in,
			int inlen);
int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
			u32 opt_param_mask, void *qpc,
			struct mlx5_core_qp *qp);
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
			 struct mlx5_core_qp *qp);
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
		       u32 *out, int outlen);
int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
			u32 *out, int outlen);
int mlx5_core_arm_dct(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct);

int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
int mlx5_core_create_dct(struct mlx5_core_dev *dev,
			 struct mlx5_core_dct *dct,
			 u32 *in, int inlen,
			 u32 *out, int outlen);
int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
			  struct mlx5_core_dct *dct);
int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
				struct mlx5_core_qp *rq);
void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
				  struct mlx5_core_qp *rq);
int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
				struct mlx5_core_qp *sq);
void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
				  struct mlx5_core_qp *sq);
void mlx5_init_qp_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev);
int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
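
/*
 * Example (sketch): typical QP life cycle using the helpers above.  The
 * mailbox buffers (in/qpc) and the MLX5_CMD_OP_*2*_QP modify opcodes are
 * assumed to come from mlx5_ifc.h; they are not defined in this header.
 *
 *	err = mlx5_core_create_qp(dev, &qp, in, inlen);
 *	...
 *	err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_RST2INIT_QP,
 *	    opt_param_mask, qpc, &qp);
 *	...
 *	err = mlx5_core_destroy_qp(dev, &qp);
 */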

static inline const char *mlx5_qp_type_str(int type)
{
	switch (type) {
	case MLX5_QP_ST_RC: return "RC";
	case MLX5_QP_ST_UC: return "UC";
	case MLX5_QP_ST_UD: return "UD";
	case MLX5_QP_ST_XRC: return "XRC";
	case MLX5_QP_ST_MLX: return "MLX";
	case MLX5_QP_ST_DCI: return "DCI";
	case MLX5_QP_ST_QP0: return "QP0";
	case MLX5_QP_ST_QP1: return "QP1";
	case MLX5_QP_ST_RAW_ETHERTYPE: return "RAW_ETHERTYPE";
	case MLX5_QP_ST_RAW_IPV6: return "RAW_IPV6";
	case MLX5_QP_ST_SNIFFER: return "SNIFFER";
	case MLX5_QP_ST_SYNC_UMR: return "SYNC_UMR";
	case MLX5_QP_ST_PTP_1588: return "PTP_1588";
	case MLX5_QP_ST_REG_UMR: return "REG_UMR";
	case MLX5_QP_ST_SW_CNAK: return "DC_CNAK";
	default: return "Invalid transport type";
	}
}

static inline const char *mlx5_qp_state_str(int state)
{
	switch (state) {
	case MLX5_QP_STATE_RST:
		return "RST";
	case MLX5_QP_STATE_INIT:
		return "INIT";
	case MLX5_QP_STATE_RTR:
		return "RTR";
	case MLX5_QP_STATE_RTS:
		return "RTS";
	case MLX5_QP_STATE_SQER:
		return "SQER";
	case MLX5_QP_STATE_SQD:
		return "SQD";
	case MLX5_QP_STATE_ERR:
		return "ERR";
	case MLX5_QP_STATE_SQ_DRAINING:
		return "SQ_DRAINING";
	case MLX5_QP_STATE_SUSPENDED:
		return "SUSPENDED";
	default: return "Invalid QP state";
	}
}

#endif /* MLX5_QP_H */