xref: /linux/drivers/infiniband/hw/irdma/verbs.h (revision 0be3ff0c)
1 /* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
2 /* Copyright (c) 2015 - 2021 Intel Corporation */
3 #ifndef IRDMA_VERBS_H
4 #define IRDMA_VERBS_H
5 
/* Number of physical page addresses cached in irdma_mr.pgaddrmem */
#define IRDMA_MAX_SAVED_PHY_PGADDR	4

#define IRDMA_PKEY_TBL_SZ		1	/* single-entry P_Key table */
#define IRDMA_DEFAULT_PKEY		0xFFFF	/* IB full-membership default P_Key */
10 
/* Driver private per-process user context, wrapping the core ib_ucontext */
struct irdma_ucontext {
	struct ib_ucontext ibucontext;	/* embedded core verbs user context */
	struct irdma_device *iwdev;	/* owning irdma device */
	struct rdma_user_mmap_entry *db_mmap_entry;	/* doorbell page mmap entry */
	struct list_head cq_reg_mem_list;	/* user-registered CQ memory regions */
	spinlock_t cq_reg_mem_list_lock; /* protect CQ memory list */
	struct list_head qp_reg_mem_list;	/* user-registered QP memory regions */
	spinlock_t qp_reg_mem_list_lock; /* protect QP memory list */
	int abi_ver;		/* user/kernel ABI version negotiated at alloc */
	bool legacy_mode;	/* NOTE(review): presumably pre-GEN2/compat behavior - confirm */
};
22 
/* Protection domain: core ib_pd plus the HW (sc) PD state */
struct irdma_pd {
	struct ib_pd ibpd;	/* embedded core verbs PD */
	struct irdma_sc_pd sc_pd;	/* lower-level HW PD context */
};
27 
/* IPv4/IPv6 socket address overlay used for GID-derived addresses */
union irdma_sockaddr {
	struct sockaddr_in saddr_in;	/* IPv4 form */
	struct sockaddr_in6 saddr_in6;	/* IPv6 form */
};
32 
/* Address vector: cached attributes resolved for an address handle */
struct irdma_av {
	u8 macaddr[16];		/* destination MAC (padded buffer) */
	struct rdma_ah_attr attrs;	/* core AH attributes this AV was built from */
	union irdma_sockaddr sgid_addr;	/* source address derived from SGID */
	union irdma_sockaddr dgid_addr;	/* destination address derived from DGID */
	u8 net_type;		/* rdma_network_type of the GIDs - TODO confirm */
};
40 
/* Address handle: core ib_ah plus HW AH state and cache linkage */
struct irdma_ah {
	struct ib_ah ibah;	/* embedded core verbs AH */
	struct irdma_sc_ah sc_ah;	/* lower-level HW AH context */
	struct irdma_pd *pd;	/* PD this AH belongs to */
	struct irdma_av av;	/* resolved address vector */
	u8 sgid_index;		/* source GID table index */
	union ib_gid dgid;	/* destination GID */
	struct hlist_node list;	/* linkage in the AH cache hash list */
	refcount_t refcnt;	/* users of a cached (parent) AH */
	struct irdma_ah *parent_ah; /* AH from cached list */
};
52 
/*
 * Physical buffer list entry: either a PBLE pool index or a direct
 * DMA address, depending on how the MR/queue memory was mapped.
 */
struct irdma_hmc_pble {
	union {
		u32 idx;	/* index into the PBLE pool */
		dma_addr_t addr;	/* direct DMA address */
	};
};
59 
/* User CQ memory registration info */
struct irdma_cq_mr {
	struct irdma_hmc_pble cq_pbl;	/* PBLE for the CQ ring buffer */
	dma_addr_t shadow;	/* DMA address of the CQ shadow area */
	bool split;		/* NOTE(review): shadow split from CQ buffer - confirm */
};
65 
/* User QP memory registration info */
struct irdma_qp_mr {
	struct irdma_hmc_pble sq_pbl;	/* PBLE for the send queue */
	struct irdma_hmc_pble rq_pbl;	/* PBLE for the receive queue */
	dma_addr_t shadow;	/* DMA address of the QP shadow area */
	struct page *sq_page;	/* page backing the SQ - used for push mode? TODO confirm */
};
72 
/* Old CQ buffer kept on the resize list until its completions drain */
struct irdma_cq_buf {
	struct irdma_dma_mem kmem_buf;	/* kernel DMA memory for the buffer */
	struct irdma_cq_uk cq_uk;	/* user-kernel CQ state for polling this buffer */
	struct irdma_hw *hw;	/* HW handle used to free the buffer */
	struct list_head list;	/* linkage on irdma_cq.resize_list */
	struct work_struct work;	/* deferred free work item */
};
80 
/*
 * Physical buffer list describing a user registration; holds either
 * QP or CQ registration info and the PBLE allocation backing it.
 */
struct irdma_pbl {
	struct list_head list;	/* linkage on a ucontext qp/cq_reg_mem_list */
	union {
		struct irdma_qp_mr qp_mr;	/* valid for QP registrations */
		struct irdma_cq_mr cq_mr;	/* valid for CQ registrations */
	};

	bool pbl_allocated:1;	/* PBLE resources were allocated from the pool */
	bool on_list:1;		/* currently linked on a registration list */
	u64 user_base;		/* user virtual base address of the region */
	struct irdma_pble_alloc pble_alloc;	/* PBLE pool allocation */
	struct irdma_mr *iwmr;	/* owning memory region */
};
94 
/* Memory region / memory window wrapper (the two share this struct) */
struct irdma_mr {
	union {
		struct ib_mr ibmr;	/* when used as an MR */
		struct ib_mw ibmw;	/* when used as an MW */
	};
	struct ib_umem *region;	/* pinned user memory; NULL for kernel/phys MRs */
	u16 type;	/* registration type - TODO confirm enum used */
	u32 page_cnt;	/* number of pages backing the region */
	u64 page_size;	/* page size used for the mapping */
	u32 npages;	/* pages mapped so far (fast-reg map_mr_sg) - confirm */
	u32 stag;	/* HW steering tag (lkey/rkey) */
	u64 len;	/* length of the region in bytes */
	u64 pgaddrmem[IRDMA_MAX_SAVED_PHY_PGADDR];	/* small inline page-address cache */
	struct irdma_pbl iwpbl;	/* physical buffer list for this MR */
};
110 
/* Completion queue: core ib_cq plus HW state and kernel-mode buffers */
struct irdma_cq {
	struct ib_cq ibcq;	/* embedded core verbs CQ */
	struct irdma_sc_cq sc_cq;	/* lower-level HW CQ context */
	u16 cq_head;	/* consumer index - kernel polling state */
	u16 cq_size;	/* number of CQEs */
	u16 cq_num;	/* HW CQ id */
	bool user_mode;	/* CQ buffers owned by userspace */
	bool armed;	/* notification requested and not yet delivered */
	enum irdma_cmpl_notify last_notify;	/* last requested notify type (solicited/all) */
	u32 polled_cmpls;	/* completions polled - used for re-arm pacing? TODO confirm */
	u32 cq_mem_size;	/* size of the CQ ring allocation */
	struct irdma_dma_mem kmem;	/* kernel-mode CQ ring memory */
	struct irdma_dma_mem kmem_shadow;	/* kernel-mode shadow area memory */
	spinlock_t lock; /* for poll cq */
	struct irdma_pbl *iwpbl;	/* user registration for the CQ ring */
	struct irdma_pbl *iwpbl_shadow;	/* user registration for the shadow area */
	struct list_head resize_list;	/* old buffers (irdma_cq_buf) pending drain */
	struct irdma_cq_poll_info cur_cqe;	/* scratch CQE info for the current poll */
};
130 
/* Deferred disconnect work for a QP */
struct disconn_work {
	struct work_struct work;	/* workqueue item */
	struct irdma_qp *iwqp;	/* QP being disconnected; reference held by submitter - confirm */
};
135 
136 struct iw_cm_id;
137 
/* Kernel-mode QP resources: queue memory and work-request id tracking */
struct irdma_qp_kmode {
	struct irdma_dma_mem dma_mem;	/* DMA memory backing SQ/RQ */
	struct irdma_sq_uk_wr_trk_info *sq_wrid_mem;	/* per-SQ-WQE wr_id tracking */
	u64 *rq_wrid_mem;	/* per-RQ-WQE wr_id array */
};
143 
/*
 * Queue pair: core ib_qp plus HW context, connection-management state
 * (iWARP), and mode-specific offload info (iWARP vs RoCE union members).
 */
struct irdma_qp {
	struct ib_qp ibqp;	/* embedded core verbs QP */
	struct irdma_sc_qp sc_qp;	/* lower-level HW QP context */
	struct irdma_device *iwdev;	/* owning device */
	struct irdma_cq *iwscq;	/* send CQ */
	struct irdma_cq *iwrcq;	/* receive CQ */
	struct irdma_pd *iwpd;	/* protection domain */
	struct rdma_user_mmap_entry *push_wqe_mmap_entry;	/* push-mode WQE page mmap */
	struct rdma_user_mmap_entry *push_db_mmap_entry;	/* push-mode doorbell mmap */
	struct irdma_qp_host_ctx_info ctx_info;	/* info for building the HW QP context */
	/* transport-specific offload info; which member is valid depends on
	 * whether the device runs iWARP or RoCE
	 */
	union {
		struct irdma_iwarp_offload_info iwarp_info;
		struct irdma_roce_offload_info roce_info;
	};

	/* L4 offload info: TCP for iWARP, UDP for RoCEv2 */
	union {
		struct irdma_tcp_offload_info tcp_info;
		struct irdma_udp_offload_info udp_info;
	};

	struct irdma_ah roce_ah;	/* AH used for RoCE connected QPs */
	struct list_head teardown_entry;	/* linkage for teardown processing */
	refcount_t refcnt;	/* QP reference count */
	struct iw_cm_id *cm_id;	/* iWARP CM id for this connection */
	struct irdma_cm_node *cm_node;	/* CM node for connection state */
	struct ib_mr *lsmm_mr;	/* MR for the last streaming-mode message - confirm */
	atomic_t hw_mod_qp_pend;	/* HW modify-QP in flight */
	enum ib_qp_state ibqp_state;	/* last state reported to the core */
	u32 qp_mem_size;	/* size of QP memory allocation */
	u32 last_aeq;	/* last async event code seen for this QP */
	int max_send_wr;	/* SQ depth advertised to the consumer */
	int max_recv_wr;	/* RQ depth advertised to the consumer */
	atomic_t close_timer_started;	/* close timer armed (avoid double start) */
	spinlock_t lock; /* serialize posting WRs to SQ/RQ */
	struct irdma_qp_context *iwqp_context;	/* host copy of the HW QP context */
	void *pbl_vbase;	/* virtual base of the QP PBL */
	dma_addr_t pbl_pbase;	/* DMA base of the QP PBL */
	struct page *page;	/* page used for push mode / mapping - TODO confirm */
	u8 active_conn : 1;	/* we initiated the connection (active side) */
	u8 user_mode : 1;	/* QP created by a user-space consumer */
	u8 hte_added : 1;	/* inserted into the HW hash table entry */
	u8 flush_issued : 1;	/* flush WQEs have been issued */
	u8 sig_all : 1;		/* generate a completion for every SQ WR */
	u8 pau_mode : 1;	/* NOTE(review): packet-alignment unit mode? confirm */
	u8 rsvd : 1;
	u8 iwarp_state;		/* current iWARP QP state (driver encoding) */
	u16 term_sq_flush_code;	/* flush code to report on SQ after terminate */
	u16 term_rq_flush_code;	/* flush code to report on RQ after terminate */
	u8 hw_iwarp_state;	/* iWARP state as programmed in HW */
	u8 hw_tcp_state;	/* TCP state as reported by HW */
	struct irdma_qp_kmode kqp;	/* kernel-mode queue resources */
	struct irdma_dma_mem host_ctx;	/* DMA memory for the host QP context */
	struct timer_list terminate_timer;	/* bounds the terminate handshake */
	struct irdma_pbl *iwpbl;	/* user registration for QP memory */
	struct irdma_dma_mem q2_ctx_mem;	/* Q2 + QP context DMA memory */
	struct irdma_dma_mem ietf_mem;	/* IETF frame memory for MPA negotiation */
	struct completion free_qp;	/* signaled when the QP can be freed */
	wait_queue_head_t waitq;	/* generic wait queue for QP events */
	wait_queue_head_t mod_qp_waitq;	/* waiters for modify-QP completion */
	u8 rts_ae_rcvd;		/* RTS async event received */
};
205 
/* Caching attribute for user mmaps of BAR space */
enum irdma_mmap_flag {
	IRDMA_MMAP_IO_NC,	/* map non-cached */
	IRDMA_MMAP_IO_WC,	/* map write-combined */
};
210 
/* Driver wrapper around a core rdma mmap entry for BAR mappings */
struct irdma_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;	/* embedded core entry */
	u64 bar_offset;	/* offset into the device BAR to map */
	u8 mmap_flag;	/* enum irdma_mmap_flag caching attribute */
};
216 
217 static inline u16 irdma_fw_major_ver(struct irdma_sc_dev *dev)
218 {
219 	return (u16)FIELD_GET(IRDMA_FW_VER_MAJOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
220 }
221 
222 static inline u16 irdma_fw_minor_ver(struct irdma_sc_dev *dev)
223 {
224 	return (u16)FIELD_GET(IRDMA_FW_VER_MINOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
225 }
226 
/* Derive a multicast MAC address from an IPv4/IPv6 multicast address */
void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4);
/* Register/unregister this device with the RDMA core */
int irdma_ib_register_device(struct irdma_device *iwdev);
void irdma_ib_unregister_device(struct irdma_device *iwdev);
/* Release the ib_device allocation and associated driver resources */
void irdma_ib_dealloc_device(struct ib_device *ibdev);
/* Dispatch a QP async event to the consumer's event handler */
void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event);
232 #endif /* IRDMA_VERBS_H */
233