/* xref: /linux/include/rdma/ib_hdrs.h (revision 0be3ff0c) */
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright(c) 2016 - 2018 Intel Corporation.
 */

#ifndef IB_HDRS_H
#define IB_HDRS_H

#include <linux/types.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>

#define IB_SEQ_NAK	(3 << 29)

/* AETH NAK opcode values */
#define IB_RNR_NAK                      0x20
#define IB_NAK_PSN_ERROR                0x60
#define IB_NAK_INVALID_REQUEST          0x61
#define IB_NAK_REMOTE_ACCESS_ERROR      0x62
#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
#define IB_NAK_INVALID_RD_REQUEST       0x64

#define IB_BTH_REQ_ACK		BIT(31)
#define IB_BTH_SOLICITED	BIT(23)
#define IB_BTH_MIG_REQ		BIT(22)
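
/*
 * Illustrative sketch (not part of the upstream header): how a requester
 * might fold these single-bit flags into the BTH dwords before byte-swapping
 * them into bth[].  IB_BTH_SOLICITED and IB_BTH_MIG_REQ live in bth[0];
 * IB_BTH_REQ_ACK sits above the 24-bit PSN in bth[2].  The helper name,
 * parameters, and the literal opcode shift are assumptions of this sketch.
 */
static inline void ib_example_set_bth_bits(__be32 *bth, u32 opcode, u16 pkey,
					   u32 psn, bool solicited, bool mig,
					   bool ack_req)
{
	u32 bth0 = (opcode << 24) | pkey;	/* opcode occupies bits 31:24 */
	u32 bth2 = psn & 0xffffff;		/* 24-bit PSN */

	if (solicited)
		bth0 |= IB_BTH_SOLICITED;
	if (mig)
		bth0 |= IB_BTH_MIG_REQ;
	if (ack_req)
		bth2 |= IB_BTH_REQ_ACK;

	bth[0] = cpu_to_be32(bth0);
	bth[2] = cpu_to_be32(bth2);
}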

#define IB_GRH_VERSION		6
#define IB_GRH_VERSION_MASK	0xF
#define IB_GRH_VERSION_SHIFT	28
#define IB_GRH_TCLASS_MASK	0xFF
#define IB_GRH_TCLASS_SHIFT	20
#define IB_GRH_FLOW_MASK	0xFFFFF
#define IB_GRH_FLOW_SHIFT	0
#define IB_GRH_NEXT_HDR		0x1B
#define IB_FECN_SHIFT 31
#define IB_FECN_MASK 1
#define IB_FECN_SMASK BIT(IB_FECN_SHIFT)
#define IB_BECN_SHIFT 30
#define IB_BECN_MASK 1
#define IB_BECN_SMASK BIT(IB_BECN_SHIFT)
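
/*
 * Illustrative sketch (not part of the upstream header): the GRH masks above
 * carve up the big-endian version_tclass_flow word of struct ib_grh from
 * <rdma/ib_verbs.h> (version in bits 31:28, traffic class in 27:20, flow
 * label in 19:0), while IB_FECN_SMASK/IB_BECN_SMASK select the congestion
 * bits that ib_bth_get_fecn()/ib_bth_get_becn() below test in bth[1].  The
 * helper names here are hypothetical.
 */
static inline u8 ib_example_grh_tclass(const struct ib_grh *grh)
{
	return (be32_to_cpu(grh->version_tclass_flow) >> IB_GRH_TCLASS_SHIFT) &
	       IB_GRH_TCLASS_MASK;
}

static inline u32 ib_example_grh_flow_label(const struct ib_grh *grh)
{
	return (be32_to_cpu(grh->version_tclass_flow) >> IB_GRH_FLOW_SHIFT) &
	       IB_GRH_FLOW_MASK;
}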

#define IB_AETH_CREDIT_SHIFT	24
#define IB_AETH_CREDIT_MASK	0x1F
#define IB_AETH_CREDIT_INVAL	0x1F
#define IB_AETH_NAK_SHIFT	29
#define IB_MSN_MASK		0xFFFFFF
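
/*
 * Illustrative sketch (not part of the upstream header): an AETH dword is an
 * 8-bit syndrome over a 24-bit MSN.  The top three bits of the syndrome give
 * its type (IB_SEQ_NAK for a NAK), the low five bits carry the credit count,
 * and IB_MSN_MASK recovers the MSN.  The helper names are hypothetical.
 */
static inline bool ib_example_aeth_is_nak(u32 aeth)
{
	/* top three bits select ACK / RNR NAK / NAK */
	return (aeth >> IB_AETH_NAK_SHIFT) == (IB_SEQ_NAK >> IB_AETH_NAK_SHIFT);
}

static inline u8 ib_example_aeth_credit(u32 aeth)
{
	return (aeth >> IB_AETH_CREDIT_SHIFT) & IB_AETH_CREDIT_MASK;
}

static inline u32 ib_example_aeth_msn(u32 aeth)
{
	return aeth & IB_MSN_MASK;
}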

struct ib_reth {
	__be64 vaddr;        /* potentially unaligned */
	__be32 rkey;
	__be32 length;
} __packed;

struct ib_atomic_eth {
	__be64 vaddr;        /* potentially unaligned */
	__be32 rkey;
	__be64 swap_data;    /* potentially unaligned */
	__be64 compare_data; /* potentially unaligned */
} __packed;

#include <rdma/tid_rdma_defs.h>

union ib_ehdrs {
	struct {
		__be32 deth[2];
		__be32 imm_data;
	} ud;
	struct {
		struct ib_reth reth;
		__be32 imm_data;
	} rc;
	struct {
		__be32 aeth;
		__be64 atomic_ack_eth; /* potentially unaligned */
	} __packed at;
	__be32 imm_data;
	__be32 aeth;
	__be32 ieth;
	struct ib_atomic_eth atomic_eth;
	/* TID RDMA headers */
	union {
		struct tid_rdma_read_req r_req;
		struct tid_rdma_read_resp r_rsp;
		struct tid_rdma_write_req w_req;
		struct tid_rdma_write_resp w_rsp;
		struct tid_rdma_write_data w_data;
		struct tid_rdma_resync resync;
		struct tid_rdma_ack ack;
	} tid_rdma;
}  __packed;

struct ib_other_headers {
	__be32 bth[3];
	union ib_ehdrs u;
} __packed;

struct ib_header {
	__be16 lrh[4];
	union {
		struct {
			struct ib_grh grh;
			struct ib_other_headers oth;
		} l;
		struct ib_other_headers oth;
	} u;
} __packed;
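
/*
 * Illustrative sketch (not part of the upstream header): a 9B packet carries
 * either LRH + BTH (u.oth) or LRH + GRH + BTH (u.l), so receive paths pick
 * the right view once the LNH field has been decoded.  The helper name is
 * hypothetical.
 */
static inline struct ib_other_headers *
ib_example_get_oth(struct ib_header *hdr, bool has_grh)
{
	return has_grh ? &hdr->u.l.oth : &hdr->u.oth;
}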

/* accessors for unaligned __be64 items */

static inline u64 ib_u64_get(__be64 *p)
{
	return get_unaligned_be64(p);
}

static inline void ib_u64_put(u64 val, __be64 *p)
{
	put_unaligned_be64(val, p);
}

static inline u64 get_ib_reth_vaddr(struct ib_reth *reth)
{
	return ib_u64_get(&reth->vaddr);
}

static inline void put_ib_reth_vaddr(u64 val, struct ib_reth *reth)
{
	ib_u64_put(val, &reth->vaddr);
}

static inline u64 get_ib_ateth_vaddr(struct ib_atomic_eth *ateth)
{
	return ib_u64_get(&ateth->vaddr);
}

static inline void put_ib_ateth_vaddr(u64 val, struct ib_atomic_eth *ateth)
{
	ib_u64_put(val, &ateth->vaddr);
}

static inline u64 get_ib_ateth_swap(struct ib_atomic_eth *ateth)
{
	return ib_u64_get(&ateth->swap_data);
}

static inline void put_ib_ateth_swap(u64 val, struct ib_atomic_eth *ateth)
{
	ib_u64_put(val, &ateth->swap_data);
}

static inline u64 get_ib_ateth_compare(struct ib_atomic_eth *ateth)
{
	return ib_u64_get(&ateth->compare_data);
}

static inline void put_ib_ateth_compare(u64 val, struct ib_atomic_eth *ateth)
{
	ib_u64_put(val, &ateth->compare_data);
}
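
/*
 * Illustrative sketch (not part of the upstream header): a responder handling
 * an RC RDMA WRITE would pull the potentially unaligned virtual address out
 * of the RETH with the accessor above, and the naturally aligned fields with
 * plain be32_to_cpu().  The helper name is hypothetical.
 */
static inline void ib_example_parse_reth(struct ib_other_headers *ohdr,
					 u64 *vaddr, u32 *rkey, u32 *length)
{
	struct ib_reth *reth = &ohdr->u.rc.reth;

	*vaddr = get_ib_reth_vaddr(reth);	/* unaligned-safe read */
	*rkey = be32_to_cpu(reth->rkey);
	*length = be32_to_cpu(reth->length);
}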

/*
 * 9B/IB Packet Format
 */
#define IB_LNH_MASK		3
#define IB_SC_MASK		0xf
#define IB_SC_SHIFT		12
#define IB_SC5_MASK		0x10
#define IB_SL_MASK		0xf
#define IB_SL_SHIFT		4
#define IB_LVER_MASK	0xf
#define IB_LVER_SHIFT	8

static inline u8 ib_get_lnh(struct ib_header *hdr)
{
	return (be16_to_cpu(hdr->lrh[0]) & IB_LNH_MASK);
}

static inline u8 ib_get_sc(struct ib_header *hdr)
{
	return ((be16_to_cpu(hdr->lrh[0]) >> IB_SC_SHIFT) & IB_SC_MASK);
}

static inline bool ib_is_sc5(u16 sc5)
{
	return !!(sc5 & IB_SC5_MASK);
}

static inline u8 ib_get_sl(struct ib_header *hdr)
{
	return ((be16_to_cpu(hdr->lrh[0]) >> IB_SL_SHIFT) & IB_SL_MASK);
}

static inline u16 ib_get_dlid(struct ib_header *hdr)
{
	return (be16_to_cpu(hdr->lrh[1]));
}

static inline u16 ib_get_slid(struct ib_header *hdr)
{
	return (be16_to_cpu(hdr->lrh[3]));
}

static inline u8 ib_get_lver(struct ib_header *hdr)
{
	return (u8)((be16_to_cpu(hdr->lrh[0]) >> IB_LVER_SHIFT) &
		   IB_LVER_MASK);
}

static inline u32 ib_get_qkey(struct ib_other_headers *ohdr)
{
	return be32_to_cpu(ohdr->u.ud.deth[0]);
}

static inline u32 ib_get_sqpn(struct ib_other_headers *ohdr)
{
	return ((be32_to_cpu(ohdr->u.ud.deth[1])) & IB_QPN_MASK);
}
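
/*
 * Illustrative sketch (not part of the upstream header): decoding the LRH of
 * a received 9B packet with the accessors above.  An LNH value of 3 means a
 * GRH follows the LRH and 2 means the BTH follows directly; that encoding
 * comes from the IBTA spec, not from a define in this file.  The helper name
 * is hypothetical.
 */
static inline struct ib_other_headers *
ib_example_parse_lrh(struct ib_header *hdr, u16 *slid, u16 *dlid, u8 *sl)
{
	bool has_grh = ib_get_lnh(hdr) == 3;

	*slid = ib_get_slid(hdr);
	*dlid = ib_get_dlid(hdr);
	*sl = ib_get_sl(hdr);

	return has_grh ? &hdr->u.l.oth : &hdr->u.oth;
}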

/*
 * BTH
 */
#define IB_BTH_OPCODE_MASK	0xff
#define IB_BTH_OPCODE_SHIFT	24
#define IB_BTH_PAD_MASK	3
#define IB_BTH_PKEY_MASK	0xffff
#define IB_BTH_PAD_SHIFT	20
#define IB_BTH_A_MASK		1
#define IB_BTH_A_SHIFT		31
#define IB_BTH_M_MASK		1
#define IB_BTH_M_SHIFT		22
#define IB_BTH_SE_MASK		1
#define IB_BTH_SE_SHIFT	23
#define IB_BTH_TVER_MASK	0xf
#define IB_BTH_TVER_SHIFT	16
#define IB_BTH_OPCODE_CNP	0x81

static inline u8 ib_bth_get_pad(struct ib_other_headers *ohdr)
{
	return ((be32_to_cpu(ohdr->bth[0]) >> IB_BTH_PAD_SHIFT) &
		   IB_BTH_PAD_MASK);
}

static inline u16 ib_bth_get_pkey(struct ib_other_headers *ohdr)
{
	return (be32_to_cpu(ohdr->bth[0]) & IB_BTH_PKEY_MASK);
}

static inline u8 ib_bth_get_opcode(struct ib_other_headers *ohdr)
{
	return ((be32_to_cpu(ohdr->bth[0]) >> IB_BTH_OPCODE_SHIFT) &
		   IB_BTH_OPCODE_MASK);
}

static inline u8 ib_bth_get_ackreq(struct ib_other_headers *ohdr)
{
	return (u8)((be32_to_cpu(ohdr->bth[2]) >> IB_BTH_A_SHIFT) &
		   IB_BTH_A_MASK);
}

static inline u8 ib_bth_get_migreq(struct ib_other_headers *ohdr)
{
	return (u8)((be32_to_cpu(ohdr->bth[0]) >> IB_BTH_M_SHIFT) &
		    IB_BTH_M_MASK);
}

static inline u8 ib_bth_get_se(struct ib_other_headers *ohdr)
{
	return (u8)((be32_to_cpu(ohdr->bth[0]) >> IB_BTH_SE_SHIFT) &
		    IB_BTH_SE_MASK);
}

static inline u32 ib_bth_get_psn(struct ib_other_headers *ohdr)
{
	return (u32)(be32_to_cpu(ohdr->bth[2]));
}

static inline u32 ib_bth_get_qpn(struct ib_other_headers *ohdr)
{
	return (u32)((be32_to_cpu(ohdr->bth[1])) & IB_QPN_MASK);
}

static inline bool ib_bth_get_becn(struct ib_other_headers *ohdr)
{
	return (ohdr->bth[1]) & cpu_to_be32(IB_BECN_SMASK);
}

static inline bool ib_bth_get_fecn(struct ib_other_headers *ohdr)
{
	return (ohdr->bth[1]) & cpu_to_be32(IB_FECN_SMASK);
}

static inline u8 ib_bth_get_tver(struct ib_other_headers *ohdr)
{
	return (u8)((be32_to_cpu(ohdr->bth[0]) >> IB_BTH_TVER_SHIFT) &
		    IB_BTH_TVER_MASK);
}

static inline bool ib_bth_is_solicited(struct ib_other_headers *ohdr)
{
	return ohdr->bth[0] & cpu_to_be32(IB_BTH_SOLICITED);
}

static inline bool ib_bth_is_migration(struct ib_other_headers *ohdr)
{
	return ohdr->bth[0] & cpu_to_be32(IB_BTH_MIG_REQ);
}
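
/*
 * Illustrative sketch (not part of the upstream header): a minimal BTH
 * inspection on the receive path using the accessors above.  The struct and
 * helper name are hypothetical.
 */
struct ib_example_bth_fields {
	u32 psn;
	u32 qpn;
	u16 pkey;
	u8 opcode;
	bool fecn;
	bool becn;
};

static inline void ib_example_parse_bth(struct ib_other_headers *ohdr,
					struct ib_example_bth_fields *f)
{
	f->opcode = ib_bth_get_opcode(ohdr);
	f->pkey = ib_bth_get_pkey(ohdr);
	f->qpn = ib_bth_get_qpn(ohdr);
	f->psn = ib_bth_get_psn(ohdr);	/* includes the AckReq bit */
	f->fecn = ib_bth_get_fecn(ohdr);
	f->becn = ib_bth_get_becn(ohdr);
}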
#endif                          /* IB_HDRS_H */