1 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
2 /*
3 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
4 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
5 * Copyright (c) 2004 Intel Corporation. All rights reserved.
6 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
7 * Copyright (c) 2004-2006 Voltaire Corporation. All rights reserved.
8 */
9
10 #ifndef IB_MAD_H
11 #define IB_MAD_H
12
13 #include <linux/list.h>
14
15 #include <rdma/ib_verbs.h>
16 #include <uapi/rdma/ib_user_mad.h>
17
/* Management base versions (base_version in struct ib_mad_hdr) */
#define IB_MGMT_BASE_VERSION 1
/* OPA MADs use a larger data area; see OPA_MGMT_MAD_DATA below */
#define OPA_MGMT_BASE_VERSION 0x80

#define OPA_SM_CLASS_VERSION 0x80

/* Management classes (mgmt_class in struct ib_mad_hdr) */
#define IB_MGMT_CLASS_SUBN_LID_ROUTED 0x01
#define IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE 0x81
#define IB_MGMT_CLASS_SUBN_ADM 0x03
#define IB_MGMT_CLASS_PERF_MGMT 0x04
#define IB_MGMT_CLASS_BM 0x05
#define IB_MGMT_CLASS_DEVICE_MGMT 0x06
#define IB_MGMT_CLASS_CM 0x07
#define IB_MGMT_CLASS_SNMP 0x08
#define IB_MGMT_CLASS_DEVICE_ADM 0x10
#define IB_MGMT_CLASS_BOOT_MGMT 0x11
#define IB_MGMT_CLASS_BIS 0x12
#define IB_MGMT_CLASS_CONG_MGMT 0x21
/* Vendor classes in this range carry an IEEE OUI (see struct ib_vendor_mad) */
#define IB_MGMT_CLASS_VENDOR_RANGE2_START 0x30
#define IB_MGMT_CLASS_VENDOR_RANGE2_END 0x4F

/* OpenFabrics (OpenIB) IEEE OUI */
#define IB_OPENIB_OUI (0x001405)

/* Management methods (method in struct ib_mad_hdr) */
#define IB_MGMT_METHOD_GET 0x01
#define IB_MGMT_METHOD_SET 0x02
#define IB_MGMT_METHOD_GET_RESP 0x81
#define IB_MGMT_METHOD_SEND 0x03
#define IB_MGMT_METHOD_TRAP 0x05
#define IB_MGMT_METHOD_REPORT 0x06
#define IB_MGMT_METHOD_REPORT_RESP 0x86
#define IB_MGMT_METHOD_TRAP_REPRESS 0x07

/* Response bit: set in the method field of response MADs (e.g. 0x81, 0x86) */
#define IB_MGMT_METHOD_RESP 0x80
#define IB_BM_ATTR_MOD_RESP cpu_to_be32(1)

/* Method values are 7 bits wide plus the response bit above */
#define IB_MGMT_MAX_METHODS 128
56
/* MAD Status field bit masks (16-bit status in struct ib_mad_hdr) */
#define IB_MGMT_MAD_STATUS_SUCCESS 0x0000
#define IB_MGMT_MAD_STATUS_BUSY 0x0001
#define IB_MGMT_MAD_STATUS_REDIRECT_REQD 0x0002
#define IB_MGMT_MAD_STATUS_BAD_VERSION 0x0004
#define IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD 0x0008
#define IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB 0x000c
#define IB_MGMT_MAD_STATUS_INVALID_ATTRIB_VALUE 0x001c

/* RMPP information */
#define IB_MGMT_RMPP_VERSION 1

/* rmpp_type values in struct ib_rmpp_hdr */
#define IB_MGMT_RMPP_TYPE_DATA 1
#define IB_MGMT_RMPP_TYPE_ACK 2
#define IB_MGMT_RMPP_TYPE_STOP 3
#define IB_MGMT_RMPP_TYPE_ABORT 4

/* Flags held in the low 3 bits of rmpp_rtime_flags */
#define IB_MGMT_RMPP_FLAG_ACTIVE 1
#define IB_MGMT_RMPP_FLAG_FIRST (1<<1)
#define IB_MGMT_RMPP_FLAG_LAST (1<<2)

/* "No response time provided" marker for the 5-bit resp time field */
#define IB_MGMT_RMPP_NO_RESPTIME 0x1F

/* rmpp_status values; abort/stop reason codes occupy the range 118..127 */
#define IB_MGMT_RMPP_STATUS_SUCCESS 0
#define IB_MGMT_RMPP_STATUS_RESX 1
#define IB_MGMT_RMPP_STATUS_ABORT_MIN 118
#define IB_MGMT_RMPP_STATUS_T2L 118
#define IB_MGMT_RMPP_STATUS_BAD_LEN 119
#define IB_MGMT_RMPP_STATUS_BAD_SEG 120
#define IB_MGMT_RMPP_STATUS_BADT 121
#define IB_MGMT_RMPP_STATUS_W2S 122
#define IB_MGMT_RMPP_STATUS_S2B 123
#define IB_MGMT_RMPP_STATUS_BAD_STATUS 124
#define IB_MGMT_RMPP_STATUS_UNV 125
#define IB_MGMT_RMPP_STATUS_TMR 126
#define IB_MGMT_RMPP_STATUS_UNSPEC 127
#define IB_MGMT_RMPP_STATUS_ABORT_MAX 127

/* Well-known QP numbers and Q_Keys used for MAD traffic */
#define IB_QP0 0
#define IB_QP1 cpu_to_be32(1)
#define IB_QP1_QKEY 0x80010000
#define IB_QP_SET_QKEY 0x80000000

/* Default P_Keys; the two values differ only in the top (membership) bit */
#define IB_DEFAULT_PKEY_PARTIAL 0x7FFF
#define IB_DEFAULT_PKEY_FULL 0xFFFF

/*
 * Generic trap/notice types (generic_type in struct ib_mad_notice_attr)
 */
#define IB_NOTICE_TYPE_FATAL 0x80
#define IB_NOTICE_TYPE_URGENT 0x81
#define IB_NOTICE_TYPE_SECURITY 0x82
#define IB_NOTICE_TYPE_SM 0x83
#define IB_NOTICE_TYPE_INFO 0x84

/*
 * Generic trap/notice producers (stored big-endian, as on the wire)
 */
#define IB_NOTICE_PROD_CA cpu_to_be16(1)
#define IB_NOTICE_PROD_SWITCH cpu_to_be16(2)
#define IB_NOTICE_PROD_ROUTER cpu_to_be16(3)
#define IB_NOTICE_PROD_CLASS_MGR cpu_to_be16(4)
119
/* Byte sizes of the MAD header variants and their remaining data areas */
enum {
	IB_MGMT_MAD_HDR = 24,		/* sizeof(struct ib_mad_hdr) */
	IB_MGMT_MAD_DATA = 232,
	IB_MGMT_RMPP_HDR = 36,		/* common + RMPP headers */
	IB_MGMT_RMPP_DATA = 220,
	IB_MGMT_VENDOR_HDR = 40,	/* common + RMPP + reserved/OUI bytes */
	IB_MGMT_VENDOR_DATA = 216,
	IB_MGMT_SA_HDR = 56,		/* common + RMPP + SA headers */
	IB_MGMT_SA_DATA = 200,
	IB_MGMT_DEVICE_HDR = 64,
	IB_MGMT_DEVICE_DATA = 192,
	IB_MGMT_MAD_SIZE = IB_MGMT_MAD_HDR + IB_MGMT_MAD_DATA,	/* 256 */
	OPA_MGMT_MAD_DATA = 2024,
	OPA_MGMT_RMPP_DATA = 2012,
	OPA_MGMT_MAD_SIZE = IB_MGMT_MAD_HDR + OPA_MGMT_MAD_DATA, /* 2048 */
};
136
/*
 * Common MAD header (IB_MGMT_MAD_HDR = 24 bytes). This is a wire format;
 * multi-byte fields are big-endian and the layout must not change.
 */
struct ib_mad_hdr {
	u8 base_version;	/* IB_MGMT_BASE_VERSION or OPA_MGMT_BASE_VERSION */
	u8 mgmt_class;		/* IB_MGMT_CLASS_* */
	u8 class_version;
	u8 method;		/* IB_MGMT_METHOD_*; IB_MGMT_METHOD_RESP bit for responses */
	__be16 status;		/* IB_MGMT_MAD_STATUS_* bit masks */
	__be16 class_specific;
	__be64 tid;		/* transaction ID */
	__be16 attr_id;
	__be16 resv;
	__be32 attr_mod;
};
149
/*
 * RMPP (segmentation/reassembly) header; follows the common MAD header
 * in RMPP-capable classes. Wire format — layout must not change.
 */
struct ib_rmpp_hdr {
	u8 rmpp_version;	/* IB_MGMT_RMPP_VERSION */
	u8 rmpp_type;		/* IB_MGMT_RMPP_TYPE_* */
	u8 rmpp_rtime_flags;	/* resp time (high 5 bits) | flags (low 3 bits) */
	u8 rmpp_status;		/* IB_MGMT_RMPP_STATUS_* */
	__be32 seg_num;
	__be32 paylen_newwin;
};
158
/* SA component mask; stored big-endian, hence the __bitwise annotation */
typedef u64 __bitwise ib_sa_comp_mask;

/* Component mask with bit n set, already converted to wire byte order */
#define IB_SA_COMP_MASK(n) ((__force ib_sa_comp_mask) cpu_to_be64(1ull << (n)))

/*
 * ib_sa_hdr and ib_sa_mad structures must be packed because they have
 * 64-bit fields that are only 32-bit aligned. 64-bit architectures will
 * lay them out wrong otherwise. (And unfortunately they are sent on
 * the wire so we can't change the layout)
 */
struct ib_sa_hdr {
	__be64 sm_key;
	__be16 attr_offset;
	__be16 reserved;
	ib_sa_comp_mask comp_mask;
} __packed;
175
/* Basic IB MAD: common header + 232-byte data area (256 bytes total) */
struct ib_mad {
	struct ib_mad_hdr mad_hdr;
	u8 data[IB_MGMT_MAD_DATA];
};

/* OPA MAD: same common header with a 2024-byte data area (2048 bytes total) */
struct opa_mad {
	struct ib_mad_hdr mad_hdr;
	u8 data[OPA_MGMT_MAD_DATA];
};

/* MAD with RMPP header, for classes that segment large transfers */
struct ib_rmpp_mad {
	struct ib_mad_hdr mad_hdr;
	struct ib_rmpp_hdr rmpp_hdr;
	u8 data[IB_MGMT_RMPP_DATA];
};

/* OPA variant of the RMPP MAD with the larger data area */
struct opa_rmpp_mad {
	struct ib_mad_hdr mad_hdr;
	struct ib_rmpp_hdr rmpp_hdr;
	u8 data[OPA_MGMT_RMPP_DATA];
};

/* Subnet Administration MAD; packed — see comment above struct ib_sa_hdr */
struct ib_sa_mad {
	struct ib_mad_hdr mad_hdr;
	struct ib_rmpp_hdr rmpp_hdr;
	struct ib_sa_hdr sa_hdr;
	u8 data[IB_MGMT_SA_DATA];
} __packed;

/* Vendor-class MAD (classes 0x30-0x4F); oui identifies the vendor */
struct ib_vendor_mad {
	struct ib_mad_hdr mad_hdr;
	struct ib_rmpp_hdr rmpp_hdr;
	u8 reserved;
	u8 oui[3];
	u8 data[IB_MGMT_VENDOR_DATA];
};
212
#define IB_MGMT_CLASSPORTINFO_ATTR_ID cpu_to_be16(0x0001)

/* cap_mask2_resp_time layout: low 5 bits resp_time, upper 27 bits cap_mask2 */
#define IB_CLASS_PORT_INFO_RESP_TIME_MASK 0x1F
#define IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE 5

/*
 * ClassPortInfo attribute. Wire format; multi-byte fields are big-endian.
 * Use the ib_get/set_cpi_* accessors below for cap_mask2_resp_time.
 */
struct ib_class_port_info {
	u8 base_version;
	u8 class_version;
	__be16 capability_mask;
	/* 27 bits for cap_mask2, 5 bits for resp_time */
	__be32 cap_mask2_resp_time;
	u8 redirect_gid[16];
	__be32 redirect_tcslfl;
	__be16 redirect_lid;
	__be16 redirect_pkey;
	__be32 redirect_qp;
	__be32 redirect_qkey;
	u8 trap_gid[16];
	__be32 trap_tcslfl;
	__be16 trap_lid;
	__be16 trap_pkey;
	__be32 trap_hlqp;
	__be32 trap_qkey;
};
237
/* PortInfo CapabilityMask bits (bits 0 and 13 are not defined here) */
enum ib_port_capability_mask_bits {
	IB_PORT_SM = 1 << 1,
	IB_PORT_NOTICE_SUP = 1 << 2,
	IB_PORT_TRAP_SUP = 1 << 3,
	IB_PORT_OPT_IPD_SUP = 1 << 4,
	IB_PORT_AUTO_MIGR_SUP = 1 << 5,
	IB_PORT_SL_MAP_SUP = 1 << 6,
	IB_PORT_MKEY_NVRAM = 1 << 7,
	IB_PORT_PKEY_NVRAM = 1 << 8,
	IB_PORT_LED_INFO_SUP = 1 << 9,
	IB_PORT_SM_DISABLED = 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
	IB_PORT_EXTENDED_SPEEDS_SUP = 1 << 14,
	IB_PORT_CAP_MASK2_SUP = 1 << 15,
	IB_PORT_CM_SUP = 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP = 1 << 17,
	IB_PORT_REINIT_SUP = 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP = 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP = 1 << 20,
	IB_PORT_DR_NOTICE_SUP = 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
	IB_PORT_BOOT_MGMT_SUP = 1 << 23,
	IB_PORT_LINK_LATENCY_SUP = 1 << 24,
	IB_PORT_CLIENT_REG_SUP = 1 << 25,
	IB_PORT_OTHER_LOCAL_CHANGES_SUP = 1 << 26,
	IB_PORT_LINK_SPEED_WIDTH_TABLE_SUP = 1 << 27,
	IB_PORT_VENDOR_SPECIFIC_MADS_TABLE_SUP = 1 << 28,
	IB_PORT_MCAST_PKEY_TRAP_SUPPRESSION_SUP = 1 << 29,
	IB_PORT_MCAST_FDB_TOP_SUP = 1 << 30,
	/* 1ULL avoids left-shifting into the sign bit of a 32-bit int */
	IB_PORT_HIERARCHY_INFO_SUP = 1ULL << 31,
};

/*
 * PortInfo CapabilityMask2 bits — presumably meaningful only when
 * IB_PORT_CAP_MASK2_SUP is set in CapabilityMask; confirm against the IBA spec.
 */
enum ib_port_capability_mask2_bits {
	IB_PORT_SET_NODE_DESC_SUP = 1 << 0,
	IB_PORT_EX_PORT_INFO_EX_SUP = 1 << 1,
	IB_PORT_VIRT_SUP = 1 << 2,
	IB_PORT_SWITCH_PORT_STATE_TABLE_SUP = 1 << 3,
	IB_PORT_LINK_WIDTH_2X_SUP = 1 << 4,
	IB_PORT_LINK_SPEED_HDR_SUP = 1 << 5,
	IB_PORT_LINK_SPEED_NDR_SUP = 1 << 10,
	IB_PORT_EXTENDED_SPEEDS2_SUP = 1 << 11,
	IB_PORT_LINK_SPEED_XDR_SUP = 1 << 12,
};
283
#define OPA_CLASS_PORT_INFO_PR_SUPPORT BIT(26)

/*
 * OPA ClassPortInfo attribute. Wire format; multi-byte fields are big-endian.
 * cap_mask2_resp_time uses the same packing as ib_class_port_info
 * (low 5 bits resp_time, upper 27 bits cap_mask2 — see opa_get_cpi_capmask2).
 */
struct opa_class_port_info {
	u8 base_version;
	u8 class_version;
	__be16 cap_mask;
	__be32 cap_mask2_resp_time;

	u8 redirect_gid[16];
	__be32 redirect_tc_fl;
	__be32 redirect_lid;
	__be32 redirect_sl_qp;
	__be32 redirect_qkey;

	u8 trap_gid[16];
	__be32 trap_tc_fl;
	__be32 trap_lid;
	__be32 trap_hl_qp;
	__be32 trap_qkey;

	__be16 trap_pkey;
	__be16 redirect_pkey;

	u8 trap_sl_rsvd;
	u8 reserved[3];
} __packed;
310
311 /**
312 * ib_get_cpi_resp_time - Returns the resp_time value from
313 * cap_mask2_resp_time in ib_class_port_info.
314 * @cpi: A struct ib_class_port_info mad.
315 */
ib_get_cpi_resp_time(struct ib_class_port_info * cpi)316 static inline u8 ib_get_cpi_resp_time(struct ib_class_port_info *cpi)
317 {
318 return (u8)(be32_to_cpu(cpi->cap_mask2_resp_time) &
319 IB_CLASS_PORT_INFO_RESP_TIME_MASK);
320 }
321
322 /**
323 * ib_set_cpi_resptime - Sets the response time in an
324 * ib_class_port_info mad.
325 * @cpi: A struct ib_class_port_info.
326 * @rtime: The response time to set.
327 */
ib_set_cpi_resp_time(struct ib_class_port_info * cpi,u8 rtime)328 static inline void ib_set_cpi_resp_time(struct ib_class_port_info *cpi,
329 u8 rtime)
330 {
331 cpi->cap_mask2_resp_time =
332 (cpi->cap_mask2_resp_time &
333 cpu_to_be32(~IB_CLASS_PORT_INFO_RESP_TIME_MASK)) |
334 cpu_to_be32(rtime & IB_CLASS_PORT_INFO_RESP_TIME_MASK);
335 }
336
337 /**
338 * ib_get_cpi_capmask2 - Returns the capmask2 value from
339 * cap_mask2_resp_time in ib_class_port_info.
340 * @cpi: A struct ib_class_port_info mad.
341 */
ib_get_cpi_capmask2(struct ib_class_port_info * cpi)342 static inline u32 ib_get_cpi_capmask2(struct ib_class_port_info *cpi)
343 {
344 return (be32_to_cpu(cpi->cap_mask2_resp_time) >>
345 IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE);
346 }
347
348 /**
349 * ib_set_cpi_capmask2 - Sets the capmask2 in an
350 * ib_class_port_info mad.
351 * @cpi: A struct ib_class_port_info.
352 * @capmask2: The capmask2 to set.
353 */
ib_set_cpi_capmask2(struct ib_class_port_info * cpi,u32 capmask2)354 static inline void ib_set_cpi_capmask2(struct ib_class_port_info *cpi,
355 u32 capmask2)
356 {
357 cpi->cap_mask2_resp_time =
358 (cpi->cap_mask2_resp_time &
359 cpu_to_be32(IB_CLASS_PORT_INFO_RESP_TIME_MASK)) |
360 cpu_to_be32(capmask2 <<
361 IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE);
362 }
363
364 /**
365 * opa_get_cpi_capmask2 - Returns the capmask2 value from
366 * cap_mask2_resp_time in ib_class_port_info.
367 * @cpi: A struct opa_class_port_info mad.
368 */
opa_get_cpi_capmask2(struct opa_class_port_info * cpi)369 static inline u32 opa_get_cpi_capmask2(struct opa_class_port_info *cpi)
370 {
371 return (be32_to_cpu(cpi->cap_mask2_resp_time) >>
372 IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE);
373 }
374
/*
 * Notice/Trap attribute. Wire format; the union members are per-trap
 * detail layouts, named after the trap numbers they describe.
 */
struct ib_mad_notice_attr {
	u8 generic_type;	/* IB_NOTICE_TYPE_* */
	u8 prod_type_msb;
	__be16 prod_type_lsb;	/* IB_NOTICE_PROD_* */
	__be16 trap_num;
	__be16 issuer_lid;
	__be16 toggle_count;

	union {
		struct {
			u8 details[54];
		} raw_data;

		struct {
			__be16 reserved;
			__be16 lid; /* where violation happened */
			u8 port_num; /* where violation happened */
		} __packed ntc_129_131;

		struct {
			__be16 reserved;
			__be16 lid; /* LID where change occurred */
			u8 reserved2;
			u8 local_changes; /* low bit - local changes */
			__be32 new_cap_mask; /* new capability mask */
			u8 reserved3;
			u8 change_flags; /* low 3 bits only */
		} __packed ntc_144;

		struct {
			__be16 reserved;
			__be16 lid; /* lid where sys guid changed */
			__be16 reserved2;
			__be64 new_sys_guid;
		} __packed ntc_145;

		struct {
			__be16 reserved;
			__be16 lid;
			__be16 dr_slid;
			u8 method;
			u8 reserved2;
			__be16 attr_id;
			__be32 attr_mod;
			__be64 mkey;
			u8 reserved3;
			u8 dr_trunc_hop;
			u8 dr_rtn_path[30];
		} __packed ntc_256;

		struct {
			__be16 reserved;
			__be16 lid1;
			__be16 lid2;
			__be32 key;
			__be32 sl_qp1; /* SL: high 4 bits */
			__be32 qp2; /* high 8 bits reserved */
			union ib_gid gid1;
			union ib_gid gid2;
		} __packed ntc_257_258;

	} details;
};
438
/**
 * ib_mad_send_buf - MAD data buffer and work request for sends.
 * @next: A pointer used to chain together MADs for posting.
 * @mad: References an allocated MAD data buffer for MADs that do not have
 *   RMPP active. For MADs using RMPP, references the common and management
 *   class specific headers.
 * @mad_agent: MAD agent that allocated the buffer.
 * @ah: The address handle to use when sending the MAD.
 * @context: User-controlled context fields.
 * @hdr_len: Indicates the size of the data header of the MAD. This length
 *   includes the common MAD, RMPP, and class specific headers.
 * @data_len: Indicates the total size of user-transferred data.
 * @seg_count: The number of RMPP segments allocated for this send.
 * @seg_size: Size of the data in each RMPP segment. This does not include
 *   class specific headers.
 * @seg_rmpp_size: Size of each RMPP segment including the class specific
 *   headers.
 * @timeout_ms: Time to wait for a response.
 * @retries: Number of times to retry a request for a response. For MADs
 *   using RMPP, this applies per window. On completion, returns the number
 *   of retries needed to complete the transfer.
 *
 * Users are responsible for initializing the MAD buffer itself, with the
 * exception of any RMPP header. Additional segment buffer space allocated
 * beyond data_len is padding.
 */
struct ib_mad_send_buf {
	struct ib_mad_send_buf *next;
	void *mad;
	struct ib_mad_agent *mad_agent;
	struct ib_ah *ah;
	void *context[2];	/* opaque to the MAD layer; owned by the caller */
	int hdr_len;
	int data_len;
	int seg_count;
	int seg_size;
	int seg_rmpp_size;
	int timeout_ms;
	int retries;
};

/**
 * ib_response_mad - Returns if the specified MAD has been generated in
 * response to a sent request or trap.
 * @hdr: MAD header to examine.
 */
int ib_response_mad(const struct ib_mad_hdr *hdr);
485
486 /**
487 * ib_get_rmpp_resptime - Returns the RMPP response time.
488 * @rmpp_hdr: An RMPP header.
489 */
ib_get_rmpp_resptime(struct ib_rmpp_hdr * rmpp_hdr)490 static inline u8 ib_get_rmpp_resptime(struct ib_rmpp_hdr *rmpp_hdr)
491 {
492 return rmpp_hdr->rmpp_rtime_flags >> 3;
493 }
494
495 /**
496 * ib_get_rmpp_flags - Returns the RMPP flags.
497 * @rmpp_hdr: An RMPP header.
498 */
ib_get_rmpp_flags(struct ib_rmpp_hdr * rmpp_hdr)499 static inline u8 ib_get_rmpp_flags(struct ib_rmpp_hdr *rmpp_hdr)
500 {
501 return rmpp_hdr->rmpp_rtime_flags & 0x7;
502 }
503
504 /**
505 * ib_set_rmpp_resptime - Sets the response time in an RMPP header.
506 * @rmpp_hdr: An RMPP header.
507 * @rtime: The response time to set.
508 */
ib_set_rmpp_resptime(struct ib_rmpp_hdr * rmpp_hdr,u8 rtime)509 static inline void ib_set_rmpp_resptime(struct ib_rmpp_hdr *rmpp_hdr, u8 rtime)
510 {
511 rmpp_hdr->rmpp_rtime_flags = ib_get_rmpp_flags(rmpp_hdr) | (rtime << 3);
512 }
513
514 /**
515 * ib_set_rmpp_flags - Sets the flags in an RMPP header.
516 * @rmpp_hdr: An RMPP header.
517 * @flags: The flags to set.
518 */
ib_set_rmpp_flags(struct ib_rmpp_hdr * rmpp_hdr,u8 flags)519 static inline void ib_set_rmpp_flags(struct ib_rmpp_hdr *rmpp_hdr, u8 flags)
520 {
521 rmpp_hdr->rmpp_rtime_flags = (rmpp_hdr->rmpp_rtime_flags & 0xF8) |
522 (flags & 0x7);
523 }
524
/* Forward declarations for the callback typedefs below */
struct ib_mad_agent;
struct ib_mad_send_wc;
struct ib_mad_recv_wc;

/**
 * ib_mad_send_handler - callback handler for a sent MAD.
 * @mad_agent: MAD agent that sent the MAD.
 * @mad_send_wc: Send work completion information on the sent MAD.
 */
typedef void (*ib_mad_send_handler)(struct ib_mad_agent *mad_agent,
				    struct ib_mad_send_wc *mad_send_wc);

/**
 * ib_mad_recv_handler - callback handler for a received MAD.
 * @mad_agent: MAD agent requesting the received MAD.
 * @send_buf: Send buffer if found, else NULL
 * @mad_recv_wc: Received work completion information on the received MAD.
 *
 * MADs received in response to a send request operation will be handed to
 * the user before the send operation completes. All data buffers given
 * to registered agents through this routine are owned by the receiving
 * client.
 */
typedef void (*ib_mad_recv_handler)(struct ib_mad_agent *mad_agent,
				    struct ib_mad_send_buf *send_buf,
				    struct ib_mad_recv_wc *mad_recv_wc);
551
/**
 * ib_mad_agent - Used to track MAD registration with the access layer.
 * @device: Reference to device registration is on.
 * @qp: Reference to QP used for sending and receiving MADs.
 * @recv_handler: Callback handler for a received MAD.
 * @send_handler: Callback handler for a sent MAD.
 * @context: User-specified context associated with this registration.
 * @hi_tid: Access layer assigned transaction ID for this client.
 *   Unsolicited MADs sent by this client will have the upper 32-bits
 *   of their TID set to this value.
 * @flags: registration flags (e.g. IB_MAD_USER_RMPP)
 * @security: opaque security state — presumably LSM-related; confirm in mad.c
 * @mad_agent_sec_list: list linkage — presumably a security agent list; verify
 * @port_num: Port number on which QP is registered
 * @rmpp_version: If set, indicates the RMPP version used by this agent.
 * @smp_allowed: whether SMPs are permitted for this agent — TODO confirm
 *   semantics against the implementation.
 */
enum {
	IB_MAD_USER_RMPP = IB_USER_MAD_USER_RMPP,
};
struct ib_mad_agent {
	struct ib_device *device;
	struct ib_qp *qp;
	ib_mad_recv_handler recv_handler;
	ib_mad_send_handler send_handler;
	void *context;
	u32 hi_tid;
	u32 flags;
	void *security;
	struct list_head mad_agent_sec_list;
	u8 port_num;
	u8 rmpp_version;
	bool smp_allowed;
};
584
/**
 * ib_mad_send_wc - MAD send completion information.
 * @send_buf: Send MAD data buffer associated with the send MAD request.
 * @status: Completion status.
 * @vendor_err: Optional vendor error information returned with a failed
 *   request.
 */
struct ib_mad_send_wc {
	struct ib_mad_send_buf *send_buf;
	enum ib_wc_status status;
	u32 vendor_err;
};

/**
 * ib_mad_recv_buf - received MAD buffer information.
 * @list: Reference to next data buffer for a received RMPP MAD.
 * @grh: References a data buffer containing the global route header.
 *   The data referenced by this buffer is only valid if the GRH is
 *   valid.
 * @mad: References the start of the received MAD.
 * @opa_mad: Same buffer viewed as an OPA MAD.
 */
struct ib_mad_recv_buf {
	struct list_head list;
	struct ib_grh *grh;
	union {
		struct ib_mad *mad;
		struct opa_mad *opa_mad;
	};
};

/**
 * ib_mad_recv_wc - received MAD information.
 * @wc: Completion information for the received data.
 * @recv_buf: Specifies the location of the received data buffer(s).
 * @rmpp_list: Specifies a list of RMPP reassembled received MAD buffers.
 * @mad_len: The length of the received MAD, without duplicated headers.
 * @mad_seg_size: The size of individual MAD segments
 *
 * For received response, the wr_id contains a pointer to the ib_mad_send_buf
 * for the corresponding send request.
 */
struct ib_mad_recv_wc {
	struct ib_wc *wc;
	struct ib_mad_recv_buf recv_buf;
	struct list_head rmpp_list;
	int mad_len;
	size_t mad_seg_size;
};
633
/**
 * ib_mad_reg_req - MAD registration request
 * @mgmt_class: Indicates which management class of MADs should be received
 *   by the caller. This field is only required if the user wishes to
 *   receive unsolicited MADs, otherwise it should be 0.
 * @mgmt_class_version: Indicates which version of MADs for the given
 *   management class to receive.
 * @oui: Indicates IEEE OUI when mgmt_class is a vendor class
 *   in the range from 0x30 to 0x4f. Otherwise not used.
 * @method_mask: The caller will receive unsolicited MADs for any method
 *   where @method_mask = 1.
 *
 */
struct ib_mad_reg_req {
	u8 mgmt_class;
	u8 mgmt_class_version;
	u8 oui[3];
	DECLARE_BITMAP(method_mask, IB_MGMT_MAX_METHODS);
};

/**
 * ib_register_mad_agent - Register to send/receive MADs.
 * @device: The device to register with.
 * @port_num: The port on the specified device to use.
 * @qp_type: Specifies which QP to access. Must be either
 *   IB_QPT_SMI or IB_QPT_GSI.
 * @mad_reg_req: Specifies which unsolicited MADs should be received
 *   by the caller. This parameter may be NULL if the caller only
 *   wishes to receive solicited responses.
 * @rmpp_version: If set, indicates that the client will send
 *   and receive MADs that contain the RMPP header for the given version.
 *   If set to 0, indicates that RMPP is not used by this client.
 * @send_handler: The completion callback routine invoked after a send
 *   request has completed.
 * @recv_handler: The completion callback routine invoked for a received
 *   MAD.
 * @context: User specified context associated with the registration.
 * @registration_flags: Registration flags to set for this agent
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u32 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context,
					   u32 registration_flags);
/**
 * ib_unregister_mad_agent - Unregisters a client from using MAD services.
 * @mad_agent: Corresponding MAD registration request to deregister.
 *
 * After invoking this routine, MAD services are no longer usable by the
 * client on the associated QP.
 */
void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent);

/**
 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
 * with the registered client.
 * @send_buf: Specifies the information needed to send the MAD(s).
 * @bad_send_buf: Specifies the MAD on which an error was encountered. This
 *   parameter is optional if only a single MAD is posted.
 *
 * Sent MADs are not guaranteed to complete in the order that they were posted.
 *
 * If the MAD requires RMPP, the data buffer should contain a single copy
 * of the common MAD, RMPP, and class specific headers, followed by the class
 * defined data. If the class defined data would not divide evenly into
 * RMPP segments, then space must be allocated at the end of the referenced
 * buffer for any required padding. To indicate the amount of class defined
 * data being transferred, the paylen_newwin field in the RMPP header should
 * be set to the size of the class specific header plus the amount of class
 * defined data being transferred. The paylen_newwin field should be
 * specified in network-byte order.
 */
int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
		     struct ib_mad_send_buf **bad_send_buf);


/**
 * ib_free_recv_mad - Returns data buffers used to receive a MAD.
 * @mad_recv_wc: Work completion information for a received MAD.
 *
 * Clients receiving MADs through their ib_mad_recv_handler must call this
 * routine to return the work completion buffers to the access layer.
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc);

/**
 * ib_modify_mad - Modifies an outstanding send MAD operation.
 * @send_buf: Indicates the MAD to modify.
 * @timeout_ms: New timeout value for sent MAD.
 *
 * This call will reset the timeout value for a sent MAD to the specified
 * value. A @timeout_ms of 0 cancels the send (see ib_cancel_mad below).
 */
int ib_modify_mad(struct ib_mad_send_buf *send_buf, u32 timeout_ms);

/**
 * ib_cancel_mad - Cancels an outstanding send MAD operation.
 * @send_buf: Indicates the MAD to cancel.
 *
 * Implemented as ib_modify_mad() with a zero timeout. MADs will be
 * returned to the user through the corresponding ib_mad_send_handler.
 */
static inline void ib_cancel_mad(struct ib_mad_send_buf *send_buf)
{
	ib_modify_mad(send_buf, 0);
}
744
/**
 * ib_create_send_mad - Allocate and initialize a data buffer and work request
 * for sending a MAD.
 * @mad_agent: Specifies the registered MAD service to associate with the MAD.
 * @remote_qpn: Specifies the QPN of the receiving node.
 * @pkey_index: Specifies which PKey the MAD will be sent using. This field
 *   is valid only if the remote_qpn is QP 1.
 * @rmpp_active: Indicates if the send will enable RMPP.
 * @hdr_len: Indicates the size of the data header of the MAD. This length
 *   should include the common MAD header, RMPP header, plus any class
 *   specific header.
 * @data_len: Indicates the size of any user-transferred data. The call will
 *   automatically adjust the allocated buffer size to account for any
 *   additional padding that may be necessary.
 * @gfp_mask: GFP mask used for the memory allocation.
 * @base_version: Base Version of this MAD
 *
 * This routine allocates a MAD for sending. The returned MAD send buffer
 * will reference a data buffer usable for sending a MAD, along
 * with an initialized work request structure. Users may modify the returned
 * MAD data buffer before posting the send.
 *
 * The returned MAD header, class specific headers, and any padding will be
 * cleared. Users are responsible for initializing the common MAD header,
 * any class specific header, and MAD data area.
 * If @rmpp_active is set, the RMPP header will be initialized for sending.
 */
struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
					   u32 remote_qpn, u16 pkey_index,
					   int rmpp_active,
					   int hdr_len, int data_len,
					   gfp_t gfp_mask,
					   u8 base_version);

/**
 * ib_is_mad_class_rmpp - returns whether given management class
 * supports RMPP.
 * @mgmt_class: management class
 *
 * This routine returns whether the management class supports RMPP.
 */
int ib_is_mad_class_rmpp(u8 mgmt_class);

/**
 * ib_get_mad_data_offset - returns the data offset for a given
 * management class.
 * @mgmt_class: management class
 *
 * This routine returns the data offset in the MAD for the management
 * class requested.
 */
int ib_get_mad_data_offset(u8 mgmt_class);

/**
 * ib_get_rmpp_segment - returns the data buffer for a given RMPP segment.
 * @send_buf: Previously allocated send data buffer.
 * @seg_num: number of segment to return
 *
 * This routine returns a pointer to the data buffer of an RMPP MAD.
 * Users must provide synchronization to @send_buf around this call.
 */
void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num);

/**
 * ib_free_send_mad - Returns data buffers used to send a MAD.
 * @send_buf: Previously allocated send data buffer.
 */
void ib_free_send_mad(struct ib_mad_send_buf *send_buf);

/**
 * ib_mad_kernel_rmpp_agent - Returns if the agent is performing RMPP.
 * @agent: the agent in question
 * @return: true if agent is performing rmpp, false otherwise.
 */
int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent);
820
821 #endif /* IB_MAD_H */
822