1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
3  *
4  * Copyright (c) 2004 Topspin Communications.  All rights reserved.
5  * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
6  * Copyright (c) 2006 Intel Corporation.  All rights reserved.
7  *
8  * This software is available to you under a choice of one of two
9  * licenses.  You may choose to be licensed under the terms of the GNU
10  * General Public License (GPL) Version 2, available from the file
11  * COPYING in the main directory of this source tree, or the
12  * OpenIB.org BSD license below:
13  *
14  *     Redistribution and use in source and binary forms, with or
15  *     without modification, are permitted provided that the following
16  *     conditions are met:
17  *
18  *      - Redistributions of source code must retain the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer.
21  *
22  *      - Redistributions in binary form must reproduce the above
23  *        copyright notice, this list of conditions and the following
24  *        disclaimer in the documentation and/or other materials
25  *        provided with the distribution.
26  *
27  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
28  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
30  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
31  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
32  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
33  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34  * SOFTWARE.
35  */
36 
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
39 
40 #include <linux/module.h>
41 #include <linux/err.h>
42 #include <linux/random.h>
43 #include <linux/spinlock.h>
44 #include <linux/slab.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/kref.h>
47 #include <linux/idr.h>
48 #include <linux/workqueue.h>
49 #include <linux/etherdevice.h>
50 #include <rdma/ib_pack.h>
51 #include <rdma/ib_cache.h>
52 #include <rdma/ib_user_sa.h>
53 #include <rdma/ib_marshall.h>
54 #include <rdma/ib_addr.h>
55 #include "sa.h"
56 #include "core_priv.h"
57 
58 #define IB_SA_LOCAL_SVC_TIMEOUT_MIN		100
59 #define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT		2000
60 #define IB_SA_LOCAL_SVC_TIMEOUT_MAX		200000
61 
62 struct ib_sa_sm_ah {
63 	struct ib_ah        *ah;
64 	struct kref          ref;
65 	u16		     pkey_index;
66 	u8		     src_path_mask;
67 };
68 
69 struct ib_sa_classport_cache {
70 	bool valid;
71 	struct ib_class_port_info data;
72 };
73 
74 struct ib_sa_port {
75 	struct ib_mad_agent *agent;
76 	struct ib_sa_sm_ah  *sm_ah;
77 	struct work_struct   update_task;
78 	struct ib_sa_classport_cache classport_info;
79 	spinlock_t                   classport_lock; /* protects class port info set */
80 	spinlock_t           ah_lock;
81 	u8                   port_num;
82 };
83 
84 struct ib_sa_device {
85 	int                     start_port, end_port;
86 	struct ib_event_handler event_handler;
	struct ib_sa_port port[];
88 };
89 
90 struct ib_sa_query {
91 	void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
92 	void (*release)(struct ib_sa_query *);
93 	struct ib_sa_client    *client;
94 	struct ib_sa_port      *port;
95 	struct ib_mad_send_buf *mad_buf;
96 	struct ib_sa_sm_ah     *sm_ah;
97 	int			id;
98 	u32			flags;
99 	struct list_head	list; /* Local svc request list */
100 	u32			seq; /* Local svc request sequence number */
101 	unsigned long		timeout; /* Local svc timeout */
	u8			path_use; /* How will the path record be used */
103 };
104 
105 #define IB_SA_ENABLE_LOCAL_SERVICE	0x00000001
106 #define IB_SA_CANCEL			0x00000002
107 
108 struct ib_sa_service_query {
109 	void (*callback)(int, struct ib_sa_service_rec *, void *);
110 	void *context;
111 	struct ib_sa_query sa_query;
112 };
113 
114 struct ib_sa_path_query {
115 	void (*callback)(int, struct ib_sa_path_rec *, void *);
116 	void *context;
117 	struct ib_sa_query sa_query;
118 };
119 
120 struct ib_sa_guidinfo_query {
121 	void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
122 	void *context;
123 	struct ib_sa_query sa_query;
124 };
125 
126 struct ib_sa_classport_info_query {
127 	void (*callback)(int, struct ib_class_port_info *, void *);
128 	void *context;
129 	struct ib_sa_query sa_query;
130 };
131 
132 struct ib_sa_mcmember_query {
133 	void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
134 	void *context;
135 	struct ib_sa_query sa_query;
136 };
137 
138 static void ib_sa_add_one(struct ib_device *device);
139 static void ib_sa_remove_one(struct ib_device *device, void *client_data);
140 
141 static struct ib_client sa_client = {
142 	.name   = "sa",
143 	.add    = ib_sa_add_one,
144 	.remove = ib_sa_remove_one
145 };
146 
147 static DEFINE_SPINLOCK(idr_lock);
148 static DEFINE_IDR(query_idr);
149 
150 static DEFINE_SPINLOCK(tid_lock);
151 static u32 tid;
152 
153 #define PATH_REC_FIELD(field) \
154 	.struct_offset_bytes = offsetof(struct ib_sa_path_rec, field),		\
155 	.struct_size_bytes   = sizeof ((struct ib_sa_path_rec *) 0)->field,	\
156 	.field_name          = "sa_path_rec:" #field
157 
158 static const struct ib_field path_rec_table[] = {
159 	{ PATH_REC_FIELD(service_id),
160 	  .offset_words = 0,
161 	  .offset_bits  = 0,
162 	  .size_bits    = 64 },
163 	{ PATH_REC_FIELD(dgid),
164 	  .offset_words = 2,
165 	  .offset_bits  = 0,
166 	  .size_bits    = 128 },
167 	{ PATH_REC_FIELD(sgid),
168 	  .offset_words = 6,
169 	  .offset_bits  = 0,
170 	  .size_bits    = 128 },
171 	{ PATH_REC_FIELD(dlid),
172 	  .offset_words = 10,
173 	  .offset_bits  = 0,
174 	  .size_bits    = 16 },
175 	{ PATH_REC_FIELD(slid),
176 	  .offset_words = 10,
177 	  .offset_bits  = 16,
178 	  .size_bits    = 16 },
179 	{ PATH_REC_FIELD(raw_traffic),
180 	  .offset_words = 11,
181 	  .offset_bits  = 0,
182 	  .size_bits    = 1 },
183 	{ RESERVED,
184 	  .offset_words = 11,
185 	  .offset_bits  = 1,
186 	  .size_bits    = 3 },
187 	{ PATH_REC_FIELD(flow_label),
188 	  .offset_words = 11,
189 	  .offset_bits  = 4,
190 	  .size_bits    = 20 },
191 	{ PATH_REC_FIELD(hop_limit),
192 	  .offset_words = 11,
193 	  .offset_bits  = 24,
194 	  .size_bits    = 8 },
195 	{ PATH_REC_FIELD(traffic_class),
196 	  .offset_words = 12,
197 	  .offset_bits  = 0,
198 	  .size_bits    = 8 },
199 	{ PATH_REC_FIELD(reversible),
200 	  .offset_words = 12,
201 	  .offset_bits  = 8,
202 	  .size_bits    = 1 },
203 	{ PATH_REC_FIELD(numb_path),
204 	  .offset_words = 12,
205 	  .offset_bits  = 9,
206 	  .size_bits    = 7 },
207 	{ PATH_REC_FIELD(pkey),
208 	  .offset_words = 12,
209 	  .offset_bits  = 16,
210 	  .size_bits    = 16 },
211 	{ PATH_REC_FIELD(qos_class),
212 	  .offset_words = 13,
213 	  .offset_bits  = 0,
214 	  .size_bits    = 12 },
215 	{ PATH_REC_FIELD(sl),
216 	  .offset_words = 13,
217 	  .offset_bits  = 12,
218 	  .size_bits    = 4 },
219 	{ PATH_REC_FIELD(mtu_selector),
220 	  .offset_words = 13,
221 	  .offset_bits  = 16,
222 	  .size_bits    = 2 },
223 	{ PATH_REC_FIELD(mtu),
224 	  .offset_words = 13,
225 	  .offset_bits  = 18,
226 	  .size_bits    = 6 },
227 	{ PATH_REC_FIELD(rate_selector),
228 	  .offset_words = 13,
229 	  .offset_bits  = 24,
230 	  .size_bits    = 2 },
231 	{ PATH_REC_FIELD(rate),
232 	  .offset_words = 13,
233 	  .offset_bits  = 26,
234 	  .size_bits    = 6 },
235 	{ PATH_REC_FIELD(packet_life_time_selector),
236 	  .offset_words = 14,
237 	  .offset_bits  = 0,
238 	  .size_bits    = 2 },
239 	{ PATH_REC_FIELD(packet_life_time),
240 	  .offset_words = 14,
241 	  .offset_bits  = 2,
242 	  .size_bits    = 6 },
243 	{ PATH_REC_FIELD(preference),
244 	  .offset_words = 14,
245 	  .offset_bits  = 8,
246 	  .size_bits    = 8 },
247 	{ RESERVED,
248 	  .offset_words = 14,
249 	  .offset_bits  = 16,
250 	  .size_bits    = 48 },
251 };
252 
253 #define MCMEMBER_REC_FIELD(field) \
254 	.struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field),	\
255 	.struct_size_bytes   = sizeof ((struct ib_sa_mcmember_rec *) 0)->field,	\
256 	.field_name          = "sa_mcmember_rec:" #field
257 
258 static const struct ib_field mcmember_rec_table[] = {
259 	{ MCMEMBER_REC_FIELD(mgid),
260 	  .offset_words = 0,
261 	  .offset_bits  = 0,
262 	  .size_bits    = 128 },
263 	{ MCMEMBER_REC_FIELD(port_gid),
264 	  .offset_words = 4,
265 	  .offset_bits  = 0,
266 	  .size_bits    = 128 },
267 	{ MCMEMBER_REC_FIELD(qkey),
268 	  .offset_words = 8,
269 	  .offset_bits  = 0,
270 	  .size_bits    = 32 },
271 	{ MCMEMBER_REC_FIELD(mlid),
272 	  .offset_words = 9,
273 	  .offset_bits  = 0,
274 	  .size_bits    = 16 },
275 	{ MCMEMBER_REC_FIELD(mtu_selector),
276 	  .offset_words = 9,
277 	  .offset_bits  = 16,
278 	  .size_bits    = 2 },
279 	{ MCMEMBER_REC_FIELD(mtu),
280 	  .offset_words = 9,
281 	  .offset_bits  = 18,
282 	  .size_bits    = 6 },
283 	{ MCMEMBER_REC_FIELD(traffic_class),
284 	  .offset_words = 9,
285 	  .offset_bits  = 24,
286 	  .size_bits    = 8 },
287 	{ MCMEMBER_REC_FIELD(pkey),
288 	  .offset_words = 10,
289 	  .offset_bits  = 0,
290 	  .size_bits    = 16 },
291 	{ MCMEMBER_REC_FIELD(rate_selector),
292 	  .offset_words = 10,
293 	  .offset_bits  = 16,
294 	  .size_bits    = 2 },
295 	{ MCMEMBER_REC_FIELD(rate),
296 	  .offset_words = 10,
297 	  .offset_bits  = 18,
298 	  .size_bits    = 6 },
299 	{ MCMEMBER_REC_FIELD(packet_life_time_selector),
300 	  .offset_words = 10,
301 	  .offset_bits  = 24,
302 	  .size_bits    = 2 },
303 	{ MCMEMBER_REC_FIELD(packet_life_time),
304 	  .offset_words = 10,
305 	  .offset_bits  = 26,
306 	  .size_bits    = 6 },
307 	{ MCMEMBER_REC_FIELD(sl),
308 	  .offset_words = 11,
309 	  .offset_bits  = 0,
310 	  .size_bits    = 4 },
311 	{ MCMEMBER_REC_FIELD(flow_label),
312 	  .offset_words = 11,
313 	  .offset_bits  = 4,
314 	  .size_bits    = 20 },
315 	{ MCMEMBER_REC_FIELD(hop_limit),
316 	  .offset_words = 11,
317 	  .offset_bits  = 24,
318 	  .size_bits    = 8 },
319 	{ MCMEMBER_REC_FIELD(scope),
320 	  .offset_words = 12,
321 	  .offset_bits  = 0,
322 	  .size_bits    = 4 },
323 	{ MCMEMBER_REC_FIELD(join_state),
324 	  .offset_words = 12,
325 	  .offset_bits  = 4,
326 	  .size_bits    = 4 },
327 	{ MCMEMBER_REC_FIELD(proxy_join),
328 	  .offset_words = 12,
329 	  .offset_bits  = 8,
330 	  .size_bits    = 1 },
331 	{ RESERVED,
332 	  .offset_words = 12,
333 	  .offset_bits  = 9,
334 	  .size_bits    = 23 },
335 };
336 
337 #define SERVICE_REC_FIELD(field) \
338 	.struct_offset_bytes = offsetof(struct ib_sa_service_rec, field),	\
339 	.struct_size_bytes   = sizeof ((struct ib_sa_service_rec *) 0)->field,	\
340 	.field_name          = "sa_service_rec:" #field
341 
342 static const struct ib_field service_rec_table[] = {
343 	{ SERVICE_REC_FIELD(id),
344 	  .offset_words = 0,
345 	  .offset_bits  = 0,
346 	  .size_bits    = 64 },
347 	{ SERVICE_REC_FIELD(gid),
348 	  .offset_words = 2,
349 	  .offset_bits  = 0,
350 	  .size_bits    = 128 },
351 	{ SERVICE_REC_FIELD(pkey),
352 	  .offset_words = 6,
353 	  .offset_bits  = 0,
354 	  .size_bits    = 16 },
355 	{ SERVICE_REC_FIELD(lease),
356 	  .offset_words = 7,
357 	  .offset_bits  = 0,
358 	  .size_bits    = 32 },
359 	{ SERVICE_REC_FIELD(key),
360 	  .offset_words = 8,
361 	  .offset_bits  = 0,
362 	  .size_bits    = 128 },
363 	{ SERVICE_REC_FIELD(name),
364 	  .offset_words = 12,
365 	  .offset_bits  = 0,
366 	  .size_bits    = 64*8 },
367 	{ SERVICE_REC_FIELD(data8),
368 	  .offset_words = 28,
369 	  .offset_bits  = 0,
370 	  .size_bits    = 16*8 },
371 	{ SERVICE_REC_FIELD(data16),
372 	  .offset_words = 32,
373 	  .offset_bits  = 0,
374 	  .size_bits    = 8*16 },
375 	{ SERVICE_REC_FIELD(data32),
376 	  .offset_words = 36,
377 	  .offset_bits  = 0,
378 	  .size_bits    = 4*32 },
379 	{ SERVICE_REC_FIELD(data64),
380 	  .offset_words = 40,
381 	  .offset_bits  = 0,
382 	  .size_bits    = 2*64 },
383 };
384 
385 #define CLASSPORTINFO_REC_FIELD(field) \
386 	.struct_offset_bytes = offsetof(struct ib_class_port_info, field),	\
387 	.struct_size_bytes   = sizeof((struct ib_class_port_info *)0)->field,	\
388 	.field_name          = "ib_class_port_info:" #field
389 
390 static const struct ib_field classport_info_rec_table[] = {
391 	{ CLASSPORTINFO_REC_FIELD(base_version),
392 	  .offset_words = 0,
393 	  .offset_bits  = 0,
394 	  .size_bits    = 8 },
395 	{ CLASSPORTINFO_REC_FIELD(class_version),
396 	  .offset_words = 0,
397 	  .offset_bits  = 8,
398 	  .size_bits    = 8 },
399 	{ CLASSPORTINFO_REC_FIELD(capability_mask),
400 	  .offset_words = 0,
401 	  .offset_bits  = 16,
402 	  .size_bits    = 16 },
403 	{ CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
404 	  .offset_words = 1,
405 	  .offset_bits  = 0,
406 	  .size_bits    = 32 },
407 	{ CLASSPORTINFO_REC_FIELD(redirect_gid),
408 	  .offset_words = 2,
409 	  .offset_bits  = 0,
410 	  .size_bits    = 128 },
411 	{ CLASSPORTINFO_REC_FIELD(redirect_tcslfl),
412 	  .offset_words = 6,
413 	  .offset_bits  = 0,
414 	  .size_bits    = 32 },
415 	{ CLASSPORTINFO_REC_FIELD(redirect_lid),
416 	  .offset_words = 7,
417 	  .offset_bits  = 0,
418 	  .size_bits    = 16 },
419 	{ CLASSPORTINFO_REC_FIELD(redirect_pkey),
420 	  .offset_words = 7,
421 	  .offset_bits  = 16,
422 	  .size_bits    = 16 },
423 
424 	{ CLASSPORTINFO_REC_FIELD(redirect_qp),
425 	  .offset_words = 8,
426 	  .offset_bits  = 0,
427 	  .size_bits    = 32 },
428 	{ CLASSPORTINFO_REC_FIELD(redirect_qkey),
429 	  .offset_words = 9,
430 	  .offset_bits  = 0,
431 	  .size_bits    = 32 },
432 
433 	{ CLASSPORTINFO_REC_FIELD(trap_gid),
434 	  .offset_words = 10,
435 	  .offset_bits  = 0,
436 	  .size_bits    = 128 },
437 	{ CLASSPORTINFO_REC_FIELD(trap_tcslfl),
438 	  .offset_words = 14,
439 	  .offset_bits  = 0,
440 	  .size_bits    = 32 },
441 
442 	{ CLASSPORTINFO_REC_FIELD(trap_lid),
443 	  .offset_words = 15,
444 	  .offset_bits  = 0,
445 	  .size_bits    = 16 },
446 	{ CLASSPORTINFO_REC_FIELD(trap_pkey),
447 	  .offset_words = 15,
448 	  .offset_bits  = 16,
449 	  .size_bits    = 16 },
450 
451 	{ CLASSPORTINFO_REC_FIELD(trap_hlqp),
452 	  .offset_words = 16,
453 	  .offset_bits  = 0,
454 	  .size_bits    = 32 },
455 	{ CLASSPORTINFO_REC_FIELD(trap_qkey),
456 	  .offset_words = 17,
457 	  .offset_bits  = 0,
458 	  .size_bits    = 32 },
459 };
460 
461 #define GUIDINFO_REC_FIELD(field) \
462 	.struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field),	\
463 	.struct_size_bytes   = sizeof((struct ib_sa_guidinfo_rec *) 0)->field,	\
464 	.field_name          = "sa_guidinfo_rec:" #field
465 
466 static const struct ib_field guidinfo_rec_table[] = {
467 	{ GUIDINFO_REC_FIELD(lid),
468 	  .offset_words = 0,
469 	  .offset_bits  = 0,
470 	  .size_bits    = 16 },
471 	{ GUIDINFO_REC_FIELD(block_num),
472 	  .offset_words = 0,
473 	  .offset_bits  = 16,
474 	  .size_bits    = 8 },
475 	{ GUIDINFO_REC_FIELD(res1),
476 	  .offset_words = 0,
477 	  .offset_bits  = 24,
478 	  .size_bits    = 8 },
479 	{ GUIDINFO_REC_FIELD(res2),
480 	  .offset_words = 1,
481 	  .offset_bits  = 0,
482 	  .size_bits    = 32 },
483 	{ GUIDINFO_REC_FIELD(guid_info_list),
484 	  .offset_words = 2,
485 	  .offset_bits  = 0,
486 	  .size_bits    = 512 },
487 };
488 
489 static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
490 {
491 	query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
492 }
493 
494 static void free_sm_ah(struct kref *kref)
495 {
496 	struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);
497 
498 	ib_destroy_ah(sm_ah->ah);
499 	kfree(sm_ah);
500 }
501 
502 static void update_sm_ah(struct work_struct *work)
503 {
504 	struct ib_sa_port *port =
505 		container_of(work, struct ib_sa_port, update_task);
506 	struct ib_sa_sm_ah *new_ah;
507 	struct ib_port_attr port_attr;
508 	struct ib_ah_attr   ah_attr;
509 
510 	if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
511 		pr_warn("Couldn't query port\n");
512 		return;
513 	}
514 
515 	new_ah = kmalloc(sizeof *new_ah, GFP_KERNEL);
	if (!new_ah)
		return;
519 
520 	kref_init(&new_ah->ref);
521 	new_ah->src_path_mask = (1 << port_attr.lmc) - 1;
522 
523 	new_ah->pkey_index = 0;
524 	if (ib_find_pkey(port->agent->device, port->port_num,
525 			 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
526 		pr_err("Couldn't find index for default PKey\n");
527 
528 	memset(&ah_attr, 0, sizeof ah_attr);
529 	ah_attr.dlid     = port_attr.sm_lid;
530 	ah_attr.sl       = port_attr.sm_sl;
531 	ah_attr.port_num = port->port_num;
532 	if (port_attr.grh_required) {
533 		ah_attr.ah_flags = IB_AH_GRH;
534 		ah_attr.grh.dgid.global.subnet_prefix = cpu_to_be64(port_attr.subnet_prefix);
535 		ah_attr.grh.dgid.global.interface_id = cpu_to_be64(IB_SA_WELL_KNOWN_GUID);
536 	}
537 
538 	new_ah->ah = ib_create_ah(port->agent->qp->pd, &ah_attr);
539 	if (IS_ERR(new_ah->ah)) {
540 		pr_warn("Couldn't create new SM AH\n");
541 		kfree(new_ah);
542 		return;
543 	}
544 
545 	spin_lock_irq(&port->ah_lock);
546 	if (port->sm_ah)
547 		kref_put(&port->sm_ah->ref, free_sm_ah);
548 	port->sm_ah = new_ah;
549 	spin_unlock_irq(&port->ah_lock);
}
552 
553 static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event)
554 {
555 	if (event->event == IB_EVENT_PORT_ERR    ||
556 	    event->event == IB_EVENT_PORT_ACTIVE ||
557 	    event->event == IB_EVENT_LID_CHANGE  ||
558 	    event->event == IB_EVENT_PKEY_CHANGE ||
559 	    event->event == IB_EVENT_SM_CHANGE   ||
560 	    event->event == IB_EVENT_CLIENT_REREGISTER) {
561 		unsigned long flags;
562 		struct ib_sa_device *sa_dev =
563 			container_of(handler, typeof(*sa_dev), event_handler);
564 		struct ib_sa_port *port =
565 			&sa_dev->port[event->element.port_num - sa_dev->start_port];
566 
567 		if (!rdma_cap_ib_sa(handler->device, port->port_num))
568 			return;
569 
570 		spin_lock_irqsave(&port->ah_lock, flags);
571 		if (port->sm_ah)
572 			kref_put(&port->sm_ah->ref, free_sm_ah);
573 		port->sm_ah = NULL;
574 		spin_unlock_irqrestore(&port->ah_lock, flags);
575 
576 		if (event->event == IB_EVENT_SM_CHANGE ||
577 		    event->event == IB_EVENT_CLIENT_REREGISTER ||
578 		    event->event == IB_EVENT_LID_CHANGE) {
579 			spin_lock_irqsave(&port->classport_lock, flags);
580 			port->classport_info.valid = false;
581 			spin_unlock_irqrestore(&port->classport_lock, flags);
582 		}
583 		queue_work(ib_wq, &sa_dev->port[event->element.port_num -
584 					    sa_dev->start_port].update_task);
585 	}
586 }
587 
588 void ib_sa_register_client(struct ib_sa_client *client)
589 {
590 	atomic_set(&client->users, 1);
591 	init_completion(&client->comp);
592 }
593 EXPORT_SYMBOL(ib_sa_register_client);
594 
595 void ib_sa_unregister_client(struct ib_sa_client *client)
596 {
597 	ib_sa_client_put(client);
598 	wait_for_completion(&client->comp);
599 }
600 EXPORT_SYMBOL(ib_sa_unregister_client);
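
/*
 * Illustrative sketch (not code used by this file): callers pair
 * ib_sa_register_client() with ib_sa_unregister_client() around any SA
 * queries they issue.  Unregistering drops the initial reference and then
 * waits on the completion, so it blocks until every outstanding query has
 * released the client.  All names below are hypothetical.
 *
 *	static struct ib_sa_client my_sa_client;
 *
 *	static int my_module_init(void)
 *	{
 *		ib_sa_register_client(&my_sa_client);
 *		return 0;
 *	}
 *
 *	static void my_module_exit(void)
 *	{
 *		ib_sa_unregister_client(&my_sa_client);
 *	}
 */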
601 
602 /**
603  * ib_sa_cancel_query - try to cancel an SA query
604  * @id:ID of query to cancel
605  * @query:query pointer to cancel
606  *
607  * Try to cancel an SA query.  If the id and query don't match up or
608  * the query has already completed, nothing is done.  Otherwise the
609  * query is canceled and will complete with a status of -EINTR.
610  */
611 void ib_sa_cancel_query(int id, struct ib_sa_query *query)
612 {
613 	unsigned long flags;
614 	struct ib_mad_agent *agent;
615 	struct ib_mad_send_buf *mad_buf;
616 
617 	spin_lock_irqsave(&idr_lock, flags);
618 	if (idr_find(&query_idr, id) != query) {
619 		spin_unlock_irqrestore(&idr_lock, flags);
620 		return;
621 	}
622 	agent = query->port->agent;
623 	mad_buf = query->mad_buf;
	spin_unlock_irqrestore(&idr_lock, flags);

	/*
	 * The MAD layer owns the send at this point; cancelling it makes the
	 * send complete with IB_WC_WR_FLUSH_ERR, which send_handler() then
	 * reports to the caller as -EINTR.
	 */
	ib_cancel_mad(agent, mad_buf);
}
626 EXPORT_SYMBOL(ib_sa_cancel_query);
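
/*
 * Hedged usage sketch: a non-negative return value from one of the query
 * functions below is the id to pass back here, together with the query
 * pointer that the same call filled in.  After cancellation the callback
 * still runs (with status -EINTR) and releases the query.  All names other
 * than ib_sa_path_rec_get() and ib_sa_cancel_query() are placeholders.
 *
 *	struct ib_sa_query *query;
 *	int id;
 *
 *	id = ib_sa_path_rec_get(client, device, port_num, &rec, comp_mask,
 *				timeout_ms, GFP_KERNEL, my_callback, ctx,
 *				&query);
 *	if (id >= 0 && no_longer_needed)
 *		ib_sa_cancel_query(id, query);
 */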
627 
628 static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
629 {
630 	struct ib_sa_device *sa_dev;
631 	struct ib_sa_port   *port;
632 	unsigned long flags;
633 	u8 src_path_mask;
634 
635 	sa_dev = ib_get_client_data(device, &sa_client);
636 	if (!sa_dev)
637 		return 0x7f;
638 
639 	port  = &sa_dev->port[port_num - sa_dev->start_port];
640 	spin_lock_irqsave(&port->ah_lock, flags);
641 	src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
642 	spin_unlock_irqrestore(&port->ah_lock, flags);
643 
644 	return src_path_mask;
645 }
646 
647 int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
648 			 struct ib_sa_path_rec *rec, struct ib_ah_attr *ah_attr)
649 {
650 	int ret;
651 	u16 gid_index;
652 	int use_roce;
653 	struct net_device *ndev = NULL;
654 
655 	memset(ah_attr, 0, sizeof *ah_attr);
656 	ah_attr->dlid = be16_to_cpu(rec->dlid);
657 	ah_attr->sl = rec->sl;
658 	ah_attr->src_path_bits = be16_to_cpu(rec->slid) &
659 				 get_src_path_mask(device, port_num);
660 	ah_attr->port_num = port_num;
661 	ah_attr->static_rate = rec->rate;
662 
663 	use_roce = rdma_cap_eth_ah(device, port_num);
664 
665 	if (use_roce) {
666 		struct net_device *idev;
667 		struct net_device *resolved_dev;
668 		struct rdma_dev_addr dev_addr = {.bound_dev_if = rec->ifindex,
669 						 .net = rec->net ? rec->net :
670 							 &init_net};
671 		union {
672 			struct sockaddr     _sockaddr;
673 			struct sockaddr_in  _sockaddr_in;
674 			struct sockaddr_in6 _sockaddr_in6;
675 		} sgid_addr, dgid_addr;
676 
677 		if (!device->get_netdev)
678 			return -EOPNOTSUPP;
679 
680 		rdma_gid2ip(&sgid_addr._sockaddr, &rec->sgid);
681 		rdma_gid2ip(&dgid_addr._sockaddr, &rec->dgid);
682 
683 		/* validate the route */
684 		ret = rdma_resolve_ip_route(&sgid_addr._sockaddr,
685 					    &dgid_addr._sockaddr, &dev_addr);
686 		if (ret)
687 			return ret;
688 
689 		if ((dev_addr.network == RDMA_NETWORK_IPV4 ||
690 		     dev_addr.network == RDMA_NETWORK_IPV6) &&
691 		    rec->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
692 			return -EINVAL;
693 
694 		idev = device->get_netdev(device, port_num);
695 		if (!idev)
696 			return -ENODEV;
697 
698 		resolved_dev = dev_get_by_index(dev_addr.net,
699 						dev_addr.bound_dev_if);
700 		if (!resolved_dev) {
701 			dev_put(idev);
702 			return -ENODEV;
703 		}
704 		ndev = ib_get_ndev_from_path(rec);
705 		rcu_read_lock();
706 		if ((ndev && ndev != resolved_dev) ||
707 		    (resolved_dev != idev &&
708 		     !rdma_is_upper_dev_rcu(idev, resolved_dev)))
709 			ret = -EHOSTUNREACH;
710 		rcu_read_unlock();
711 		dev_put(idev);
712 		dev_put(resolved_dev);
713 		if (ret) {
714 			if (ndev)
715 				dev_put(ndev);
716 			return ret;
717 		}
718 	}
719 
720 	if (rec->hop_limit > 0 || use_roce) {
721 		ah_attr->ah_flags = IB_AH_GRH;
722 		ah_attr->grh.dgid = rec->dgid;
723 
724 		ret = ib_find_cached_gid_by_port(device, &rec->sgid,
725 						 rec->gid_type, port_num, ndev,
726 						 &gid_index);
727 		if (ret) {
728 			if (ndev)
729 				dev_put(ndev);
730 			return ret;
731 		}
732 
733 		ah_attr->grh.sgid_index    = gid_index;
734 		ah_attr->grh.flow_label    = be32_to_cpu(rec->flow_label);
735 		ah_attr->grh.hop_limit     = rec->hop_limit;
736 		ah_attr->grh.traffic_class = rec->traffic_class;
737 		if (ndev)
738 			dev_put(ndev);
739 	}
740 
741 	if (use_roce)
742 		memcpy(ah_attr->dmac, rec->dmac, ETH_ALEN);
743 
744 	return 0;
745 }
746 EXPORT_SYMBOL(ib_init_ah_from_path);
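
/*
 * Minimal sketch of turning a resolved path record into an address handle,
 * e.g. from a path-query callback.  It assumes a valid PD; error handling
 * is abbreviated and the helper name is hypothetical.
 *
 *	static struct ib_ah *ah_from_path(struct ib_device *device, u8 port_num,
 *					  struct ib_pd *pd,
 *					  struct ib_sa_path_rec *rec)
 *	{
 *		struct ib_ah_attr ah_attr;
 *		int ret;
 *
 *		ret = ib_init_ah_from_path(device, port_num, rec, &ah_attr);
 *		if (ret)
 *			return ERR_PTR(ret);
 *		return ib_create_ah(pd, &ah_attr);
 *	}
 */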
747 
748 static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
749 {
750 	unsigned long flags;
751 
752 	spin_lock_irqsave(&query->port->ah_lock, flags);
753 	if (!query->port->sm_ah) {
754 		spin_unlock_irqrestore(&query->port->ah_lock, flags);
755 		return -EAGAIN;
756 	}
757 	kref_get(&query->port->sm_ah->ref);
758 	query->sm_ah = query->port->sm_ah;
759 	spin_unlock_irqrestore(&query->port->ah_lock, flags);
760 
761 	query->mad_buf = ib_create_send_mad(query->port->agent, 1,
762 					    query->sm_ah->pkey_index,
763 					    0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
764 					    gfp_mask,
765 					    IB_MGMT_BASE_VERSION);
766 	if (IS_ERR(query->mad_buf)) {
767 		kref_put(&query->sm_ah->ref, free_sm_ah);
768 		return -ENOMEM;
769 	}
770 
771 	query->mad_buf->ah = query->sm_ah->ah;
772 
773 	return 0;
774 }
775 
776 static void free_mad(struct ib_sa_query *query)
777 {
778 	ib_free_send_mad(query->mad_buf);
779 	kref_put(&query->sm_ah->ref, free_sm_ah);
780 }
781 
782 static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
783 {
784 	unsigned long flags;
785 
786 	memset(mad, 0, sizeof *mad);
787 
788 	mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
789 	mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_SUBN_ADM;
790 	mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;
791 
792 	spin_lock_irqsave(&tid_lock, flags);
793 	mad->mad_hdr.tid           =
794 		cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
795 	spin_unlock_irqrestore(&tid_lock, flags);
796 }
797 
798 static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
799 {
800 	bool preload = gfpflags_allow_blocking(gfp_mask);
801 	unsigned long flags;
802 	int ret, id;
803 
804 	if (preload)
805 		idr_preload(gfp_mask);
806 	spin_lock_irqsave(&idr_lock, flags);
807 
808 	id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);
809 
810 	spin_unlock_irqrestore(&idr_lock, flags);
811 	if (preload)
812 		idr_preload_end();
813 	if (id < 0)
814 		return id;
815 
816 	query->mad_buf->timeout_ms  = timeout_ms;
817 	query->mad_buf->context[0] = query;
818 	query->id = id;
819 
	if (query->flags & IB_SA_ENABLE_LOCAL_SERVICE)
		ib_sa_disable_local_svc(query);
823 
824 	ret = ib_post_send_mad(query->mad_buf, NULL);
825 	if (ret) {
826 		spin_lock_irqsave(&idr_lock, flags);
827 		idr_remove(&query_idr, id);
828 		spin_unlock_irqrestore(&idr_lock, flags);
829 	}
830 
831 	/*
832 	 * It's not safe to dereference query any more, because the
833 	 * send may already have completed and freed the query in
834 	 * another context.
835 	 */
836 	return ret ? ret : id;
837 }
838 
839 void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec)
840 {
841 	ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
842 }
843 EXPORT_SYMBOL(ib_sa_unpack_path);
844 
845 void ib_sa_pack_path(struct ib_sa_path_rec *rec, void *attribute)
846 {
847 	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute);
848 }
849 EXPORT_SYMBOL(ib_sa_pack_path);
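
/*
 * The two helpers above are inverses over path_rec_table: packing writes
 * the on-the-wire SA attribute and unpacking recovers the host structure.
 * A rough round-trip sketch (the buffer size reuses the SA payload size
 * used elsewhere in this file; the function name is hypothetical):
 *
 *	static void path_rec_roundtrip(struct ib_sa_path_rec *rec)
 *	{
 *		u8 wire[IB_MGMT_SA_DATA];
 *		struct ib_sa_path_rec out;
 *
 *		ib_sa_pack_path(rec, wire);
 *		ib_sa_unpack_path(wire, &out);
 *	}
 */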
850 
851 static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
852 				    int status,
853 				    struct ib_sa_mad *mad)
854 {
855 	struct ib_sa_path_query *query =
856 		container_of(sa_query, struct ib_sa_path_query, sa_query);
857 
858 	if (mad) {
859 		struct ib_sa_path_rec rec;
860 
861 		ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
862 			  mad->data, &rec);
863 		rec.net = NULL;
864 		rec.ifindex = 0;
865 		rec.gid_type = IB_GID_TYPE_IB;
866 		eth_zero_addr(rec.dmac);
867 		query->callback(status, &rec, query->context);
868 	} else
869 		query->callback(status, NULL, query->context);
870 }
871 
872 static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
873 {
874 	kfree(container_of(sa_query, struct ib_sa_path_query, sa_query));
875 }
876 
877 /**
878  * ib_sa_path_rec_get - Start a Path get query
879  * @client:SA client
880  * @device:device to send query on
881  * @port_num: port number to send query on
882  * @rec:Path Record to send in query
883  * @comp_mask:component mask to send in query
884  * @timeout_ms:time to wait for response
885  * @gfp_mask:GFP mask to use for internal allocations
886  * @callback:function called when query completes, times out or is
887  * canceled
888  * @context:opaque user context passed to callback
889  * @sa_query:query context, used to cancel query
890  *
891  * Send a Path Record Get query to the SA to look up a path.  The
892  * callback function will be called when the query completes (or
893  * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
895  * occurred sending the query.  The resp parameter of the callback is
896  * only valid if status is 0.
897  *
898  * If the return value of ib_sa_path_rec_get() is negative, it is an
899  * error code.  Otherwise it is a query ID that can be used to cancel
900  * the query.
901  */
902 int ib_sa_path_rec_get(struct ib_sa_client *client,
903 		       struct ib_device *device, u8 port_num,
904 		       struct ib_sa_path_rec *rec,
905 		       ib_sa_comp_mask comp_mask,
906 		       int timeout_ms, gfp_t gfp_mask,
907 		       void (*callback)(int status,
908 					struct ib_sa_path_rec *resp,
909 					void *context),
910 		       void *context,
911 		       struct ib_sa_query **sa_query)
912 {
913 	struct ib_sa_path_query *query;
914 	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
915 	struct ib_sa_port   *port;
916 	struct ib_mad_agent *agent;
917 	struct ib_sa_mad *mad;
918 	int ret;
919 
920 	if (!sa_dev)
921 		return -ENODEV;
922 
923 	port  = &sa_dev->port[port_num - sa_dev->start_port];
924 	agent = port->agent;
925 
926 	query = kzalloc(sizeof(*query), gfp_mask);
927 	if (!query)
928 		return -ENOMEM;
929 
930 	query->sa_query.port     = port;
931 	ret = alloc_mad(&query->sa_query, gfp_mask);
932 	if (ret)
933 		goto err1;
934 
935 	ib_sa_client_get(client);
936 	query->sa_query.client = client;
937 	query->callback        = callback;
938 	query->context         = context;
939 
940 	mad = query->sa_query.mad_buf->mad;
941 	init_mad(mad, agent);
942 
943 	query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
944 	query->sa_query.release  = ib_sa_path_rec_release;
945 	mad->mad_hdr.method	 = IB_MGMT_METHOD_GET;
946 	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_PATH_REC);
947 	mad->sa_hdr.comp_mask	 = comp_mask;
948 
949 	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, mad->data);
950 
951 	*sa_query = &query->sa_query;
952 
953 	query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE;
954 	query->sa_query.mad_buf->context[1] = rec;
955 
956 	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
957 	if (ret < 0)
958 		goto err2;
959 
960 	return ret;
961 
962 err2:
963 	*sa_query = NULL;
964 	ib_sa_client_put(query->sa_query.client);
965 	free_mad(&query->sa_query);
966 
967 err1:
968 	kfree(query);
969 	return ret;
970 }
971 EXPORT_SYMBOL(ib_sa_path_rec_get);
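
/*
 * End-to-end sketch of a path lookup with the API above.  This is a hedged
 * example, not code used by this file: the completion-based wait and the
 * callback are illustrative, and the IB_SA_PATH_REC_* component-mask bits
 * are assumed to come from <rdma/ib_sa.h>.
 *
 *	struct my_path_ctx {
 *		struct completion done;
 *		struct ib_sa_path_rec rec;
 *		int status;
 *	};
 *
 *	static void my_path_cb(int status, struct ib_sa_path_rec *resp,
 *			       void *context)
 *	{
 *		struct my_path_ctx *ctx = context;
 *
 *		ctx->status = status;
 *		if (!status)
 *			ctx->rec = *resp;
 *		complete(&ctx->done);
 *	}
 *
 *	...
 *	init_completion(&ctx->done);
 *	id = ib_sa_path_rec_get(&my_sa_client, device, port_num, &req,
 *				IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
 *				IB_SA_PATH_REC_NUMB_PATH,
 *				1000, GFP_KERNEL, my_path_cb, ctx, &query);
 *	if (id >= 0)
 *		wait_for_completion(&ctx->done);
 */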
972 
973 static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
974 				    int status,
975 				    struct ib_sa_mad *mad)
976 {
977 	struct ib_sa_service_query *query =
978 		container_of(sa_query, struct ib_sa_service_query, sa_query);
979 
980 	if (mad) {
981 		struct ib_sa_service_rec rec;
982 
983 		ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
984 			  mad->data, &rec);
985 		query->callback(status, &rec, query->context);
986 	} else
987 		query->callback(status, NULL, query->context);
988 }
989 
990 static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
991 {
992 	kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
993 }
994 
995 /**
996  * ib_sa_service_rec_query - Start Service Record operation
997  * @client:SA client
998  * @device:device to send request on
999  * @port_num: port number to send request on
1000  * @method:SA method - should be get, set, or delete
1001  * @rec:Service Record to send in request
1002  * @comp_mask:component mask to send in request
1003  * @timeout_ms:time to wait for response
1004  * @gfp_mask:GFP mask to use for internal allocations
1005  * @callback:function called when request completes, times out or is
1006  * canceled
1007  * @context:opaque user context passed to callback
1008  * @sa_query:request context, used to cancel request
1009  *
1010  * Send a Service Record set/get/delete to the SA to register,
1011  * unregister or query a service record.
1012  * The callback function will be called when the request completes (or
1013  * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
1015  * occurred sending the query.  The resp parameter of the callback is
1016  * only valid if status is 0.
1017  *
1018  * If the return value of ib_sa_service_rec_query() is negative, it is an
1019  * error code.  Otherwise it is a request ID that can be used to cancel
1020  * the query.
1021  */
1022 int ib_sa_service_rec_query(struct ib_sa_client *client,
1023 			    struct ib_device *device, u8 port_num, u8 method,
1024 			    struct ib_sa_service_rec *rec,
1025 			    ib_sa_comp_mask comp_mask,
1026 			    int timeout_ms, gfp_t gfp_mask,
1027 			    void (*callback)(int status,
1028 					     struct ib_sa_service_rec *resp,
1029 					     void *context),
1030 			    void *context,
1031 			    struct ib_sa_query **sa_query)
1032 {
1033 	struct ib_sa_service_query *query;
1034 	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1035 	struct ib_sa_port   *port;
1036 	struct ib_mad_agent *agent;
1037 	struct ib_sa_mad *mad;
1038 	int ret;
1039 
1040 	if (!sa_dev)
1041 		return -ENODEV;
1042 
1043 	port  = &sa_dev->port[port_num - sa_dev->start_port];
1044 	agent = port->agent;
1045 
1046 	if (method != IB_MGMT_METHOD_GET &&
1047 	    method != IB_MGMT_METHOD_SET &&
1048 	    method != IB_SA_METHOD_DELETE)
1049 		return -EINVAL;
1050 
1051 	query = kzalloc(sizeof(*query), gfp_mask);
1052 	if (!query)
1053 		return -ENOMEM;
1054 
1055 	query->sa_query.port     = port;
1056 	ret = alloc_mad(&query->sa_query, gfp_mask);
1057 	if (ret)
1058 		goto err1;
1059 
1060 	ib_sa_client_get(client);
1061 	query->sa_query.client = client;
1062 	query->callback        = callback;
1063 	query->context         = context;
1064 
1065 	mad = query->sa_query.mad_buf->mad;
1066 	init_mad(mad, agent);
1067 
1068 	query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
1069 	query->sa_query.release  = ib_sa_service_rec_release;
1070 	mad->mad_hdr.method	 = method;
1071 	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
1072 	mad->sa_hdr.comp_mask	 = comp_mask;
1073 
1074 	ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
1075 		rec, mad->data);
1076 
1077 	*sa_query = &query->sa_query;
1078 
1079 	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
1080 	if (ret < 0)
1081 		goto err2;
1082 
1083 	return ret;
1084 
1085 err2:
1086 	*sa_query = NULL;
1087 	ib_sa_client_put(query->sa_query.client);
1088 	free_mad(&query->sa_query);
1089 
1090 err1:
1091 	kfree(query);
1092 	return ret;
1093 }
1094 EXPORT_SYMBOL(ib_sa_service_rec_query);
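
/*
 * Hedged sketch of registering a service record via the function above: a
 * SET advertises the record for the requested lease and a later
 * IB_SA_METHOD_DELETE with the same key fields removes it.  Scalar record
 * fields hold network-byte-order values (that is what the packer expects);
 * the comp_mask value and the remaining names are placeholders chosen by
 * the caller.
 *
 *	struct ib_sa_service_rec rec = {};
 *
 *	rec.id    = cpu_to_be64(my_service_id);
 *	rec.gid   = my_port_gid;
 *	rec.pkey  = cpu_to_be16(my_pkey);
 *	rec.lease = cpu_to_be32(0xffffffff);	(indefinite lease)
 *	strlcpy((char *)rec.name, "my-service", sizeof(rec.name));
 *
 *	id = ib_sa_service_rec_query(&my_sa_client, device, port_num,
 *				     IB_MGMT_METHOD_SET, &rec, comp_mask,
 *				     1000, GFP_KERNEL, my_service_cb, ctx,
 *				     &query);
 */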
1095 
1096 static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
1097 					int status,
1098 					struct ib_sa_mad *mad)
1099 {
1100 	struct ib_sa_mcmember_query *query =
1101 		container_of(sa_query, struct ib_sa_mcmember_query, sa_query);
1102 
1103 	if (mad) {
1104 		struct ib_sa_mcmember_rec rec;
1105 
1106 		ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
1107 			  mad->data, &rec);
1108 		query->callback(status, &rec, query->context);
1109 	} else
1110 		query->callback(status, NULL, query->context);
1111 }
1112 
1113 static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
1114 {
1115 	kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
1116 }
1117 
1118 int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
1119 			     struct ib_device *device, u8 port_num,
1120 			     u8 method,
1121 			     struct ib_sa_mcmember_rec *rec,
1122 			     ib_sa_comp_mask comp_mask,
1123 			     int timeout_ms, gfp_t gfp_mask,
1124 			     void (*callback)(int status,
1125 					      struct ib_sa_mcmember_rec *resp,
1126 					      void *context),
1127 			     void *context,
1128 			     struct ib_sa_query **sa_query)
1129 {
1130 	struct ib_sa_mcmember_query *query;
1131 	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1132 	struct ib_sa_port   *port;
1133 	struct ib_mad_agent *agent;
1134 	struct ib_sa_mad *mad;
1135 	int ret;
1136 
1137 	if (!sa_dev)
1138 		return -ENODEV;
1139 
1140 	port  = &sa_dev->port[port_num - sa_dev->start_port];
1141 	agent = port->agent;
1142 
1143 	query = kzalloc(sizeof(*query), gfp_mask);
1144 	if (!query)
1145 		return -ENOMEM;
1146 
1147 	query->sa_query.port     = port;
1148 	ret = alloc_mad(&query->sa_query, gfp_mask);
1149 	if (ret)
1150 		goto err1;
1151 
1152 	ib_sa_client_get(client);
1153 	query->sa_query.client = client;
1154 	query->callback        = callback;
1155 	query->context         = context;
1156 
1157 	mad = query->sa_query.mad_buf->mad;
1158 	init_mad(mad, agent);
1159 
1160 	query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
1161 	query->sa_query.release  = ib_sa_mcmember_rec_release;
1162 	mad->mad_hdr.method	 = method;
1163 	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
1164 	mad->sa_hdr.comp_mask	 = comp_mask;
1165 
1166 	ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
1167 		rec, mad->data);
1168 
1169 	*sa_query = &query->sa_query;
1170 
1171 	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
1172 	if (ret < 0)
1173 		goto err2;
1174 
1175 	return ret;
1176 
1177 err2:
1178 	*sa_query = NULL;
1179 	ib_sa_client_put(query->sa_query.client);
1180 	free_mad(&query->sa_query);
1181 
1182 err1:
1183 	kfree(query);
1184 	return ret;
1185 }
1186 
1187 /* Support GuidInfoRecord */
1188 static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
1189 					int status,
1190 					struct ib_sa_mad *mad)
1191 {
1192 	struct ib_sa_guidinfo_query *query =
1193 		container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);
1194 
1195 	if (mad) {
1196 		struct ib_sa_guidinfo_rec rec;
1197 
1198 		ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
1199 			  mad->data, &rec);
1200 		query->callback(status, &rec, query->context);
1201 	} else
1202 		query->callback(status, NULL, query->context);
1203 }
1204 
1205 static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
1206 {
1207 	kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
1208 }
1209 
1210 int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
1211 			      struct ib_device *device, u8 port_num,
1212 			      struct ib_sa_guidinfo_rec *rec,
1213 			      ib_sa_comp_mask comp_mask, u8 method,
1214 			      int timeout_ms, gfp_t gfp_mask,
1215 			      void (*callback)(int status,
1216 					       struct ib_sa_guidinfo_rec *resp,
1217 					       void *context),
1218 			      void *context,
1219 			      struct ib_sa_query **sa_query)
1220 {
1221 	struct ib_sa_guidinfo_query *query;
1222 	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1223 	struct ib_sa_port *port;
1224 	struct ib_mad_agent *agent;
1225 	struct ib_sa_mad *mad;
1226 	int ret;
1227 
1228 	if (!sa_dev)
1229 		return -ENODEV;
1230 
1231 	if (method != IB_MGMT_METHOD_GET &&
1232 	    method != IB_MGMT_METHOD_SET &&
1233 	    method != IB_SA_METHOD_DELETE) {
1234 		return -EINVAL;
1235 	}
1236 
1237 	port  = &sa_dev->port[port_num - sa_dev->start_port];
1238 	agent = port->agent;
1239 
1240 	query = kzalloc(sizeof(*query), gfp_mask);
1241 	if (!query)
1242 		return -ENOMEM;
1243 
1244 	query->sa_query.port = port;
1245 	ret = alloc_mad(&query->sa_query, gfp_mask);
1246 	if (ret)
1247 		goto err1;
1248 
1249 	ib_sa_client_get(client);
1250 	query->sa_query.client = client;
1251 	query->callback        = callback;
1252 	query->context         = context;
1253 
1254 	mad = query->sa_query.mad_buf->mad;
1255 	init_mad(mad, agent);
1256 
1257 	query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
1258 	query->sa_query.release  = ib_sa_guidinfo_rec_release;
1259 
1260 	mad->mad_hdr.method	 = method;
1261 	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
1262 	mad->sa_hdr.comp_mask	 = comp_mask;
1263 
1264 	ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
1265 		mad->data);
1266 
1267 	*sa_query = &query->sa_query;
1268 
1269 	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
1270 	if (ret < 0)
1271 		goto err2;
1272 
1273 	return ret;
1274 
1275 err2:
1276 	*sa_query = NULL;
1277 	ib_sa_client_put(query->sa_query.client);
1278 	free_mad(&query->sa_query);
1279 
1280 err1:
1281 	kfree(query);
1282 	return ret;
1283 }
1284 EXPORT_SYMBOL(ib_sa_guid_info_rec_query);
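
/*
 * Brief hedged sketch: a GuidInfoRecord query follows the same pattern as
 * the other record queries; the method selects GET/SET/DELETE and the
 * record carries the LID, block number and an 8-entry GUID block.  The
 * comp_mask and the helper names below are placeholders, not definitions
 * from this file.
 *
 *	struct ib_sa_guidinfo_rec rec = {};
 *
 *	rec.lid       = cpu_to_be16(port_lid);
 *	rec.block_num = 0;
 *	id = ib_sa_guid_info_rec_query(&my_sa_client, device, port_num, &rec,
 *				       comp_mask, IB_MGMT_METHOD_GET,
 *				       1000, GFP_KERNEL, my_guidinfo_cb, ctx,
 *				       &query);
 */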
1285 
1286 /* Support get SA ClassPortInfo */
1287 static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
1288 					      int status,
1289 					      struct ib_sa_mad *mad)
1290 {
1291 	unsigned long flags;
1292 	struct ib_sa_classport_info_query *query =
1293 		container_of(sa_query, struct ib_sa_classport_info_query, sa_query);
1294 
1295 	if (mad) {
1296 		struct ib_class_port_info rec;
1297 
1298 		ib_unpack(classport_info_rec_table,
1299 			  ARRAY_SIZE(classport_info_rec_table),
1300 			  mad->data, &rec);
1301 
1302 		spin_lock_irqsave(&sa_query->port->classport_lock, flags);
1303 		if (!status && !sa_query->port->classport_info.valid) {
1304 			memcpy(&sa_query->port->classport_info.data, &rec,
1305 			       sizeof(sa_query->port->classport_info.data));
1306 
1307 			sa_query->port->classport_info.valid = true;
1308 		}
1309 		spin_unlock_irqrestore(&sa_query->port->classport_lock, flags);
1310 
1311 		query->callback(status, &rec, query->context);
1312 	} else {
1313 		query->callback(status, NULL, query->context);
1314 	}
1315 }
1316 
1317 static void ib_sa_portclass_info_rec_release(struct ib_sa_query *sa_query)
1318 {
1319 	kfree(container_of(sa_query, struct ib_sa_classport_info_query,
1320 			   sa_query));
1321 }
1322 
1323 int ib_sa_classport_info_rec_query(struct ib_sa_client *client,
1324 				   struct ib_device *device, u8 port_num,
1325 				   int timeout_ms, gfp_t gfp_mask,
1326 				   void (*callback)(int status,
1327 						    struct ib_class_port_info *resp,
1328 						    void *context),
1329 				   void *context,
1330 				   struct ib_sa_query **sa_query)
1331 {
1332 	struct ib_sa_classport_info_query *query;
1333 	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
1334 	struct ib_sa_port *port;
1335 	struct ib_mad_agent *agent;
1336 	struct ib_sa_mad *mad;
1337 	struct ib_class_port_info cached_class_port_info;
1338 	int ret;
1339 	unsigned long flags;
1340 
1341 	if (!sa_dev)
1342 		return -ENODEV;
1343 
1344 	port  = &sa_dev->port[port_num - sa_dev->start_port];
1345 	agent = port->agent;
1346 
	/* Use the cached ClassPortInfo attribute, if valid, instead of sending a MAD */
1348 	spin_lock_irqsave(&port->classport_lock, flags);
1349 	if (port->classport_info.valid && callback) {
1350 		memcpy(&cached_class_port_info, &port->classport_info.data,
1351 		       sizeof(cached_class_port_info));
1352 		spin_unlock_irqrestore(&port->classport_lock, flags);
1353 		callback(0, &cached_class_port_info, context);
1354 		return 0;
1355 	}
1356 	spin_unlock_irqrestore(&port->classport_lock, flags);
1357 
1358 	query = kzalloc(sizeof(*query), gfp_mask);
1359 	if (!query)
1360 		return -ENOMEM;
1361 
1362 	query->sa_query.port = port;
1363 	ret = alloc_mad(&query->sa_query, gfp_mask);
1364 	if (ret)
1365 		goto err1;
1366 
1367 	ib_sa_client_get(client);
1368 	query->sa_query.client = client;
1369 	query->callback        = callback;
1370 	query->context         = context;
1371 
1372 	mad = query->sa_query.mad_buf->mad;
1373 	init_mad(mad, agent);
1374 
1375 	query->sa_query.callback = callback ? ib_sa_classport_info_rec_callback : NULL;
1376 
1377 	query->sa_query.release  = ib_sa_portclass_info_rec_release;
1378 	/* support GET only */
1379 	mad->mad_hdr.method	 = IB_MGMT_METHOD_GET;
1380 	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_CLASS_PORTINFO);
1381 	mad->sa_hdr.comp_mask	 = 0;
1382 	*sa_query = &query->sa_query;
1383 
1384 	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
1385 	if (ret < 0)
1386 		goto err2;
1387 
1388 	return ret;
1389 
1390 err2:
1391 	*sa_query = NULL;
1392 	ib_sa_client_put(query->sa_query.client);
1393 	free_mad(&query->sa_query);
1394 
1395 err1:
1396 	kfree(query);
1397 	return ret;
1398 }
1399 EXPORT_SYMBOL(ib_sa_classport_info_rec_query);
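
/*
 * Hedged usage sketch: because of the cache consulted above, a caller with
 * a callback may be answered immediately (the callback is invoked before
 * the function returns) or asynchronously after a real SA GET completes,
 * so the callback must not assume it runs in a different context.  Names
 * below are hypothetical.
 *
 *	static void my_cpi_cb(int status, struct ib_class_port_info *resp,
 *			      void *context)
 *	{
 *		if (!status)
 *			pr_info("SA CapabilityMask 0x%04x\n",
 *				be16_to_cpu(resp->capability_mask));
 *	}
 *
 *	ret = ib_sa_classport_info_rec_query(&my_sa_client, device, port_num,
 *					     1000, GFP_KERNEL, my_cpi_cb,
 *					     NULL, &query);
 */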
1400 
1401 static void send_handler(struct ib_mad_agent *agent,
1402 			 struct ib_mad_send_wc *mad_send_wc)
1403 {
1404 	struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
1405 	unsigned long flags;
1406 
1407 	if (query->callback)
1408 		switch (mad_send_wc->status) {
1409 		case IB_WC_SUCCESS:
1410 			/* No callback -- already got recv */
1411 			break;
1412 		case IB_WC_RESP_TIMEOUT_ERR:
1413 			query->callback(query, -ETIMEDOUT, NULL);
1414 			break;
1415 		case IB_WC_WR_FLUSH_ERR:
1416 			query->callback(query, -EINTR, NULL);
1417 			break;
1418 		default:
1419 			query->callback(query, -EIO, NULL);
1420 			break;
1421 		}
1422 
1423 	spin_lock_irqsave(&idr_lock, flags);
1424 	idr_remove(&query_idr, query->id);
1425 	spin_unlock_irqrestore(&idr_lock, flags);
1426 
1427 	free_mad(query);
1428 	ib_sa_client_put(query->client);
1429 	query->release(query);
1430 }
1431 
1432 static void recv_handler(struct ib_mad_agent *mad_agent,
1433 			 struct ib_mad_send_buf *send_buf,
1434 			 struct ib_mad_recv_wc *mad_recv_wc)
1435 {
1436 	struct ib_sa_query *query;
1437 
1438 	if (!send_buf)
1439 		return;
1440 
1441 	query = send_buf->context[0];
1442 	if (query->callback) {
1443 		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
1444 			query->callback(query,
1445 					mad_recv_wc->recv_buf.mad->mad_hdr.status ?
1446 					-EINVAL : 0,
1447 					(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
1448 		else
1449 			query->callback(query, -EIO, NULL);
1450 	}
1451 
1452 	ib_free_recv_mad(mad_recv_wc);
1453 }
1454 
1455 static void ib_sa_add_one(struct ib_device *device)
1456 {
1457 	struct ib_sa_device *sa_dev;
1458 	int s, e, i;
1459 	int count = 0;
1460 
1461 	s = rdma_start_port(device);
1462 	e = rdma_end_port(device);
1463 
1464 	sa_dev = kzalloc(sizeof *sa_dev +
1465 			 (e - s + 1) * sizeof (struct ib_sa_port),
1466 			 GFP_KERNEL);
1467 	if (!sa_dev)
1468 		return;
1469 
1470 	sa_dev->start_port = s;
1471 	sa_dev->end_port   = e;
1472 
1473 	for (i = 0; i <= e - s; ++i) {
1474 		spin_lock_init(&sa_dev->port[i].ah_lock);
1475 		if (!rdma_cap_ib_sa(device, i + 1))
1476 			continue;
1477 
1478 		sa_dev->port[i].sm_ah    = NULL;
1479 		sa_dev->port[i].port_num = i + s;
1480 
1481 		spin_lock_init(&sa_dev->port[i].classport_lock);
1482 		sa_dev->port[i].classport_info.valid = false;
1483 
1484 		sa_dev->port[i].agent =
1485 			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
1486 					      NULL, 0, send_handler,
1487 					      recv_handler, sa_dev, 0);
1488 		if (IS_ERR(sa_dev->port[i].agent))
1489 			goto err;
1490 
1491 		INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);
1492 
1493 		count++;
1494 	}
1495 
1496 	if (!count)
1497 		goto free;
1498 
1499 	ib_set_client_data(device, &sa_client, sa_dev);
1500 
1501 	/*
1502 	 * We register our event handler after everything is set up,
1503 	 * and then update our cached info after the event handler is
1504 	 * registered to avoid any problems if a port changes state
1505 	 * during our initialization.
1506 	 */
1507 
1508 	INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
1509 	if (ib_register_event_handler(&sa_dev->event_handler))
1510 		goto err;
1511 
1512 	for (i = 0; i <= e - s; ++i) {
1513 		if (rdma_cap_ib_sa(device, i + 1))
1514 			update_sm_ah(&sa_dev->port[i].update_task);
1515 	}
1516 
1517 	return;
1518 
1519 err:
1520 	while (--i >= 0) {
1521 		if (rdma_cap_ib_sa(device, i + 1))
1522 			ib_unregister_mad_agent(sa_dev->port[i].agent);
1523 	}
1524 free:
1525 	kfree(sa_dev);
1526 	return;
1527 }
1528 
1529 static void ib_sa_remove_one(struct ib_device *device, void *client_data)
1530 {
1531 	struct ib_sa_device *sa_dev = client_data;
1532 	int i;
1533 
1534 	if (!sa_dev)
1535 		return;
1536 
1537 	ib_unregister_event_handler(&sa_dev->event_handler);
1538 
1539 	flush_workqueue(ib_wq);
1540 
1541 	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
1542 		if (rdma_cap_ib_sa(device, i + 1)) {
1543 			ib_unregister_mad_agent(sa_dev->port[i].agent);
1544 			if (sa_dev->port[i].sm_ah)
1545 				kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
1546 		}
1547 
1548 	}
1549 
1550 	kfree(sa_dev);
1551 }
1552 
1553 int ib_sa_init(void)
1554 {
1555 	int ret;
1556 
1557 	get_random_bytes(&tid, sizeof tid);
1558 
1559 	ret = ib_register_client(&sa_client);
1560 	if (ret) {
1561 		pr_err("Couldn't register ib_sa client\n");
1562 		goto err1;
1563 	}
1564 
1565 	ret = mcast_init();
1566 	if (ret) {
1567 		pr_err("Couldn't initialize multicast handling\n");
1568 		goto err2;
1569 	}
1570 
1571 	return 0;
1572 
1573 err2:
1574 	ib_unregister_client(&sa_client);
1575 err1:
1576 	return ret;
1577 }
1578 
1579 void ib_sa_cleanup(void)
1580 {
1581 	mcast_cleanup();
1582 	ib_unregister_client(&sa_client);
1583 	idr_destroy(&query_idr);
1584 }
1585