/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2006 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $FreeBSD$
 */

#include <linux/module.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/idr.h>
#include <linux/workqueue.h>
#include <linux/etherdevice.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_user_sa.h>
#include <rdma/ib_marshall.h>
#include <rdma/ib_addr.h>
#include "sa.h"
#include "core_priv.h"

#define IB_SA_LOCAL_SVC_TIMEOUT_MIN		100
#define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT		2000
#define IB_SA_LOCAL_SVC_TIMEOUT_MAX		200000

struct ib_sa_sm_ah {
	struct ib_ah *ah;
	struct kref ref;
	u16 pkey_index;
	u8 src_path_mask;
};

struct ib_sa_classport_cache {
	bool valid;
	struct ib_class_port_info data;
};

struct ib_sa_port {
	struct ib_mad_agent *agent;
	struct ib_sa_sm_ah *sm_ah;
	struct work_struct update_task;
	struct ib_sa_classport_cache classport_info;
	spinlock_t classport_lock; /* protects class port info set */
	spinlock_t ah_lock;
	u8 port_num;
};

struct ib_sa_device {
	int start_port, end_port;
	struct ib_event_handler event_handler;
	struct ib_sa_port port[0];
};

struct ib_sa_query {
	void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
	void (*release)(struct ib_sa_query *);
	struct ib_sa_client *client;
	struct ib_sa_port *port;
	struct ib_mad_send_buf *mad_buf;
	struct ib_sa_sm_ah *sm_ah;
	int id;
	u32 flags;
	struct list_head list; /* Local svc request list */
	u32 seq; /* Local svc request sequence number */
	unsigned long timeout; /* Local svc timeout */
	u8 path_use; /* How will the pathrecord be used */
};

#define IB_SA_ENABLE_LOCAL_SERVICE	0x00000001
#define IB_SA_CANCEL			0x00000002
struct ib_sa_service_query {
	void (*callback)(int, struct ib_sa_service_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_path_query {
	void (*callback)(int, struct ib_sa_path_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_guidinfo_query {
	void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_classport_info_query {
	void (*callback)(int, struct ib_class_port_info *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_mcmember_query {
	void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

static void ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device, void *client_data);

static struct ib_client sa_client = {
	.name   = "sa",
	.add    = ib_sa_add_one,
	.remove = ib_sa_remove_one
};

static DEFINE_SPINLOCK(idr_lock);
static DEFINE_IDR(query_idr);

static DEFINE_SPINLOCK(tid_lock);
static u32 tid;

#define PATH_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_path_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_path_rec *) 0)->field,	\
	.field_name          = "sa_path_rec:" #field
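/*
 * Layout convention for the ib_field tables below: offset_words and
 * offset_bits locate each field from the start of the attribute's
 * big-endian wire image (in 32-bit words plus a bit offset), and
 * size_bits is the field's width, following the IBTA record layouts.
 * ib_pack()/ib_unpack() walk these descriptors to marshal between the
 * host structures and the MAD data area.
 */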
static const struct ib_field path_rec_table[] = {
	{ PATH_REC_FIELD(service_id),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 64 },
	{ PATH_REC_FIELD(dgid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ PATH_REC_FIELD(sgid),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ PATH_REC_FIELD(dlid),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(slid),
	  .offset_words = 10,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(raw_traffic),
	  .offset_words = 11,
	  .offset_bits  = 0,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 11,
	  .offset_bits  = 1,
	  .size_bits    = 3 },
	{ PATH_REC_FIELD(flow_label),
	  .offset_words = 11,
	  .offset_bits  = 4,
	  .size_bits    = 20 },
	{ PATH_REC_FIELD(hop_limit),
	  .offset_words = 11,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ PATH_REC_FIELD(traffic_class),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ PATH_REC_FIELD(reversible),
	  .offset_words = 12,
	  .offset_bits  = 8,
	  .size_bits    = 1 },
	{ PATH_REC_FIELD(numb_path),
	  .offset_words = 12,
	  .offset_bits  = 9,
	  .size_bits    = 7 },
	{ PATH_REC_FIELD(pkey),
	  .offset_words = 12,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(qos_class),
	  .offset_words = 13,
	  .offset_bits  = 0,
	  .size_bits    = 12 },
	{ PATH_REC_FIELD(sl),
	  .offset_words = 13,
	  .offset_bits  = 12,
	  .size_bits    = 4 },
	{ PATH_REC_FIELD(mtu_selector),
	  .offset_words = 13,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(mtu),
	  .offset_words = 13,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(rate_selector),
	  .offset_words = 13,
	  .offset_bits  = 24,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(rate),
	  .offset_words = 13,
	  .offset_bits  = 26,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(packet_life_time_selector),
	  .offset_words = 14,
	  .offset_bits  = 0,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(packet_life_time),
	  .offset_words = 14,
	  .offset_bits  = 2,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(preference),
	  .offset_words = 14,
	  .offset_bits  = 8,
	  .size_bits    = 8 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits  = 16,
	  .size_bits    = 48 },
};

#define MCMEMBER_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_mcmember_rec *) 0)->field,	\
	.field_name          = "sa_mcmember_rec:" #field

static const struct ib_field mcmember_rec_table[] = {
	{ MCMEMBER_REC_FIELD(mgid),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ MCMEMBER_REC_FIELD(port_gid),
	  .offset_words = 4,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ MCMEMBER_REC_FIELD(qkey),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ MCMEMBER_REC_FIELD(mlid),
	  .offset_words = 9,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ MCMEMBER_REC_FIELD(mtu_selector),
	  .offset_words = 9,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(mtu),
	  .offset_words = 9,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(traffic_class),
	  .offset_words = 9,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ MCMEMBER_REC_FIELD(pkey),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ MCMEMBER_REC_FIELD(rate_selector),
	  .offset_words = 10,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(rate),
	  .offset_words = 10,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(packet_life_time_selector),
	  .offset_words = 10,
	  .offset_bits  = 24,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(packet_life_time),
	  .offset_words = 10,
	  .offset_bits  = 26,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(sl),
	  .offset_words = 11,
	  .offset_bits  = 0,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(flow_label),
	  .offset_words = 11,
	  .offset_bits  = 4,
	  .size_bits    = 20 },
	{ MCMEMBER_REC_FIELD(hop_limit),
	  .offset_words = 11,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ MCMEMBER_REC_FIELD(scope),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(join_state),
	  .offset_words = 12,
	  .offset_bits  = 4,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(proxy_join),
	  .offset_words = 12,
	  .offset_bits  = 8,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 12,
	  .offset_bits  = 9,
	  .size_bits    = 23 },
};

#define SERVICE_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_service_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_service_rec *) 0)->field,	\
	.field_name          = "sa_service_rec:" #field

static const struct ib_field service_rec_table[] = {
	{ SERVICE_REC_FIELD(id),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 64 },
	{ SERVICE_REC_FIELD(gid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ SERVICE_REC_FIELD(pkey),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ SERVICE_REC_FIELD(lease),
	  .offset_words = 7,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ SERVICE_REC_FIELD(key),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ SERVICE_REC_FIELD(name),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 64*8 },
	{ SERVICE_REC_FIELD(data8),
	  .offset_words = 28,
	  .offset_bits  = 0,
	  .size_bits    = 16*8 },
	{ SERVICE_REC_FIELD(data16),
	  .offset_words = 32,
	  .offset_bits  = 0,
	  .size_bits    = 8*16 },
	{ SERVICE_REC_FIELD(data32),
	  .offset_words = 36,
	  .offset_bits  = 0,
	  .size_bits    = 4*32 },
	{ SERVICE_REC_FIELD(data64),
	  .offset_words = 40,
	  .offset_bits  = 0,
	  .size_bits    = 2*64 },
};

#define CLASSPORTINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_class_port_info, field),	\
	.struct_size_bytes   = sizeof((struct ib_class_port_info *)0)->field,	\
	.field_name          = "ib_class_port_info:" #field

static const struct ib_field classport_info_rec_table[] = {
	{ CLASSPORTINFO_REC_FIELD(base_version),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ CLASSPORTINFO_REC_FIELD(class_version),
	  .offset_words = 0,
	  .offset_bits  = 8,
	  .size_bits    = 8 },
	{ CLASSPORTINFO_REC_FIELD(capability_mask),
	  .offset_words = 0,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
	  .offset_words = 1,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ CLASSPORTINFO_REC_FIELD(redirect_gid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ CLASSPORTINFO_REC_FIELD(redirect_tcslfl),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ CLASSPORTINFO_REC_FIELD(redirect_lid),
	  .offset_words = 7,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ CLASSPORTINFO_REC_FIELD(redirect_pkey),
	  .offset_words = 7,
	  .offset_bits  = 16,
	  .size_bits    = 16 },

	{ CLASSPORTINFO_REC_FIELD(redirect_qp),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ CLASSPORTINFO_REC_FIELD(redirect_qkey),
	  .offset_words = 9,
	  .offset_bits  = 0,
	  .size_bits    = 32 },

	{ CLASSPORTINFO_REC_FIELD(trap_gid),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ CLASSPORTINFO_REC_FIELD(trap_tcslfl),
	  .offset_words = 14,
	  .offset_bits  = 0,
	  .size_bits    = 32 },

	{ CLASSPORTINFO_REC_FIELD(trap_lid),
	  .offset_words = 15,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ CLASSPORTINFO_REC_FIELD(trap_pkey),
	  .offset_words = 15,
	  .offset_bits  = 16,
	  .size_bits    = 16 },

	{ CLASSPORTINFO_REC_FIELD(trap_hlqp),
	  .offset_words = 16,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ CLASSPORTINFO_REC_FIELD(trap_qkey),
	  .offset_words = 17,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
};

#define GUIDINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field),	\
	.struct_size_bytes   = sizeof((struct ib_sa_guidinfo_rec *) 0)->field,	\
	.field_name          = "sa_guidinfo_rec:" #field

static const struct ib_field guidinfo_rec_table[] = {
	{ GUIDINFO_REC_FIELD(lid),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ GUIDINFO_REC_FIELD(block_num),
	  .offset_words = 0,
	  .offset_bits  = 16,
	  .size_bits    = 8 },
	{ GUIDINFO_REC_FIELD(res1),
	  .offset_words = 0,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ GUIDINFO_REC_FIELD(res2),
	  .offset_words = 1,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ GUIDINFO_REC_FIELD(guid_info_list),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 512 },
};

static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
{
	query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
}

static void free_sm_ah(struct kref *kref)
{
	struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);

	ib_destroy_ah(sm_ah->ah);
	kfree(sm_ah);
}
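/*
 * Lifetime note: the SM address handle is reference-counted.  The port
 * holds one reference through port->sm_ah, and every in-flight query
 * takes another in alloc_mad().  free_sm_ah() therefore only runs once
 * the last user drops its reference, so an AH that update_sm_ah() has
 * swapped out stays valid until outstanding queries using it finish.
 */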
static void update_sm_ah(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, update_task);
	struct ib_sa_sm_ah *new_ah;
	struct ib_port_attr port_attr;
	struct ib_ah_attr ah_attr;

	if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
		pr_warn("Couldn't query port\n");
		return;
	}

	new_ah = kmalloc(sizeof *new_ah, GFP_KERNEL);
	if (!new_ah)
		return;

	kref_init(&new_ah->ref);
	new_ah->src_path_mask = (1 << port_attr.lmc) - 1;

	new_ah->pkey_index = 0;
	if (ib_find_pkey(port->agent->device, port->port_num,
			 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
		pr_err("Couldn't find index for default PKey\n");

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid = port_attr.sm_lid;
	ah_attr.sl = port_attr.sm_sl;
	ah_attr.port_num = port->port_num;
	if (port_attr.grh_required) {
		ah_attr.ah_flags = IB_AH_GRH;
		ah_attr.grh.dgid.global.subnet_prefix =
			cpu_to_be64(port_attr.subnet_prefix);
		ah_attr.grh.dgid.global.interface_id =
			cpu_to_be64(IB_SA_WELL_KNOWN_GUID);
	}

	new_ah->ah = ib_create_ah(port->agent->qp->pd, &ah_attr);
	if (IS_ERR(new_ah->ah)) {
		pr_warn("Couldn't create new SM AH\n");
		kfree(new_ah);
		return;
	}

	spin_lock_irq(&port->ah_lock);
	if (port->sm_ah)
		kref_put(&port->sm_ah->ref, free_sm_ah);
	port->sm_ah = new_ah;
	spin_unlock_irq(&port->ah_lock);
}

static void ib_sa_event(struct ib_event_handler *handler,
			struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER) {
		unsigned long flags;
		struct ib_sa_device *sa_dev =
			container_of(handler, typeof(*sa_dev), event_handler);
		struct ib_sa_port *port =
			&sa_dev->port[event->element.port_num - sa_dev->start_port];

		if (!rdma_cap_ib_sa(handler->device, port->port_num))
			return;

		spin_lock_irqsave(&port->ah_lock, flags);
		if (port->sm_ah)
			kref_put(&port->sm_ah->ref, free_sm_ah);
		port->sm_ah = NULL;
		spin_unlock_irqrestore(&port->ah_lock, flags);

		if (event->event == IB_EVENT_SM_CHANGE ||
		    event->event == IB_EVENT_CLIENT_REREGISTER ||
		    event->event == IB_EVENT_LID_CHANGE) {
			spin_lock_irqsave(&port->classport_lock, flags);
			port->classport_info.valid = false;
			spin_unlock_irqrestore(&port->classport_lock, flags);
		}
		queue_work(ib_wq, &sa_dev->port[event->element.port_num -
						sa_dev->start_port].update_task);
	}
}

void ib_sa_register_client(struct ib_sa_client *client)
{
	atomic_set(&client->users, 1);
	init_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_register_client);

void ib_sa_unregister_client(struct ib_sa_client *client)
{
	ib_sa_client_put(client);
	wait_for_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_unregister_client);
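/*
 * Usage sketch (illustrative only, not compiled): a consumer embeds an
 * ib_sa_client, registers it once, issues queries against it, and
 * unregisters it on teardown.  ib_sa_unregister_client() blocks until
 * every query holding a reference on the client has completed.  The
 * "my_" names below are hypothetical.
 */
#if 0
static struct ib_sa_client my_sa_client;

static void my_consumer_init(void)
{
	ib_sa_register_client(&my_sa_client);
}

static void my_consumer_exit(void)
{
	/* Waits for all outstanding queries issued on my_sa_client. */
	ib_sa_unregister_client(&my_sa_client);
}
#endif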
/**
 * ib_sa_cancel_query - try to cancel an SA query
 * @id:ID of query to cancel
 * @query:query pointer to cancel
 *
 * Try to cancel an SA query. If the id and query don't match up or
 * the query has already completed, nothing is done. Otherwise the
 * query is canceled and will complete with a status of -EINTR.
 */
void ib_sa_cancel_query(int id, struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_mad_agent *agent;
	struct ib_mad_send_buf *mad_buf;

	spin_lock_irqsave(&idr_lock, flags);
	if (idr_find(&query_idr, id) != query) {
		spin_unlock_irqrestore(&idr_lock, flags);
		return;
	}
	agent = query->port->agent;
	mad_buf = query->mad_buf;
	spin_unlock_irqrestore(&idr_lock, flags);

	/*
	 * Cancel the outstanding MAD: the send then completes through
	 * send_handler() with IB_WC_WR_FLUSH_ERR, which is reported to
	 * the caller as -EINTR, as promised above.  (This call was
	 * missing, so cancellation was silently a no-op.)
	 */
	ib_cancel_mad(agent, mad_buf);
}
EXPORT_SYMBOL(ib_sa_cancel_query);
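/*
 * Usage sketch (illustrative only, not compiled): cancellation needs
 * both the ID returned by the ib_sa_*_query() call and the ib_sa_query
 * pointer it filled in; a mismatched or already-completed pair is a
 * harmless no-op.  The "my_" name is hypothetical.
 */
#if 0
static void my_abort_lookup(int id, struct ib_sa_query *query)
{
	/* The query's callback will run with status -EINTR. */
	ib_sa_cancel_query(id, query);
}
#endif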
static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
{
	struct ib_sa_device *sa_dev;
	struct ib_sa_port *port;
	unsigned long flags;
	u8 src_path_mask;

	sa_dev = ib_get_client_data(device, &sa_client);
	if (!sa_dev)
		return 0x7f;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	spin_lock_irqsave(&port->ah_lock, flags);
	src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
	spin_unlock_irqrestore(&port->ah_lock, flags);

	return src_path_mask;
}

int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
			 struct ib_sa_path_rec *rec, struct ib_ah_attr *ah_attr)
{
	int ret;
	u16 gid_index;
	int use_roce;
	struct net_device *ndev = NULL;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = be16_to_cpu(rec->dlid);
	ah_attr->sl = rec->sl;
	ah_attr->src_path_bits = be16_to_cpu(rec->slid) &
				 get_src_path_mask(device, port_num);
	ah_attr->port_num = port_num;
	ah_attr->static_rate = rec->rate;

	use_roce = rdma_cap_eth_ah(device, port_num);

	if (use_roce) {
		struct net_device *idev;
		struct net_device *resolved_dev;
		struct rdma_dev_addr dev_addr = {
			.bound_dev_if = rec->ifindex,
			.net = rec->net ? rec->net : &init_net
		};
		union {
			struct sockaddr     _sockaddr;
			struct sockaddr_in  _sockaddr_in;
			struct sockaddr_in6 _sockaddr_in6;
		} sgid_addr, dgid_addr;

		if (!device->get_netdev)
			return -EOPNOTSUPP;

		rdma_gid2ip(&sgid_addr._sockaddr, &rec->sgid);
		rdma_gid2ip(&dgid_addr._sockaddr, &rec->dgid);

		/* validate the route */
		ret = rdma_resolve_ip_route(&sgid_addr._sockaddr,
					    &dgid_addr._sockaddr, &dev_addr);
		if (ret)
			return ret;

		if ((dev_addr.network == RDMA_NETWORK_IPV4 ||
		     dev_addr.network == RDMA_NETWORK_IPV6) &&
		    rec->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
			return -EINVAL;

		idev = device->get_netdev(device, port_num);
		if (!idev)
			return -ENODEV;

		resolved_dev = dev_get_by_index(dev_addr.net,
						dev_addr.bound_dev_if);
		if (!resolved_dev) {
			dev_put(idev);
			return -ENODEV;
		}
		ndev = ib_get_ndev_from_path(rec);
		rcu_read_lock();
		if ((ndev && ndev != resolved_dev) ||
		    (resolved_dev != idev &&
		     !rdma_is_upper_dev_rcu(idev, resolved_dev)))
			ret = -EHOSTUNREACH;
		rcu_read_unlock();
		dev_put(idev);
		dev_put(resolved_dev);
		if (ret) {
			if (ndev)
				dev_put(ndev);
			return ret;
		}
	}

	if (rec->hop_limit > 0 || use_roce) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = rec->dgid;

		ret = ib_find_cached_gid_by_port(device, &rec->sgid,
						 rec->gid_type, port_num, ndev,
						 &gid_index);
		if (ret) {
			if (ndev)
				dev_put(ndev);
			return ret;
		}

		ah_attr->grh.sgid_index = gid_index;
		ah_attr->grh.flow_label = be32_to_cpu(rec->flow_label);
		ah_attr->grh.hop_limit = rec->hop_limit;
		ah_attr->grh.traffic_class = rec->traffic_class;
		if (ndev)
			dev_put(ndev);
	}

	if (use_roce)
		memcpy(ah_attr->dmac, rec->dmac, ETH_ALEN);

	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_path);

static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&query->port->ah_lock, flags);
	if (!query->port->sm_ah) {
		spin_unlock_irqrestore(&query->port->ah_lock, flags);
		return -EAGAIN;
	}
	kref_get(&query->port->sm_ah->ref);
	query->sm_ah = query->port->sm_ah;
	spin_unlock_irqrestore(&query->port->ah_lock, flags);

	query->mad_buf = ib_create_send_mad(query->port->agent, 1,
					    query->sm_ah->pkey_index,
					    0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
					    gfp_mask,
					    IB_MGMT_BASE_VERSION);
	if (IS_ERR(query->mad_buf)) {
		kref_put(&query->sm_ah->ref, free_sm_ah);
		return -ENOMEM;
	}

	query->mad_buf->ah = query->sm_ah->ah;

	return 0;
}

static void free_mad(struct ib_sa_query *query)
{
	ib_free_send_mad(query->mad_buf);
	kref_put(&query->sm_ah->ref, free_sm_ah);
}

static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
{
	unsigned long flags;

	memset(mad, 0, sizeof *mad);

	mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
	mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_SUBN_ADM;
	mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;

	spin_lock_irqsave(&tid_lock, flags);
	mad->mad_hdr.tid =
		cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
	spin_unlock_irqrestore(&tid_lock, flags);
}

static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
{
	bool preload = gfpflags_allow_blocking(gfp_mask);
	unsigned long flags;
	int ret, id;

	if (preload)
		idr_preload(gfp_mask);
	spin_lock_irqsave(&idr_lock, flags);

	id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);

	spin_unlock_irqrestore(&idr_lock, flags);
	if (preload)
		idr_preload_end();
	if (id < 0)
		return id;

	query->mad_buf->timeout_ms = timeout_ms;
	query->mad_buf->context[0] = query;
	query->id = id;

	if (query->flags & IB_SA_ENABLE_LOCAL_SERVICE)
		ib_sa_disable_local_svc(query);

	ret = ib_post_send_mad(query->mad_buf, NULL);
	if (ret) {
		spin_lock_irqsave(&idr_lock, flags);
		idr_remove(&query_idr, id);
		spin_unlock_irqrestore(&idr_lock, flags);
	}

	/*
	 * It's not safe to dereference query any more, because the
	 * send may already have completed and freed the query in
	 * another context.
	 */
	return ret ? ret : id;
}
void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec)
{
	ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
}
EXPORT_SYMBOL(ib_sa_unpack_path);

void ib_sa_pack_path(struct ib_sa_path_rec *rec, void *attribute)
{
	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute);
}
EXPORT_SYMBOL(ib_sa_pack_path);
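/*
 * Usage sketch (illustrative only, not compiled): round-tripping a path
 * record through its on-the-wire SA format with the exported helpers.
 * IB_MGMT_SA_DATA is the SA data-area size used elsewhere in this file;
 * the "my_" name is hypothetical.
 */
#if 0
static void my_path_roundtrip(struct ib_sa_path_rec *rec)
{
	u8 wire[IB_MGMT_SA_DATA];
	struct ib_sa_path_rec copy;

	ib_sa_pack_path(rec, wire);	/* host struct -> big-endian image */
	ib_sa_unpack_path(wire, &copy);	/* big-endian image -> host struct */
}
#endif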
static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
				    int status,
				    struct ib_sa_mad *mad)
{
	struct ib_sa_path_query *query =
		container_of(sa_query, struct ib_sa_path_query, sa_query);

	if (mad) {
		struct ib_sa_path_rec rec;

		ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
			  mad->data, &rec);
		rec.net = NULL;
		rec.ifindex = 0;
		rec.gid_type = IB_GID_TYPE_IB;
		eth_zero_addr(rec.dmac);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_path_query, sa_query));
}

/**
 * ib_sa_path_rec_get - Start a Path get query
 * @client:SA client
 * @device:device to send query on
 * @port_num: port number to send query on
 * @rec:Path Record to send in query
 * @comp_mask:component mask to send in query
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when query completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:query context, used to cancel query
 *
 * Send a Path Record Get query to the SA to look up a path. The
 * callback function will be called when the query completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query. The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_path_rec_get() is negative, it is an
 * error code. Otherwise it is a query ID that can be used to cancel
 * the query.
 */
int ib_sa_path_rec_get(struct ib_sa_client *client,
		       struct ib_device *device, u8 port_num,
		       struct ib_sa_path_rec *rec,
		       ib_sa_comp_mask comp_mask,
		       int timeout_ms, gfp_t gfp_mask,
		       void (*callback)(int status,
					struct ib_sa_path_rec *resp,
					void *context),
		       void *context,
		       struct ib_sa_query **sa_query)
{
	struct ib_sa_path_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback = callback;
	query->context = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
	query->sa_query.release = ib_sa_path_rec_release;
	mad->mad_hdr.method = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_PATH_REC);
	mad->sa_hdr.comp_mask = comp_mask;

	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, mad->data);

	*sa_query = &query->sa_query;

	query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE;
	query->sa_query.mad_buf->context[1] = rec;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_path_rec_get);
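/*
 * Usage sketch (illustrative only, not compiled): an asynchronous
 * PathRecord Get.  The callback's resp pointer is only valid for the
 * duration of the call, so the record must be copied if it is kept.
 * The "my_" names are hypothetical.
 */
#if 0
static void my_path_callback(int status, struct ib_sa_path_rec *resp,
			     void *context)
{
	if (!status)
		pr_info("path resolved, dlid 0x%x\n",
			be16_to_cpu(resp->dlid));
}

static int my_path_lookup(struct ib_sa_client *client,
			  struct ib_device *device, u8 port_num,
			  union ib_gid *sgid, union ib_gid *dgid,
			  struct ib_sa_query **query)
{
	struct ib_sa_path_rec rec = {
		.sgid = *sgid,
		.dgid = *dgid,
		.numb_path = 1,
	};

	/* Returns a query ID (>= 0) usable with ib_sa_cancel_query(). */
	return ib_sa_path_rec_get(client, device, port_num, &rec,
				  IB_SA_PATH_REC_SGID |
				  IB_SA_PATH_REC_DGID |
				  IB_SA_PATH_REC_NUMB_PATH,
				  1000, GFP_KERNEL,
				  my_path_callback, NULL, query);
}
#endif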
static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
				       int status,
				       struct ib_sa_mad *mad)
{
	struct ib_sa_service_query *query =
		container_of(sa_query, struct ib_sa_service_query, sa_query);

	if (mad) {
		struct ib_sa_service_rec rec;

		ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
}

/**
 * ib_sa_service_rec_query - Start Service Record operation
 * @client:SA client
 * @device:device to send request on
 * @port_num: port number to send request on
 * @method:SA method - should be get, set, or delete
 * @rec:Service Record to send in request
 * @comp_mask:component mask to send in request
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when request completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @sa_query:request context, used to cancel request
 *
 * Send a Service Record set/get/delete to the SA to register,
 * unregister or query a service record.
 * The callback function will be called when the request completes (or
 * fails); status is 0 for a successful response, -EINTR if the query
 * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
 * occurred sending the query. The resp parameter of the callback is
 * only valid if status is 0.
 *
 * If the return value of ib_sa_service_rec_query() is negative, it is an
 * error code. Otherwise it is a request ID that can be used to cancel
 * the query.
 */
int ib_sa_service_rec_query(struct ib_sa_client *client,
			    struct ib_device *device, u8 port_num, u8 method,
			    struct ib_sa_service_rec *rec,
			    ib_sa_comp_mask comp_mask,
			    int timeout_ms, gfp_t gfp_mask,
			    void (*callback)(int status,
					     struct ib_sa_service_rec *resp,
					     void *context),
			    void *context,
			    struct ib_sa_query **sa_query)
{
	struct ib_sa_service_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE)
		return -EINVAL;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback = callback;
	query->context = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
	query->sa_query.release = ib_sa_service_rec_release;
	mad->mad_hdr.method = method;
	mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
	mad->sa_hdr.comp_mask = comp_mask;

	ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_service_rec_query);

static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_mcmember_query *query =
		container_of(sa_query, struct ib_sa_mcmember_query, sa_query);

	if (mad) {
		struct ib_sa_mcmember_rec rec;

		ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
}

/* Used internally via sa.h rather than exported. */
int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
			     struct ib_device *device, u8 port_num,
			     u8 method,
			     struct ib_sa_mcmember_rec *rec,
			     ib_sa_comp_mask comp_mask,
			     int timeout_ms, gfp_t gfp_mask,
			     void (*callback)(int status,
					      struct ib_sa_mcmember_rec *resp,
					      void *context),
			     void *context,
			     struct ib_sa_query **sa_query)
{
	struct ib_sa_mcmember_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback = callback;
	query->context = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
	query->sa_query.release = ib_sa_mcmember_rec_release;
	mad->mad_hdr.method = method;
	mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad->sa_hdr.comp_mask = comp_mask;

	ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}

/* Support GuidInfoRecord */
static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_guidinfo_query *query =
		container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);

	if (mad) {
		struct ib_sa_guidinfo_rec rec;

		ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
}

int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
			      struct ib_device *device, u8 port_num,
			      struct ib_sa_guidinfo_rec *rec,
			      ib_sa_comp_mask comp_mask, u8 method,
			      int timeout_ms, gfp_t gfp_mask,
			      void (*callback)(int status,
					       struct ib_sa_guidinfo_rec *resp,
					       void *context),
			      void *context,
			      struct ib_sa_query **sa_query)
{
	struct ib_sa_guidinfo_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE)
		return -EINVAL;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback = callback;
	query->context = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
	query->sa_query.release = ib_sa_guidinfo_rec_release;

	mad->mad_hdr.method = method;
	mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
	mad->sa_hdr.comp_mask = comp_mask;

	ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
		mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_guid_info_rec_query);

/* Support get SA ClassPortInfo */
static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
					      int status,
					      struct ib_sa_mad *mad)
{
	unsigned long flags;
	struct ib_sa_classport_info_query *query =
		container_of(sa_query, struct ib_sa_classport_info_query, sa_query);

	if (mad) {
		struct ib_class_port_info rec;

		ib_unpack(classport_info_rec_table,
			  ARRAY_SIZE(classport_info_rec_table),
			  mad->data, &rec);

		spin_lock_irqsave(&sa_query->port->classport_lock, flags);
		if (!status && !sa_query->port->classport_info.valid) {
			memcpy(&sa_query->port->classport_info.data, &rec,
			       sizeof(sa_query->port->classport_info.data));

			sa_query->port->classport_info.valid = true;
		}
		spin_unlock_irqrestore(&sa_query->port->classport_lock, flags);

		query->callback(status, &rec, query->context);
	} else {
		query->callback(status, NULL, query->context);
	}
}

static void ib_sa_portclass_info_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_classport_info_query,
			   sa_query));
}

int ib_sa_classport_info_rec_query(struct ib_sa_client *client,
				   struct ib_device *device, u8 port_num,
				   int timeout_ms, gfp_t gfp_mask,
				   void (*callback)(int status,
						    struct ib_class_port_info *resp,
						    void *context),
				   void *context,
				   struct ib_sa_query **sa_query)
{
	struct ib_sa_classport_info_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	struct ib_class_port_info cached_class_port_info;
	int ret;
	unsigned long flags;

	if (!sa_dev)
		return -ENODEV;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	/* Use cached ClassPortInfo attribute if valid instead of sending mad */
	spin_lock_irqsave(&port->classport_lock, flags);
	if (port->classport_info.valid && callback) {
		memcpy(&cached_class_port_info, &port->classport_info.data,
		       sizeof(cached_class_port_info));
		spin_unlock_irqrestore(&port->classport_lock, flags);
		callback(0, &cached_class_port_info, context);
		return 0;
	}
	spin_unlock_irqrestore(&port->classport_lock, flags);

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback = callback;
	query->context = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ?
		ib_sa_classport_info_rec_callback : NULL;
	query->sa_query.release = ib_sa_portclass_info_rec_release;
	/* support GET only */
	mad->mad_hdr.method = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_CLASS_PORTINFO);
	mad->sa_hdr.comp_mask = 0;
	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_classport_info_rec_query);
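/*
 * Usage sketch (illustrative only, not compiled): querying the SA's
 * ClassPortInfo.  When the per-port cache is valid, the callback runs
 * synchronously with the cached copy and 0 is returned instead of a
 * query ID.  The "my_" names are hypothetical.
 */
#if 0
static void my_cpi_callback(int status, struct ib_class_port_info *resp,
			    void *context)
{
	if (!status)
		pr_info("SA capability mask 0x%x\n",
			be16_to_cpu(resp->capability_mask));
}

static int my_cpi_query(struct ib_sa_client *client,
			struct ib_device *device, u8 port_num,
			struct ib_sa_query **query)
{
	return ib_sa_classport_info_rec_query(client, device, port_num,
					      1000, GFP_KERNEL,
					      my_cpi_callback, NULL, query);
}
#endif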
static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
	unsigned long flags;

	if (query->callback)
		switch (mad_send_wc->status) {
		case IB_WC_SUCCESS:
			/* No callback -- already got recv */
			break;
		case IB_WC_RESP_TIMEOUT_ERR:
			query->callback(query, -ETIMEDOUT, NULL);
			break;
		case IB_WC_WR_FLUSH_ERR:
			query->callback(query, -EINTR, NULL);
			break;
		default:
			query->callback(query, -EIO, NULL);
			break;
		}

	spin_lock_irqsave(&idr_lock, flags);
	idr_remove(&query_idr, query->id);
	spin_unlock_irqrestore(&idr_lock, flags);

	free_mad(query);
	ib_sa_client_put(query->client);
	query->release(query);
}

static void recv_handler(struct ib_mad_agent *mad_agent,
			 struct ib_mad_send_buf *send_buf,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_sa_query *query;

	if (!send_buf)
		return;

	query = send_buf->context[0];
	if (query->callback) {
		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
			query->callback(query,
					mad_recv_wc->recv_buf.mad->mad_hdr.status ?
					-EINVAL : 0,
					(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
		else
			query->callback(query, -EIO, NULL);
	}

	ib_free_recv_mad(mad_recv_wc);
}
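/*
 * Device lifecycle: ib_sa_add_one() and ib_sa_remove_one() run as a
 * device is added to or removed from the core.  A MAD agent is
 * registered on the GSI QP of each SA-capable port; ports without SA
 * support (e.g. RoCE) keep a zeroed slot so per-port indexing stays
 * uniform.
 */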
static void ib_sa_add_one(struct ib_device *device)
{
	struct ib_sa_device *sa_dev;
	int s, e, i;
	int count = 0;

	s = rdma_start_port(device);
	e = rdma_end_port(device);

	sa_dev = kzalloc(sizeof *sa_dev +
			 (e - s + 1) * sizeof (struct ib_sa_port),
			 GFP_KERNEL);
	if (!sa_dev)
		return;

	sa_dev->start_port = s;
	sa_dev->end_port = e;

	for (i = 0; i <= e - s; ++i) {
		spin_lock_init(&sa_dev->port[i].ah_lock);
		if (!rdma_cap_ib_sa(device, i + 1))
			continue;

		sa_dev->port[i].sm_ah = NULL;
		sa_dev->port[i].port_num = i + s;

		spin_lock_init(&sa_dev->port[i].classport_lock);
		sa_dev->port[i].classport_info.valid = false;

		sa_dev->port[i].agent =
			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
					      NULL, 0, send_handler,
					      recv_handler, sa_dev, 0);
		if (IS_ERR(sa_dev->port[i].agent))
			goto err;

		INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);

		count++;
	}

	if (!count)
		goto free;

	ib_set_client_data(device, &sa_client, sa_dev);

	/*
	 * We register our event handler after everything is set up,
	 * and then update our cached info after the event handler is
	 * registered to avoid any problems if a port changes state
	 * during our initialization.
	 */

	INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
	if (ib_register_event_handler(&sa_dev->event_handler))
		goto err;

	for (i = 0; i <= e - s; ++i) {
		if (rdma_cap_ib_sa(device, i + 1))
			update_sm_ah(&sa_dev->port[i].update_task);
	}

	return;

err:
	while (--i >= 0) {
		if (rdma_cap_ib_sa(device, i + 1))
			ib_unregister_mad_agent(sa_dev->port[i].agent);
	}
free:
	kfree(sa_dev);
	return;
}

static void ib_sa_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_sa_device *sa_dev = client_data;
	int i;

	if (!sa_dev)
		return;

	ib_unregister_event_handler(&sa_dev->event_handler);

	flush_workqueue(ib_wq);

	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
		if (rdma_cap_ib_sa(device, i + 1)) {
			ib_unregister_mad_agent(sa_dev->port[i].agent);
			if (sa_dev->port[i].sm_ah)
				kref_put(&sa_dev->port[i].sm_ah->ref,
					 free_sm_ah);
		}
	}

	kfree(sa_dev);
}

int ib_sa_init(void)
{
	int ret;

	get_random_bytes(&tid, sizeof tid);

	ret = ib_register_client(&sa_client);
	if (ret) {
		pr_err("Couldn't register ib_sa client\n");
		goto err1;
	}

	ret = mcast_init();
	if (ret) {
		pr_err("Couldn't initialize multicast handling\n");
		goto err2;
	}

	return 0;

err2:
	ib_unregister_client(&sa_client);
err1:
	return ret;
}

void ib_sa_cleanup(void)
{
	mcast_cleanup();
	ib_unregister_client(&sa_client);
	idr_destroy(&query_idr);
}