/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2006 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/cdefs.h>
#define	LINUXKPI_PARAM_PREFIX ibcore_

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/random.h>
#include <linux/rbtree.h>

#include <rdma/ib_cache.h>
#include "sa.h"

static void mcast_add_one(struct ib_device *device);
static void mcast_remove_one(struct ib_device *device, void *client_data);

static struct ib_client mcast_client = {
	.name   = "ib_multicast",
	.add    = mcast_add_one,
	.remove = mcast_remove_one
};

static struct ib_sa_client sa_client;
static struct workqueue_struct *mcast_wq;
static union ib_gid mgid0;

struct mcast_device;

struct mcast_port {
	struct mcast_device	*dev;
	spinlock_t		lock;
	struct rb_root		table;
	atomic_t		refcount;
	struct completion	comp;
	u8			port_num;
};

struct mcast_device {
	struct ib_device	*device;
	struct ib_event_handler	event_handler;
	int			start_port;
	int			end_port;
	struct mcast_port	port[];
};

enum mcast_state {
	MCAST_JOINING,
	MCAST_MEMBER,
	MCAST_ERROR,
};

enum mcast_group_state {
	MCAST_IDLE,
	MCAST_BUSY,
	MCAST_GROUP_ERROR,
	MCAST_PKEY_EVENT
};

enum {
	MCAST_INVALID_PKEY_INDEX = 0xFFFF
};

struct mcast_member;

struct mcast_group {
	struct ib_sa_mcmember_rec rec;
	struct rb_node		node;
	struct mcast_port	*port;
	spinlock_t		lock;
	struct work_struct	work;
	struct list_head	pending_list;
	struct list_head	active_list;
	struct mcast_member	*last_join;
	int			members[NUM_JOIN_MEMBERSHIP_TYPES];
	atomic_t		refcount;
	enum mcast_group_state	state;
	struct ib_sa_query	*query;
	u16			pkey_index;
	u8			leave_state;
	int			retries;
};

struct mcast_member {
	struct ib_sa_multicast	multicast;
	struct ib_sa_client	*client;
	struct mcast_group	*group;
	struct list_head	list;
	enum mcast_state	state;
	atomic_t		refcount;
	struct completion	comp;
};
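/*
 * Structure overview (derived from the definitions above): each client
 * device gets a mcast_device with one mcast_port per physical port.
 * Every port owns an rb-tree of mcast_group entries keyed by MGID, and
 * each group tracks its consumers as mcast_member entries on either
 * pending_list (join in progress) or active_list (joined). Reference
 * counts pair with completions: the final deref_member()/deref_port()
 * completes ->comp so teardown paths can wait_for_completion().
 */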
static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
			 void *context);
static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
			  void *context);

static struct mcast_group *mcast_find(struct mcast_port *port,
				      union ib_gid *mgid)
{
	struct rb_node *node = port->table.rb_node;
	struct mcast_group *group;
	int ret;

	while (node) {
		group = rb_entry(node, struct mcast_group, node);
		ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
		if (!ret)
			return group;

		if (ret < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct mcast_group *mcast_insert(struct mcast_port *port,
					struct mcast_group *group,
					int allow_duplicates)
{
	struct rb_node **link = &port->table.rb_node;
	struct rb_node *parent = NULL;
	struct mcast_group *cur_group;
	int ret;

	while (*link) {
		parent = *link;
		cur_group = rb_entry(parent, struct mcast_group, node);

		ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
			     sizeof group->rec.mgid);
		if (ret < 0)
			link = &(*link)->rb_left;
		else if (ret > 0)
			link = &(*link)->rb_right;
		else if (allow_duplicates)
			link = &(*link)->rb_left;
		else
			return cur_group;
	}
	rb_link_node(&group->node, parent, link);
	rb_insert_color(&group->node, &port->table);
	return NULL;
}

static void deref_port(struct mcast_port *port)
{
	if (atomic_dec_and_test(&port->refcount))
		complete(&port->comp);
}

static void release_group(struct mcast_group *group)
{
	struct mcast_port *port = group->port;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	if (atomic_dec_and_test(&group->refcount)) {
		rb_erase(&group->node, &port->table);
		spin_unlock_irqrestore(&port->lock, flags);
		kfree(group);
		deref_port(port);
	} else
		spin_unlock_irqrestore(&port->lock, flags);
}

static void deref_member(struct mcast_member *member)
{
	if (atomic_dec_and_test(&member->refcount))
		complete(&member->comp);
}

static void queue_join(struct mcast_member *member)
{
	struct mcast_group *group = member->group;
	unsigned long flags;

	spin_lock_irqsave(&group->lock, flags);
	list_add_tail(&member->list, &group->pending_list);
	if (group->state == MCAST_IDLE) {
		group->state = MCAST_BUSY;
		atomic_inc(&group->refcount);
		queue_work(mcast_wq, &group->work);
	}
	spin_unlock_irqrestore(&group->lock, flags);
}

/*
 * A multicast group has four types of members: full member, non member,
 * sendonly non member, and sendonly full member.
 * We need to keep track of the number of members of each type based on
 * their join state. Adjust the number of members that belong to the
 * specified join states.
 */
static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
{
	int i;

	for (i = 0; i < NUM_JOIN_MEMBERSHIP_TYPES; i++, join_state >>= 1)
		if (join_state & 0x1)
			group->members[i] += inc;
}
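/*
 * Worked example: join_state is a bit mask with one bit per membership
 * type, in the order listed in the comment above (bit 0 = full member,
 * bit 1 = non member, and so on, matching the members[] array).  A
 * member that joined with join_state = 0x1 bumps members[0] only, while
 * adjust_membership(group, 0x5, 1) would bump members[0] and members[2].
 */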
/*
 * If a multicast group has zero members left for a particular join state, but
 * the group is still a member with the SA, we need to leave that join state.
 * Determine which join states we still belong to, but that do not have any
 * active members.
 */
static u8 get_leave_state(struct mcast_group *group)
{
	u8 leave_state = 0;
	int i;

	for (i = 0; i < NUM_JOIN_MEMBERSHIP_TYPES; i++)
		if (!group->members[i])
			leave_state |= (0x1 << i);

	return leave_state & group->rec.join_state;
}

static int check_selector(ib_sa_comp_mask comp_mask,
			  ib_sa_comp_mask selector_mask,
			  ib_sa_comp_mask value_mask,
			  u8 selector, u8 src_value, u8 dst_value)
{
	int err;

	if (!(comp_mask & selector_mask) || !(comp_mask & value_mask))
		return 0;

	switch (selector) {
	case IB_SA_GT:
		err = (src_value <= dst_value);
		break;
	case IB_SA_LT:
		err = (src_value >= dst_value);
		break;
	case IB_SA_EQ:
		err = (src_value != dst_value);
		break;
	default:
		err = 0;
		break;
	}

	return err;
}

static int cmp_rec(struct ib_sa_mcmember_rec *src,
		   struct ib_sa_mcmember_rec *dst, ib_sa_comp_mask comp_mask)
{
	/* MGID must already match */

	if (comp_mask & IB_SA_MCMEMBER_REC_PORT_GID &&
	    memcmp(&src->port_gid, &dst->port_gid, sizeof src->port_gid))
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_QKEY && src->qkey != dst->qkey)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_MLID && src->mlid != dst->mlid)
		return -EINVAL;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_MTU_SELECTOR,
			   IB_SA_MCMEMBER_REC_MTU, dst->mtu_selector,
			   src->mtu, dst->mtu))
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_TRAFFIC_CLASS &&
	    src->traffic_class != dst->traffic_class)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_PKEY && src->pkey != dst->pkey)
		return -EINVAL;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_RATE_SELECTOR,
			   IB_SA_MCMEMBER_REC_RATE, dst->rate_selector,
			   src->rate, dst->rate))
		return -EINVAL;
	if (check_selector(comp_mask,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME,
			   dst->packet_life_time_selector,
			   src->packet_life_time, dst->packet_life_time))
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_SL && src->sl != dst->sl)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_FLOW_LABEL &&
	    src->flow_label != dst->flow_label)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_HOP_LIMIT &&
	    src->hop_limit != dst->hop_limit)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_SCOPE && src->scope != dst->scope)
		return -EINVAL;

	/* join_state checked separately, proxy_join ignored */

	return 0;
}
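/*
 * Selector semantics, for illustration: cmp_rec() treats src as the
 * existing group record and dst as the requester's record.  A caller
 * that sets IB_SA_MCMEMBER_REC_MTU_SELECTOR | IB_SA_MCMEMBER_REC_MTU
 * with dst->mtu_selector = IB_SA_GT and dst->mtu = IB_MTU_1024 is
 * asking for "MTU greater than 1024"; check_selector() then fails the
 * match unless the group's MTU (src->mtu) exceeds the requested value.
 */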
static int send_join(struct mcast_group *group, struct mcast_member *member)
{
	struct mcast_port *port = group->port;
	int ret;

	group->last_join = member;
	ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
				       port->port_num, IB_MGMT_METHOD_SET,
				       &member->multicast.rec,
				       member->multicast.comp_mask,
				       3000, GFP_KERNEL, join_handler, group,
				       &group->query);
	return (ret > 0) ? 0 : ret;
}

static int send_leave(struct mcast_group *group, u8 leave_state)
{
	struct mcast_port *port = group->port;
	struct ib_sa_mcmember_rec rec;
	int ret;

	rec = group->rec;
	rec.join_state = leave_state;
	group->leave_state = leave_state;

	ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
				       port->port_num, IB_SA_METHOD_DELETE, &rec,
				       IB_SA_MCMEMBER_REC_MGID |
				       IB_SA_MCMEMBER_REC_PORT_GID |
				       IB_SA_MCMEMBER_REC_JOIN_STATE,
				       3000, GFP_KERNEL, leave_handler,
				       group, &group->query);
	return (ret > 0) ? 0 : ret;
}

static void join_group(struct mcast_group *group, struct mcast_member *member,
		       u8 join_state)
{
	member->state = MCAST_MEMBER;
	adjust_membership(group, join_state, 1);
	group->rec.join_state |= join_state;
	member->multicast.rec = group->rec;
	member->multicast.rec.join_state = join_state;
	list_move(&member->list, &group->active_list);
}

static int fail_join(struct mcast_group *group, struct mcast_member *member,
		     int status)
{
	spin_lock_irq(&group->lock);
	list_del_init(&member->list);
	spin_unlock_irq(&group->lock);
	return member->multicast.callback(status, &member->multicast);
}

static void process_group_error(struct mcast_group *group)
{
	struct mcast_member *member;
	int ret = 0;
	u16 pkey_index;

	if (group->state == MCAST_PKEY_EVENT)
		ret = ib_find_pkey(group->port->dev->device,
				   group->port->port_num,
				   be16_to_cpu(group->rec.pkey), &pkey_index);

	spin_lock_irq(&group->lock);
	if (group->state == MCAST_PKEY_EVENT && !ret &&
	    group->pkey_index == pkey_index)
		goto out;

	while (!list_empty(&group->active_list)) {
		member = list_entry(group->active_list.next,
				    struct mcast_member, list);
		atomic_inc(&member->refcount);
		list_del_init(&member->list);
		adjust_membership(group, member->multicast.rec.join_state, -1);
		member->state = MCAST_ERROR;
		spin_unlock_irq(&group->lock);

		ret = member->multicast.callback(-ENETRESET,
						 &member->multicast);
		deref_member(member);
		if (ret)
			ib_sa_free_multicast(&member->multicast);
		spin_lock_irq(&group->lock);
	}

	group->rec.join_state = 0;
out:
	group->state = MCAST_BUSY;
	spin_unlock_irq(&group->lock);
}
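/*
 * The work handler below is the only place a group makes forward
 * progress: it drains pending joins one at a time (each SA round-trip
 * re-enters the handler via join_handler/leave_handler), folds requests
 * that already match the group's membership into it locally, handles
 * error and pkey events, and finally either sends a leave or goes idle,
 * dropping the reference taken when the work was queued.
 */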
static void mcast_work_handler(struct work_struct *work)
{
	struct mcast_group *group;
	struct mcast_member *member;
	struct ib_sa_multicast *multicast;
	int status, ret;
	u8 join_state;

	group = container_of(work, typeof(*group), work);
retest:
	spin_lock_irq(&group->lock);
	while (!list_empty(&group->pending_list) ||
	       (group->state != MCAST_BUSY)) {

		if (group->state != MCAST_BUSY) {
			spin_unlock_irq(&group->lock);
			process_group_error(group);
			goto retest;
		}

		member = list_entry(group->pending_list.next,
				    struct mcast_member, list);
		multicast = &member->multicast;
		join_state = multicast->rec.join_state;
		atomic_inc(&member->refcount);

		if (join_state == (group->rec.join_state & join_state)) {
			status = cmp_rec(&group->rec, &multicast->rec,
					 multicast->comp_mask);
			if (!status)
				join_group(group, member, join_state);
			else
				list_del_init(&member->list);
			spin_unlock_irq(&group->lock);
			ret = multicast->callback(status, multicast);
		} else {
			spin_unlock_irq(&group->lock);
			status = send_join(group, member);
			if (!status) {
				deref_member(member);
				return;
			}
			ret = fail_join(group, member, status);
		}

		deref_member(member);
		if (ret)
			ib_sa_free_multicast(&member->multicast);
		spin_lock_irq(&group->lock);
	}

	join_state = get_leave_state(group);
	if (join_state) {
		group->rec.join_state &= ~join_state;
		spin_unlock_irq(&group->lock);
		if (send_leave(group, join_state))
			goto retest;
	} else {
		group->state = MCAST_IDLE;
		spin_unlock_irq(&group->lock);
		release_group(group);
	}
}

/*
 * Fail a join request if it is still active - at the head of the pending queue.
 */
static void process_join_error(struct mcast_group *group, int status)
{
	struct mcast_member *member;
	int ret;

	spin_lock_irq(&group->lock);
	member = list_entry(group->pending_list.next,
			    struct mcast_member, list);
	if (group->last_join == member) {
		atomic_inc(&member->refcount);
		list_del_init(&member->list);
		spin_unlock_irq(&group->lock);
		ret = member->multicast.callback(status, &member->multicast);
		deref_member(member);
		if (ret)
			ib_sa_free_multicast(&member->multicast);
	} else
		spin_unlock_irq(&group->lock);
}

static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
			 void *context)
{
	struct mcast_group *group = context;
	u16 pkey_index = MCAST_INVALID_PKEY_INDEX;

	if (status)
		process_join_error(group, status);
	else {
		int mgids_changed, is_mgid0;

		if (ib_find_pkey(group->port->dev->device,
				 group->port->port_num,
				 be16_to_cpu(rec->pkey), &pkey_index))
			pkey_index = MCAST_INVALID_PKEY_INDEX;

		spin_lock_irq(&group->port->lock);
		if (group->state == MCAST_BUSY &&
		    group->pkey_index == MCAST_INVALID_PKEY_INDEX)
			group->pkey_index = pkey_index;
		mgids_changed = memcmp(&rec->mgid, &group->rec.mgid,
				       sizeof(group->rec.mgid));
		group->rec = *rec;
		if (mgids_changed) {
			rb_erase(&group->node, &group->port->table);
			is_mgid0 = !memcmp(&mgid0, &group->rec.mgid,
					   sizeof(mgid0));
			mcast_insert(group->port, group, is_mgid0);
		}
		spin_unlock_irq(&group->port->lock);
	}
	mcast_work_handler(&group->work);
}

static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
			  void *context)
{
	struct mcast_group *group = context;

	if (status && group->retries > 0 &&
	    !send_leave(group, group->leave_state))
		group->retries--;
	else
		mcast_work_handler(&group->work);
}
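/*
 * Note on the re-insert in join_handler above: a join sent with the
 * all-zero MGID asks the SA to create a group and assign a real MGID,
 * so the group must be re-keyed in the port's rb-tree once the response
 * arrives.  Failed leaves are retried a bounded number of times
 * (group->retries) before the work handler is allowed to run again.
 */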
static struct mcast_group *acquire_group(struct mcast_port *port,
					 union ib_gid *mgid, gfp_t gfp_mask)
{
	struct mcast_group *group, *cur_group;
	unsigned long flags;
	int is_mgid0;

	is_mgid0 = !memcmp(&mgid0, mgid, sizeof mgid0);
	if (!is_mgid0) {
		spin_lock_irqsave(&port->lock, flags);
		group = mcast_find(port, mgid);
		if (group)
			goto found;
		spin_unlock_irqrestore(&port->lock, flags);
	}

	group = kzalloc(sizeof *group, gfp_mask);
	if (!group)
		return NULL;

	group->retries = 3;
	group->port = port;
	group->rec.mgid = *mgid;
	group->pkey_index = MCAST_INVALID_PKEY_INDEX;
	INIT_LIST_HEAD(&group->pending_list);
	INIT_LIST_HEAD(&group->active_list);
	INIT_WORK(&group->work, mcast_work_handler);
	spin_lock_init(&group->lock);

	spin_lock_irqsave(&port->lock, flags);
	cur_group = mcast_insert(port, group, is_mgid0);
	if (cur_group) {
		kfree(group);
		group = cur_group;
	} else
		atomic_inc(&port->refcount);
found:
	atomic_inc(&group->refcount);
	spin_unlock_irqrestore(&port->lock, flags);
	return group;
}

/*
 * We serialize all join requests to a single group to make our lives much
 * easier. Otherwise, two users could try to join the same group
 * simultaneously, with different configurations, one could leave while the
 * join is in progress, etc., which makes locking around error recovery
 * difficult.
 */
struct ib_sa_multicast *
ib_sa_join_multicast(struct ib_sa_client *client,
		     struct ib_device *device, u8 port_num,
		     struct ib_sa_mcmember_rec *rec,
		     ib_sa_comp_mask comp_mask, gfp_t gfp_mask,
		     int (*callback)(int status,
				     struct ib_sa_multicast *multicast),
		     void *context)
{
	struct mcast_device *dev;
	struct mcast_member *member;
	struct ib_sa_multicast *multicast;
	int ret;

	dev = ib_get_client_data(device, &mcast_client);
	if (!dev)
		return ERR_PTR(-ENODEV);

	member = kmalloc(sizeof *member, gfp_mask);
	if (!member)
		return ERR_PTR(-ENOMEM);

	ib_sa_client_get(client);
	member->client = client;
	member->multicast.rec = *rec;
	member->multicast.comp_mask = comp_mask;
	member->multicast.callback = callback;
	member->multicast.context = context;
	init_completion(&member->comp);
	atomic_set(&member->refcount, 1);
	member->state = MCAST_JOINING;

	member->group = acquire_group(&dev->port[port_num - dev->start_port],
				      &rec->mgid, gfp_mask);
	if (!member->group) {
		ret = -ENOMEM;
		goto err;
	}

	/*
	 * The user will get the multicast structure in their callback. They
	 * could then free the multicast structure before we can return from
	 * this routine. So we save the pointer to return before queuing
	 * any callback.
	 */
	multicast = &member->multicast;
	queue_join(member);
	return multicast;

err:
	ib_sa_client_put(client);
	kfree(member);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_sa_join_multicast);
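/*
 * Illustrative usage sketch (not part of this file; "my_sa_client",
 * "my_join_done", and "my_ctx" are hypothetical names).  A caller joins
 * asynchronously and learns the outcome in its callback; returning
 * non-zero from the callback tells the core to free the membership on
 * the caller's behalf:
 *
 *	static int my_join_done(int status, struct ib_sa_multicast *mc)
 *	{
 *		if (status)
 *			return status;	// non-zero: core frees the join
 *		// mc->rec now holds the SA-assigned MLID, MGID, etc.
 *		return 0;
 *	}
 *
 *	struct ib_sa_mcmember_rec rec = {};
 *	struct ib_sa_multicast *mc;
 *
 *	rec.mgid = ...;		// group MGID; all zeroes => SA assigns one
 *	rec.port_gid = ...;	// local port GID
 *	rec.join_state = 0x1;	// full member
 *	mc = ib_sa_join_multicast(&my_sa_client, device, port_num, &rec,
 *				  IB_SA_MCMEMBER_REC_MGID |
 *				  IB_SA_MCMEMBER_REC_PORT_GID |
 *				  IB_SA_MCMEMBER_REC_JOIN_STATE,
 *				  GFP_KERNEL, my_join_done, my_ctx);
 *	if (IS_ERR(mc))
 *		return PTR_ERR(mc);
 *	...
 *	ib_sa_free_multicast(mc);	// unless the callback freed it
 */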
void ib_sa_free_multicast(struct ib_sa_multicast *multicast)
{
	struct mcast_member *member;
	struct mcast_group *group;

	member = container_of(multicast, struct mcast_member, multicast);
	group = member->group;

	spin_lock_irq(&group->lock);
	if (member->state == MCAST_MEMBER)
		adjust_membership(group, multicast->rec.join_state, -1);

	list_del_init(&member->list);

	if (group->state == MCAST_IDLE) {
		group->state = MCAST_BUSY;
		spin_unlock_irq(&group->lock);
		/* Continue to hold reference on group until callback */
		queue_work(mcast_wq, &group->work);
	} else {
		spin_unlock_irq(&group->lock);
		release_group(group);
	}

	deref_member(member);
	wait_for_completion(&member->comp);
	ib_sa_client_put(member->client);
	kfree(member);
}
EXPORT_SYMBOL(ib_sa_free_multicast);

int ib_sa_get_mcmember_rec(struct ib_device *device, u8 port_num,
			   union ib_gid *mgid, struct ib_sa_mcmember_rec *rec)
{
	struct mcast_device *dev;
	struct mcast_port *port;
	struct mcast_group *group;
	unsigned long flags;
	int ret = 0;

	dev = ib_get_client_data(device, &mcast_client);
	if (!dev)
		return -ENODEV;

	port = &dev->port[port_num - dev->start_port];
	spin_lock_irqsave(&port->lock, flags);
	group = mcast_find(port, mgid);
	if (group)
		*rec = group->rec;
	else
		ret = -EADDRNOTAVAIL;
	spin_unlock_irqrestore(&port->lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_sa_get_mcmember_rec);

int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
			     struct ib_sa_mcmember_rec *rec,
			     if_t ndev,
			     enum ib_gid_type gid_type,
			     struct ib_ah_attr *ah_attr)
{
	int ret;
	u16 gid_index;

	/* GID table is not based on the netdevice for IB link layer,
	 * so ignore ndev during search.
	 */
	if (rdma_protocol_ib(device, port_num))
		ndev = NULL;
	else if (!rdma_protocol_roce(device, port_num))
		return -EINVAL;

	ret = ib_find_cached_gid_by_port(device, &rec->port_gid,
					 gid_type, port_num,
					 ndev,
					 &gid_index);
	if (ret)
		return ret;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = be16_to_cpu(rec->mlid);
	ah_attr->sl = rec->sl;
	ah_attr->port_num = port_num;
	ah_attr->static_rate = rec->rate;

	ah_attr->ah_flags = IB_AH_GRH;
	ah_attr->grh.dgid = rec->mgid;

	ah_attr->grh.sgid_index = (u8) gid_index;
	ah_attr->grh.flow_label = be32_to_cpu(rec->flow_label);
	ah_attr->grh.hop_limit = rec->hop_limit;
	ah_attr->grh.traffic_class = rec->traffic_class;

	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_mcmember);
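/*
 * Sketch of a typical consumer of the two helpers above (error handling
 * elided; "pd" and "qp" are hypothetical).  After a join completes, a
 * sender builds an address handle from the cached member record and
 * attaches its UD QP so it can also receive on the group:
 *
 *	struct ib_sa_mcmember_rec rec;
 *	struct ib_ah_attr ah_attr;
 *	struct ib_ah *ah;
 *
 *	if (!ib_sa_get_mcmember_rec(device, port_num, &mgid, &rec) &&
 *	    !ib_init_ah_from_mcmember(device, port_num, &rec, NULL,
 *				      IB_GID_TYPE_IB, &ah_attr)) {
 *		ah = ib_create_ah(pd, &ah_attr);
 *		ib_attach_mcast(qp, &rec.mgid, be16_to_cpu(rec.mlid));
 *	}
 */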
static void mcast_groups_event(struct mcast_port *port,
			       enum mcast_group_state state)
{
	struct mcast_group *group;
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	for (node = rb_first(&port->table); node; node = rb_next(node)) {
		group = rb_entry(node, struct mcast_group, node);
		spin_lock(&group->lock);
		if (group->state == MCAST_IDLE) {
			atomic_inc(&group->refcount);
			queue_work(mcast_wq, &group->work);
		}
		if (group->state != MCAST_GROUP_ERROR)
			group->state = state;
		spin_unlock(&group->lock);
	}
	spin_unlock_irqrestore(&port->lock, flags);
}

static void mcast_event_handler(struct ib_event_handler *handler,
				struct ib_event *event)
{
	struct mcast_device *dev;
	int index;

	dev = container_of(handler, struct mcast_device, event_handler);
	if (!rdma_cap_ib_mcast(dev->device, event->element.port_num))
		return;

	index = event->element.port_num - dev->start_port;

	switch (event->event) {
	case IB_EVENT_PORT_ERR:
	case IB_EVENT_LID_CHANGE:
	case IB_EVENT_CLIENT_REREGISTER:
		mcast_groups_event(&dev->port[index], MCAST_GROUP_ERROR);
		break;
	case IB_EVENT_PKEY_CHANGE:
		mcast_groups_event(&dev->port[index], MCAST_PKEY_EVENT);
		break;
	default:
		break;
	}
}

static void mcast_add_one(struct ib_device *device)
{
	struct mcast_device *dev;
	struct mcast_port *port;
	int i;
	int count = 0;

	dev = kmalloc(sizeof *dev + device->phys_port_cnt * sizeof *port,
		      GFP_KERNEL);
	if (!dev)
		return;

	dev->start_port = rdma_start_port(device);
	dev->end_port = rdma_end_port(device);

	for (i = 0; i <= dev->end_port - dev->start_port; i++) {
		if (!rdma_cap_ib_mcast(device, dev->start_port + i))
			continue;
		port = &dev->port[i];
		port->dev = dev;
		port->port_num = dev->start_port + i;
		spin_lock_init(&port->lock);
		port->table = RB_ROOT;
		init_completion(&port->comp);
		atomic_set(&port->refcount, 1);
		++count;
	}

	if (!count) {
		kfree(dev);
		return;
	}

	dev->device = device;
	ib_set_client_data(device, &mcast_client, dev);

	INIT_IB_EVENT_HANDLER(&dev->event_handler, device, mcast_event_handler);
	ib_register_event_handler(&dev->event_handler);
}
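/*
 * Teardown note: each port starts with a refcount of 1 (set above), and
 * every group inserted into the port's rb-tree takes another reference.
 * mcast_remove_one() below drops the initial reference and then waits on
 * port->comp, which deref_port() completes once release_group() has
 * erased and freed the last group on that port.
 */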
static void mcast_remove_one(struct ib_device *device, void *client_data)
{
	struct mcast_device *dev = client_data;
	struct mcast_port *port;
	int i;

	if (!dev)
		return;

	ib_unregister_event_handler(&dev->event_handler);
	flush_workqueue(mcast_wq);

	for (i = 0; i <= dev->end_port - dev->start_port; i++) {
		if (rdma_cap_ib_mcast(device, dev->start_port + i)) {
			port = &dev->port[i];
			deref_port(port);
			wait_for_completion(&port->comp);
		}
	}

	kfree(dev);
}

int mcast_init(void)
{
	int ret;

	mcast_wq = alloc_ordered_workqueue("ib_mcast", WQ_MEM_RECLAIM);
	if (!mcast_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);

	ret = ib_register_client(&mcast_client);
	if (ret)
		goto err;
	return 0;

err:
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(mcast_wq);
	return ret;
}

void mcast_cleanup(void)
{
	ib_unregister_client(&mcast_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(mcast_wq);
}