/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/cdefs.h>

#include <linux/module.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include "core_priv.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("core kernel InfiniBand API");
MODULE_LICENSE("Dual BSD/GPL");

struct ib_client_data {
        struct list_head  list;
        struct ib_client *client;
        void             *data;
        /* The device or client is going down.  Do not call client or device
         * callbacks other than remove(). */
        bool              going_down;
};

struct workqueue_struct *ib_comp_wq;
struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);

/* The device_list and client_list contain devices and clients after their
 * registration has completed, and the devices and clients are removed
 * during unregistration. */
static LIST_HEAD(device_list);
static LIST_HEAD(client_list);

/*
 * device_mutex and lists_rwsem protect access to both device_list and
 * client_list.  device_mutex protects writer access by device and client
 * registration / de-registration.  lists_rwsem protects reader access to
 * these lists.  Iterators of these lists must lock it for read, while updates
 * to the lists must be done with a write lock.  A special case is when the
 * device_mutex is locked.  In this case locking the lists for read access is
 * not necessary as the device_mutex implies it.
 *
 * lists_rwsem also protects access to the client data list.
 */
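
/*
 * Example (hypothetical sketch): a reader walking device_list must hold
 * lists_rwsem for read, exactly as ib_enum_all_roce_netdevs() does later
 * in this file.  do_something() is a made-up helper used only for
 * illustration:
 *
 *      struct ib_device *dev;
 *
 *      down_read(&lists_rwsem);
 *      list_for_each_entry(dev, &device_list, core_list)
 *              do_something(dev);
 *      up_read(&lists_rwsem);
 *
 * Holding device_mutex instead also makes the walk safe, since all list
 * updates are performed under that mutex.
 */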
static DEFINE_MUTEX(device_mutex);
static DECLARE_RWSEM(lists_rwsem);

static int ib_device_check_mandatory(struct ib_device *device)
{
#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x }
        static const struct {
                size_t offset;
                char  *name;
        } mandatory_table[] = {
                IB_MANDATORY_FUNC(query_device),
                IB_MANDATORY_FUNC(query_port),
                IB_MANDATORY_FUNC(query_pkey),
                IB_MANDATORY_FUNC(query_gid),
                IB_MANDATORY_FUNC(alloc_pd),
                IB_MANDATORY_FUNC(dealloc_pd),
                IB_MANDATORY_FUNC(create_ah),
                IB_MANDATORY_FUNC(destroy_ah),
                IB_MANDATORY_FUNC(create_qp),
                IB_MANDATORY_FUNC(modify_qp),
                IB_MANDATORY_FUNC(destroy_qp),
                IB_MANDATORY_FUNC(post_send),
                IB_MANDATORY_FUNC(post_recv),
                IB_MANDATORY_FUNC(create_cq),
                IB_MANDATORY_FUNC(destroy_cq),
                IB_MANDATORY_FUNC(poll_cq),
                IB_MANDATORY_FUNC(req_notify_cq),
                IB_MANDATORY_FUNC(get_dma_mr),
                IB_MANDATORY_FUNC(dereg_mr),
                IB_MANDATORY_FUNC(get_port_immutable)
        };
        int i;

        for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
                if (!*(void **) ((char *) device + mandatory_table[i].offset)) {
                        pr_warn("Device %s is missing mandatory function %s\n",
                                device->name, mandatory_table[i].name);
                        return -EINVAL;
                }
        }

        return 0;
}

static struct ib_device *__ib_device_get_by_name(const char *name)
{
        struct ib_device *device;

        list_for_each_entry(device, &device_list, core_list)
                if (!strncmp(name, device->name, IB_DEVICE_NAME_MAX))
                        return device;

        return NULL;
}

static int alloc_name(char *name)
{
        unsigned long *inuse;
        char buf[IB_DEVICE_NAME_MAX];
        struct ib_device *device;
        int i;

        inuse = (unsigned long *) get_zeroed_page(GFP_KERNEL);
        if (!inuse)
                return -ENOMEM;

        list_for_each_entry(device, &device_list, core_list) {
                if (!sscanf(device->name, name, &i))
                        continue;
                if (i < 0 || i >= PAGE_SIZE * 8)
                        continue;
                snprintf(buf, sizeof buf, name, i);
                if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX))
                        set_bit(i, inuse);
        }

        i = find_first_zero_bit(inuse, PAGE_SIZE * 8);
        free_page((unsigned long) inuse);
        snprintf(buf, sizeof buf, name, i);

        if (__ib_device_get_by_name(buf))
                return -ENFILE;

        strlcpy(name, buf, IB_DEVICE_NAME_MAX);
        return 0;
}

static void ib_device_release(struct device *device)
{
        struct ib_device *dev = container_of(device, struct ib_device, dev);

        WARN_ON(dev->reg_state == IB_DEV_REGISTERED);
        if (dev->reg_state == IB_DEV_UNREGISTERED) {
                /*
                 * In IB_DEV_UNINITIALIZED state, cache or port table
                 * is not even created.  Free cache and port table only when
                 * device reaches UNREGISTERED state.
                 */
                ib_cache_release_one(dev);
                kfree(dev->port_immutable);
        }
        kfree(dev);
}

static struct class ib_class = {
        .name        = "infiniband",
        .dev_release = ib_device_release,
};
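
/*
 * Example (hypothetical sketch): alloc_name() resolves a printf-style
 * name template in place.  A driver that sets
 *
 *      strlcpy(device->name, "mydev_%d", IB_DEVICE_NAME_MAX);
 *
 * before registration will have ib_register_device() rewrite the name to
 * the first unused instance, e.g. "mydev_0".  "mydev_" is a made-up
 * prefix used only for illustration.
 */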
/**
 * ib_alloc_device - allocate an IB device struct
 * @size:size of structure to allocate
 *
 * Low-level drivers should use ib_alloc_device() to allocate &struct
 * ib_device.  @size is the size of the structure to be allocated,
 * including any private data used by the low-level driver.
 * ib_dealloc_device() must be used to free structures allocated with
 * ib_alloc_device().
 */
struct ib_device *ib_alloc_device(size_t size)
{
        struct ib_device *device;

        if (WARN_ON(size < sizeof(struct ib_device)))
                return NULL;

        device = kzalloc(size, GFP_KERNEL);
        if (!device)
                return NULL;

        device->dev.parent = &linux_root_device;
        device->dev.class = &ib_class;
        device_initialize(&device->dev);

        dev_set_drvdata(&device->dev, device);

        INIT_LIST_HEAD(&device->event_handler_list);
        spin_lock_init(&device->event_handler_lock);
        spin_lock_init(&device->client_data_lock);
        INIT_LIST_HEAD(&device->client_data_list);
        INIT_LIST_HEAD(&device->port_list);

        return device;
}
EXPORT_SYMBOL(ib_alloc_device);

/**
 * ib_dealloc_device - free an IB device struct
 * @device:structure to free
 *
 * Free a structure allocated with ib_alloc_device().
 */
void ib_dealloc_device(struct ib_device *device)
{
        WARN_ON(device->reg_state != IB_DEV_UNREGISTERED &&
                device->reg_state != IB_DEV_UNINITIALIZED);
        kobject_put(&device->dev.kobj);
}
EXPORT_SYMBOL(ib_dealloc_device);

static int add_client_context(struct ib_device *device, struct ib_client *client)
{
        struct ib_client_data *context;
        unsigned long flags;

        context = kmalloc(sizeof *context, GFP_KERNEL);
        if (!context) {
                pr_warn("Couldn't allocate client context for %s/%s\n",
                        device->name, client->name);
                return -ENOMEM;
        }

        context->client = client;
        context->data   = NULL;
        context->going_down = false;

        down_write(&lists_rwsem);
        spin_lock_irqsave(&device->client_data_lock, flags);
        list_add(&context->list, &device->client_data_list);
        spin_unlock_irqrestore(&device->client_data_lock, flags);
        up_write(&lists_rwsem);

        return 0;
}

static int verify_immutable(const struct ib_device *dev, u8 port)
{
        return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
                       rdma_max_mad_size(dev, port) != 0);
}

static int read_port_immutable(struct ib_device *device)
{
        int ret;
        u8 start_port = rdma_start_port(device);
        u8 end_port = rdma_end_port(device);
        u8 port;

        /*
         * device->port_immutable is indexed directly by the port number to
         * make access to this data as efficient as possible.
         *
         * Therefore port_immutable is declared as a 1-based array with
         * potential empty slots at the beginning.
         */
        device->port_immutable = kzalloc(sizeof(*device->port_immutable)
                                         * (end_port + 1),
                                         GFP_KERNEL);
        if (!device->port_immutable)
                return -ENOMEM;

        for (port = start_port; port <= end_port; ++port) {
                ret = device->get_port_immutable(device, port,
                                                 &device->port_immutable[port]);
                if (ret)
                        return ret;

                if (verify_immutable(device, port))
                        return -EINVAL;
        }
        return 0;
}

void ib_get_device_fw_str(struct ib_device *dev, char *str, size_t str_len)
{
        if (dev->get_dev_fw_str)
                dev->get_dev_fw_str(dev, str, str_len);
        else
                str[0] = '\0';
}
EXPORT_SYMBOL(ib_get_device_fw_str);
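
/*
 * Example (hypothetical sketch): a low-level driver typically embeds
 * struct ib_device at the start of its own device structure, allocates
 * it with ib_alloc_device(), installs the mandatory verb callbacks, and
 * registers it.  "struct my_dev" and "my_setup_verbs" are made-up names
 * used only for illustration:
 *
 *      struct my_dev {
 *              struct ib_device ib_dev;   // first member, so casts work
 *              int              my_private_state;
 *      };
 *
 *      struct my_dev *mdev = (struct my_dev *)
 *              ib_alloc_device(sizeof(struct my_dev));
 *      if (!mdev)
 *              return -ENOMEM;
 *      my_setup_verbs(&mdev->ib_dev);     // fill in query_device, etc.
 *      if (ib_register_device(&mdev->ib_dev, NULL))
 *              ib_dealloc_device(&mdev->ib_dev);
 */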
/**
 * ib_register_device - Register an IB device with IB core
 * @device:Device to register
 *
 * Low-level drivers use ib_register_device() to register their
 * devices with the IB core.  All registered clients will receive a
 * callback for each device that is added.  @device must be allocated
 * with ib_alloc_device().
 */
int ib_register_device(struct ib_device *device,
                       int (*port_callback)(struct ib_device *,
                                            u8, struct kobject *))
{
        int ret;
        struct ib_client *client;
        struct ib_udata uhw = {.outlen = 0, .inlen = 0};

        mutex_lock(&device_mutex);

        if (strchr(device->name, '%')) {
                ret = alloc_name(device->name);
                if (ret)
                        goto out;
        }

        if (ib_device_check_mandatory(device)) {
                ret = -EINVAL;
                goto out;
        }

        ret = read_port_immutable(device);
        if (ret) {
                pr_warn("Couldn't create per port immutable data %s\n",
                        device->name);
                goto out;
        }

        ret = ib_cache_setup_one(device);
        if (ret) {
                pr_warn("Couldn't set up InfiniBand P_Key/GID cache\n");
                goto port_cleanup;
        }

        memset(&device->attrs, 0, sizeof(device->attrs));
        ret = device->query_device(device, &device->attrs, &uhw);
        if (ret) {
                pr_warn("Couldn't query the device attributes\n");
                goto cache_cleanup;
        }

        ret = ib_device_register_sysfs(device, port_callback);
        if (ret) {
                pr_warn("Couldn't register device %s with driver model\n",
                        device->name);
                goto cache_cleanup;
        }

        device->reg_state = IB_DEV_REGISTERED;

        list_for_each_entry(client, &client_list, list)
                if (client->add && !add_client_context(device, client))
                        client->add(device);

        down_write(&lists_rwsem);
        list_add_tail(&device->core_list, &device_list);
        up_write(&lists_rwsem);
        mutex_unlock(&device_mutex);
        return 0;

cache_cleanup:
        ib_cache_cleanup_one(device);
        ib_cache_release_one(device);
port_cleanup:
        kfree(device->port_immutable);
out:
        mutex_unlock(&device_mutex);
        return ret;
}
EXPORT_SYMBOL(ib_register_device);

/**
 * ib_unregister_device - Unregister an IB device
 * @device:Device to unregister
 *
 * Unregister an IB device.  All clients will receive a remove callback.
 */
void ib_unregister_device(struct ib_device *device)
{
        struct ib_client_data *context, *tmp;
        unsigned long flags;

        mutex_lock(&device_mutex);

        down_write(&lists_rwsem);
        list_del(&device->core_list);
        spin_lock_irqsave(&device->client_data_lock, flags);
        list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
                context->going_down = true;
        spin_unlock_irqrestore(&device->client_data_lock, flags);
        downgrade_write(&lists_rwsem);

        list_for_each_entry_safe(context, tmp, &device->client_data_list,
                                 list) {
                if (context->client->remove)
                        context->client->remove(device, context->data);
        }
        up_read(&lists_rwsem);

        mutex_unlock(&device_mutex);

        ib_device_unregister_sysfs(device);
        ib_cache_cleanup_one(device);

        down_write(&lists_rwsem);
        spin_lock_irqsave(&device->client_data_lock, flags);
        list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
                kfree(context);
        spin_unlock_irqrestore(&device->client_data_lock, flags);
        up_write(&lists_rwsem);

        device->reg_state = IB_DEV_UNREGISTERED;
}
EXPORT_SYMBOL(ib_unregister_device);
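
/*
 * Example (hypothetical sketch): on device removal a driver undoes the
 * registration in the reverse order of the setup above; mdev is the
 * made-up driver structure from the earlier sketch:
 *
 *      ib_unregister_device(&mdev->ib_dev);    // clients get remove()
 *      ib_dealloc_device(&mdev->ib_dev);       // drops the final reference
 */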
/**
 * ib_register_client - Register an IB client
 * @client:Client to register
 *
 * Upper level users of the IB drivers can use ib_register_client() to
 * register callbacks for IB device addition and removal.  When an IB
 * device is added, each registered client's add method will be called
 * (in the order the clients were registered), and when a device is
 * removed, each client's remove method will be called (in the reverse
 * order that clients were registered).  In addition, when
 * ib_register_client() is called, the client will receive an add
 * callback for all devices already registered.
 */
int ib_register_client(struct ib_client *client)
{
        struct ib_device *device;

        mutex_lock(&device_mutex);

        list_for_each_entry(device, &device_list, core_list)
                if (client->add && !add_client_context(device, client))
                        client->add(device);

        down_write(&lists_rwsem);
        list_add_tail(&client->list, &client_list);
        up_write(&lists_rwsem);

        mutex_unlock(&device_mutex);

        return 0;
}
EXPORT_SYMBOL(ib_register_client);

/**
 * ib_unregister_client - Unregister an IB client
 * @client:Client to unregister
 *
 * Upper level users use ib_unregister_client() to remove their client
 * registration.  When ib_unregister_client() is called, the client
 * will receive a remove callback for each IB device still registered.
 */
void ib_unregister_client(struct ib_client *client)
{
        struct ib_client_data *context, *tmp;
        struct ib_device *device;
        unsigned long flags;

        mutex_lock(&device_mutex);

        down_write(&lists_rwsem);
        list_del(&client->list);
        up_write(&lists_rwsem);

        list_for_each_entry(device, &device_list, core_list) {
                struct ib_client_data *found_context = NULL;

                down_write(&lists_rwsem);
                spin_lock_irqsave(&device->client_data_lock, flags);
                list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
                        if (context->client == client) {
                                context->going_down = true;
                                found_context = context;
                                break;
                        }
                spin_unlock_irqrestore(&device->client_data_lock, flags);
                up_write(&lists_rwsem);

                if (client->remove)
                        client->remove(device, found_context ?
                                               found_context->data : NULL);

                if (!found_context) {
                        pr_warn("No client context found for %s/%s\n",
                                device->name, client->name);
                        continue;
                }

                down_write(&lists_rwsem);
                spin_lock_irqsave(&device->client_data_lock, flags);
                list_del(&found_context->list);
                kfree(found_context);
                spin_unlock_irqrestore(&device->client_data_lock, flags);
                up_write(&lists_rwsem);
        }

        mutex_unlock(&device_mutex);
}
EXPORT_SYMBOL(ib_unregister_client);
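
/*
 * Example (hypothetical sketch): a minimal client that keeps per-device
 * state via the client data API below.  my_client, my_state, my_add_one
 * and my_remove_one are made-up names used only for illustration:
 *
 *      static struct ib_client my_client;
 *
 *      static void my_add_one(struct ib_device *device)
 *      {
 *              struct my_state *st = kzalloc(sizeof(*st), GFP_KERNEL);
 *
 *              if (!st)
 *                      return;
 *              ib_set_client_data(device, &my_client, st);
 *      }
 *
 *      static void my_remove_one(struct ib_device *device, void *client_data)
 *      {
 *              kfree(client_data);
 *      }
 *
 *      static struct ib_client my_client = {
 *              .name   = "my_client",
 *              .add    = my_add_one,
 *              .remove = my_remove_one,
 *      };
 *
 * Module init/exit would then call ib_register_client(&my_client) and
 * ib_unregister_client(&my_client) respectively.
 */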
/**
 * ib_get_client_data - Get IB client context
 * @device:Device to get context for
 * @client:Client to get context for
 *
 * ib_get_client_data() returns client context set with
 * ib_set_client_data().
 */
void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
{
        struct ib_client_data *context;
        void *ret = NULL;
        unsigned long flags;

        spin_lock_irqsave(&device->client_data_lock, flags);
        list_for_each_entry(context, &device->client_data_list, list)
                if (context->client == client) {
                        ret = context->data;
                        break;
                }
        spin_unlock_irqrestore(&device->client_data_lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_client_data);

/**
 * ib_set_client_data - Set IB client context
 * @device:Device to set context for
 * @client:Client to set context for
 * @data:Context to set
 *
 * ib_set_client_data() sets client context that can be retrieved with
 * ib_get_client_data().
 */
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
                        void *data)
{
        struct ib_client_data *context;
        unsigned long flags;

        spin_lock_irqsave(&device->client_data_lock, flags);
        list_for_each_entry(context, &device->client_data_list, list)
                if (context->client == client) {
                        context->data = data;
                        goto out;
                }

        pr_warn("No client context found for %s/%s\n",
                device->name, client->name);

out:
        spin_unlock_irqrestore(&device->client_data_lock, flags);
}
EXPORT_SYMBOL(ib_set_client_data);

/**
 * ib_register_event_handler - Register an IB event handler
 * @event_handler:Handler to register
 *
 * ib_register_event_handler() registers an event handler that will be
 * called back when asynchronous IB events occur (as defined in
 * chapter 11 of the InfiniBand Architecture Specification).  This
 * callback may occur in interrupt context.
 */
int ib_register_event_handler(struct ib_event_handler *event_handler)
{
        unsigned long flags;

        spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
        list_add_tail(&event_handler->list,
                      &event_handler->device->event_handler_list);
        spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);

        return 0;
}
EXPORT_SYMBOL(ib_register_event_handler);

/**
 * ib_unregister_event_handler - Unregister an event handler
 * @event_handler:Handler to unregister
 *
 * Unregister an event handler registered with
 * ib_register_event_handler().
 */
int ib_unregister_event_handler(struct ib_event_handler *event_handler)
{
        unsigned long flags;

        spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
        list_del(&event_handler->list);
        spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);

        return 0;
}
EXPORT_SYMBOL(ib_unregister_event_handler);
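
/*
 * Example (hypothetical sketch): registering for asynchronous events with
 * the INIT_IB_EVENT_HANDLER() initializer from <rdma/ib_verbs.h>.  Since
 * the handler may run in interrupt context, it must not sleep.
 * my_event_handler is a made-up name:
 *
 *      static void my_event_handler(struct ib_event_handler *handler,
 *                                   struct ib_event *event)
 *      {
 *              if (event->event == IB_EVENT_PORT_ERR)
 *                      pr_warn("port %d went down\n",
 *                              event->element.port_num);
 *      }
 *
 *      struct ib_event_handler handler;
 *
 *      INIT_IB_EVENT_HANDLER(&handler, device, my_event_handler);
 *      ib_register_event_handler(&handler);
 */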
/**
 * ib_dispatch_event - Dispatch an asynchronous event
 * @event:Event to dispatch
 *
 * Low-level drivers must call ib_dispatch_event() to dispatch the
 * event to all registered event handlers when an asynchronous event
 * occurs.
 */
void ib_dispatch_event(struct ib_event *event)
{
        unsigned long flags;
        struct ib_event_handler *handler;

        spin_lock_irqsave(&event->device->event_handler_lock, flags);

        list_for_each_entry(handler, &event->device->event_handler_list, list)
                handler->handler(handler, event);

        spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_dispatch_event);

/**
 * ib_query_port - Query IB port attributes
 * @device:Device to query
 * @port_num:Port number to query
 * @port_attr:Port attributes
 *
 * ib_query_port() returns the attributes of a port through the
 * @port_attr pointer.
 */
int ib_query_port(struct ib_device *device,
                  u8 port_num,
                  struct ib_port_attr *port_attr)
{
        union ib_gid gid;
        int err;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        memset(port_attr, 0, sizeof(*port_attr));
        err = device->query_port(device, port_num, port_attr);
        if (err || port_attr->subnet_prefix)
                return err;

        if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
                return 0;

        err = ib_query_gid(device, port_num, 0, &gid, NULL);
        if (err)
                return err;

        port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix);
        return 0;
}
EXPORT_SYMBOL(ib_query_port);

/**
 * ib_query_gid - Get GID table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:GID table index to query
 * @gid:Returned GID
 * @attr: Returned GID attributes related to this GID index (only in RoCE).
 *   NULL means ignore.
 *
 * ib_query_gid() fetches the specified GID table entry.
 */
int ib_query_gid(struct ib_device *device,
                 u8 port_num, int index, union ib_gid *gid,
                 struct ib_gid_attr *attr)
{
        if (rdma_cap_roce_gid_table(device, port_num))
                return ib_get_cached_gid(device, port_num, index, gid, attr);

        if (attr)
                return -EINVAL;

        return device->query_gid(device, port_num, index, gid);
}
EXPORT_SYMBOL(ib_query_gid);

/**
 * ib_enum_roce_netdev - enumerate all RoCE ports
 * @ib_dev: IB device we want to query
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates all of the physical RoCE ports of ib_dev which are related
 * to a netdevice and calls the callback for each port for which the
 * filter function returns a non-zero value.
 */
void ib_enum_roce_netdev(struct ib_device *ib_dev,
                         roce_netdev_filter filter,
                         void *filter_cookie,
                         roce_netdev_callback cb,
                         void *cookie)
{
        u8 port;

        for (port = rdma_start_port(ib_dev); port <= rdma_end_port(ib_dev);
             port++)
                if (rdma_protocol_roce(ib_dev, port)) {
                        if_t idev = NULL;

                        if (ib_dev->get_netdev)
                                idev = ib_dev->get_netdev(ib_dev, port);

                        if (idev && (if_getflags(idev) & IFF_DYING)) {
                                dev_put(idev);
                                idev = NULL;
                        }

                        if (filter(ib_dev, port, idev, filter_cookie))
                                cb(ib_dev, port, idev, cookie);

                        if (idev)
                                dev_put(idev);
                }
}
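
/*
 * Example (hypothetical sketch): a filter/callback pair for the
 * enumerator above.  match_any and count_port are made-up names; the
 * filter accepts every port and the callback counts ports through the
 * cookie:
 *
 *      static int match_any(struct ib_device *dev, u8 port, if_t idev,
 *                           void *cookie)
 *      {
 *              return 1;       // enumerate every RoCE port
 *      }
 *
 *      static void count_port(struct ib_device *dev, u8 port, if_t idev,
 *                             void *cookie)
 *      {
 *              (*(int *)cookie)++;
 *      }
 *
 *      int n = 0;
 *      ib_enum_roce_netdev(ib_dev, match_any, NULL, count_port, &n);
 */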
/**
 * ib_enum_all_roce_netdevs - enumerate all RoCE devices
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates the physical ports of all RoCE devices which are related
 * to netdevices and calls the callback for each port for which the
 * filter function returns a non-zero value.
 */
void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
                              void *filter_cookie,
                              roce_netdev_callback cb,
                              void *cookie)
{
        struct ib_device *dev;

        down_read(&lists_rwsem);
        list_for_each_entry(dev, &device_list, core_list)
                ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie);
        up_read(&lists_rwsem);
}

/**
 * ib_cache_gid_del_all_by_netdev - delete GIDs belonging to a netdevice
 *
 * @ndev: Pointer to netdevice
 */
void ib_cache_gid_del_all_by_netdev(if_t ndev)
{
        struct ib_device *ib_dev;
        u8 port;

        down_read(&lists_rwsem);
        list_for_each_entry(ib_dev, &device_list, core_list) {
                for (port = rdma_start_port(ib_dev);
                     port <= rdma_end_port(ib_dev);
                     port++) {
                        if (rdma_protocol_roce(ib_dev, port) == 0)
                                continue;
                        (void) ib_cache_gid_del_all_netdev_gids(ib_dev, port, ndev);
                }
        }
        up_read(&lists_rwsem);
}

/**
 * ib_query_pkey - Get P_Key table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:P_Key table index to query
 * @pkey:Returned P_Key
 *
 * ib_query_pkey() fetches the specified P_Key table entry.
 */
int ib_query_pkey(struct ib_device *device,
                  u8 port_num, u16 index, u16 *pkey)
{
        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        return device->query_pkey(device, port_num, index, pkey);
}
EXPORT_SYMBOL(ib_query_pkey);

/**
 * ib_modify_device - Change IB device attributes
 * @device:Device to modify
 * @device_modify_mask:Mask of attributes to change
 * @device_modify:New attribute values
 *
 * ib_modify_device() changes a device's attributes as specified by
 * the @device_modify_mask and @device_modify structure.
 */
int ib_modify_device(struct ib_device *device,
                     int device_modify_mask,
                     struct ib_device_modify *device_modify)
{
        if (!device->modify_device)
                return -ENOSYS;

        return device->modify_device(device, device_modify_mask,
                                     device_modify);
}
EXPORT_SYMBOL(ib_modify_device);

/**
 * ib_modify_port - Modifies the attributes for the specified port.
 * @device: The device to modify.
 * @port_num: The number of the port to modify.
 * @port_modify_mask: Mask used to specify which attributes of the port
 *   to change.
 * @port_modify: New attribute values for the port.
 *
 * ib_modify_port() changes a port's attributes as specified by the
 * @port_modify_mask and @port_modify structure.
 */
int ib_modify_port(struct ib_device *device,
                   u8 port_num, int port_modify_mask,
                   struct ib_port_modify *port_modify)
{
        if (!device->modify_port)
                return -ENOSYS;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        return device->modify_port(device, port_num, port_modify_mask,
                                   port_modify);
}
EXPORT_SYMBOL(ib_modify_port);
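
/*
 * Example (hypothetical sketch): using ib_modify_device() to change the
 * node description string; IB_DEVICE_MODIFY_NODE_DESC is the standard
 * modify-mask bit for this field, and the call fails with -ENOSYS when
 * the driver does not implement modify_device:
 *
 *      struct ib_device_modify mod = {};
 *
 *      strlcpy(mod.node_desc, "my node", sizeof(mod.node_desc));
 *      if (ib_modify_device(device, IB_DEVICE_MODIFY_NODE_DESC, &mod))
 *              pr_warn("node description update not supported\n");
 */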
/**
 * ib_find_gid - Returns the port number and GID table index where
 * a specified GID value occurs.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @gid_type: Type of GID.
 * @ndev: The ndev related to the GID to search for.
 * @port_num: The port number of the device where the GID value was found.
 * @index: The index into the GID table where the GID was found.  This
 *   parameter may be NULL.
 */
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
                enum ib_gid_type gid_type, if_t ndev,
                u8 *port_num, u16 *index)
{
        union ib_gid tmp_gid;
        int ret, port, i;

        for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) {
                if (rdma_cap_roce_gid_table(device, port)) {
                        if (!ib_find_cached_gid_by_port(device, gid, gid_type, port,
                                                        ndev, index)) {
                                *port_num = port;
                                return 0;
                        }
                }

                if (gid_type != IB_GID_TYPE_IB)
                        continue;

                for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) {
                        ret = ib_query_gid(device, port, i, &tmp_gid, NULL);
                        if (ret)
                                return ret;
                        if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
                                *port_num = port;
                                if (index)
                                        *index = i;
                                return 0;
                        }
                }
        }

        return -ENOENT;
}
EXPORT_SYMBOL(ib_find_gid);

/**
 * ib_find_pkey - Returns the PKey table index where a specified
 * PKey value occurs.
 * @device: The device to query.
 * @port_num: The port number of the device to search for the PKey.
 * @pkey: The PKey value to search for.
 * @index: The index into the PKey table where the PKey was found.
 */
int ib_find_pkey(struct ib_device *device,
                 u8 port_num, u16 pkey, u16 *index)
{
        int ret, i;
        u16 tmp_pkey;
        int partial_ix = -1;

        for (i = 0; i < device->port_immutable[port_num].pkey_tbl_len; ++i) {
                ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
                if (ret)
                        return ret;
                if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
                        /* If there is a full-member pkey, take it. */
                        if (tmp_pkey & 0x8000) {
                                *index = i;
                                return 0;
                        }
                        if (partial_ix < 0)
                                partial_ix = i;
                }
        }

        /* No full-member pkey found; if a limited-member one exists, take it. */
        if (partial_ix >= 0) {
                *index = partial_ix;
                return 0;
        }
        return -ENOENT;
}
EXPORT_SYMBOL(ib_find_pkey);
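
/*
 * Example (hypothetical sketch): looking up the index of the default
 * P_Key (0xffff).  The comparison above masks off the membership bit
 * (0x8000), so a limited-member entry (0x7fff) would also match, but a
 * full-member entry is preferred when both are present:
 *
 *      u16 index;
 *
 *      if (!ib_find_pkey(device, port_num, 0xffff, &index))
 *              pr_info("default P_Key at index %u\n", index);
 */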
/**
 * ib_get_net_dev_by_params() - Return the appropriate net_dev
 * for a received CM request
 * @dev: An RDMA device on which the request has been received.
 * @port: Port number on the RDMA device.
 * @pkey: The Pkey the request came on.
 * @gid: A GID that the net_dev uses to communicate.
 * @addr: Contains the IP address that the request specified as its
 *   destination.
 */
if_t ib_get_net_dev_by_params(struct ib_device *dev,
                              u8 port,
                              u16 pkey,
                              const union ib_gid *gid,
                              const struct sockaddr *addr)
{
        if_t net_dev = NULL;
        struct ib_client_data *context;

        if (!rdma_protocol_ib(dev, port))
                return NULL;

        down_read(&lists_rwsem);

        list_for_each_entry(context, &dev->client_data_list, list) {
                struct ib_client *client = context->client;

                if (context->going_down)
                        continue;

                if (client->get_net_dev_by_params) {
                        net_dev = client->get_net_dev_by_params(dev, port, pkey,
                                                                gid, addr,
                                                                context->data);
                        if (net_dev)
                                break;
                }
        }

        up_read(&lists_rwsem);

        return net_dev;
}
EXPORT_SYMBOL(ib_get_net_dev_by_params);

static int __init ib_core_init(void)
{
        int ret;

        ib_wq = alloc_workqueue("infiniband", 0, 0);
        if (!ib_wq)
                return -ENOMEM;

        ib_comp_wq = alloc_workqueue("ib-comp-wq",
                        WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
                        mp_ncpus * 4 /* WQ_UNBOUND_MAX_ACTIVE */);
        if (!ib_comp_wq) {
                ret = -ENOMEM;
                goto err;
        }

        ret = class_register(&ib_class);
        if (ret) {
                pr_warn("Couldn't create InfiniBand device class\n");
                goto err_comp;
        }

        ret = addr_init();
        if (ret) {
                pr_warn("Couldn't init IB address resolution\n");
                goto err_sysfs;
        }

        ret = ib_mad_init();
        if (ret) {
                pr_warn("Couldn't init IB MAD\n");
                goto err_addr;
        }

        ret = ib_sa_init();
        if (ret) {
                pr_warn("Couldn't init SA\n");
                goto err_mad;
        }

        ib_cache_setup();

        return 0;

err_mad:
        ib_mad_cleanup();
err_addr:
        addr_cleanup();
err_sysfs:
        class_unregister(&ib_class);
err_comp:
        destroy_workqueue(ib_comp_wq);
err:
        destroy_workqueue(ib_wq);
        return ret;
}

static void __exit ib_core_cleanup(void)
{
        ib_cache_cleanup();
        ib_sa_cleanup();
        ib_mad_cleanup();
        addr_cleanup();
        class_unregister(&ib_class);
        destroy_workqueue(ib_comp_wq);
        /* Make sure that any pending umem accounting work is done. */
        destroy_workqueue(ib_wq);
}

/*
 * Typical loading and unloading order values and their use:
 *
 * SI_ORDER_FIRST (default for module_init):
 *      Core modules (PCI, infiniband)
 * SI_ORDER_SECOND (default for module_exit):
 *      Infiniband core modules (CM)
 * SI_ORDER_THIRD:
 * SI_ORDER_FOURTH:
 *      Infiniband core modules (CMA)
 * SI_ORDER_FIFTH:
 *      Infiniband user-space modules (UCM, UCMA, UMAD, UVERBS, IPOIB)
 * SI_ORDER_SIXTH:
 *      Network HW driver modules
 * SI_ORDER_SEVENTH:
 *      Infiniband HW driver modules
 */
module_init_order(ib_core_init, SI_ORDER_FIRST);
module_exit_order(ib_core_cleanup, SI_ORDER_FIRST);

MODULE_VERSION(ibcore, 1);
MODULE_DEPEND(ibcore, linuxkpi, 1, 1, 1);