/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2008 Cisco. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define pr_fmt(fmt) "user_mad: " fmt

#include <linux/module.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/dma-mapping.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/kref.h>
#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/slab.h>

#include <asm/uaccess.h>

#include <rdma/ib_mad.h>
#include <rdma/ib_user_mad.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
MODULE_LICENSE("Dual BSD/GPL");

enum {
	IB_UMAD_MAX_PORTS  = 64,
	IB_UMAD_MAX_AGENTS = 32,

	IB_UMAD_MAJOR      = 231,
	IB_UMAD_MINOR_BASE = 0
};

/*
 * Our lifetime rules for these structs are the following:
 * each time a device special file is opened, we take a reference on
 * the ib_umad_port's struct ib_umad_device.  We drop these
 * references in the corresponding close().
 *
 * In addition to references coming from open character devices, there
 * is one more reference to each ib_umad_device representing the
 * module's reference taken when allocating the ib_umad_device in
 * ib_umad_add_one().
 *
 * When destroying an ib_umad_device, we drop the module's reference.
 */
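/*
 * Summary of the user-visible interface implemented below: for every IB
 * port that supports MADs, ib_umad_init_port() creates two character
 * devices.  "umad%d" (umad_fops) gives a process read()/write()/poll()
 * access to MADs plus the agent-registration ioctls, while "issm%d"
 * (umad_sm_fops) has no data path at all: opening it sets the
 * IB_PORT_SM capability bit on the port and closing it clears the bit.
 */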
struct ib_umad_port {
	struct cdev            cdev;
	struct device         *dev;

	struct cdev            sm_cdev;
	struct device         *sm_dev;
	struct semaphore       sm_sem;

	struct mutex           file_mutex;
	struct list_head       file_list;

	struct ib_device      *ib_dev;
	struct ib_umad_device *umad_dev;
	int                    dev_num;
	u8                     port_num;
};

struct ib_umad_device {
	struct kobject       kobj;
	struct ib_umad_port  port[0];
};

struct ib_umad_file {
	struct mutex            mutex;
	struct ib_umad_port    *port;
	struct list_head        recv_list;
	struct list_head        send_list;
	struct list_head        port_list;
	spinlock_t              send_lock;
	wait_queue_head_t       recv_wait;
	struct ib_mad_agent    *agent[IB_UMAD_MAX_AGENTS];
	int                     agents_dead;
	u8                      use_pkey_index;
	u8                      already_used;
};

struct ib_umad_packet {
	struct ib_mad_send_buf *msg;
	struct ib_mad_recv_wc  *recv_wc;
	struct list_head        list;
	int                     length;
	struct ib_user_mad      mad;
};

static struct class *umad_class;

#define base_dev MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE)

static DEFINE_SPINLOCK(port_lock);
static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS);

static void ib_umad_add_one(struct ib_device *device);
static void ib_umad_remove_one(struct ib_device *device, void *client_data);

static void ib_umad_release_dev(struct kobject *kobj)
{
	struct ib_umad_device *dev =
		container_of(kobj, struct ib_umad_device, kobj);

	kfree(dev);
}

static struct kobj_type ib_umad_dev_ktype = {
	.release = ib_umad_release_dev,
};

static int hdr_size(struct ib_umad_file *file)
{
	return file->use_pkey_index ? sizeof (struct ib_user_mad_hdr) :
				      sizeof (struct ib_user_mad_hdr_old);
}
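/*
 * Every read() and write() on a umad file starts with a user-space MAD
 * header followed by the MAD payload.  Which header is expected depends
 * on the per-file flag checked by hdr_size() above: a file that enabled
 * P_Key support (via IB_USER_MAD_ENABLE_PKEY, or whose first
 * registration went through IB_USER_MAD_REGISTER_AGENT2) uses
 * struct ib_user_mad_hdr, which carries a pkey_index field; older users
 * get the shorter struct ib_user_mad_hdr_old.
 */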
/* caller must hold file->mutex */
static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id)
{
	return file->agents_dead ? NULL : file->agent[id];
}

static int queue_packet(struct ib_umad_file *file,
			struct ib_mad_agent *agent,
			struct ib_umad_packet *packet)
{
	int ret = 1;

	mutex_lock(&file->mutex);

	for (packet->mad.hdr.id = 0;
	     packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
	     packet->mad.hdr.id++)
		if (agent == __get_agent(file, packet->mad.hdr.id)) {
			list_add_tail(&packet->list, &file->recv_list);
			wake_up_interruptible(&file->recv_wait);
			ret = 0;
			break;
		}

	mutex_unlock(&file->mutex);

	return ret;
}

static void dequeue_send(struct ib_umad_file *file,
			 struct ib_umad_packet *packet)
{
	spin_lock_irq(&file->send_lock);
	list_del(&packet->list);
	spin_unlock_irq(&file->send_lock);
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *send_wc)
{
	struct ib_umad_file *file = agent->context;
	struct ib_umad_packet *packet = send_wc->send_buf->context[0];

	dequeue_send(file, packet);
	ib_destroy_ah(packet->msg->ah);
	ib_free_send_mad(packet->msg);

	if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) {
		packet->length = IB_MGMT_MAD_HDR;
		packet->mad.hdr.status = ETIMEDOUT;
		if (!queue_packet(file, agent, packet))
			return;
	}
	kfree(packet);
}

static void recv_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_buf *send_buf,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_umad_file *file = agent->context;
	struct ib_umad_packet *packet;

	if (mad_recv_wc->wc->status != IB_WC_SUCCESS)
		goto err1;

	packet = kzalloc(sizeof *packet, GFP_KERNEL);
	if (!packet)
		goto err1;

	packet->length = mad_recv_wc->mad_len;
	packet->recv_wc = mad_recv_wc;

	packet->mad.hdr.status      = 0;
	packet->mad.hdr.length      = hdr_size(file) + mad_recv_wc->mad_len;
	packet->mad.hdr.qpn         = cpu_to_be32(mad_recv_wc->wc->src_qp);
	packet->mad.hdr.lid         = cpu_to_be16(mad_recv_wc->wc->slid);
	packet->mad.hdr.sl          = mad_recv_wc->wc->sl;
	packet->mad.hdr.path_bits   = mad_recv_wc->wc->dlid_path_bits;
	packet->mad.hdr.pkey_index  = mad_recv_wc->wc->pkey_index;
	packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH);
	if (packet->mad.hdr.grh_present) {
		struct ib_ah_attr ah_attr;
		int ret;

		ret = ib_init_ah_from_wc(agent->device, agent->port_num,
					 mad_recv_wc->wc, mad_recv_wc->recv_buf.grh,
					 &ah_attr);
		if (ret)
			goto err2;

		packet->mad.hdr.gid_index = ah_attr.grh.sgid_index;
		packet->mad.hdr.hop_limit = ah_attr.grh.hop_limit;
		packet->mad.hdr.traffic_class = ah_attr.grh.traffic_class;
		memcpy(packet->mad.hdr.gid, &ah_attr.grh.dgid, 16);
		packet->mad.hdr.flow_label = cpu_to_be32(ah_attr.grh.flow_label);
	}

	if (queue_packet(file, agent, packet))
		goto err2;
	return;

err2:
	kfree(packet);
err1:
	ib_free_recv_mad(mad_recv_wc);
}
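/*
 * Note that the receive queue drained by read() below holds more than
 * just incoming MADs: send_handler() above also requeues a timed-out
 * request onto recv_list with hdr.status set to ETIMEDOUT and only
 * IB_MGMT_MAD_HDR bytes of payload, which is how a user-space requester
 * learns that its transaction expired.
 */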
static ssize_t copy_recv_mad(struct ib_umad_file *file, char __user *buf,
			     struct ib_umad_packet *packet, size_t count)
{
	struct ib_mad_recv_buf *recv_buf;
	int left, seg_payload, offset, max_seg_payload;
	size_t seg_size;

	recv_buf = &packet->recv_wc->recv_buf;
	seg_size = packet->recv_wc->mad_seg_size;

	/* We need enough room to copy the first (or only) MAD segment. */
	if ((packet->length <= seg_size &&
	     count < hdr_size(file) + packet->length) ||
	    (packet->length > seg_size &&
	     count < hdr_size(file) + seg_size))
		return -EINVAL;

	if (copy_to_user(buf, &packet->mad, hdr_size(file)))
		return -EFAULT;

	buf += hdr_size(file);
	seg_payload = min_t(int, packet->length, seg_size);
	if (copy_to_user(buf, recv_buf->mad, seg_payload))
		return -EFAULT;

	if (seg_payload < packet->length) {
		/*
		 * Multipacket RMPP MAD message. Copy remainder of message.
		 * Note that last segment may have a shorter payload.
		 */
		if (count < hdr_size(file) + packet->length) {
			/*
			 * The buffer is too small, return the first RMPP segment,
			 * which includes the RMPP message length.
			 */
			return -ENOSPC;
		}
		offset = ib_get_mad_data_offset(recv_buf->mad->mad_hdr.mgmt_class);
		max_seg_payload = seg_size - offset;

		for (left = packet->length - seg_payload, buf += seg_payload;
		     left; left -= seg_payload, buf += seg_payload) {
			recv_buf = container_of(recv_buf->list.next,
						struct ib_mad_recv_buf, list);
			seg_payload = min(left, max_seg_payload);
			if (copy_to_user(buf, (char *)recv_buf->mad + offset,
					 seg_payload))
				return -EFAULT;
		}
	}
	return hdr_size(file) + packet->length;
}

static ssize_t copy_send_mad(struct ib_umad_file *file, char __user *buf,
			     struct ib_umad_packet *packet, size_t count)
{
	ssize_t size = hdr_size(file) + packet->length;

	if (count < size)
		return -EINVAL;

	if (copy_to_user(buf, &packet->mad, hdr_size(file)))
		return -EFAULT;

	buf += hdr_size(file);

	if (copy_to_user(buf, packet->mad.data, packet->length))
		return -EFAULT;

	return size;
}

static ssize_t ib_umad_read(struct file *filp, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_packet *packet;
	ssize_t ret;

	if (count < hdr_size(file))
		return -EINVAL;

	mutex_lock(&file->mutex);

	while (list_empty(&file->recv_list)) {
		mutex_unlock(&file->mutex);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->recv_wait,
					     !list_empty(&file->recv_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mutex);
	}

	packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
	list_del(&packet->list);

	mutex_unlock(&file->mutex);

	if (packet->recv_wc)
		ret = copy_recv_mad(file, buf, packet, count);
	else
		ret = copy_send_mad(file, buf, packet, count);

	if (ret < 0) {
		/* Requeue packet */
		mutex_lock(&file->mutex);
		list_add(&packet->list, &file->recv_list);
		mutex_unlock(&file->mutex);
	} else {
		if (packet->recv_wc)
			ib_free_recv_mad(packet->recv_wc);
		kfree(packet);
	}
	return ret;
}
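/*
 * For illustration only (not part of this driver): a minimal user-space
 * read against the "umad%d" node created below might look like the
 * sketch here.  It assumes the process has already enabled the new ABI
 * with IB_USER_MAD_ENABLE_PKEY, that the node is named as umad_devnode()
 * produces it (e.g. /dev/infiniband/umad0), and that a plain 256-byte IB
 * MAD is expected.  A larger RMPP message fails with ENOSPC after the
 * first segment (which carries the RMPP length) has been copied, and the
 * packet is requeued, so the caller can simply retry with a bigger buffer.
 *
 *	int fd = open("/dev/infiniband/umad0", O_RDWR);
 *	char buf[sizeof(struct ib_user_mad_hdr) + 256];
 *	ssize_t n;
 *
 *	n = read(fd, buf, sizeof(buf));
 *	if (n < 0 && errno == ENOSPC) {
 *		// Reallocate using the length found in the first RMPP
 *		// segment already placed in buf, then read() again.
 *	}
 */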
static int copy_rmpp_mad(struct ib_mad_send_buf *msg, const char __user *buf)
{
	int left, seg;

	/* Copy class specific header */
	if ((msg->hdr_len > IB_MGMT_RMPP_HDR) &&
	    copy_from_user((char *)msg->mad + IB_MGMT_RMPP_HDR, buf + IB_MGMT_RMPP_HDR,
			   msg->hdr_len - IB_MGMT_RMPP_HDR))
		return -EFAULT;

	/* All headers are in place.  Copy data segments. */
	for (seg = 1, left = msg->data_len, buf += msg->hdr_len; left > 0;
	     seg++, left -= msg->seg_size, buf += msg->seg_size) {
		if (copy_from_user(ib_get_rmpp_segment(msg, seg), buf,
				   min(left, msg->seg_size)))
			return -EFAULT;
	}
	return 0;
}

static int same_destination(struct ib_user_mad_hdr *hdr1,
			    struct ib_user_mad_hdr *hdr2)
{
	if (!hdr1->grh_present && !hdr2->grh_present)
		return (hdr1->lid == hdr2->lid);

	if (hdr1->grh_present && hdr2->grh_present)
		return !memcmp(hdr1->gid, hdr2->gid, 16);

	return 0;
}

static int is_duplicate(struct ib_umad_file *file,
			struct ib_umad_packet *packet)
{
	struct ib_umad_packet *sent_packet;
	struct ib_mad_hdr *sent_hdr, *hdr;

	hdr = (struct ib_mad_hdr *) packet->mad.data;
	list_for_each_entry(sent_packet, &file->send_list, list) {
		sent_hdr = (struct ib_mad_hdr *) sent_packet->mad.data;

		if ((hdr->tid != sent_hdr->tid) ||
		    (hdr->mgmt_class != sent_hdr->mgmt_class))
			continue;

		/*
		 * No need to be overly clever here.  If two new operations have
		 * the same TID, reject the second as a duplicate.  This is more
		 * restrictive than required by the spec.
		 */
		if (!ib_response_mad(hdr)) {
			if (!ib_response_mad(sent_hdr))
				return 1;
			continue;
		} else if (!ib_response_mad(sent_hdr))
			continue;

		if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr))
			return 1;
	}

	return 0;
}
static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
			     size_t count, loff_t *pos)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_packet *packet;
	struct ib_mad_agent *agent;
	struct ib_ah_attr ah_attr;
	struct ib_ah *ah;
	struct ib_rmpp_mad *rmpp_mad;
	__be64 *tid;
	int ret, data_len, hdr_len, copy_offset, rmpp_active;
	u8 base_version;

	if (count < hdr_size(file) + IB_MGMT_RMPP_HDR)
		return -EINVAL;

	packet = kzalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL);
	if (!packet)
		return -ENOMEM;

	if (copy_from_user(&packet->mad, buf, hdr_size(file))) {
		ret = -EFAULT;
		goto err;
	}

	if (packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) {
		ret = -EINVAL;
		goto err;
	}

	buf += hdr_size(file);

	if (copy_from_user(packet->mad.data, buf, IB_MGMT_RMPP_HDR)) {
		ret = -EFAULT;
		goto err;
	}

	mutex_lock(&file->mutex);

	agent = __get_agent(file, packet->mad.hdr.id);
	if (!agent) {
		ret = -EINVAL;
		goto err_up;
	}

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid          = be16_to_cpu(packet->mad.hdr.lid);
	ah_attr.sl            = packet->mad.hdr.sl;
	ah_attr.src_path_bits = packet->mad.hdr.path_bits;
	ah_attr.port_num      = file->port->port_num;
	if (packet->mad.hdr.grh_present) {
		ah_attr.ah_flags = IB_AH_GRH;
		memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16);
		ah_attr.grh.sgid_index    = packet->mad.hdr.gid_index;
		ah_attr.grh.flow_label    = be32_to_cpu(packet->mad.hdr.flow_label);
		ah_attr.grh.hop_limit     = packet->mad.hdr.hop_limit;
		ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class;
	}

	ah = ib_create_ah(agent->qp->pd, &ah_attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_up;
	}

	rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
	hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);

	if (ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
	    && ib_mad_kernel_rmpp_agent(agent)) {
		copy_offset = IB_MGMT_RMPP_HDR;
		rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
			      IB_MGMT_RMPP_FLAG_ACTIVE;
	} else {
		copy_offset = IB_MGMT_MAD_HDR;
		rmpp_active = 0;
	}

	base_version = ((struct ib_mad_hdr *)&packet->mad.data)->base_version;
	data_len = count - hdr_size(file) - hdr_len;
	packet->msg = ib_create_send_mad(agent,
					 be32_to_cpu(packet->mad.hdr.qpn),
					 packet->mad.hdr.pkey_index, rmpp_active,
					 hdr_len, data_len, GFP_KERNEL,
					 base_version);
	if (IS_ERR(packet->msg)) {
		ret = PTR_ERR(packet->msg);
		goto err_ah;
	}

	packet->msg->ah         = ah;
	packet->msg->timeout_ms = packet->mad.hdr.timeout_ms;
	packet->msg->retries    = packet->mad.hdr.retries;
	packet->msg->context[0] = packet;

	/* Copy MAD header.  Any RMPP header is already in place. */
	memcpy(packet->msg->mad, packet->mad.data, IB_MGMT_MAD_HDR);

	if (!rmpp_active) {
		if (copy_from_user((char *)packet->msg->mad + copy_offset,
				   buf + copy_offset,
				   hdr_len + data_len - copy_offset)) {
			ret = -EFAULT;
			goto err_msg;
		}
	} else {
		ret = copy_rmpp_mad(packet->msg, buf);
		if (ret)
			goto err_msg;
	}

	/*
	 * Set the high-order part of the transaction ID to make MADs from
	 * different agents unique, and allow routing responses back to the
	 * original requestor.
	 */
	if (!ib_response_mad(packet->msg->mad)) {
		tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid;
		*tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
				   (be64_to_cpup(tid) & 0xffffffff));
		rmpp_mad->mad_hdr.tid = *tid;
	}

	if (!ib_mad_kernel_rmpp_agent(agent)
	    && ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
	    && (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) {
		spin_lock_irq(&file->send_lock);
		list_add_tail(&packet->list, &file->send_list);
		spin_unlock_irq(&file->send_lock);
	} else {
		spin_lock_irq(&file->send_lock);
		ret = is_duplicate(file, packet);
		if (!ret)
			list_add_tail(&packet->list, &file->send_list);
		spin_unlock_irq(&file->send_lock);
		if (ret) {
			ret = -EINVAL;
			goto err_msg;
		}
	}

	ret = ib_post_send_mad(packet->msg, NULL);
	if (ret)
		goto err_send;

	mutex_unlock(&file->mutex);
	return count;

err_send:
	dequeue_send(file, packet);
err_msg:
	ib_free_send_mad(packet->msg);
err_ah:
	ib_destroy_ah(ah);
err_up:
	mutex_unlock(&file->mutex);
err:
	kfree(packet);
	return ret;
}

static unsigned int ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ib_umad_file *file = filp->private_data;

	/* we will always be able to post a MAD send */
	unsigned int mask = POLLOUT | POLLWRNORM;

	poll_wait(filp, &file->recv_wait, wait);

	if (!list_empty(&file->recv_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}
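/*
 * For illustration only: sending a MAD from user space mirrors the
 * parsing done in ib_umad_write() above.  A hypothetical requester that
 * has already registered an agent (see the ioctls below) and enabled the
 * pkey-aware ABI would fill in the user header and append the 256-byte
 * MAD; fd, agent_id and dlid are assumed to exist in the caller:
 *
 *	struct ib_user_mad *um = calloc(1, sizeof(*um) + 256);
 *
 *	um->hdr.id         = agent_id;      // returned by REGISTER_AGENT(2)
 *	um->hdr.qpn        = htobe32(1);    // GSI; 0 would mean SMI
 *	um->hdr.lid        = htobe16(dlid); // destination LID
 *	um->hdr.timeout_ms = 100;           // nonzero: expect a response
 *	um->hdr.retries    = 3;
 *	// fill um->data[] with the MAD itself, then:
 *	write(fd, um, sizeof(*um) + 256);
 */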
static int ib_umad_reg_agent(struct ib_umad_file *file, void __user *arg,
			     int compat_method_mask)
{
	struct ib_user_mad_reg_req ureq;
	struct ib_mad_reg_req req;
	struct ib_mad_agent *agent = NULL;
	int agent_id;
	int ret;

	mutex_lock(&file->port->file_mutex);
	mutex_lock(&file->mutex);

	if (!file->port->ib_dev) {
		dev_notice(file->port->dev,
			   "ib_umad_reg_agent: invalid device\n");
		ret = -EPIPE;
		goto out;
	}

	if (copy_from_user(&ureq, arg, sizeof ureq)) {
		ret = -EFAULT;
		goto out;
	}

	if (ureq.qpn != 0 && ureq.qpn != 1) {
		dev_notice(file->port->dev,
			   "ib_umad_reg_agent: invalid QPN %d specified\n",
			   ureq.qpn);
		ret = -EINVAL;
		goto out;
	}

	for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id)
		if (!__get_agent(file, agent_id))
			goto found;

	dev_notice(file->port->dev,
		   "ib_umad_reg_agent: Max Agents (%u) reached\n",
		   IB_UMAD_MAX_AGENTS);
	ret = -ENOMEM;
	goto out;

found:
	if (ureq.mgmt_class) {
		memset(&req, 0, sizeof(req));
		req.mgmt_class         = ureq.mgmt_class;
		req.mgmt_class_version = ureq.mgmt_class_version;
		memcpy(req.oui, ureq.oui, sizeof req.oui);

		if (compat_method_mask) {
			u32 *umm = (u32 *) ureq.method_mask;
			int i;

			for (i = 0; i < BITS_TO_LONGS(IB_MGMT_MAX_METHODS); ++i)
				req.method_mask[i] =
					umm[i * 2] | ((u64) umm[i * 2 + 1] << 32);
		} else
			memcpy(req.method_mask, ureq.method_mask,
			       sizeof req.method_mask);
	}

	agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
				      ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI,
				      ureq.mgmt_class ? &req : NULL,
				      ureq.rmpp_version,
				      send_handler, recv_handler, file, 0);
	if (IS_ERR(agent)) {
		ret = PTR_ERR(agent);
		agent = NULL;
		goto out;
	}

	if (put_user(agent_id,
		     (u32 __user *) ((char *)arg + offsetof(struct ib_user_mad_reg_req, id)))) {
		ret = -EFAULT;
		goto out;
	}

	if (!file->already_used) {
		file->already_used = 1;
		if (!file->use_pkey_index) {
			dev_warn(file->port->dev,
				"process %s did not enable P_Key index support.\n",
				current->comm);
			dev_warn(file->port->dev,
				" Documentation/infiniband/user_mad.txt has info on the new ABI.\n");
		}
	}

	file->agent[agent_id] = agent;
	ret = 0;

out:
	mutex_unlock(&file->mutex);

	if (ret && agent)
		ib_unregister_mad_agent(agent);

	mutex_unlock(&file->port->file_mutex);

	return ret;
}
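/*
 * For illustration only: a hypothetical user-space registration through
 * the native (non-compat) path above.  On success the kernel writes the
 * allocated agent number back into the 'id' field:
 *
 *	struct ib_user_mad_reg_req req = {
 *		.qpn                = 1,	// GSI
 *		.mgmt_class         = 0x03,	// e.g. the SA class
 *		.mgmt_class_version = 2,
 *	};
 *
 *	// set bits in req.method_mask[] for unsolicited methods, if any
 *	if (ioctl(fd, IB_USER_MAD_REGISTER_AGENT, &req) == 0)
 *		agent_id = req.id;
 */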
static int ib_umad_reg_agent2(struct ib_umad_file *file, void __user *arg)
{
	struct ib_user_mad_reg_req2 ureq;
	struct ib_mad_reg_req req;
	struct ib_mad_agent *agent = NULL;
	int agent_id;
	int ret;

	mutex_lock(&file->port->file_mutex);
	mutex_lock(&file->mutex);

	if (!file->port->ib_dev) {
		dev_notice(file->port->dev,
			   "ib_umad_reg_agent2: invalid device\n");
		ret = -EPIPE;
		goto out;
	}

	if (copy_from_user(&ureq, arg, sizeof(ureq))) {
		ret = -EFAULT;
		goto out;
	}

	if (ureq.qpn != 0 && ureq.qpn != 1) {
		dev_notice(file->port->dev,
			   "ib_umad_reg_agent2: invalid QPN %d specified\n",
			   ureq.qpn);
		ret = -EINVAL;
		goto out;
	}

	if (ureq.flags & ~IB_USER_MAD_REG_FLAGS_CAP) {
		const u32 flags = IB_USER_MAD_REG_FLAGS_CAP;
		dev_notice(file->port->dev,
			   "ib_umad_reg_agent2 failed: invalid registration flags specified 0x%x; supported 0x%x\n",
			   ureq.flags, IB_USER_MAD_REG_FLAGS_CAP);
		ret = -EINVAL;

		if (put_user(flags,
			     (u32 __user *) ((char *)arg + offsetof(struct
			     ib_user_mad_reg_req2, flags))))
			ret = -EFAULT;

		goto out;
	}

	for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id)
		if (!__get_agent(file, agent_id))
			goto found;

	dev_notice(file->port->dev,
		   "ib_umad_reg_agent2: Max Agents (%u) reached\n",
		   IB_UMAD_MAX_AGENTS);
	ret = -ENOMEM;
	goto out;

found:
	if (ureq.mgmt_class) {
		memset(&req, 0, sizeof(req));
		req.mgmt_class         = ureq.mgmt_class;
		req.mgmt_class_version = ureq.mgmt_class_version;
		if (ureq.oui & 0xff000000) {
			dev_notice(file->port->dev,
				   "ib_umad_reg_agent2 failed: oui invalid 0x%08x\n",
				   ureq.oui);
			ret = -EINVAL;
			goto out;
		}
		req.oui[2] =  ureq.oui & 0x0000ff;
		req.oui[1] = (ureq.oui & 0x00ff00) >> 8;
		req.oui[0] = (ureq.oui & 0xff0000) >> 16;
		memcpy(req.method_mask, ureq.method_mask,
		       sizeof(req.method_mask));
	}

	agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
				      ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI,
				      ureq.mgmt_class ? &req : NULL,
				      ureq.rmpp_version,
				      send_handler, recv_handler, file,
				      ureq.flags);
	if (IS_ERR(agent)) {
		ret = PTR_ERR(agent);
		agent = NULL;
		goto out;
	}

	if (put_user(agent_id,
		     (u32 __user *)((char *)arg +
				    offsetof(struct ib_user_mad_reg_req2, id)))) {
		ret = -EFAULT;
		goto out;
	}

	if (!file->already_used) {
		file->already_used = 1;
		file->use_pkey_index = 1;
	}

	file->agent[agent_id] = agent;
	ret = 0;

out:
	mutex_unlock(&file->mutex);

	if (ret && agent)
		ib_unregister_mad_agent(agent);

	mutex_unlock(&file->port->file_mutex);

	return ret;
}


static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
{
	struct ib_mad_agent *agent = NULL;
	u32 id;
	int ret = 0;

	if (get_user(id, arg))
		return -EFAULT;

	mutex_lock(&file->port->file_mutex);
	mutex_lock(&file->mutex);

	if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
		ret = -EINVAL;
		goto out;
	}

	agent = file->agent[id];
	file->agent[id] = NULL;

out:
	mutex_unlock(&file->mutex);

	if (agent)
		ib_unregister_mad_agent(agent);

	mutex_unlock(&file->port->file_mutex);

	return ret;
}

static long ib_umad_enable_pkey(struct ib_umad_file *file)
{
	int ret = 0;

	mutex_lock(&file->mutex);
	if (file->already_used)
		ret = -EINVAL;
	else
		file->use_pkey_index = 1;
	mutex_unlock(&file->mutex);

	return ret;
}

static long ib_umad_ioctl(struct file *filp, unsigned int cmd,
			  unsigned long arg)
{
	switch (cmd) {
	case IB_USER_MAD_REGISTER_AGENT:
		return ib_umad_reg_agent(filp->private_data, (void __user *) arg, 0);
	case IB_USER_MAD_UNREGISTER_AGENT:
		return ib_umad_unreg_agent(filp->private_data, (__u32 __user *) arg);
	case IB_USER_MAD_ENABLE_PKEY:
		return ib_umad_enable_pkey(filp->private_data);
	case IB_USER_MAD_REGISTER_AGENT2:
		return ib_umad_reg_agent2(filp->private_data, (void __user *) arg);
	default:
		return -ENOIOCTLCMD;
	}
}

#ifdef CONFIG_COMPAT
static long ib_umad_compat_ioctl(struct file *filp, unsigned int cmd,
				 unsigned long arg)
{
	switch (cmd) {
	case IB_USER_MAD_REGISTER_AGENT:
		return ib_umad_reg_agent(filp->private_data, compat_ptr(arg), 1);
	case IB_USER_MAD_UNREGISTER_AGENT:
		return ib_umad_unreg_agent(filp->private_data, compat_ptr(arg));
	case IB_USER_MAD_ENABLE_PKEY:
		return ib_umad_enable_pkey(filp->private_data);
	case IB_USER_MAD_REGISTER_AGENT2:
		return ib_umad_reg_agent2(filp->private_data, compat_ptr(arg));
	default:
		return -ENOIOCTLCMD;
	}
}
#endif
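/*
 * The compat path above only really matters for IB_USER_MAD_REGISTER_AGENT:
 * the legacy struct ib_user_mad_reg_req carries method_mask as an array of
 * longs, so a 32-bit caller on a 64-bit kernel hands over twice as many
 * 32-bit words, which ib_umad_reg_agent() stitches back together when
 * compat_method_mask is set.  The other commands use fixed-width fields
 * and need no translation.
 */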
/*
 * ib_umad_open() does not need the BKL:
 *
 *  - the ib_umad_port structures are properly reference counted, and
 *    everything else is purely local to the file being created, so
 *    races against other open calls are not a problem;
 *  - the ioctl method does not affect any global state outside of the
 *    file structure being operated on;
 */
static int ib_umad_open(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port;
	struct ib_umad_file *file;
	int ret = -ENXIO;

	port = container_of(inode->i_cdev->si_drv1, struct ib_umad_port, cdev);

	mutex_lock(&port->file_mutex);

	if (!port->ib_dev)
		goto out;

	ret = -ENOMEM;
	file = kzalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		goto out;

	mutex_init(&file->mutex);
	spin_lock_init(&file->send_lock);
	INIT_LIST_HEAD(&file->recv_list);
	INIT_LIST_HEAD(&file->send_list);
	init_waitqueue_head(&file->recv_wait);

	file->port = port;
	filp->private_data = file;

	list_add_tail(&file->port_list, &port->file_list);

	ret = nonseekable_open(inode, filp);
	if (ret) {
		list_del(&file->port_list);
		kfree(file);
		goto out;
	}

	kobject_get(&port->umad_dev->kobj);

out:
	mutex_unlock(&port->file_mutex);
	return ret;
}

static int ib_umad_close(struct inode *inode, struct file *filp)
{
	struct ib_umad_file *file = filp->private_data;
	struct ib_umad_device *dev = file->port->umad_dev;
	struct ib_umad_packet *packet, *tmp;
	int already_dead;
	int i;

	mutex_lock(&file->port->file_mutex);
	mutex_lock(&file->mutex);

	already_dead = file->agents_dead;
	file->agents_dead = 1;

	list_for_each_entry_safe(packet, tmp, &file->recv_list, list) {
		if (packet->recv_wc)
			ib_free_recv_mad(packet->recv_wc);
		kfree(packet);
	}

	list_del(&file->port_list);

	mutex_unlock(&file->mutex);

	if (!already_dead)
		for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i)
			if (file->agent[i])
				ib_unregister_mad_agent(file->agent[i]);

	mutex_unlock(&file->port->file_mutex);

	kfree(file);
	kobject_put(&dev->kobj);

	return 0;
}

static const struct file_operations umad_fops = {
	.owner		= THIS_MODULE,
	.read		= ib_umad_read,
	.write		= ib_umad_write,
	.poll		= ib_umad_poll,
	.unlocked_ioctl	= ib_umad_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ib_umad_compat_ioctl,
#endif
	.open		= ib_umad_open,
	.release	= ib_umad_close,
	.llseek		= no_llseek,
};
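/*
 * The "issm%d" device implemented by the two handlers below carries no
 * MAD traffic of its own.  A subnet manager opens it once and holds the
 * descriptor for as long as it manages the port: the open sets
 * IB_PORT_SM, the close clears it again, and the sm_sem semaphore makes
 * sure only one process can claim a port at a time (an O_NONBLOCK open
 * fails with EAGAIN instead of sleeping).
 */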
static int ib_umad_sm_open(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port;
	struct ib_port_modify props = {
		.set_port_cap_mask = IB_PORT_SM
	};
	int ret;

	port = container_of(inode->i_cdev->si_drv1, struct ib_umad_port, sm_cdev);

	if (filp->f_flags & O_NONBLOCK) {
		if (down_trylock(&port->sm_sem)) {
			ret = -EAGAIN;
			goto fail;
		}
	} else {
		if (down_interruptible(&port->sm_sem)) {
			ret = -ERESTARTSYS;
			goto fail;
		}
	}

	ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
	if (ret)
		goto err_up_sem;

	filp->private_data = port;

	ret = nonseekable_open(inode, filp);
	if (ret)
		goto err_clr_sm_cap;

	kobject_get(&port->umad_dev->kobj);

	return 0;

err_clr_sm_cap:
	swap(props.set_port_cap_mask, props.clr_port_cap_mask);
	ib_modify_port(port->ib_dev, port->port_num, 0, &props);

err_up_sem:
	up(&port->sm_sem);

fail:
	return ret;
}

static int ib_umad_sm_close(struct inode *inode, struct file *filp)
{
	struct ib_umad_port *port = filp->private_data;
	struct ib_port_modify props = {
		.clr_port_cap_mask = IB_PORT_SM
	};
	int ret = 0;

	mutex_lock(&port->file_mutex);
	if (port->ib_dev)
		ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
	mutex_unlock(&port->file_mutex);

	up(&port->sm_sem);

	kobject_put(&port->umad_dev->kobj);

	return ret;
}

static const struct file_operations umad_sm_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ib_umad_sm_open,
	.release = ib_umad_sm_close,
	.llseek	 = no_llseek,
};

static struct ib_client umad_client = {
	.name   = "umad",
	.add    = ib_umad_add_one,
	.remove = ib_umad_remove_one
};

static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct ib_umad_port *port = dev_get_drvdata(dev);

	if (!port)
		return -ENODEV;

	return sprintf(buf, "%s\n", port->ib_dev->name);
}
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct ib_umad_port *port = dev_get_drvdata(dev);

	if (!port)
		return -ENODEV;

	return sprintf(buf, "%d\n", port->port_num);
}
static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

static CLASS_ATTR_STRING(abi_version, S_IRUGO,
			 __stringify(IB_USER_MAD_ABI_VERSION));

static dev_t overflow_maj;
static DECLARE_BITMAP(overflow_map, IB_UMAD_MAX_PORTS);
static int find_overflow_devnum(struct ib_device *device)
{
	int ret;

	if (!overflow_maj) {
		ret = alloc_chrdev_region(&overflow_maj, 0, IB_UMAD_MAX_PORTS * 2,
					  "infiniband_mad");
		if (ret) {
			dev_err(&device->dev,
				"couldn't register dynamic device number\n");
			return ret;
		}
	}

	ret = find_first_zero_bit(overflow_map, IB_UMAD_MAX_PORTS);
	if (ret >= IB_UMAD_MAX_PORTS)
		return -1;

	return ret;
}
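/*
 * Character device numbers are handed out from two pools.  The first
 * IB_UMAD_MAX_PORTS ports fit into the statically registered region at
 * base_dev (major IB_UMAD_MAJOR): the Nth allocated port (dev_num N)
 * gets base_dev + N for its umad device and base_dev + N +
 * IB_UMAD_MAX_PORTS for its issm device.  Any ports beyond that fall
 * back to the dynamically allocated overflow_maj region found above,
 * with the same umad/issm split.
 */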
static int ib_umad_init_port(struct ib_device *device, int port_num,
			     struct ib_umad_device *umad_dev,
			     struct ib_umad_port *port)
{
	int devnum;
	dev_t base;

	spin_lock(&port_lock);
	devnum = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS);
	if (devnum >= IB_UMAD_MAX_PORTS) {
		spin_unlock(&port_lock);
		devnum = find_overflow_devnum(device);
		if (devnum < 0)
			return -1;

		spin_lock(&port_lock);
		port->dev_num = devnum + IB_UMAD_MAX_PORTS;
		base = devnum + overflow_maj;
		set_bit(devnum, overflow_map);
	} else {
		port->dev_num = devnum;
		base = devnum + base_dev;
		set_bit(devnum, dev_map);
	}
	spin_unlock(&port_lock);

	port->ib_dev   = device;
	port->port_num = port_num;
	sema_init(&port->sm_sem, 1);
	mutex_init(&port->file_mutex);
	INIT_LIST_HEAD(&port->file_list);

	cdev_init(&port->cdev, &umad_fops);
	port->cdev.owner = THIS_MODULE;
	port->cdev.kobj.parent = &umad_dev->kobj;
	kobject_set_name(&port->cdev.kobj, "umad%d", port->dev_num);
	if (cdev_add(&port->cdev, base, 1))
		goto err_cdev;

	port->dev = device_create(umad_class, device->dma_device,
				  port->cdev.dev, port,
				  "umad%d", port->dev_num);
	if (IS_ERR(port->dev))
		goto err_cdev;

	if (device_create_file(port->dev, &dev_attr_ibdev))
		goto err_dev;
	if (device_create_file(port->dev, &dev_attr_port))
		goto err_dev;

	base += IB_UMAD_MAX_PORTS;
	cdev_init(&port->sm_cdev, &umad_sm_fops);
	port->sm_cdev.owner = THIS_MODULE;
	port->sm_cdev.kobj.parent = &umad_dev->kobj;
	kobject_set_name(&port->sm_cdev.kobj, "issm%d", port->dev_num);
	if (cdev_add(&port->sm_cdev, base, 1))
		goto err_sm_cdev;

	port->sm_dev = device_create(umad_class, device->dma_device,
				     port->sm_cdev.dev, port,
				     "issm%d", port->dev_num);
	if (IS_ERR(port->sm_dev))
		goto err_sm_cdev;

	if (device_create_file(port->sm_dev, &dev_attr_ibdev))
		goto err_sm_dev;
	if (device_create_file(port->sm_dev, &dev_attr_port))
		goto err_sm_dev;

	return 0;

err_sm_dev:
	device_destroy(umad_class, port->sm_cdev.dev);

err_sm_cdev:
	cdev_del(&port->sm_cdev);

err_dev:
	device_destroy(umad_class, port->cdev.dev);

err_cdev:
	cdev_del(&port->cdev);
	if (port->dev_num < IB_UMAD_MAX_PORTS)
		clear_bit(devnum, dev_map);
	else
		clear_bit(devnum, overflow_map);

	return -1;
}

static void ib_umad_kill_port(struct ib_umad_port *port)
{
	struct ib_umad_file *file;
	int id;

	dev_set_drvdata(port->dev,    NULL);
	dev_set_drvdata(port->sm_dev, NULL);

	device_destroy(umad_class, port->cdev.dev);
	device_destroy(umad_class, port->sm_cdev.dev);

	cdev_del(&port->cdev);
	cdev_del(&port->sm_cdev);

	mutex_lock(&port->file_mutex);

	port->ib_dev = NULL;

	list_for_each_entry(file, &port->file_list, port_list) {
		mutex_lock(&file->mutex);
		file->agents_dead = 1;
		mutex_unlock(&file->mutex);

		for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id)
			if (file->agent[id])
				ib_unregister_mad_agent(file->agent[id]);
	}

	mutex_unlock(&port->file_mutex);

	if (port->dev_num < IB_UMAD_MAX_PORTS)
		clear_bit(port->dev_num, dev_map);
	else
		clear_bit(port->dev_num - IB_UMAD_MAX_PORTS, overflow_map);
}

static void ib_umad_add_one(struct ib_device *device)
{
	struct ib_umad_device *umad_dev;
	int s, e, i;
	int count = 0;

	s = rdma_start_port(device);
	e = rdma_end_port(device);

	umad_dev = kzalloc(sizeof *umad_dev +
			   (e - s + 1) * sizeof (struct ib_umad_port),
			   GFP_KERNEL);
	if (!umad_dev)
		return;

	kobject_init(&umad_dev->kobj, &ib_umad_dev_ktype);

	for (i = s; i <= e; ++i) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		umad_dev->port[i - s].umad_dev = umad_dev;

		if (ib_umad_init_port(device, i, umad_dev,
				      &umad_dev->port[i - s]))
			goto err;

		count++;
	}

	if (!count)
		goto free;

	ib_set_client_data(device, &umad_client, umad_dev);

	return;

err:
	while (--i >= s) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		ib_umad_kill_port(&umad_dev->port[i - s]);
	}
free:
	kobject_put(&umad_dev->kobj);
}
static void ib_umad_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_umad_device *umad_dev = client_data;
	int i;

	if (!umad_dev)
		return;

	for (i = 0; i <= rdma_end_port(device) - rdma_start_port(device); ++i) {
		if (rdma_cap_ib_mad(device, i + rdma_start_port(device)))
			ib_umad_kill_port(&umad_dev->port[i]);
	}

	kobject_put(&umad_dev->kobj);
}

static char *umad_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}

static int __init ib_umad_init(void)
{
	int ret;

	ret = register_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2,
				     "infiniband_mad");
	if (ret) {
		pr_err("couldn't register device number\n");
		goto out;
	}

	umad_class = class_create(THIS_MODULE, "infiniband_mad");
	if (IS_ERR(umad_class)) {
		ret = PTR_ERR(umad_class);
		pr_err("couldn't create class infiniband_mad\n");
		goto out_chrdev;
	}

	umad_class->devnode = umad_devnode;

	ret = class_create_file(umad_class, &class_attr_abi_version.attr);
	if (ret) {
		pr_err("couldn't create abi_version attribute\n");
		goto out_class;
	}

	ret = ib_register_client(&umad_client);
	if (ret) {
		pr_err("couldn't register ib_umad client\n");
		goto out_class;
	}

	return 0;

out_class:
	class_destroy(umad_class);

out_chrdev:
	unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);

out:
	return ret;
}

static void __exit ib_umad_cleanup(void)
{
	ib_unregister_client(&umad_client);
	class_destroy(umad_class);
	unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);
	if (overflow_maj)
		unregister_chrdev_region(overflow_maj, IB_UMAD_MAX_PORTS * 2);
}

module_init_order(ib_umad_init, SI_ORDER_THIRD);
module_exit(ib_umad_cleanup);