1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0 3 * 4 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 5 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved. 6 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 7 * 8 * This software is available to you under a choice of one of two 9 * licenses. You may choose to be licensed under the terms of the GNU 10 * General Public License (GPL) Version 2, available from the file 11 * COPYING in the main directory of this source tree, or the 12 * OpenIB.org BSD license below: 13 * 14 * Redistribution and use in source and binary forms, with or 15 * without modification, are permitted provided that the following 16 * conditions are met: 17 * 18 * - Redistributions of source code must retain the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer. 21 * 22 * - Redistributions in binary form must reproduce the above 23 * copyright notice, this list of conditions and the following 24 * disclaimer in the documentation and/or other materials 25 * provided with the distribution. 26 * 27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 34 * SOFTWARE. 
35 */ 36 37 #include <sys/cdefs.h> 38 #include "core_priv.h" 39 40 #include <linux/slab.h> 41 #include <linux/string.h> 42 #include <linux/netdevice.h> 43 #include <linux/fs.h> 44 #include <linux/printk.h> 45 46 #include <rdma/ib_addr.h> 47 #include <rdma/ib_mad.h> 48 #include <rdma/ib_pma.h> 49 50 struct ib_port; 51 52 struct gid_attr_group { 53 struct ib_port *port; 54 struct kobject kobj; 55 struct attribute_group ndev; 56 struct attribute_group type; 57 }; 58 struct ib_port { 59 struct kobject kobj; 60 struct ib_device *ibdev; 61 struct gid_attr_group *gid_attr_group; 62 struct attribute_group gid_group; 63 struct attribute_group pkey_group; 64 struct attribute_group *pma_table; 65 struct attribute_group *hw_stats_ag; 66 struct rdma_hw_stats *hw_stats; 67 u8 port_num; 68 }; 69 70 struct port_attribute { 71 struct attribute attr; 72 ssize_t (*show)(struct ib_port *, struct port_attribute *, char *buf); 73 ssize_t (*store)(struct ib_port *, struct port_attribute *, 74 const char *buf, size_t count); 75 }; 76 77 #define PORT_ATTR(_name, _mode, _show, _store) \ 78 struct port_attribute port_attr_##_name = __ATTR(_name, _mode, _show, _store) 79 80 #define PORT_ATTR_RO(_name) \ 81 struct port_attribute port_attr_##_name = __ATTR_RO(_name) 82 83 struct port_table_attribute { 84 struct port_attribute attr; 85 char name[8]; 86 int index; 87 __be16 attr_id; 88 }; 89 90 struct hw_stats_attribute { 91 struct attribute attr; 92 ssize_t (*show)(struct kobject *kobj, 93 struct attribute *attr, char *buf); 94 ssize_t (*store)(struct kobject *kobj, 95 struct attribute *attr, 96 const char *buf, 97 size_t count); 98 int index; 99 u8 port_num; 100 }; 101 102 static ssize_t port_attr_show(struct kobject *kobj, 103 struct attribute *attr, char *buf) 104 { 105 struct port_attribute *port_attr = 106 container_of(attr, struct port_attribute, attr); 107 struct ib_port *p = container_of(kobj, struct ib_port, kobj); 108 109 if (!port_attr->show) 110 return -EIO; 111 112 return 
port_attr->show(p, port_attr, buf); 113 } 114 115 static ssize_t port_attr_store(struct kobject *kobj, 116 struct attribute *attr, 117 const char *buf, size_t count) 118 { 119 struct port_attribute *port_attr = 120 container_of(attr, struct port_attribute, attr); 121 struct ib_port *p = container_of(kobj, struct ib_port, kobj); 122 123 if (!port_attr->store) 124 return -EIO; 125 return port_attr->store(p, port_attr, buf, count); 126 } 127 128 static const struct sysfs_ops port_sysfs_ops = { 129 .show = port_attr_show, 130 .store = port_attr_store 131 }; 132 133 static ssize_t gid_attr_show(struct kobject *kobj, 134 struct attribute *attr, char *buf) 135 { 136 struct port_attribute *port_attr = 137 container_of(attr, struct port_attribute, attr); 138 struct ib_port *p = container_of(kobj, struct gid_attr_group, 139 kobj)->port; 140 141 if (!port_attr->show) 142 return -EIO; 143 144 return port_attr->show(p, port_attr, buf); 145 } 146 147 static const struct sysfs_ops gid_attr_sysfs_ops = { 148 .show = gid_attr_show 149 }; 150 151 static ssize_t state_show(struct ib_port *p, struct port_attribute *unused, 152 char *buf) 153 { 154 struct ib_port_attr attr; 155 ssize_t ret; 156 157 static const char *state_name[] = { 158 [IB_PORT_NOP] = "NOP", 159 [IB_PORT_DOWN] = "DOWN", 160 [IB_PORT_INIT] = "INIT", 161 [IB_PORT_ARMED] = "ARMED", 162 [IB_PORT_ACTIVE] = "ACTIVE", 163 [IB_PORT_ACTIVE_DEFER] = "ACTIVE_DEFER" 164 }; 165 166 ret = ib_query_port(p->ibdev, p->port_num, &attr); 167 if (ret) 168 return ret; 169 170 return sprintf(buf, "%d: %s\n", attr.state, 171 attr.state >= 0 && attr.state < ARRAY_SIZE(state_name) ? 
172 state_name[attr.state] : "UNKNOWN"); 173 } 174 175 static ssize_t lid_show(struct ib_port *p, struct port_attribute *unused, 176 char *buf) 177 { 178 struct ib_port_attr attr; 179 ssize_t ret; 180 181 ret = ib_query_port(p->ibdev, p->port_num, &attr); 182 if (ret) 183 return ret; 184 185 return sprintf(buf, "0x%x\n", attr.lid); 186 } 187 188 static ssize_t lid_mask_count_show(struct ib_port *p, 189 struct port_attribute *unused, 190 char *buf) 191 { 192 struct ib_port_attr attr; 193 ssize_t ret; 194 195 ret = ib_query_port(p->ibdev, p->port_num, &attr); 196 if (ret) 197 return ret; 198 199 return sprintf(buf, "%d\n", attr.lmc); 200 } 201 202 static ssize_t sm_lid_show(struct ib_port *p, struct port_attribute *unused, 203 char *buf) 204 { 205 struct ib_port_attr attr; 206 ssize_t ret; 207 208 ret = ib_query_port(p->ibdev, p->port_num, &attr); 209 if (ret) 210 return ret; 211 212 return sprintf(buf, "0x%x\n", attr.sm_lid); 213 } 214 215 static ssize_t sm_sl_show(struct ib_port *p, struct port_attribute *unused, 216 char *buf) 217 { 218 struct ib_port_attr attr; 219 ssize_t ret; 220 221 ret = ib_query_port(p->ibdev, p->port_num, &attr); 222 if (ret) 223 return ret; 224 225 return sprintf(buf, "%d\n", attr.sm_sl); 226 } 227 228 static ssize_t cap_mask_show(struct ib_port *p, struct port_attribute *unused, 229 char *buf) 230 { 231 struct ib_port_attr attr; 232 ssize_t ret; 233 234 ret = ib_query_port(p->ibdev, p->port_num, &attr); 235 if (ret) 236 return ret; 237 238 return sprintf(buf, "0x%08x\n", attr.port_cap_flags); 239 } 240 241 static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused, 242 char *buf) 243 { 244 struct ib_port_attr attr; 245 char *speed = ""; 246 int rate; /* in deci-Gb/sec */ 247 ssize_t ret; 248 249 ret = ib_query_port(p->ibdev, p->port_num, &attr); 250 if (ret) 251 return ret; 252 253 switch (attr.active_speed) { 254 case IB_SPEED_DDR: 255 speed = " DDR"; 256 rate = 50; 257 break; 258 case IB_SPEED_QDR: 259 speed = " QDR"; 260 
rate = 100; 261 break; 262 case IB_SPEED_FDR10: 263 speed = " FDR10"; 264 rate = 100; 265 break; 266 case IB_SPEED_FDR: 267 speed = " FDR"; 268 rate = 140; 269 break; 270 case IB_SPEED_EDR: 271 speed = " EDR"; 272 rate = 250; 273 break; 274 case IB_SPEED_HDR: 275 speed = " HDR"; 276 rate = 500; 277 break; 278 case IB_SPEED_NDR: 279 speed = " NDR"; 280 rate = 1000; 281 break; 282 case IB_SPEED_SDR: 283 default: /* default to SDR for invalid rates */ 284 speed = " SDR"; 285 rate = 25; 286 break; 287 } 288 289 rate *= ib_width_enum_to_int(attr.active_width); 290 if (rate < 0) 291 return -EINVAL; 292 293 return sprintf(buf, "%d%s Gb/sec (%dX%s)\n", 294 rate / 10, rate % 10 ? ".5" : "", 295 ib_width_enum_to_int(attr.active_width), speed); 296 } 297 298 static const char *phys_state_to_str(enum ib_port_phys_state phys_state) 299 { 300 static const char * phys_state_str[] = { 301 "<unknown>", 302 "Sleep", 303 "Polling", 304 "Disabled", 305 "PortConfigurationTraining", 306 "LinkUp", 307 "LinkErrorRecovery", 308 "Phy Test", 309 }; 310 311 if (phys_state < ARRAY_SIZE(phys_state_str)) 312 return phys_state_str[phys_state]; 313 return "<unknown>"; 314 } 315 316 static ssize_t phys_state_show(struct ib_port *p, struct port_attribute *unused, 317 char *buf) 318 { 319 struct ib_port_attr attr; 320 321 ssize_t ret; 322 323 ret = ib_query_port(p->ibdev, p->port_num, &attr); 324 if (ret) 325 return ret; 326 327 return sprintf(buf, "%d: %s\n", attr.phys_state, 328 phys_state_to_str(attr.phys_state)); 329 } 330 331 static ssize_t link_layer_show(struct ib_port *p, struct port_attribute *unused, 332 char *buf) 333 { 334 switch (rdma_port_get_link_layer(p->ibdev, p->port_num)) { 335 case IB_LINK_LAYER_INFINIBAND: 336 return sprintf(buf, "%s\n", "InfiniBand"); 337 case IB_LINK_LAYER_ETHERNET: 338 return sprintf(buf, "%s\n", "Ethernet"); 339 default: 340 return sprintf(buf, "%s\n", "Unknown"); 341 } 342 } 343 344 static PORT_ATTR_RO(state); 345 static PORT_ATTR_RO(lid); 346 static 
PORT_ATTR_RO(lid_mask_count); 347 static PORT_ATTR_RO(sm_lid); 348 static PORT_ATTR_RO(sm_sl); 349 static PORT_ATTR_RO(cap_mask); 350 static PORT_ATTR_RO(rate); 351 static PORT_ATTR_RO(phys_state); 352 static PORT_ATTR_RO(link_layer); 353 354 static struct attribute *port_default_attrs[] = { 355 &port_attr_state.attr, 356 &port_attr_lid.attr, 357 &port_attr_lid_mask_count.attr, 358 &port_attr_sm_lid.attr, 359 &port_attr_sm_sl.attr, 360 &port_attr_cap_mask.attr, 361 &port_attr_rate.attr, 362 &port_attr_phys_state.attr, 363 &port_attr_link_layer.attr, 364 NULL 365 }; 366 367 static size_t print_ndev(struct ib_gid_attr *gid_attr, char *buf) 368 { 369 if (!gid_attr->ndev) 370 return -EINVAL; 371 372 return sprintf(buf, "%s\n", if_name(gid_attr->ndev)); 373 } 374 375 static size_t print_gid_type(struct ib_gid_attr *gid_attr, char *buf) 376 { 377 return sprintf(buf, "%s\n", ib_cache_gid_type_str(gid_attr->gid_type)); 378 } 379 380 static ssize_t _show_port_gid_attr(struct ib_port *p, 381 struct port_attribute *attr, 382 char *buf, 383 size_t (*print)(struct ib_gid_attr *gid_attr, 384 char *buf)) 385 { 386 struct port_table_attribute *tab_attr = 387 container_of(attr, struct port_table_attribute, attr); 388 union ib_gid gid; 389 struct ib_gid_attr gid_attr = {}; 390 ssize_t ret; 391 392 ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid, 393 &gid_attr); 394 if (ret) 395 goto err; 396 397 ret = print(&gid_attr, buf); 398 399 err: 400 if (gid_attr.ndev) 401 dev_put(gid_attr.ndev); 402 return ret; 403 } 404 405 static ssize_t show_port_gid(struct ib_port *p, struct port_attribute *attr, 406 char *buf) 407 { 408 struct port_table_attribute *tab_attr = 409 container_of(attr, struct port_table_attribute, attr); 410 union ib_gid gid; 411 ssize_t ret; 412 413 ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid, NULL); 414 if (ret) 415 return ret; 416 417 return sprintf(buf, GID_PRINT_FMT"\n", GID_PRINT_ARGS(gid.raw)); 418 } 419 420 static ssize_t 
show_port_gid_attr_ndev(struct ib_port *p, 421 struct port_attribute *attr, char *buf) 422 { 423 return _show_port_gid_attr(p, attr, buf, print_ndev); 424 } 425 426 static ssize_t show_port_gid_attr_gid_type(struct ib_port *p, 427 struct port_attribute *attr, 428 char *buf) 429 { 430 return _show_port_gid_attr(p, attr, buf, print_gid_type); 431 } 432 433 static ssize_t show_port_pkey(struct ib_port *p, struct port_attribute *attr, 434 char *buf) 435 { 436 struct port_table_attribute *tab_attr = 437 container_of(attr, struct port_table_attribute, attr); 438 u16 pkey; 439 ssize_t ret; 440 441 ret = ib_query_pkey(p->ibdev, p->port_num, tab_attr->index, &pkey); 442 if (ret) 443 return ret; 444 445 return sprintf(buf, "0x%04x\n", pkey); 446 } 447 448 #define PORT_PMA_ATTR(_name, _counter, _width, _offset) \ 449 struct port_table_attribute port_pma_attr_##_name = { \ 450 .attr = __ATTR(_name, S_IRUGO, show_pma_counter, NULL), \ 451 .index = (_offset) | ((_width) << 16) | ((_counter) << 24), \ 452 .attr_id = IB_PMA_PORT_COUNTERS , \ 453 } 454 455 #define PORT_PMA_ATTR_EXT(_name, _width, _offset) \ 456 struct port_table_attribute port_pma_attr_ext_##_name = { \ 457 .attr = __ATTR(_name, S_IRUGO, show_pma_counter, NULL), \ 458 .index = (_offset) | ((_width) << 16), \ 459 .attr_id = IB_PMA_PORT_COUNTERS_EXT , \ 460 } 461 462 /* 463 * Get a Perfmgmt MAD block of data. 464 * Returns error code or the number of bytes retrieved. 
 */
static int get_perf_mad(struct ib_device *dev, int port_num, __be16 attr,
			void *data, int offset, size_t size)
{
	struct ib_mad *in_mad;
	struct ib_mad *out_mad;
	size_t mad_size = sizeof(*out_mad);
	u16 out_mad_pkey_index = 0;
	ssize_t ret;

	/* Device must implement MAD processing to answer PMA queries. */
	if (!dev->process_mad)
		return -ENOSYS;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad) {
		/* kfree(NULL) below is a no-op, so partial alloc is fine. */
		ret = -ENOMEM;
		goto out;
	}

	/* Build a PerfMgmt class GET request for the requested attribute. */
	in_mad->mad_hdr.base_version  = 1;
	in_mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_PERF_MGMT;
	in_mad->mad_hdr.class_version = 1;
	in_mad->mad_hdr.method        = IB_MGMT_METHOD_GET;
	in_mad->mad_hdr.attr_id       = attr;

	if (attr != IB_PMA_CLASS_PORT_INFO)
		in_mad->data[41] = port_num;	/* PortSelect field */

	/* Require both SUCCESS and REPLY bits from the driver's handler. */
	if ((dev->process_mad(dev, IB_MAD_IGNORE_MKEY,
		 port_num, NULL, NULL,
		 (const struct ib_mad_hdr *)in_mad, mad_size,
		 (struct ib_mad_hdr *)out_mad, &mad_size,
		 &out_mad_pkey_index) &
	     (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) !=
	    (IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY)) {
		ret = -EINVAL;
		goto out;
	}
	memcpy(data, out_mad->data + offset, size);
	ret = size;
out:
	kfree(in_mad);
	kfree(out_mad);
	return ret;
}

/*
 * Show one PMA counter.  The attribute's "index" field packs the
 * counter's bit offset (low 16 bits) and bit width (next 8 bits)
 * within the PortCounters / PortCountersExt MAD payload; the payload
 * itself starts at byte 40 of the MAD data.
 */
static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
				char *buf)
{
	struct port_table_attribute *tab_attr =
		container_of(attr, struct port_table_attribute, attr);
	int offset = tab_attr->index & 0xffff;
	int width  = (tab_attr->index >> 16) & 0xff;
	ssize_t ret;
	u8 data[8];

	/* Fetch the 8 bytes containing the counter, byte-aligned. */
	ret = get_perf_mad(p->ibdev, p->port_num, tab_attr->attr_id, &data,
			40 + offset / 8, sizeof(data));
	if (ret < 0)
		return ret;

	switch (width) {
	case 4:
		/* Nibble counter: offset % 8 is 0 or 4 within the byte. */
		ret = sprintf(buf, "%u\n", (*data >>
					    (4 - (offset % 8))) & 0xf);
		break;
	case 8:
		ret = sprintf(buf, "%u\n", *data);
		break;
	case 16:
		ret = sprintf(buf, "%u\n",
			      be16_to_cpup((__be16 *)data));
		break;
	case 32:
		ret = sprintf(buf, "%u\n",
			      be32_to_cpup((__be32 *)data));
		break;
	case 64:
		ret = sprintf(buf, "%llu\n",
				(unsigned long long)be64_to_cpup((__be64 *)data));
		break;

	default:
		/* Unknown width: print nothing rather than garbage. */
		ret = 0;
	}

	return ret;
}

/* PortCounters attribute layout: (name, counter id, width, bit offset). */
static PORT_PMA_ATTR(symbol_error		    ,  0, 16,  32);
static PORT_PMA_ATTR(link_error_recovery	    ,  1,  8,  48);
static PORT_PMA_ATTR(link_downed		    ,  2,  8,  56);
static PORT_PMA_ATTR(port_rcv_errors		    ,  3, 16,  64);
static PORT_PMA_ATTR(port_rcv_remote_physical_errors,  4, 16,  80);
static PORT_PMA_ATTR(port_rcv_switch_relay_errors   ,  5, 16,  96);
static PORT_PMA_ATTR(port_xmit_discards		    ,  6, 16, 112);
static PORT_PMA_ATTR(port_xmit_constraint_errors    ,  7,  8, 128);
static PORT_PMA_ATTR(port_rcv_constraint_errors	    ,  8,  8, 136);
static PORT_PMA_ATTR(local_link_integrity_errors    ,  9,  4, 152);
static PORT_PMA_ATTR(excessive_buffer_overrun_errors, 10,  4, 156);
static PORT_PMA_ATTR(VL15_dropped		    , 11, 16, 176);
static PORT_PMA_ATTR(port_xmit_data		    , 12, 32, 192);
static PORT_PMA_ATTR(port_rcv_data		    , 13, 32, 224);
static PORT_PMA_ATTR(port_xmit_packets		    , 14, 32, 256);
static PORT_PMA_ATTR(port_rcv_packets		    , 15, 32, 288);
static PORT_PMA_ATTR(port_xmit_wait		    ,  0, 32, 320);

/*
 * Counters added by extended set
 */
static PORT_PMA_ATTR_EXT(port_xmit_data		    , 64,  64);
static PORT_PMA_ATTR_EXT(port_rcv_data		    , 64, 128);
static PORT_PMA_ATTR_EXT(port_xmit_packets	    , 64, 192);
static PORT_PMA_ATTR_EXT(port_rcv_packets	    , 64, 256);
static PORT_PMA_ATTR_EXT(unicast_xmit_packets	    , 64, 320);
static PORT_PMA_ATTR_EXT(unicast_rcv_packets	    , 64, 384);
static PORT_PMA_ATTR_EXT(multicast_xmit_packets	    , 64, 448);
static PORT_PMA_ATTR_EXT(multicast_rcv_packets	    , 64, 512);

/* Basic PortCounters set, used when no extended counters exist. */
static struct attribute *pma_attrs[] = {
	&port_pma_attr_symbol_error.attr.attr,
	&port_pma_attr_link_error_recovery.attr.attr,
	&port_pma_attr_link_downed.attr.attr,
	&port_pma_attr_port_rcv_errors.attr.attr,
	&port_pma_attr_port_rcv_remote_physical_errors.attr.attr,
	&port_pma_attr_port_rcv_switch_relay_errors.attr.attr,
	&port_pma_attr_port_xmit_discards.attr.attr,
	&port_pma_attr_port_xmit_constraint_errors.attr.attr,
	&port_pma_attr_port_rcv_constraint_errors.attr.attr,
	&port_pma_attr_local_link_integrity_errors.attr.attr,
	&port_pma_attr_excessive_buffer_overrun_errors.attr.attr,
	&port_pma_attr_VL15_dropped.attr.attr,
	&port_pma_attr_port_xmit_data.attr.attr,
	&port_pma_attr_port_rcv_data.attr.attr,
	&port_pma_attr_port_xmit_packets.attr.attr,
	&port_pma_attr_port_rcv_packets.attr.attr,
	&port_pma_attr_port_xmit_wait.attr.attr,
	NULL
};

/* Full extended set: 64-bit data/packet counters replace the 32-bit ones. */
static struct attribute *pma_attrs_ext[] = {
	&port_pma_attr_symbol_error.attr.attr,
	&port_pma_attr_link_error_recovery.attr.attr,
	&port_pma_attr_link_downed.attr.attr,
	&port_pma_attr_port_rcv_errors.attr.attr,
	&port_pma_attr_port_rcv_remote_physical_errors.attr.attr,
	&port_pma_attr_port_rcv_switch_relay_errors.attr.attr,
	&port_pma_attr_port_xmit_discards.attr.attr,
	&port_pma_attr_port_xmit_constraint_errors.attr.attr,
	&port_pma_attr_port_rcv_constraint_errors.attr.attr,
	&port_pma_attr_local_link_integrity_errors.attr.attr,
	&port_pma_attr_excessive_buffer_overrun_errors.attr.attr,
	&port_pma_attr_VL15_dropped.attr.attr,
	&port_pma_attr_ext_port_xmit_data.attr.attr,
	&port_pma_attr_ext_port_rcv_data.attr.attr,
	&port_pma_attr_ext_port_xmit_packets.attr.attr,
	&port_pma_attr_port_xmit_wait.attr.attr,
	&port_pma_attr_ext_port_rcv_packets.attr.attr,
	&port_pma_attr_ext_unicast_rcv_packets.attr.attr,
	&port_pma_attr_ext_unicast_xmit_packets.attr.attr,
	&port_pma_attr_ext_multicast_rcv_packets.attr.attr,
	&port_pma_attr_ext_multicast_xmit_packets.attr.attr,
	NULL
};

static struct attribute *pma_attrs_noietf[] = { 632 &port_pma_attr_symbol_error.attr.attr, 633 &port_pma_attr_link_error_recovery.attr.attr, 634 &port_pma_attr_link_downed.attr.attr, 635 &port_pma_attr_port_rcv_errors.attr.attr, 636 &port_pma_attr_port_rcv_remote_physical_errors.attr.attr, 637 &port_pma_attr_port_rcv_switch_relay_errors.attr.attr, 638 &port_pma_attr_port_xmit_discards.attr.attr, 639 &port_pma_attr_port_xmit_constraint_errors.attr.attr, 640 &port_pma_attr_port_rcv_constraint_errors.attr.attr, 641 &port_pma_attr_local_link_integrity_errors.attr.attr, 642 &port_pma_attr_excessive_buffer_overrun_errors.attr.attr, 643 &port_pma_attr_VL15_dropped.attr.attr, 644 &port_pma_attr_ext_port_xmit_data.attr.attr, 645 &port_pma_attr_ext_port_rcv_data.attr.attr, 646 &port_pma_attr_ext_port_xmit_packets.attr.attr, 647 &port_pma_attr_ext_port_rcv_packets.attr.attr, 648 &port_pma_attr_port_xmit_wait.attr.attr, 649 NULL 650 }; 651 652 static struct attribute_group pma_group = { 653 .name = "counters", 654 .attrs = pma_attrs 655 }; 656 657 static struct attribute_group pma_group_ext = { 658 .name = "counters", 659 .attrs = pma_attrs_ext 660 }; 661 662 static struct attribute_group pma_group_noietf = { 663 .name = "counters", 664 .attrs = pma_attrs_noietf 665 }; 666 667 static void ib_port_release(struct kobject *kobj) 668 { 669 struct ib_port *p = container_of(kobj, struct ib_port, kobj); 670 struct attribute *a; 671 int i; 672 673 if (p->gid_group.attrs) { 674 for (i = 0; (a = p->gid_group.attrs[i]); ++i) 675 kfree(a); 676 677 kfree(p->gid_group.attrs); 678 } 679 680 if (p->pkey_group.attrs) { 681 for (i = 0; (a = p->pkey_group.attrs[i]); ++i) 682 kfree(a); 683 684 kfree(p->pkey_group.attrs); 685 } 686 687 kfree(p); 688 } 689 690 static void ib_port_gid_attr_release(struct kobject *kobj) 691 { 692 struct gid_attr_group *g = container_of(kobj, struct gid_attr_group, 693 kobj); 694 struct attribute *a; 695 int i; 696 697 if (g->ndev.attrs) { 698 for (i = 0; (a = 
g->ndev.attrs[i]); ++i) 699 kfree(a); 700 701 kfree(g->ndev.attrs); 702 } 703 704 if (g->type.attrs) { 705 for (i = 0; (a = g->type.attrs[i]); ++i) 706 kfree(a); 707 708 kfree(g->type.attrs); 709 } 710 711 kfree(g); 712 } 713 714 static struct kobj_type port_type = { 715 .release = ib_port_release, 716 .sysfs_ops = &port_sysfs_ops, 717 .default_attrs = port_default_attrs 718 }; 719 720 static struct kobj_type gid_attr_type = { 721 .sysfs_ops = &gid_attr_sysfs_ops, 722 .release = ib_port_gid_attr_release 723 }; 724 725 static struct attribute ** 726 alloc_group_attrs(ssize_t (*show)(struct ib_port *, 727 struct port_attribute *, char *buf), 728 int len) 729 { 730 struct attribute **tab_attr; 731 struct port_table_attribute *element; 732 int i; 733 734 tab_attr = kcalloc(1 + len, sizeof(struct attribute *), GFP_KERNEL); 735 if (!tab_attr) 736 return NULL; 737 738 for (i = 0; i < len; i++) { 739 element = kzalloc(sizeof(struct port_table_attribute), 740 GFP_KERNEL); 741 if (!element) 742 goto err; 743 744 if (snprintf(element->name, sizeof(element->name), 745 "%d", i) >= sizeof(element->name)) { 746 kfree(element); 747 goto err; 748 } 749 750 element->attr.attr.name = element->name; 751 element->attr.attr.mode = S_IRUGO; 752 element->attr.show = show; 753 element->index = i; 754 sysfs_attr_init(&element->attr.attr); 755 756 tab_attr[i] = &element->attr.attr; 757 } 758 759 return tab_attr; 760 761 err: 762 while (--i >= 0) 763 kfree(tab_attr[i]); 764 kfree(tab_attr); 765 return NULL; 766 } 767 768 /* 769 * Figure out which counter table to use depending on 770 * the device capabilities. 
 */
static struct attribute_group *get_counter_table(struct ib_device *dev,
						 int port_num)
{
	struct ib_class_port_info cpi;

	/* Query ClassPortInfo to learn which counter widths are supported. */
	if (get_perf_mad(dev, port_num, IB_PMA_CLASS_PORT_INFO,
			 &cpi, 40, sizeof(cpi)) >= 0) {
		if (cpi.capability_mask & IB_PMA_CLASS_CAP_EXT_WIDTH)
			/* We have extended counters */
			return &pma_group_ext;

		if (cpi.capability_mask & IB_PMA_CLASS_CAP_EXT_WIDTH_NOIETF)
			/* But not the IETF ones */
			return &pma_group_noietf;
	}

	/* Fall back to normal counters */
	return &pma_group;
}

/*
 * Refresh the cached hardware counters if the cached snapshot is older
 * than the configured lifespan.  Caller must hold stats->lock.
 */
static int update_hw_stats(struct ib_device *dev, struct rdma_hw_stats *stats,
			   u8 port_num, int index)
{
	int ret;

	/* Cache still fresh: nothing to do. */
	if (time_is_after_eq_jiffies(stats->timestamp + stats->lifespan))
		return 0;
	ret = dev->get_hw_stats(dev, stats, port_num, index);
	if (ret < 0)
		return ret;
	/* Only a full refresh updates the snapshot timestamp. */
	if (ret == stats->num_counters)
		stats->timestamp = jiffies;

	return 0;
}

/* Format a single cached counter value. */
static ssize_t print_hw_stat(struct rdma_hw_stats *stats, int index, char *buf)
{
	return sprintf(buf, "%llu\n", (unsigned long long)stats->value[index]);
}

/*
 * Show one hardware counter.  port_num == 0 in the attribute means the
 * attribute hangs off the device kobject; otherwise off an ib_port.
 */
static ssize_t show_hw_stats(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct ib_device *dev;
	struct ib_port *port;
	struct hw_stats_attribute *hsa;
	struct rdma_hw_stats *stats;
	int ret;

	hsa = container_of(attr, struct hw_stats_attribute, attr);
	if (!hsa->port_num) {
		dev = container_of((struct device *)kobj,
				   struct ib_device, dev);
		stats = dev->hw_stats;
	} else {
		port = container_of(kobj, struct ib_port, kobj);
		dev = port->ibdev;
		stats = port->hw_stats;
	}
	mutex_lock(&stats->lock);
	ret = update_hw_stats(dev, stats, hsa->port_num, hsa->index);
	if (ret)
		goto unlock;
	ret = print_hw_stat(stats, hsa->index, buf);
unlock:
	mutex_unlock(&stats->lock);

	return ret;
}

/* Show the counter-cache lifespan in milliseconds. */
static ssize_t show_stats_lifespan(struct kobject *kobj,
				   struct attribute *attr,
				   char *buf)
{
	struct hw_stats_attribute *hsa;
	struct rdma_hw_stats *stats;
	int msecs;

	hsa = container_of(attr, struct hw_stats_attribute, attr);
	if (!hsa->port_num) {
		struct ib_device *dev = container_of((struct device *)kobj,
						     struct ib_device, dev);

		stats = dev->hw_stats;
	} else {
		struct ib_port *p = container_of(kobj, struct ib_port, kobj);

		stats = p->hw_stats;
	}

	mutex_lock(&stats->lock);
	msecs = jiffies_to_msecs(stats->lifespan);
	mutex_unlock(&stats->lock);

	return sprintf(buf, "%d\n", msecs);
}

/*
 * Set the counter-cache lifespan from a decimal millisecond value,
 * clamped to [0, 10000] ms.
 */
static ssize_t set_stats_lifespan(struct kobject *kobj,
				  struct attribute *attr,
				  const char *buf, size_t count)
{
	struct hw_stats_attribute *hsa;
	struct rdma_hw_stats *stats;
	int msecs;
	/* NOTE(review): this local shadows the kernel-global `jiffies`
	 * used elsewhere in this file — consider renaming. */
	int jiffies;
	int ret;

	ret = kstrtoint(buf, 10, &msecs);
	if (ret)
		return ret;
	if (msecs < 0 || msecs > 10000)
		return -EINVAL;
	jiffies = msecs_to_jiffies(msecs);
	hsa = container_of(attr, struct hw_stats_attribute, attr);
	if (!hsa->port_num) {
		struct ib_device *dev = container_of((struct device *)kobj,
						     struct ib_device, dev);

		stats = dev->hw_stats;
	} else {
		struct ib_port *p = container_of(kobj, struct ib_port, kobj);

		stats = p->hw_stats;
	}

	mutex_lock(&stats->lock);
	stats->lifespan = jiffies;
	mutex_unlock(&stats->lock);

	return count;
}

/* Remove a hw_counters group from sysfs and free its attributes. */
static void free_hsag(struct kobject *kobj, struct attribute_group *attr_group)
{
	struct attribute **attr;

	sysfs_remove_group(kobj, attr_group);

	for (attr = attr_group->attrs; *attr; attr++)
		kfree(*attr);
	kfree(attr_group);
}

/* Allocate one read-only counter attribute bound to show_hw_stats. */
static struct attribute *alloc_hsa(int index, u8 port_num, const char *name)
{
	struct hw_stats_attribute *hsa;

	hsa = kmalloc(sizeof(*hsa), GFP_KERNEL);
	if (!hsa)
		return NULL;

	hsa->attr.name = __DECONST(char *, name);
	hsa->attr.mode = S_IRUGO;
	hsa->show = show_hw_stats;
	hsa->store = NULL;
	hsa->index = index;
	hsa->port_num = port_num;

	return &hsa->attr;
}

/* Allocate the read-write "lifespan" control attribute. */
static struct attribute *alloc_hsa_lifespan(char *name, u8 port_num)
{
	struct hw_stats_attribute *hsa;

	hsa = kmalloc(sizeof(*hsa), GFP_KERNEL);
	if (!hsa)
		return NULL;

	hsa->attr.name = name;
	hsa->attr.mode = S_IWUSR | S_IRUGO;
	hsa->show = show_stats_lifespan;
	hsa->store = set_stats_lifespan;
	hsa->index = 0;
	hsa->port_num = port_num;

	return &hsa->attr;
}

/*
 * Create the "hw_counters" sysfs group for a port (port != NULL) or for
 * the whole device (port == NULL).  Failures are silently ignored: the
 * device simply ends up without hardware counters in sysfs.
 */
static void setup_hw_stats(struct ib_device *device, struct ib_port *port,
			   u8 port_num)
{
	struct attribute_group *hsag;
	struct rdma_hw_stats *stats;
	int i, ret;

	stats = device->alloc_hw_stats(device, port_num);

	if (!stats)
		return;

	if (!stats->names || stats->num_counters <= 0)
		goto err_free_stats;

	/*
	 * Two extra attribute elements here, one for the lifespan entry and
	 * one to NULL terminate the list for the sysfs core code
	 */
	hsag = kzalloc(sizeof(*hsag) +
		       sizeof(void *) * (stats->num_counters + 2),
		       GFP_KERNEL);
	if (!hsag)
		goto err_free_stats;

	/* Prime the cache with an initial full read of the counters. */
	ret = device->get_hw_stats(device, stats, port_num,
				   stats->num_counters);
	if (ret != stats->num_counters)
		goto err_free_hsag;

	stats->timestamp = jiffies;

	hsag->name = "hw_counters";
	/* The attrs vector lives in the tail of the same allocation. */
	hsag->attrs = (void *)((char *)hsag + sizeof(*hsag));

	for (i = 0; i < stats->num_counters; i++) {
		hsag->attrs[i] = alloc_hsa(i, port_num, stats->names[i]);
		if (!hsag->attrs[i])
			goto err;
		sysfs_attr_init(hsag->attrs[i]);
	}

	mutex_init(&stats->lock);
	/* treat an error here as non-fatal */
	hsag->attrs[i] = alloc_hsa_lifespan("lifespan", port_num);
	if (hsag->attrs[i])
		sysfs_attr_init(hsag->attrs[i]);

	if (port) {
		struct kobject *kobj = &port->kobj;
		ret = sysfs_create_group(kobj, hsag);
		if (ret)
			goto err;
		port->hw_stats_ag = hsag;
		port->hw_stats = stats;
	} else {
		struct kobject *kobj = &device->dev.kobj;
		ret = sysfs_create_group(kobj, hsag);
		if (ret)
			goto err;
		device->hw_stats_ag = hsag;
		device->hw_stats = stats;
	}

	return;

err:
	/* i indexes the last (possibly failed) slot; free back to 0. */
	for (; i >= 0; i--)
		kfree(hsag->attrs[i]);
err_free_hsag:
	kfree(hsag);
err_free_stats:
	kfree(stats);
	return;
}

/*
 * Create the full sysfs tree for one port: the port kobject, the
 * "gid_attrs" child kobject, the PMA counters, the GID/ndev/type/pkey
 * tables, optional driver-specific attributes via port_callback, and
 * the per-port hardware counters.  On failure everything is unwound in
 * reverse creation order via the goto chain; the final kobject_put
 * calls trigger the release callbacks, which is why the attrs pointers
 * are NULLed after being freed here (so release won't double-free).
 */
static int add_port(struct ib_device *device, int port_num,
		    int (*port_callback)(struct ib_device *,
					 u8, struct kobject *))
{
	struct ib_port *p;
	struct ib_port_attr attr;
	int i;
	int ret;

	ret = ib_query_port(device, port_num, &attr);
	if (ret)
		return ret;

	p = kzalloc(sizeof *p, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->ibdev      = device;
	p->port_num   = port_num;

	ret = kobject_init_and_add(&p->kobj, &port_type,
				   device->ports_parent,
				   "%d", port_num);
	if (ret) {
		kfree(p);
		return ret;
	}

	p->gid_attr_group = kzalloc(sizeof(*p->gid_attr_group), GFP_KERNEL);
	if (!p->gid_attr_group) {
		ret = -ENOMEM;
		goto err_put;
	}

	p->gid_attr_group->port = p;
	ret = kobject_init_and_add(&p->gid_attr_group->kobj, &gid_attr_type,
				   &p->kobj, "gid_attrs");
	if (ret) {
		kfree(p->gid_attr_group);
		goto err_put;
	}

	/* PMA counters only make sense when the device can process MADs. */
	if (device->process_mad) {
		p->pma_table = get_counter_table(device, port_num);
		ret = sysfs_create_group(&p->kobj, p->pma_table);
		if (ret)
			goto err_put_gid_attrs;
	}

	p->gid_group.name  = "gids";
	p->gid_group.attrs = alloc_group_attrs(show_port_gid, attr.gid_tbl_len);
	if (!p->gid_group.attrs) {
		ret = -ENOMEM;
		goto err_remove_pma;
	}

	ret = sysfs_create_group(&p->kobj, &p->gid_group);
	if (ret)
		goto err_free_gid;

	p->gid_attr_group->ndev.name  = "ndevs";
	p->gid_attr_group->ndev.attrs = alloc_group_attrs(show_port_gid_attr_ndev,
							  attr.gid_tbl_len);
	if (!p->gid_attr_group->ndev.attrs) {
		ret = -ENOMEM;
		goto err_remove_gid;
	}

	ret = sysfs_create_group(&p->gid_attr_group->kobj,
				 &p->gid_attr_group->ndev);
	if (ret)
		goto err_free_gid_ndev;

	p->gid_attr_group->type.name  = "types";
	p->gid_attr_group->type.attrs = alloc_group_attrs(show_port_gid_attr_gid_type,
							  attr.gid_tbl_len);
	if (!p->gid_attr_group->type.attrs) {
		ret = -ENOMEM;
		goto err_remove_gid_ndev;
	}

	ret = sysfs_create_group(&p->gid_attr_group->kobj,
				 &p->gid_attr_group->type);
	if (ret)
		goto err_free_gid_type;

	p->pkey_group.name  = "pkeys";
	p->pkey_group.attrs = alloc_group_attrs(show_port_pkey,
						attr.pkey_tbl_len);
	if (!p->pkey_group.attrs) {
		ret = -ENOMEM;
		goto err_remove_gid_type;
	}

	ret = sysfs_create_group(&p->kobj, &p->pkey_group);
	if (ret)
		goto err_free_pkey;

	if (port_callback) {
		ret = port_callback(device, port_num, &p->kobj);
		if (ret)
			goto err_remove_pkey;
	}

	/*
	 * If port == 0, it means we have only one port and the parent
	 * device, not this port device, should be the holder of the
	 * hw_counters
	 */
	if (device->alloc_hw_stats && port_num)
		setup_hw_stats(device, p, port_num);

	list_add_tail(&p->kobj.entry, &device->port_list);

	return 0;

err_remove_pkey:
	sysfs_remove_group(&p->kobj, &p->pkey_group);

err_free_pkey:
	for (i = 0; i < attr.pkey_tbl_len; ++i)
		kfree(p->pkey_group.attrs[i]);

	kfree(p->pkey_group.attrs);
	p->pkey_group.attrs = NULL;

err_remove_gid_type:
	sysfs_remove_group(&p->gid_attr_group->kobj,
			   &p->gid_attr_group->type);

err_free_gid_type:
	for (i = 0; i < attr.gid_tbl_len; ++i)
		kfree(p->gid_attr_group->type.attrs[i]);

	kfree(p->gid_attr_group->type.attrs);
	p->gid_attr_group->type.attrs = NULL;

err_remove_gid_ndev:
	sysfs_remove_group(&p->gid_attr_group->kobj,
			   &p->gid_attr_group->ndev);

err_free_gid_ndev:
	for (i = 0; i < attr.gid_tbl_len; ++i)
		kfree(p->gid_attr_group->ndev.attrs[i]);

	kfree(p->gid_attr_group->ndev.attrs);
	p->gid_attr_group->ndev.attrs = NULL;

err_remove_gid:
	sysfs_remove_group(&p->kobj, &p->gid_group);

err_free_gid:
	for (i = 0; i < attr.gid_tbl_len; ++i)
		kfree(p->gid_group.attrs[i]);

	kfree(p->gid_group.attrs);
	p->gid_group.attrs = NULL;

err_remove_pma:
	if (p->pma_table)
		sysfs_remove_group(&p->kobj, p->pma_table);

err_put_gid_attrs:
	kobject_put(&p->gid_attr_group->kobj);

err_put:
	kobject_put(&p->kobj);
	return ret;
}

/* "node_type" device attribute: numeric type plus symbolic name. */
static ssize_t show_node_type(struct device *device,
			      struct device_attribute *attr, char *buf)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	switch (dev->node_type) {
	case RDMA_NODE_IB_CA:	  return sprintf(buf, "%d: CA\n", dev->node_type);
	case RDMA_NODE_RNIC:	  return sprintf(buf, "%d: RNIC\n", dev->node_type);
	case RDMA_NODE_USNIC:	  return sprintf(buf, "%d: usNIC\n", dev->node_type);
	case RDMA_NODE_USNIC_UDP: return sprintf(buf, "%d: usNIC UDP\n", dev->node_type);
	case RDMA_NODE_IB_SWITCH: return sprintf(buf, "%d: switch\n", dev->node_type);
	case RDMA_NODE_IB_ROUTER: return sprintf(buf, "%d: router\n", dev->node_type);
	default:		  return sprintf(buf, "%d: <unknown>\n", dev->node_type);
	}
}

/* "sys_image_guid" device attribute, printed as four 16-bit groups. */
static ssize_t show_sys_image_guid(struct device *device,
				   struct device_attribute *dev_attr, char *buf)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	return sprintf(buf, "%04x:%04x:%04x:%04x\n",
		       be16_to_cpu(((__be16 *) &dev->attrs.sys_image_guid)[0]),
		       be16_to_cpu(((__be16 *) &dev->attrs.sys_image_guid)[1]),
		       be16_to_cpu(((__be16 *) &dev->attrs.sys_image_guid)[2]),
		       be16_to_cpu(((__be16 *) &dev->attrs.sys_image_guid)[3]));
}

/* "node_guid" device attribute, printed as four 16-bit groups. */
static ssize_t show_node_guid(struct device *device,
			      struct device_attribute *attr, char *buf)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	return sprintf(buf, "%04x:%04x:%04x:%04x\n",
		       be16_to_cpu(((__be16 *) &dev->node_guid)[0]),
		       be16_to_cpu(((__be16 *) &dev->node_guid)[1]),
		       be16_to_cpu(((__be16 *) &dev->node_guid)[2]),
		       be16_to_cpu(((__be16 *) &dev->node_guid)[3]));
}

/* "node_desc" device attribute: at most 64 characters. */
static ssize_t show_node_desc(struct device *device,
			      struct device_attribute *attr, char *buf)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	return sprintf(buf, "%.64s\n", dev->node_desc);
}

/* Store handler for "node_desc": update the device's node description. */
static ssize_t set_node_desc(struct device *device,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);
	struct ib_device_modify desc = {};
	int ret;

	if (!dev->modify_device)
		return -EIO;

	memcpy(desc.node_desc, buf, min_t(int, count, IB_DEVICE_NODE_DESC_MAX));
	ret = ib_modify_device(dev, IB_DEVICE_MODIFY_NODE_DESC, &desc);
	if (ret)
		return ret;

	return count;
}

/* "fw_ver" device attribute: driver-provided firmware version string. */
static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	ib_get_device_fw_str(dev, buf, PAGE_SIZE);
	strlcat(buf, "\n", PAGE_SIZE);
	return strlen(buf);
}

static DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);

static DEVICE_ATTR(sys_image_guid, S_IRUGO, show_sys_image_guid, NULL);
static DEVICE_ATTR(node_guid, S_IRUGO, show_node_guid, NULL);
static DEVICE_ATTR(node_desc, S_IRUGO | S_IWUSR, show_node_desc, set_node_desc);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);

/* Device-level attributes created/removed by register/unregister below. */
static struct device_attribute *ib_class_attributes[] = {
	&dev_attr_node_type,
	&dev_attr_sys_image_guid,
	&dev_attr_node_guid,
	&dev_attr_node_desc,
	&dev_attr_fw_ver,
};

/*
 * Tear down everything add_port() built, for every port on the device,
 * then drop the "ports" parent kobject.  Removal order mirrors add_port()
 * in reverse.
 */
static void free_port_list_attributes(struct ib_device *device)
{
	struct kobject *p, *t;

	list_for_each_entry_safe(p, t, &device->port_list, entry) {
		struct ib_port *port = container_of(p, struct ib_port, kobj);
		list_del(&p->entry);
		if (port->hw_stats) {
			/*
			 * NOTE(review): hw_stats is freed before free_hsag()
			 * removes the sysfs group — confirm no concurrent
			 * sysfs reader can still dereference port->hw_stats
			 * at this point.
			 */
			kfree(port->hw_stats);
			free_hsag(&port->kobj, port->hw_stats_ag);
		}

		/* pma_table is only set when the driver implements process_mad. */
		if (port->pma_table)
			sysfs_remove_group(p, port->pma_table);
		sysfs_remove_group(p, &port->pkey_group);
		sysfs_remove_group(p, &port->gid_group);
		sysfs_remove_group(&port->gid_attr_group->kobj,
				   &port->gid_attr_group->ndev);
		sysfs_remove_group(&port->gid_attr_group->kobj,
				   &port->gid_attr_group->type);
		kobject_put(&port->gid_attr_group->kobj);
		kobject_put(p);
	}

	kobject_put(device->ports_parent);
}

/*
 * Register the whole sysfs tree for an IB device: the class device, the
 * device-level attributes, one port directory per physical port (or a
 * single port 0 for switches), and device-wide hw_counters.
 *
 * The error labels fall through intentionally: err_put unwinds the port
 * tree, then err_unregister removes the class device.
 *
 * Returns 0 on success or a negative errno.
 */
int ib_device_register_sysfs(struct ib_device *device,
			     int (*port_callback)(struct ib_device *,
						  u8, struct kobject *))
{
	struct device *class_dev = &device->dev;
	int ret;
	int i;

	device->dev.parent = device->dma_device;
	ret = dev_set_name(class_dev, "%s", device->name);
	if (ret)
		return ret;

	ret = device_add(class_dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(ib_class_attributes); ++i) {
		ret = device_create_file(class_dev, ib_class_attributes[i]);
		if (ret)
			goto err_unregister;
	}

	device->ports_parent = kobject_create_and_add("ports",
						      &class_dev->kobj);
	if (!device->ports_parent) {
		ret = -ENOMEM;
		goto err_put;
	}

	/* Switches expose a single management port 0; HCAs are 1-based. */
	if (rdma_cap_ib_switch(device)) {
		ret = add_port(device, 0, port_callback);
		if (ret)
			goto err_put;
	} else {
		for (i = 1; i <= device->phys_port_cnt; ++i) {
			ret = add_port(device, i, port_callback);
			if (ret)
				goto err_put;
		}
	}

	/* Device-wide counters hang off the device kobject (port == 0). */
	if (device->alloc_hw_stats)
		setup_hw_stats(device, NULL, 0);

	return 0;

err_put:
	free_port_list_attributes(device);

err_unregister:
	device_del(class_dev);

err:
	return ret;
}

/*
 * Undo ib_device_register_sysfs().  An extra reference is taken on the
 * device kobject so the ib_device stays alive until ib_dealloc_device().
 */
void ib_device_unregister_sysfs(struct ib_device *device)
{
	int i;

	/* Hold kobject until ib_dealloc_device() */
	kobject_get(&device->dev.kobj);

	free_port_list_attributes(device);

	if (device->hw_stats) {
		kfree(device->hw_stats);
		free_hsag(&device->dev.kobj, device->hw_stats_ag);
	}

	for (i = 0; i < ARRAY_SIZE(ib_class_attributes); ++i)
		device_remove_file(&device->dev, ib_class_attributes[i]);

	device_unregister(&device->dev);
}

/**
 * ib_port_register_module_stat - add module counters under relevant port
 *  of IB device.
 *
 * @device: IB device to add counters
 * @port_num: valid port number
 * @kobj: pointer to the kobject to initialize
 * @ktype: pointer to the ktype for this kobject.
 * @name: the name of the kobject
 */
int ib_port_register_module_stat(struct ib_device *device, u8 port_num,
				 struct kobject *kobj, struct kobj_type *ktype,
				 const char *name)
{
	struct kobject *p, *t;
	int ret;

	/*
	 * Port numbers are unique per device, so at most one entry matches;
	 * the loop simply locates it.
	 * NOTE(review): if no port matches @port_num this silently returns 0
	 * without initializing @kobj — confirm callers always pass a valid
	 * port number.
	 */
	list_for_each_entry_safe(p, t, &device->port_list, entry) {
		struct ib_port *port = container_of(p, struct ib_port, kobj);

		if (port->port_num != port_num)
			continue;

		ret = kobject_init_and_add(kobj, ktype, &port->kobj, "%s",
					   name);
		if (ret) {
			/* init_and_add failed: drop the half-initialized ref. */
			kobject_put(kobj);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(ib_port_register_module_stat);

/**
 * ib_port_unregister_module_stat - release module counters
 * @kobj: pointer to the kobject to release
 */
void ib_port_unregister_module_stat(struct kobject *kobj)
{
	kobject_put(kobj);
}
EXPORT_SYMBOL(ib_port_unregister_module_stat);