/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008, 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/string.h>
#include <linux/etherdevice.h>

#include <dev/mlx4/cmd.h>
#include <linux/module.h>
#include <linux/printk.h>

#include "mlx4.h"

int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
{
	return 1 << dev->oper_log_mgm_entry_size;
}

/*
 * An MGM entry is laid out in 16-byte lines: the first two lines hold the
 * header (members_count/prot, next_gid_index and the 16-byte GID), and every
 * following line packs four 4-byte QPNs.  Hence a 256-byte entry carries
 * 4 * (256 / 16 - 2) = 56 QPs.
 */
int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
{
	return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2);
}

static int mlx4_QP_FLOW_STEERING_ATTACH(struct mlx4_dev *dev,
					struct mlx4_cmd_mailbox *mailbox,
					u32 size,
					u64 *reg_id)
{
	u64 imm;
	int err = 0;

	err = mlx4_cmd_imm(dev, mailbox->dma, &imm, size, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (err)
		return err;
	*reg_id = imm;

	return err;
}

static int mlx4_QP_FLOW_STEERING_DETACH(struct mlx4_dev *dev, u64 regid)
{
	int err = 0;

	err = mlx4_cmd(dev, regid, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);

	return err;
}

static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
			   struct mlx4_cmd_mailbox *mailbox)
{
	return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}

static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
			    struct mlx4_cmd_mailbox *mailbox)
{
	return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}

static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer,
			      struct mlx4_cmd_mailbox *mailbox)
{
	u32 in_mod;

	in_mod = (u32) port << 16 | steer << 1;
	return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
			MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_NATIVE);
}

static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 u16 *hash, u8 op_mod)
{
	u64 imm;
	int err;

	err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
			   MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);

	if (!err)
		*hash = imm;

	return err;
}
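/*
 * In B0 steering mode the driver mirrors the firmware MGM state in
 * software: each port keeps, per steer type (unicast/multicast), a list
 * of promiscuous QPs and a list of steering entries, each entry carrying
 * a "duplicates" list of promisc QPs that were also attached explicitly.
 * All of these lists are protected by mcg_table.mutex.
 */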
static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 port,
					      enum mlx4_steer_type steer,
					      u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_promisc_qp *pqp;

	if (port < 1 || port > dev->caps.num_ports)
		return NULL;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
		if (pqp->qpn == qpn)
			return pqp;
	}
	/* not found */
	return NULL;
}

/*
 * Add new entry to steering data structure.
 * All promisc QPs should be added as well.
 */
static int new_steering_entry(struct mlx4_dev *dev, u8 port,
			      enum mlx4_steer_type steer,
			      unsigned int index, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	struct mlx4_steer_index *new_entry;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp = NULL;
	u32 prot;
	int err;

	if (port < 1 || port > dev->caps.num_ports)
		return -EINVAL;

	s_steer = &mlx4_priv(dev)->steer[port - 1];
	new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
	if (!new_entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&new_entry->duplicates);
	new_entry->index = index;
	list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]);

	/* If the given qpn is also a promisc qp,
	 * it should be inserted to duplicates list
	 */
	pqp = get_promisc_qp(dev, port, steer, qpn);
	if (pqp) {
		dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
		if (!dqp) {
			err = -ENOMEM;
			goto out_alloc;
		}
		dqp->qpn = qpn;
		list_add_tail(&dqp->list, &new_entry->duplicates);
	}

	/* if no promisc qps for this vep, we are done */
	if (list_empty(&s_steer->promisc_qps[steer]))
		return 0;

	/* now need to add all the promisc qps to the new
	 * steering entry, as they should also receive the packets
	 * destined to this address */
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = -ENOMEM;
		goto out_alloc;
	}
	mgm = mailbox->buf;

	err = mlx4_READ_ENTRY(dev, index, mailbox);
	if (err)
		goto out_mailbox;

	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	prot = be32_to_cpu(mgm->members_count) >> 30;
	list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
		/* don't add already existing qpn */
		if (pqp->qpn == qpn)
			continue;
		if (members_count == dev->caps.num_qp_per_mgm) {
			/* out of space */
			err = -ENOMEM;
			goto out_mailbox;
		}

		/* add the qpn */
		mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK);
	}
	/* update the qps count and update the entry with all the promisc qps */
	mgm->members_count = cpu_to_be32(members_count | (prot << 30));
	err = mlx4_WRITE_ENTRY(dev, index, mailbox);

out_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (!err)
		return 0;
out_alloc:
	if (dqp) {
		list_del(&dqp->list);
		kfree(dqp);
	}
	list_del(&new_entry->list);
	kfree(new_entry);
	return err;
}

/* update the data structures with existing steering entry */
static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
				   enum mlx4_steer_type steer,
				   unsigned int index, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_steer_index *tmp_entry, *entry = NULL;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp;

	if (port < 1 || port > dev->caps.num_ports)
		return -EINVAL;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	pqp = get_promisc_qp(dev, port, steer, qpn);
	if (!pqp)
		return 0; /* nothing to do */

	list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
		if (tmp_entry->index == index) {
			entry = tmp_entry;
			break;
		}
	}
	if (unlikely(!entry)) {
		mlx4_warn(dev, "Steering entry at index %x is not registered\n", index);
		return -EINVAL;
	}

	/* the given qpn is listed as a promisc qpn
	 * we need to add it as a duplicate to this entry
	 * for future references */
	list_for_each_entry(dqp, &entry->duplicates, list) {
		if (qpn == dqp->qpn)
			return 0; /* qp is already duplicated */
	}

	/* add the qp as a duplicate on this index */
	dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
	if (!dqp)
		return -ENOMEM;
	dqp->qpn = qpn;
	list_add_tail(&dqp->list, &entry->duplicates);

	return 0;
}
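/*
 * The "duplicates" list records promisc QPs that are attached to an MGM
 * entry both implicitly (because they are promiscuous) and explicitly
 * (through mlx4_qp_attach_common()).  Such a QP must stay in the MGM
 * when only one of the two attachments goes away.
 */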
/* Check whether a qpn is a duplicate on steering entry
 * If so, it should not be removed from mgm */
static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
				  enum mlx4_steer_type steer,
				  unsigned int index, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_steer_index *tmp_entry, *entry = NULL;
	struct mlx4_promisc_qp *dqp, *tmp_dqp;

	if (port < 1 || port > dev->caps.num_ports)
		return false;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	/* if qp is not promisc, it cannot be duplicated */
	if (!get_promisc_qp(dev, port, steer, qpn))
		return false;

	/* The qp is promisc qp so it is a duplicate on this index
	 * Find the index entry, and remove the duplicate */
	list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
		if (tmp_entry->index == index) {
			entry = tmp_entry;
			break;
		}
	}
	if (unlikely(!entry)) {
		mlx4_warn(dev, "Steering entry for index %x is not registered\n", index);
		return false;
	}
	list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) {
		if (dqp->qpn == qpn) {
			list_del(&dqp->list);
			kfree(dqp);
		}
	}
	return true;
}

/* Returns true if all the QPs != tqpn contained in this entry
 * are Promisc QPs. Returns false otherwise.
 */
static bool promisc_steering_entry(struct mlx4_dev *dev, u8 port,
				   enum mlx4_steer_type steer,
				   unsigned int index, u32 tqpn,
				   u32 *members_count)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 m_count;
	bool ret = false;
	int i;

	if (port < 1 || port > dev->caps.num_ports)
		return false;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return false;
	mgm = mailbox->buf;

	if (mlx4_READ_ENTRY(dev, index, mailbox))
		goto out;
	m_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	if (members_count)
		*members_count = m_count;

	for (i = 0; i < m_count; i++) {
		u32 qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
		if (!get_promisc_qp(dev, port, steer, qpn) && qpn != tqpn) {
			/* the qp is not promisc, the entry can't be removed */
			goto out;
		}
	}
	ret = true;
out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}
/* If a steering entry contains only promisc QPs, it can be removed. */
static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
				      enum mlx4_steer_type steer,
				      unsigned int index, u32 tqpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_steer_index *entry = NULL, *tmp_entry;
	u32 members_count;
	bool ret = false;

	if (port < 1 || port > dev->caps.num_ports)
		return false;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	if (!promisc_steering_entry(dev, port, steer, index,
				    tqpn, &members_count))
		goto out;

	/* All the qps currently registered for this entry are promiscuous;
	 * check for duplicates */
	ret = true;
	list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) {
		if (entry->index == index) {
			if (list_empty(&entry->duplicates) ||
			    members_count == 1) {
				struct mlx4_promisc_qp *pqp, *tmp_pqp;
				/* If there is only 1 entry in duplicates then
				 * this is the QP we want to delete, going over
				 * the list and deleting the entry.
				 */
				list_del(&entry->list);
				list_for_each_entry_safe(pqp, tmp_pqp,
							 &entry->duplicates,
							 list) {
					list_del(&pqp->list);
					kfree(pqp);
				}
				kfree(entry);
			} else {
				/* This entry contains duplicates so it shouldn't be removed */
				ret = false;
				goto out;
			}
		}
	}

out:
	return ret;
}

static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
			  enum mlx4_steer_type steer, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	struct mlx4_steer_index *entry;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp;
	u32 members_count;
	u32 prot;
	int i;
	bool found;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (port < 1 || port > dev->caps.num_ports)
		return -EINVAL;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	mutex_lock(&priv->mcg_table.mutex);

	if (get_promisc_qp(dev, port, steer, qpn)) {
		err = 0; /* Nothing to do, already exists */
		goto out_mutex;
	}

	pqp = kmalloc(sizeof *pqp, GFP_KERNEL);
	if (!pqp) {
		err = -ENOMEM;
		goto out_mutex;
	}
	pqp->qpn = qpn;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = -ENOMEM;
		goto out_alloc;
	}
	mgm = mailbox->buf;

	if (!(mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)) {
		/* The promisc QP needs to be added for each one of the steering
		 * entries. If it already exists, needs to be added as
		 * a duplicate for this entry.
		 */
		list_for_each_entry(entry,
				    &s_steer->steer_entries[steer],
				    list) {
			err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
			if (err)
				goto out_mailbox;

			members_count = be32_to_cpu(mgm->members_count) &
					0xffffff;
			prot = be32_to_cpu(mgm->members_count) >> 30;
			found = false;
			for (i = 0; i < members_count; i++) {
				if ((be32_to_cpu(mgm->qp[i]) &
				     MGM_QPN_MASK) == qpn) {
					/* Entry already exists.
					 * Add to duplicates.
					 */
					dqp = kmalloc(sizeof(*dqp), GFP_KERNEL);
					if (!dqp) {
						err = -ENOMEM;
						goto out_mailbox;
					}
					dqp->qpn = qpn;
					list_add_tail(&dqp->list,
						      &entry->duplicates);
					found = true;
				}
			}
			if (!found) {
				/* Need to add the qpn to mgm */
				if (members_count ==
				    dev->caps.num_qp_per_mgm) {
					/* entry is full */
					err = -ENOMEM;
					goto out_mailbox;
				}
				mgm->qp[members_count++] =
					cpu_to_be32(qpn & MGM_QPN_MASK);
				mgm->members_count =
					cpu_to_be32(members_count |
						    (prot << 30));
				err = mlx4_WRITE_ENTRY(dev, entry->index,
						       mailbox);
				if (err)
					goto out_mailbox;
			}
		}
	}

	/* add the new qpn to list of promisc qps */
	list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
	/* now need to add all the promisc qps to default entry */
	memset(mgm, 0, sizeof *mgm);
	members_count = 0;
	list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) {
		if (members_count == dev->caps.num_qp_per_mgm) {
			/* entry is full */
			err = -ENOMEM;
			goto out_list;
		}
		mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
	}
	mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);

	err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
	if (err)
		goto out_list;

	mlx4_free_cmd_mailbox(dev, mailbox);
	mutex_unlock(&priv->mcg_table.mutex);
	return 0;

out_list:
	list_del(&pqp->list);
out_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
out_alloc:
	kfree(pqp);
out_mutex:
	mutex_unlock(&priv->mcg_table.mutex);
	return err;
}
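/*
 * Undo add_promisc_qp(): take the QP off the promisc list, rewrite the
 * port's default steering entry without it and, except for unicast
 * steering on a multi-function device, drop it from every steering entry
 * (or merely from the entry's duplicates list if it was also attached
 * explicitly).
 */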
static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
			     enum mlx4_steer_type steer, u32 qpn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	struct mlx4_steer_index *entry, *tmp_entry;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp;
	u32 members_count;
	bool found;
	bool back_to_list = false;
	int i;
	int err;

	if (port < 1 || port > dev->caps.num_ports)
		return -EINVAL;

	s_steer = &mlx4_priv(dev)->steer[port - 1];
	mutex_lock(&priv->mcg_table.mutex);

	pqp = get_promisc_qp(dev, port, steer, qpn);
	if (unlikely(!pqp)) {
		mlx4_warn(dev, "QP %x is not a promiscuous QP\n", qpn);
		/* nothing to do */
		err = 0;
		goto out_mutex;
	}

	/* remove from list of promisc qps */
	list_del(&pqp->list);

	/* set the default entry not to include the removed one */
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = -ENOMEM;
		back_to_list = true;
		goto out_list;
	}
	mgm = mailbox->buf;
	members_count = 0;
	list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
		mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
	mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);

	err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
	if (err)
		goto out_mailbox;

	if (!(mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)) {
		/* Remove the QP from all the steering entries */
		list_for_each_entry_safe(entry, tmp_entry,
					 &s_steer->steer_entries[steer],
					 list) {
			found = false;
			list_for_each_entry(dqp, &entry->duplicates, list) {
				if (dqp->qpn == qpn) {
					found = true;
					break;
				}
			}
			if (found) {
				/* A duplicate, no need to change the MGM,
				 * only update the duplicates list
				 */
				list_del(&dqp->list);
				kfree(dqp);
			} else {
				int loc = -1;

				err = mlx4_READ_ENTRY(dev,
						      entry->index,
						      mailbox);
				if (err)
					goto out_mailbox;
				members_count =
					be32_to_cpu(mgm->members_count) &
					0xffffff;
				if (!members_count) {
					mlx4_warn(dev, "QP %06x wasn't found in entry %x, mcount=0; deleting entry...\n",
						  qpn, entry->index);
					list_del(&entry->list);
					kfree(entry);
					continue;
				}

				for (i = 0; i < members_count; ++i)
					if ((be32_to_cpu(mgm->qp[i]) &
					     MGM_QPN_MASK) == qpn) {
						loc = i;
						break;
					}

				if (loc < 0) {
					mlx4_err(dev, "QP %06x wasn't found in entry %d\n",
						 qpn, entry->index);
					err = -EINVAL;
					goto out_mailbox;
				}

				/* Copy the last QP in this MGM
				 * over removed QP
				 */
				mgm->qp[loc] = mgm->qp[members_count - 1];
				mgm->qp[members_count - 1] = 0;
				mgm->members_count =
					cpu_to_be32(--members_count |
						    (MLX4_PROT_ETH << 30));

				err = mlx4_WRITE_ENTRY(dev,
						       entry->index,
						       mailbox);
				if (err)
					goto out_mailbox;
			}
		}
	}

out_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
out_list:
	if (back_to_list)
		list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
	else
		kfree(pqp);
out_mutex:
	mutex_unlock(&priv->mcg_table.mutex);
	return err;
}
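/*
 * The MCG table is split in two: indices [0, num_mgms) form the hash
 * table filled in via MGID_HASH, while indices [num_mgms, num_mgms +
 * num_amgms) are AMGM entries used as overflow chains, linked through
 * next_gid_index and allocated from mcg_table.bitmap.
 */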
/*
 * Caller must hold MCG table semaphore.  gid and mgm parameters must
 * be properly aligned for command interface.
 *
 * Returns 0 unless a firmware command error occurs.
 *
 * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1
 * and *mgm holds MGM entry.
 *
 * If GID is found in AMGM, *index = index in AMGM, *prev = index of
 * previous entry in hash chain and *mgm holds AMGM entry.
 *
 * If no AMGM exists for given gid, *index = -1, *prev = index of last
 * entry in hash chain and *mgm holds end of hash chain.
 */
static int find_entry(struct mlx4_dev *dev, u8 port,
		      u8 *gid, enum mlx4_protocol prot,
		      struct mlx4_cmd_mailbox *mgm_mailbox,
		      int *prev, int *index)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm = mgm_mailbox->buf;
	u8 *mgid;
	int err;
	u16 hash;
	u8 op_mod = (prot == MLX4_PROT_ETH) ?
		!!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;
	mgid = mailbox->buf;

	memcpy(mgid, gid, 16);

	err = mlx4_GID_HASH(dev, mailbox, &hash, op_mod);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		return err;

	if (0) {
		mlx4_dbg(dev, "Hash for "GID_PRINT_FMT" is %04x\n",
			 GID_PRINT_ARGS(gid), hash);
	}

	*index = hash;
	*prev = -1;

	do {
		err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox);
		if (err)
			return err;

		if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
			if (*index != hash) {
				mlx4_err(dev, "Found zero MGID in AMGM\n");
				err = -EINVAL;
			}
			return err;
		}

		if (!memcmp(mgm->gid, gid, 16) &&
		    be32_to_cpu(mgm->members_count) >> 30 == prot)
			return err;

		*prev = *index;
		*index = be32_to_cpu(mgm->next_gid_index) >> 6;
	} while (*index);

	*index = -1;
	return err;
}
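/*
 * Translation tables between the driver's flow-steering enums and the
 * encodings the hardware expects in device-managed (DMFS) rules.
 */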
static const u8 __promisc_mode[] = {
	[MLX4_FS_REGULAR]	 = 0x0,
	[MLX4_FS_ALL_DEFAULT]	 = 0x1,
	[MLX4_FS_MC_DEFAULT]	 = 0x3,
	[MLX4_FS_MIRROR_RX_PORT] = 0x4,
	[MLX4_FS_MIRROR_SX_PORT] = 0x5,
	[MLX4_FS_UC_SNIFFER]	 = 0x6,
	[MLX4_FS_MC_SNIFFER]	 = 0x7,
};

int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
				    enum mlx4_net_trans_promisc_mode flow_type)
{
	if (flow_type >= MLX4_FS_MODE_NUM) {
		mlx4_err(dev, "Invalid flow type. type = %d\n", flow_type);
		return -EINVAL;
	}
	return __promisc_mode[flow_type];
}
EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_mode);

static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl,
				  struct mlx4_net_trans_rule_hw_ctrl *hw)
{
	u8 flags = 0;

	flags = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0;
	flags |= ctrl->exclusive ? (1 << 2) : 0;
	flags |= ctrl->allow_loopback ? (1 << 3) : 0;

	hw->flags = flags;
	hw->type = __promisc_mode[ctrl->promisc_mode];
	hw->prio = cpu_to_be16(ctrl->priority);
	hw->port = ctrl->port;
	hw->qpn = cpu_to_be32(ctrl->qpn);
}

const u16 __sw_id_hw[] = {
	[MLX4_NET_TRANS_RULE_ID_ETH]   = 0xE001,
	[MLX4_NET_TRANS_RULE_ID_IB]    = 0xE005,
	[MLX4_NET_TRANS_RULE_ID_IPV6]  = 0xE003,
	[MLX4_NET_TRANS_RULE_ID_IPV4]  = 0xE002,
	[MLX4_NET_TRANS_RULE_ID_TCP]   = 0xE004,
	[MLX4_NET_TRANS_RULE_ID_UDP]   = 0xE006,
	[MLX4_NET_TRANS_RULE_ID_VXLAN] = 0xE008
};

int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
				  enum mlx4_net_trans_rule_id id)
{
	if (id >= MLX4_NET_TRANS_RULE_NUM) {
		mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
		return -EINVAL;
	}
	return __sw_id_hw[id];
}
EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_id);
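/*
 * Size, in bytes, of the hardware representation of each rule spec.
 * parse_trans_rule() returns these values; the accumulated total is
 * handed to firmware in 4-byte units (hence the size >> 2 below and in
 * mlx4_flow_attach()).
 */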
static const int __rule_hw_sz[] = {
	[MLX4_NET_TRANS_RULE_ID_ETH] =
		sizeof(struct mlx4_net_trans_rule_hw_eth),
	[MLX4_NET_TRANS_RULE_ID_IB] =
		sizeof(struct mlx4_net_trans_rule_hw_ib),
	[MLX4_NET_TRANS_RULE_ID_IPV6] = 0,
	[MLX4_NET_TRANS_RULE_ID_IPV4] =
		sizeof(struct mlx4_net_trans_rule_hw_ipv4),
	[MLX4_NET_TRANS_RULE_ID_TCP] =
		sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
	[MLX4_NET_TRANS_RULE_ID_UDP] =
		sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
	[MLX4_NET_TRANS_RULE_ID_VXLAN] =
		sizeof(struct mlx4_net_trans_rule_hw_vxlan)
};

int mlx4_hw_rule_sz(struct mlx4_dev *dev,
		    enum mlx4_net_trans_rule_id id)
{
	if (id >= MLX4_NET_TRANS_RULE_NUM) {
		mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
		return -EINVAL;
	}

	return __rule_hw_sz[id];
}
EXPORT_SYMBOL_GPL(mlx4_hw_rule_sz);

static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
			    struct _rule_hw *rule_hw)
{
	if (mlx4_hw_rule_sz(dev, spec->id) < 0)
		return -EINVAL;
	memset(rule_hw, 0, mlx4_hw_rule_sz(dev, spec->id));
	rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]);
	rule_hw->size = mlx4_hw_rule_sz(dev, spec->id) >> 2;

	switch (spec->id) {
	case MLX4_NET_TRANS_RULE_ID_ETH:
		memcpy(rule_hw->eth.dst_mac, spec->eth.dst_mac, ETH_ALEN);
		memcpy(rule_hw->eth.dst_mac_msk, spec->eth.dst_mac_msk,
		       ETH_ALEN);
		memcpy(rule_hw->eth.src_mac, spec->eth.src_mac, ETH_ALEN);
		memcpy(rule_hw->eth.src_mac_msk, spec->eth.src_mac_msk,
		       ETH_ALEN);
		if (spec->eth.ether_type_enable) {
			rule_hw->eth.ether_type_enable = 1;
			rule_hw->eth.ether_type = spec->eth.ether_type;
		}
		rule_hw->eth.vlan_tag = spec->eth.vlan_id;
		rule_hw->eth.vlan_tag_msk = spec->eth.vlan_id_msk;
		break;

	case MLX4_NET_TRANS_RULE_ID_IB:
		rule_hw->ib.l3_qpn = spec->ib.l3_qpn;
		rule_hw->ib.qpn_mask = spec->ib.qpn_msk;
		memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16);
		memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16);
		break;

	case MLX4_NET_TRANS_RULE_ID_IPV6:
		return -EOPNOTSUPP;

	case MLX4_NET_TRANS_RULE_ID_IPV4:
		rule_hw->ipv4.src_ip = spec->ipv4.src_ip;
		rule_hw->ipv4.src_ip_msk = spec->ipv4.src_ip_msk;
		rule_hw->ipv4.dst_ip = spec->ipv4.dst_ip;
		rule_hw->ipv4.dst_ip_msk = spec->ipv4.dst_ip_msk;
		break;

	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		rule_hw->tcp_udp.dst_port = spec->tcp_udp.dst_port;
		rule_hw->tcp_udp.dst_port_msk = spec->tcp_udp.dst_port_msk;
		rule_hw->tcp_udp.src_port = spec->tcp_udp.src_port;
		rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk;
		break;

	case MLX4_NET_TRANS_RULE_ID_VXLAN:
		rule_hw->vxlan.vni =
			cpu_to_be32(be32_to_cpu(spec->vxlan.vni) << 8);
		rule_hw->vxlan.vni_mask =
			cpu_to_be32(be32_to_cpu(spec->vxlan.vni_mask) << 8);
		break;

	default:
		return -EINVAL;
	}

	return __rule_hw_sz[spec->id];
}
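/*
 * Render a failed rule into a single log line: the control fields first,
 * then one clause per spec in the rule's list.
 */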
static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
			  struct mlx4_net_trans_rule *rule)
{
#define BUF_SIZE 256
	struct mlx4_spec_list *cur;
	char buf[BUF_SIZE];
	int len = 0;

	mlx4_err(dev, "%s", str);
	len += snprintf(buf + len, BUF_SIZE - len,
			"port = %d prio = 0x%x qp = 0x%x ",
			rule->port, rule->priority, rule->qpn);

	list_for_each_entry(cur, &rule->list, list) {
		switch (cur->id) {
		case MLX4_NET_TRANS_RULE_ID_ETH:
			len += snprintf(buf + len, BUF_SIZE - len,
					"dmac = 0x%02x%02x%02x%02x%02x%02x ",
					cur->eth.dst_mac[0], cur->eth.dst_mac[1],
					cur->eth.dst_mac[2], cur->eth.dst_mac[3],
					cur->eth.dst_mac[4], cur->eth.dst_mac[5]);
			if (cur->eth.ether_type)
				len += snprintf(buf + len, BUF_SIZE - len,
						"ethertype = 0x%x ",
						be16_to_cpu(cur->eth.ether_type));
			if (cur->eth.vlan_id)
				len += snprintf(buf + len, BUF_SIZE - len,
						"vlan-id = %d ",
						be16_to_cpu(cur->eth.vlan_id));
			break;

		case MLX4_NET_TRANS_RULE_ID_IPV4:
			if (cur->ipv4.src_ip)
				len += snprintf(buf + len, BUF_SIZE - len,
						"src-ip = %pI4 ",
						&cur->ipv4.src_ip);
			if (cur->ipv4.dst_ip)
				len += snprintf(buf + len, BUF_SIZE - len,
						"dst-ip = %pI4 ",
						&cur->ipv4.dst_ip);
			break;

		case MLX4_NET_TRANS_RULE_ID_TCP:
		case MLX4_NET_TRANS_RULE_ID_UDP:
			if (cur->tcp_udp.src_port)
				len += snprintf(buf + len, BUF_SIZE - len,
						"src-port = %d ",
						be16_to_cpu(cur->tcp_udp.src_port));
			if (cur->tcp_udp.dst_port)
				len += snprintf(buf + len, BUF_SIZE - len,
						"dst-port = %d ",
						be16_to_cpu(cur->tcp_udp.dst_port));
			break;

		case MLX4_NET_TRANS_RULE_ID_IB:
			len += snprintf(buf + len, BUF_SIZE - len,
					"dst-gid = "GID_PRINT_FMT"\n",
					GID_PRINT_ARGS(cur->ib.dst_gid));
			len += snprintf(buf + len, BUF_SIZE - len,
					"dst-gid-mask = "GID_PRINT_FMT"\n",
					GID_PRINT_ARGS(cur->ib.dst_gid_msk));
			break;

		case MLX4_NET_TRANS_RULE_ID_VXLAN:
			len += snprintf(buf + len, BUF_SIZE - len,
					"VNID = %d ", be32_to_cpu(cur->vxlan.vni));
			break;
		case MLX4_NET_TRANS_RULE_ID_IPV6:
			break;

		default:
			break;
		}
	}
	len += snprintf(buf + len, BUF_SIZE - len, "\n");
	mlx4_err(dev, "%s", buf);

	if (len >= BUF_SIZE)
		mlx4_err(dev, "Network rule error message was truncated, print buffer is too small\n");
}

int mlx4_flow_attach(struct mlx4_dev *dev,
		     struct mlx4_net_trans_rule *rule, u64 *reg_id)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_spec_list *cur;
	u32 size = 0;
	int ret;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	trans_rule_ctrl_to_hw(rule, mailbox->buf);

	size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);

	list_for_each_entry(cur, &rule->list, list) {
		ret = parse_trans_rule(dev, cur, mailbox->buf + size);
		if (ret < 0) {
			mlx4_free_cmd_mailbox(dev, mailbox);
			return ret;
		}
		size += ret;
	}

	ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id);
	if (ret == -ENOMEM) {
		mlx4_err_rule(dev,
			      "mcg table is full. Failed to register network rule\n",
			      rule);
	} else if (ret) {
		if (ret == -ENXIO) {
			if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED)
				mlx4_err_rule(dev,
					      "DMFS is not enabled, failed to register network rule\n",
					      rule);
			else
				mlx4_err_rule(dev,
					      "Rule exceeds the dmfs_high_rate_mode limitations, failed to register network rule\n",
					      rule);

		} else {
			mlx4_err_rule(dev, "Failed to register network rule\n", rule);
		}
	}

	mlx4_free_cmd_mailbox(dev, mailbox);

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_flow_attach);
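/*
 * A minimal usage sketch for the attach/detach pair (variable names are
 * illustrative only; mlx4_tunnel_steer_add() below is a real in-tree
 * caller):
 *
 *	struct mlx4_spec_list spec = { .id = MLX4_NET_TRANS_RULE_ID_ETH };
 *	struct mlx4_net_trans_rule rule = {
 *		.queue_mode   = MLX4_NET_TRANS_Q_FIFO,
 *		.promisc_mode = MLX4_FS_REGULAR,
 *		.port         = 1,
 *		.qpn          = qpn,
 *	};
 *	u64 reg_id;
 *
 *	INIT_LIST_HEAD(&rule.list);
 *	memcpy(spec.eth.dst_mac, addr, ETH_ALEN);
 *	list_add_tail(&spec.list, &rule.list);
 *	if (!mlx4_flow_attach(dev, &rule, &reg_id))
 *		mlx4_flow_detach(dev, reg_id);	(drops the rule again)
 */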
int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
{
	int err;

	err = mlx4_QP_FLOW_STEERING_DETACH(dev, reg_id);
	if (err)
		mlx4_err(dev, "Failed to detach network rule. registration id = 0x%llx\n",
			 (unsigned long long)reg_id);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_flow_detach);

int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
			  int port, int qpn, u16 prio, u64 *reg_id)
{
	int err;
	struct mlx4_spec_list spec_eth_outer = { {NULL} };
	struct mlx4_spec_list spec_vxlan = { {NULL} };
	struct mlx4_spec_list spec_eth_inner = { {NULL} };

	struct mlx4_net_trans_rule rule = {
		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
		.exclusive = 0,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_REGULAR,
	};

	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	rule.port = port;
	rule.qpn = qpn;
	rule.priority = prio;
	INIT_LIST_HEAD(&rule.list);

	spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
	memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
	memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

	spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN;   /* any vxlan header */
	spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH; /* any inner eth header */

	list_add_tail(&spec_eth_outer.list, &rule.list);
	list_add_tail(&spec_vxlan.list, &rule.list);
	list_add_tail(&spec_eth_inner.list, &rule.list);

	err = mlx4_flow_attach(dev, &rule, reg_id);
	return err;
}
EXPORT_SYMBOL(mlx4_tunnel_steer_add);

int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn,
				      u32 max_range_qpn)
{
	int err;
	u64 in_param;

	in_param = ((u64) min_range_qpn) << 32;
	in_param |= ((u64) max_range_qpn) & 0xFFFFFFFF;

	err = mlx4_cmd(dev, in_param, 0, 0,
		       MLX4_FLOW_STEERING_IB_UC_QP_RANGE,
		       MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_FLOW_STEERING_IB_UC_QP_RANGE);
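/*
 * Attach a QP to the MGM entry matching @gid/@prot in A0/B0 steering
 * modes: look the entry up via the GID hash, allocate an AMGM slot and
 * link it into the hash chain when the GID is new, append the QPN (with
 * an optional block-loopback flag) and update the software steering
 * bookkeeping for promiscuous QPs.
 */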
int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  int block_mcast_loopback, enum mlx4_protocol prot,
			  enum mlx4_steer_type steer)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	int index = -1, prev;
	int link = 0;
	int i;
	int err;
	u8 port = gid[5];
	u8 new_entry = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mgm = mailbox->buf;

	mutex_lock(&priv->mcg_table.mutex);
	err = find_entry(dev, port, gid, prot,
			 mailbox, &prev, &index);
	if (err)
		goto out;

	if (index != -1) {
		if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
			new_entry = 1;
			memcpy(mgm->gid, gid, 16);
		}
	} else {
		link = 1;

		index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap);
		if (index == -1) {
			mlx4_err(dev, "No AMGM entries left\n");
			err = -ENOMEM;
			goto out;
		}
		index += dev->caps.num_mgms;

		new_entry = 1;
		memset(mgm, 0, sizeof *mgm);
		memcpy(mgm->gid, gid, 16);
	}

	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	if (members_count == dev->caps.num_qp_per_mgm) {
		mlx4_err(dev, "MGM at index %x is full\n", index);
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < members_count; ++i)
		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
			mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn);
			err = 0;
			goto out;
		}

	if (block_mcast_loopback)
		mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) |
						       (1U << MGM_BLCK_LB_BIT));
	else
		mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);

	mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30);

	err = mlx4_WRITE_ENTRY(dev, index, mailbox);
	if (err)
		goto out;

	if (!link)
		goto out;

	err = mlx4_READ_ENTRY(dev, prev, mailbox);
	if (err)
		goto out;

	mgm->next_gid_index = cpu_to_be32(index << 6);

	err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
	if (err)
		goto out;

out:
	if (prot == MLX4_PROT_ETH && index != -1) {
		/* manage the steering entry for promisc mode */
		if (new_entry)
			err = new_steering_entry(dev, port, steer,
						 index, qp->qpn);
		else
			err = existing_steering_entry(dev, port, steer,
						      index, qp->qpn);
	}
	if (err && link && index != -1) {
		if (index < dev->caps.num_mgms)
			mlx4_warn(dev, "Got AMGM index %d < %d\n",
				  index, dev->caps.num_mgms);
		else
			mlx4_bitmap_free(&priv->mcg_table.bitmap,
					 index - dev->caps.num_mgms, MLX4_USE_RR);
	}
	mutex_unlock(&priv->mcg_table.mutex);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
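/*
 * Detach a QP from the MGM entry matching @gid/@prot: remove the QPN
 * from the entry (compacting the member array), free the entry and
 * unlink it from the hash chain once no members remain, and keep the
 * promisc duplicates bookkeeping consistent.
 */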
int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol prot, enum mlx4_steer_type steer)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	int prev, index;
	int i, loc = -1;
	int err;
	u8 port = gid[5];
	bool removed_entry = false;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mgm = mailbox->buf;

	mutex_lock(&priv->mcg_table.mutex);

	err = find_entry(dev, port, gid, prot,
			 mailbox, &prev, &index);
	if (err)
		goto out;

	if (index == -1) {
		mlx4_err(dev, "MGID "GID_PRINT_FMT" not found\n",
			 GID_PRINT_ARGS(gid));
		err = -EINVAL;
		goto out;
	}

	/* If this QP is also a promisc QP, it must stay in the MGM as long
	 * as at least one non-promisc QP is still attached to this MCG
	 */
	if (prot == MLX4_PROT_ETH &&
	    check_duplicate_entry(dev, port, steer, index, qp->qpn) &&
	    !promisc_steering_entry(dev, port, steer, index, qp->qpn, NULL))
		goto out;

	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	for (i = 0; i < members_count; ++i)
		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
			loc = i;
			break;
		}

	if (loc == -1) {
		mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn);
		err = -EINVAL;
		goto out;
	}

	/* copy the last QP in this MGM over removed QP */
	mgm->qp[loc] = mgm->qp[members_count - 1];
	mgm->qp[members_count - 1] = 0;
	mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30);

	if (prot == MLX4_PROT_ETH)
		removed_entry = can_remove_steering_entry(dev, port, steer,
							  index, qp->qpn);
	if (members_count && (prot != MLX4_PROT_ETH || !removed_entry)) {
		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
		goto out;
	}

	/* We are going to delete the entry, members count should be 0 */
	mgm->members_count = cpu_to_be32((u32) prot << 30);

	if (prev == -1) {
		/* Remove entry from MGM */
		int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6;
		if (amgm_index) {
			err = mlx4_READ_ENTRY(dev, amgm_index, mailbox);
			if (err)
				goto out;
		} else
			memset(mgm->gid, 0, 16);

		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
		if (err)
			goto out;

		if (amgm_index) {
			if (amgm_index < dev->caps.num_mgms)
				mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d\n",
					  index, amgm_index, dev->caps.num_mgms);
			else
				mlx4_bitmap_free(&priv->mcg_table.bitmap,
						 amgm_index - dev->caps.num_mgms, MLX4_USE_RR);
		}
	} else {
		/* Remove entry from AMGM */
		int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
		err = mlx4_READ_ENTRY(dev, prev, mailbox);
		if (err)
			goto out;

		mgm->next_gid_index = cpu_to_be32(cur_next_index << 6);

		err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
		if (err)
			goto out;

		if (index < dev->caps.num_mgms)
			mlx4_warn(dev, "entry %d had next AMGM index %d < %d\n",
				  prev, index, dev->caps.num_mgms);
		else
			mlx4_bitmap_free(&priv->mcg_table.bitmap,
					 index - dev->caps.num_mgms, MLX4_USE_RR);
	}

out:
	mutex_unlock(&priv->mcg_table.mutex);

	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err && dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
		/* In case the device is under an internal error, return
		 * success as a closing command */
		err = 0;
	return err;
}

static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
			  u8 gid[16], u8 attach, u8 block_loopback,
			  enum mlx4_protocol prot)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err = 0;
	int qpn;

	if (!mlx4_is_mfunc(dev))
		return -EBADF;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, gid, 16);
	qpn = qp->qpn;
	qpn |= (prot << 28);
	if (attach && block_loopback)
		qpn |= (1U << 31);

	err = mlx4_cmd(dev, mailbox->dma, qpn, attach,
		       MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err && !attach &&
	    dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
		err = 0;
	return err;
}
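/*
 * Translate a legacy (B0-style) attach request into a device-managed
 * flow-steering rule: the MAC embedded in gid[10..15] becomes an L2 spec
 * for Ethernet, the full GID an IB spec for IPoIB.
 */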
int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp,
			      u8 gid[16], u8 port,
			      int block_mcast_loopback,
			      enum mlx4_protocol prot, u64 *reg_id)
{
	struct mlx4_spec_list spec = { {NULL} };
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	struct mlx4_net_trans_rule rule = {
		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
		.exclusive = 0,
		.promisc_mode = MLX4_FS_REGULAR,
		.priority = MLX4_DOMAIN_NIC,
	};

	rule.allow_loopback = !block_mcast_loopback;
	rule.port = port;
	rule.qpn = qp->qpn;
	INIT_LIST_HEAD(&rule.list);

	switch (prot) {
	case MLX4_PROT_ETH:
		spec.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec.eth.dst_mac, &gid[10], ETH_ALEN);
		memcpy(spec.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		break;

	case MLX4_PROT_IB_IPV6:
		spec.id = MLX4_NET_TRANS_RULE_ID_IB;
		memcpy(spec.ib.dst_gid, gid, 16);
		memset(&spec.ib.dst_gid_msk, 0xff, 16);
		break;
	default:
		return -EINVAL;
	}
	list_add_tail(&spec.list, &rule.list);

	return mlx4_flow_attach(dev, &rule, reg_id);
}

int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  u8 port, int block_mcast_loopback,
			  enum mlx4_protocol prot, u64 *reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_A0:
		if (prot == MLX4_PROT_ETH)
			return 0;
		/* fall through */

	case MLX4_STEERING_MODE_B0:
		if (prot == MLX4_PROT_ETH)
			gid[7] |= (MLX4_MC_STEER << 1);

		if (mlx4_is_mfunc(dev))
			return mlx4_QP_ATTACH(dev, qp, gid, 1,
					      block_mcast_loopback, prot);
		return mlx4_qp_attach_common(dev, qp, gid,
					     block_mcast_loopback, prot,
					     MLX4_MC_STEER);

	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
						 block_mcast_loopback,
						 prot, reg_id);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mlx4_multicast_attach);

int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol prot, u64 reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_A0:
		if (prot == MLX4_PROT_ETH)
			return 0;
		/* fall through */

	case MLX4_STEERING_MODE_B0:
		if (prot == MLX4_PROT_ETH)
			gid[7] |= (MLX4_MC_STEER << 1);

		if (mlx4_is_mfunc(dev))
			return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);

		return mlx4_qp_detach_common(dev, qp, gid, prot,
					     MLX4_MC_STEER);

	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return mlx4_flow_detach(dev, reg_id);

	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mlx4_multicast_detach);

int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port,
				u32 qpn, enum mlx4_net_trans_promisc_mode mode)
{
	struct mlx4_net_trans_rule rule = {
		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
		.exclusive = 0,
		.allow_loopback = 1,
	};

	u64 *regid_p;

	switch (mode) {
	case MLX4_FS_ALL_DEFAULT:
		regid_p = &dev->regid_promisc_array[port];
		break;
	case MLX4_FS_MC_DEFAULT:
		regid_p = &dev->regid_allmulti_array[port];
		break;
	default:
		return -1;
	}

	if (*regid_p != 0)
		return -1;

	rule.promisc_mode = mode;
	rule.port = port;
	rule.qpn = qpn;
	INIT_LIST_HEAD(&rule.list);
	mlx4_err(dev, "going promisc on %x\n", port);

	return mlx4_flow_attach(dev, &rule, regid_p);
}
EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_add);
int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
				   enum mlx4_net_trans_promisc_mode mode)
{
	int ret;
	u64 *regid_p;

	switch (mode) {
	case MLX4_FS_ALL_DEFAULT:
		regid_p = &dev->regid_promisc_array[port];
		break;
	case MLX4_FS_MC_DEFAULT:
		regid_p = &dev->regid_allmulti_array[port];
		break;
	default:
		return -1;
	}

	if (*regid_p == 0)
		return -1;

	ret = mlx4_flow_detach(dev, *regid_p);
	if (ret == 0)
		*regid_p = 0;

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_remove);

int mlx4_unicast_attach(struct mlx4_dev *dev,
			struct mlx4_qp *qp, u8 gid[16],
			int block_mcast_loopback, enum mlx4_protocol prot)
{
	if (prot == MLX4_PROT_ETH)
		gid[7] |= (MLX4_UC_STEER << 1);

	if (mlx4_is_mfunc(dev))
		return mlx4_QP_ATTACH(dev, qp, gid, 1,
				      block_mcast_loopback, prot);

	return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
				     prot, MLX4_UC_STEER);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_attach);

int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
			u8 gid[16], enum mlx4_protocol prot)
{
	if (prot == MLX4_PROT_ETH)
		gid[7] |= (MLX4_UC_STEER << 1);

	if (mlx4_is_mfunc(dev))
		return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);

	return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_detach);

int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	u32 qpn = (u32) vhcr->in_param & 0xffffffff;
	int port = mlx4_slave_convert_port(dev, slave, vhcr->in_param >> 62);
	enum mlx4_steer_type steer = vhcr->in_modifier;

	if (port < 0)
		return -EINVAL;

	/* Promiscuous unicast is not allowed in mfunc */
	if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)
		return 0;

	if (vhcr->op_modifier)
		return add_promisc_qp(dev, port, steer, qpn);
	else
		return remove_promisc_qp(dev, port, steer, qpn);
}

static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn,
			enum mlx4_steer_type steer, u8 add, u8 port)
{
	return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add,
			MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}

int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port);

	return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);

int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port);

	return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);

int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port);

	return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);

int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port);

	return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);
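/*
 * The bitmap set up here tracks only AMGM (overflow) entries; hashed MGM
 * slots are addressed directly, and allocated AMGM indices are offset by
 * dev->caps.num_mgms (see mlx4_qp_attach_common()).
 */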
int mlx4_init_mcg_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	/* No need for mcg_table when FW manages the mcg table */
	if (dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return 0;
	err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
			       dev->caps.num_amgms - 1, 0, 0);
	if (err)
		return err;

	mutex_init(&priv->mcg_table.mutex);

	return 0;
}

void mlx4_cleanup_mcg_table(struct mlx4_dev *dev)
{
	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
}