/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <dev/mlx4/cq.h>
#include <dev/mlx4/qp.h>
#include <dev/mlx4/srq.h>
#include <linux/slab.h>

#include "mlx4_ib.h"
#include "user.h"

/* Which firmware version adds support for Resize CQ */
#define MLX4_FW_VER_RESIZE_CQ mlx4_fw_ver(2, 5, 0)
#define MLX4_FW_VER_IGNORE_OVERRUN_CQ mlx4_fw_ver(2, 7, 8200)

static void mlx4_ib_cq_comp(struct mlx4_cq *cq)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;
	ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
{
	struct ib_event event;
	struct ib_cq *ibcq;

	if (type != MLX4_EVENT_TYPE_CQ_ERROR) {
		pr_warn("Unexpected event type %d on CQ %06x\n", type, cq->cqn);
		return;
	}

	ibcq = &to_mibcq(cq)->ibcq;
	if (ibcq->event_handler) {
		event.device     = ibcq->device;
		event.event      = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}

static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
{
	return mlx4_buf_offset(&buf->buf, n * buf->entry_size);
}

static void *get_cqe(struct mlx4_ib_cq *cq, int n)
{
	return get_cqe_from_buf(&cq->buf, n);
}

static void *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
{
	struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);
	struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);

	return (!!(tcqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
		!!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}
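/*
 * Note on get_sw_cqe() above: hardware toggles the CQE owner bit each
 * time it wraps around the CQ buffer, so an entry belongs to software
 * exactly when its owner bit matches the "lap" bit of the index
 * (n & (cqe + 1)).  With 64-byte CQEs, only the second 32-byte half of
 * the stride carries the hardware-written fields, hence the cqe + 1
 * adjustment when checking ownership.
 */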
static struct mlx4_cqe *next_cqe_sw(struct mlx4_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}

int mlx4_ib_modify_cq(struct ib_cq *cq,
		      struct ib_cq_attr *cq_attr,
		      int cq_attr_mask)
{
	int err = 0;
	struct mlx4_ib_cq *mcq = to_mcq(cq);
	struct mlx4_ib_dev *dev = to_mdev(cq->device);

	if (cq_attr_mask & IB_CQ_CAP_FLAGS) {
		if (cq_attr->cq_cap_flags & IB_CQ_TIMESTAMP)
			return -ENOTSUPP;

		if (cq_attr->cq_cap_flags & IB_CQ_IGNORE_OVERRUN)
			return -ENOSYS;
	}

	if (!err)
		if (cq_attr_mask & IB_CQ_MODERATION)
			err = mlx4_cq_modify(dev->dev, &mcq->mcq,
					     cq_attr->moderation.cq_count,
					     cq_attr->moderation.cq_period);

	return err;
}

static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)
{
	int err;

	err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
			     PAGE_SIZE * 2, &buf->buf, GFP_KERNEL);
	if (err)
		goto out;

	buf->entry_size = dev->dev->caps.cqe_size;
	err = mlx4_mtt_init(dev->dev, buf->buf.npages, buf->buf.page_shift,
			    &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf, GFP_KERNEL);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	mlx4_buf_free(dev->dev, nent * buf->entry_size, &buf->buf);

out:
	return err;
}

static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
{
	mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);
}

static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *context,
			       struct mlx4_ib_cq_buf *buf, struct ib_umem **umem,
			       u64 buf_addr, int cqe)
{
	int err;
	int cqe_size = dev->dev->caps.cqe_size;
	int shift;
	int n;

	*umem = ib_umem_get(context, buf_addr, cqe * cqe_size,
			    IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(*umem))
		return PTR_ERR(*umem);

	n = ib_umem_page_count(*umem);
	shift = mlx4_ib_umem_calc_optimal_mtt_size(*umem, 0, &n);
	err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt);
	if (err)
		goto err_buf;

	err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);
	if (err)
		goto err_mtt;

	return 0;

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &buf->mtt);

err_buf:
	ib_umem_release(*umem);

	return err;
}

/* we don't support system timestamping */
#define CQ_CREATE_FLAGS_SUPPORTED IB_CQ_TIMESTAMP
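/*
 * CQ creation takes one of two paths: for user CQs both the buffer and
 * the doorbell record live in user memory (pinned via ib_umem_get and
 * mapped with mlx4_ib_db_map_user), while kernel CQs allocate both from
 * the driver and point set_ci_db/arm_db at the two halves of the
 * driver-owned doorbell record.
 */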
struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
				struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_cq *cq;
	struct mlx4_uar *uar;
	int err;
	int entries = attr->cqe;
	int vector = attr->comp_vector;

	if (entries < 1 || entries > dev->dev->caps.max_cqes)
		return ERR_PTR(-EINVAL);

	if (attr->flags & ~CQ_CREATE_FLAGS_SUPPORTED)
		return ERR_PTR(-EINVAL);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	entries = roundup_pow_of_two(entries + 1);
	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;
	cq->create_flags = attr->flags;

	if (context) {
		struct mlx4_ib_create_cq ucmd;

		if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
			err = -EFAULT;
			goto err_cq;
		}

		err = mlx4_ib_get_cq_umem(dev, context, &cq->buf, &cq->umem,
					  ucmd.buf_addr, entries);
		if (err)
			goto err_cq;

		err = mlx4_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
					  &cq->db);
		if (err)
			goto err_mtt;

		uar = &to_mucontext(context)->uar;
	} else {
		err = mlx4_db_alloc(dev->dev, &cq->db, 1, GFP_KERNEL);
		if (err)
			goto err_cq;

		cq->mcq.set_ci_db  = cq->db.db;
		cq->mcq.arm_db     = cq->db.db + 1;
		*cq->mcq.set_ci_db = 0;
		*cq->mcq.arm_db    = 0;

		err = mlx4_ib_alloc_cq_buf(dev, &cq->buf, entries);
		if (err)
			goto err_db;

		uar = &dev->priv_uar;
	}

	if (dev->eq_table)
		vector = dev->eq_table[vector % ibdev->num_comp_vectors];

	err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
			    cq->db.dma, &cq->mcq, vector, 0,
			    !!(cq->create_flags & IB_CQ_TIMESTAMP));
	if (err)
		goto err_dbmap;

	cq->mcq.comp  = mlx4_ib_cq_comp;
	cq->mcq.event = mlx4_ib_cq_event;

	if (context)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) {
			err = -EFAULT;
			goto err_dbmap;
		}

	return &cq->ibcq;

err_dbmap:
	if (context)
		mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db);

err_mtt:
	mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);

	if (context)
		ib_umem_release(cq->umem);
	else
		mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_db:
	if (!context)
		mlx4_db_free(dev->dev, &cq->db);

err_cq:
	kfree(cq);

	return ERR_PTR(err);
}

static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				 int entries)
{
	int err;

	if (cq->resize_buf)
		return -EBUSY;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}

static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
				  int entries, struct ib_udata *udata)
{
	struct mlx4_ib_resize_cq ucmd;
	int err;

	if (cq->resize_umem)
		return -EBUSY;

	if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
		return -EFAULT;

	cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = mlx4_ib_get_cq_umem(dev, cq->umem->context, &cq->resize_buf->buf,
				  &cq->resize_umem, ucmd.buf_addr, entries);
	if (err) {
		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		return err;
	}

	cq->resize_buf->cqe = entries - 1;

	return 0;
}

static int mlx4_ib_get_outstanding_cqes(struct mlx4_ib_cq *cq)
{
	u32 i;

	i = cq->mcq.cons_index;
	while (get_sw_cqe(cq, i))
		++i;

	return i - cq->mcq.cons_index;
}
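/*
 * Copy outstanding CQEs from the old buffer into the resize buffer.
 * Hardware marks the last valid entry in the old buffer with a RESIZE
 * CQE; everything up to that marker is copied, with the owner bit
 * rewritten to match the new buffer's lap.  Coming back around to the
 * start CQE means the marker was never found and the resize cannot
 * complete.
 */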
static int mlx4_ib_cq_resize_copy_cqes(struct mlx4_ib_cq *cq)
{
	struct mlx4_cqe *cqe, *new_cqe;
	int i;
	int cqe_size = cq->buf.entry_size;
	int cqe_inc = cqe_size == 64 ? 1 : 0;
	struct mlx4_cqe *start_cqe;

	i = cq->mcq.cons_index;
	cqe = get_cqe(cq, i & cq->ibcq.cqe);
	start_cqe = cqe;
	cqe += cqe_inc;

	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
		new_cqe = get_cqe_from_buf(&cq->resize_buf->buf,
					   (i + 1) & cq->resize_buf->cqe);
		memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size);
		new_cqe += cqe_inc;

		new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
			(((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
		cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
		if (cqe == start_cqe) {
			pr_warn("resize CQ failed to get resize CQE, CQN 0x%x\n", cq->mcq.cqn);
			return -ENOMEM;
		}
		cqe += cqe_inc;
	}
	++cq->mcq.cons_index;
	return 0;
}

int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_mtt mtt;
	int outst_cqe;
	int err;

	if (dev->dev->caps.fw_ver < MLX4_FW_VER_RESIZE_CQ)
		return -ENOSYS;

	mutex_lock(&cq->resize_mutex);
	if (entries < 1 || entries > dev->dev->caps.max_cqes) {
		err = -EINVAL;
		goto out;
	}

	entries = roundup_pow_of_two(entries + 1);
	if (entries == ibcq->cqe + 1) {
		err = 0;
		goto out;
	}

	if (entries > dev->dev->caps.max_cqes + 1) {
		err = -EINVAL;
		goto out;
	}

	if (ibcq->uobject) {
		err = mlx4_alloc_resize_umem(dev, cq, entries, udata);
		if (err)
			goto out;
	} else {
		/* Can't be smaller than the number of outstanding CQEs */
		outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
		if (entries < outst_cqe + 1) {
			err = 0;
			goto out;
		}

		err = mlx4_alloc_resize_buf(dev, cq, entries);
		if (err)
			goto out;
	}

	mtt = cq->buf.mtt;

	err = mlx4_cq_resize(dev->dev, &cq->mcq, entries, &cq->resize_buf->buf.mtt);
	if (err)
		goto err_buf;

	mlx4_mtt_cleanup(dev->dev, &mtt);
	if (ibcq->uobject) {
		cq->buf      = cq->resize_buf->buf;
		cq->ibcq.cqe = cq->resize_buf->cqe;
		ib_umem_release(cq->umem);
		cq->umem     = cq->resize_umem;

		kfree(cq->resize_buf);
		cq->resize_buf = NULL;
		cq->resize_umem = NULL;
	} else {
		struct mlx4_ib_cq_buf tmp_buf;
		int tmp_cqe = 0;

		spin_lock_irq(&cq->lock);
		if (cq->resize_buf) {
			err = mlx4_ib_cq_resize_copy_cqes(cq);
			tmp_buf = cq->buf;
			tmp_cqe = cq->ibcq.cqe;
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}
		spin_unlock_irq(&cq->lock);

		if (tmp_cqe)
			mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
	}

	goto out;

err_buf:
	mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt);
	if (!ibcq->uobject)
		mlx4_ib_free_cq_buf(dev, &cq->resize_buf->buf,
				    cq->resize_buf->cqe);

	kfree(cq->resize_buf);
	cq->resize_buf = NULL;

	if (cq->resize_umem) {
		ib_umem_release(cq->resize_umem);
		cq->resize_umem = NULL;
	}

out:
	mutex_unlock(&cq->resize_mutex);

	return err;
}

int mlx4_ib_ignore_overrun_cq(struct ib_cq *ibcq)
{
	return -ENOSYS;
}
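/*
 * Teardown releases resources in the reverse order of creation: the
 * hardware CQ goes first so no further completions or events can
 * reference the buffer, then the MTTs, and finally the buffer and
 * doorbell (user-mapped or kernel-allocated, matching the create path).
 */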
int mlx4_ib_destroy_cq(struct ib_cq *cq)
{
	struct mlx4_ib_dev *dev = to_mdev(cq->device);
	struct mlx4_ib_cq *mcq = to_mcq(cq);

	mlx4_cq_free(dev->dev, &mcq->mcq);
	mlx4_mtt_cleanup(dev->dev, &mcq->buf.mtt);

	if (cq->uobject) {
		mlx4_ib_db_unmap_user(to_mucontext(cq->uobject->context), &mcq->db);
		ib_umem_release(mcq->umem);
	} else {
		mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
		mlx4_db_free(dev->dev, &mcq->db);
	}

	kfree(mcq);

	return 0;
}

static void dump_cqe(void *cqe)
{
	__be32 *buf = cqe;

	pr_debug("CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
		 be32_to_cpu(buf[0]), be32_to_cpu(buf[1]), be32_to_cpu(buf[2]),
		 be32_to_cpu(buf[3]), be32_to_cpu(buf[4]), be32_to_cpu(buf[5]),
		 be32_to_cpu(buf[6]), be32_to_cpu(buf[7]));
}

static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
				     struct ib_wc *wc)
{
	if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
		pr_debug("local QP operation err "
			 "(QPN %06x, WQE index %x, vendor syndrome %02x, "
			 "opcode = %02x)\n",
			 be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),
			 cqe->vendor_err_syndrome,
			 cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		dump_cqe(cqe);
	}

	switch (cqe->syndrome) {
	case MLX4_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX4_CQE_SYNDROME_WR_FLUSH_ERR:
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX4_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX4_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX4_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX4_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case MLX4_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_syndrome;
}

static int mlx4_ib_ipoib_csum_ok(__be16 status, __be16 checksum)
{
	return ((status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
				      MLX4_CQE_STATUS_IPV4F |
				      MLX4_CQE_STATUS_IPV4OPT |
				      MLX4_CQE_STATUS_IPV6 |
				      MLX4_CQE_STATUS_IPOK)) ==
		cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
			    MLX4_CQE_STATUS_IPOK)) &&
		(status & cpu_to_be16(MLX4_CQE_STATUS_UDP |
				      MLX4_CQE_STATUS_TCP)) &&
		checksum == cpu_to_be16(0xffff);
}
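/*
 * On multi-function (SR-IOV) devices, completions on proxy special QPs
 * carry a tunnel header ahead of the payload; the real completion
 * metadata (pkey index, source QP, GRH flag, SMAC/VLAN or SLID/SL) is
 * recovered from that header rather than from the CQE itself.
 */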
static int use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc,
			   unsigned tail, struct mlx4_cqe *cqe, int is_eth)
{
	struct mlx4_ib_proxy_sqp_hdr *hdr;

	ib_dma_sync_single_for_cpu(qp->ibqp.device,
				   qp->sqp_proxy_rcv[tail].map,
				   sizeof (struct mlx4_ib_proxy_sqp_hdr),
				   DMA_FROM_DEVICE);
	hdr = (struct mlx4_ib_proxy_sqp_hdr *) (qp->sqp_proxy_rcv[tail].addr);
	wc->pkey_index = be16_to_cpu(hdr->tun.pkey_index);
	wc->src_qp = be32_to_cpu(hdr->tun.flags_src_qp) & 0xFFFFFF;
	wc->wc_flags |= (hdr->tun.g_ml_path & 0x80) ? IB_WC_GRH : 0;
	wc->dlid_path_bits = 0;

	if (is_eth) {
		wc->vlan_id = be16_to_cpu(hdr->tun.sl_vid);
		memcpy(&(wc->smac[0]), (char *)&hdr->tun.mac_31_0, 4);
		memcpy(&(wc->smac[4]), (char *)&hdr->tun.slid_mac_47_32, 2);
	} else {
		wc->slid = be16_to_cpu(hdr->tun.slid_mac_47_32);
		wc->sl = (u8) (be16_to_cpu(hdr->tun.sl_vid) >> 12);
	}

	return 0;
}

static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
			    struct mlx4_ib_qp **cur_qp,
			    struct ib_wc *wc)
{
	struct mlx4_cqe *cqe;
	struct mlx4_qp *mqp;
	struct mlx4_ib_wq *wq;
	struct mlx4_ib_srq *srq;
	struct mlx4_srq *msrq = NULL;
	int is_send;
	int is_error;
	u32 g_mlpath_rqpn;
	u16 wqe_ctr;
	unsigned tail = 0;
	int timestamp_en = !!(cq->create_flags & IB_CQ_TIMESTAMP);

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	if (cq->buf.entry_size == 64)
		cqe++;

	++cq->mcq.cons_index;

	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	is_send  = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
	is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
		MLX4_CQE_OPCODE_ERROR;

	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
		     is_send)) {
		pr_warn("Completion for NOP opcode detected!\n");
		return -EINVAL;
	}

	/* Resize CQ in progress */
	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
		if (cq->resize_buf) {
			struct mlx4_ib_dev *dev = to_mdev(cq->ibcq.device);

			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		}

		goto repoll;
	}

	if (!*cur_qp ||
	    (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
				       be32_to_cpu(cqe->vlan_my_qpn));
		if (unlikely(!mqp)) {
			pr_warn("CQ %06x with entry for unknown QPN %06x\n",
				cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK);
			return -EINVAL;
		}

		*cur_qp = to_mibqp(mqp);
	}

	wc->qp = &(*cur_qp)->ibqp;
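	/*
	 * XRC target QPs have no receive queue of their own; the CQE's
	 * g_mlpath_rqpn field carries the SRQ number instead, so look the
	 * SRQ up here to recover the work request below.
	 */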
	if (wc->qp->qp_type == IB_QPT_XRC_TGT) {
		u32 srq_num;

		g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
		srq_num = g_mlpath_rqpn & 0xffffff;
		/* SRQ is also in the radix tree */
		msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev,
				       srq_num);
		if (unlikely(!msrq)) {
			pr_warn("CQ %06x with entry for unknown SRQN %06x\n",
				cq->mcq.cqn, srq_num);
			return -EINVAL;
		}
	}

	if (is_send) {
		wq = &(*cur_qp)->sq;
		if (!(*cur_qp)->sq_signal_bits) {
			wqe_ctr = be16_to_cpu(cqe->wqe_index);
			wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
		}
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	} else if ((*cur_qp)->ibqp.srq) {
		srq = to_msrq((*cur_qp)->ibqp.srq);
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else if (msrq) {
		srq = to_mibsrq(msrq);
		wqe_ctr = be16_to_cpu(cqe->wqe_index);
		wc->wr_id = srq->wrid[wqe_ctr];
		mlx4_ib_free_srq_wqe(srq, wqe_ctr);
	} else {
		wq = &(*cur_qp)->rq;
		tail = wq->tail & (wq->wqe_cnt - 1);
		wc->wr_id = wq->wrid[tail];
		++wq->tail;
	}

	if (unlikely(is_error)) {
		mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
		return 0;
	}

	wc->status = IB_WC_SUCCESS;

	if (is_send) {
		wc->wc_flags = 0;
		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_OPCODE_RDMA_WRITE_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
			/* fall through */
		case MLX4_OPCODE_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case MLX4_OPCODE_SEND_IMM:
			wc->wc_flags |= IB_WC_WITH_IMM;
			/* fall through */
		case MLX4_OPCODE_SEND:
		case MLX4_OPCODE_SEND_INVAL:
			wc->opcode = IB_WC_SEND;
			break;
		case MLX4_OPCODE_RDMA_READ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = be32_to_cpu(cqe->byte_cnt);
			break;
		case MLX4_OPCODE_ATOMIC_CS:
			wc->opcode = IB_WC_COMP_SWAP;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_ATOMIC_FA:
			wc->opcode = IB_WC_FETCH_ADD;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_CS:
			wc->opcode = IB_WC_MASKED_COMP_SWAP;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_MASKED_ATOMIC_FA:
			wc->opcode = IB_WC_MASKED_FETCH_ADD;
			wc->byte_len = 8;
			break;
		case MLX4_OPCODE_BIND_MW:
			wc->opcode = IB_WC_BIND_MW;
			break;
		case MLX4_OPCODE_LSO:
			wc->opcode = IB_WC_LSO;
			break;
		case MLX4_OPCODE_FMR:
			wc->opcode = IB_WC_FAST_REG_MR;
			break;
		case MLX4_OPCODE_LOCAL_INVAL:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		}
	} else {
		wc->byte_len = be32_to_cpu(cqe->byte_cnt);

		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
		case MLX4_RECV_OPCODE_RDMA_WRITE_IMM:
			wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		case MLX4_RECV_OPCODE_SEND_INVAL:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_INVALIDATE;
			wc->ex.invalidate_rkey = be32_to_cpu(cqe->immed_rss_invalid);
			break;
		case MLX4_RECV_OPCODE_SEND:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = 0;
			break;
		case MLX4_RECV_OPCODE_SEND_IMM:
			wc->opcode = IB_WC_RECV;
			wc->wc_flags = IB_WC_WITH_IMM;
			wc->ex.imm_data = cqe->immed_rss_invalid;
			break;
		}
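		/*
		 * Proxy special QP traffic is tunneled; hand off to
		 * use_tunnel_data() to rebuild the completion from the
		 * tunnel header instead of the CQE fields below.
		 */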
		if (mlx4_is_mfunc(to_mdev(cq->ibcq.device)->dev)) {
			if ((*cur_qp)->mlx4_ib_qp_type &
			    (MLX4_IB_QPT_PROXY_SMI_OWNER |
			     MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_GSI))
				return use_tunnel_data(*cur_qp, cq, wc, tail, cqe,
						       rdma_port_get_link_layer(wc->qp->device,
										(*cur_qp)->port) ==
						       IB_LINK_LAYER_ETHERNET);
		}

		if (timestamp_en) {
			const struct mlx4_ts_cqe *ts_cqe =
				(const struct mlx4_ts_cqe *)cqe;
			/*
			 * Currently, only CQ_CREATE_WITH_TIMESTAMPING_RAW is
			 * supported.  CQ_CREATE_WITH_TIMESTAMPING_SYS isn't
			 * supported.
			 */
			if (cq->create_flags & IB_CQ_TIMESTAMP_TO_SYS_TIME) {
				wc->ts.timestamp = 0;
			} else {
				wc->ts.timestamp =
					((u64)(be32_to_cpu(ts_cqe->timestamp_hi)
					       + !ts_cqe->timestamp_lo) << 16)
					| be16_to_cpu(ts_cqe->timestamp_lo);
				wc->wc_flags |= IB_WC_WITH_TIMESTAMP;
			}
		} else {
			wc->wc_flags |= IB_WC_WITH_SLID;
			wc->slid = be16_to_cpu(cqe->rlid);
		}
		g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
		wc->src_qp = g_mlpath_rqpn & 0xffffff;
		wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
		wc->wc_flags |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
		wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
		wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status,
						      cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
		if (!timestamp_en) {
			if (rdma_port_get_link_layer(wc->qp->device,
						     (*cur_qp)->port) ==
			    IB_LINK_LAYER_ETHERNET)
				wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
			else
				wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
			wc->wc_flags |= IB_WC_WITH_SL;
		}
		if ((be32_to_cpu(cqe->vlan_my_qpn) &
		     MLX4_CQE_CVLAN_PRESENT_MASK) && !timestamp_en) {
			wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
				MLX4_CQE_VID_MASK;
			wc->wc_flags |= IB_WC_WITH_VLAN;
		} else {
			wc->vlan_id = 0xffff;
		}
		if (!timestamp_en) {
			memcpy(wc->smac, cqe->smac, 6);
			wc->wc_flags |= IB_WC_WITH_SMAC;
		}
	}

	return 0;
}

int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx4_ib_cq *cq = to_mcq(ibcq);
	struct mlx4_ib_qp *cur_qp = NULL;
	unsigned long flags;
	int npolled;
	int err = 0;

	spin_lock_irqsave(&cq->lock, flags);

	for (npolled = 0; npolled < num_entries; ++npolled) {
		err = mlx4_ib_poll_one(cq, &cur_qp, wc + npolled);
		if (err)
			break;
	}

	mlx4_cq_set_ci(&cq->mcq);

	spin_unlock_irqrestore(&cq->lock, flags);

	if (err == 0 || err == -EAGAIN)
		return npolled;
	else
		return err;
}

int mlx4_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	mlx4_cq_arm(&to_mcq(ibcq)->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX4_CQ_DB_REQ_NOT_SOL : MLX4_CQ_DB_REQ_NOT,
		    to_mdev(ibcq->device)->priv_uar.map,
		    MLX4_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->uar_lock));

	return 0;
}
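/*
 * Remove all CQEs belonging to the given QPN (returning their SRQ WQEs,
 * if any) by compacting the remaining entries over them.  Called while
 * the QP is being torn down; callers must hold the CQ lock, or use
 * mlx4_ib_cq_clean() below to take it.
 */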
void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	u32 prod_index;
	int nfreed = 0;
	struct mlx4_cqe *cqe, *dest;
	u8 owner_bit;
	int cqe_inc = cq->buf.entry_size == 64 ? 1 : 0;

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); ++prod_index)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		cqe += cqe_inc;

		if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
				mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			dest += cqe_inc;

			owner_bit = dest->owner_sr_opcode & MLX4_CQE_OWNER_MASK;
			memcpy(dest, cqe, sizeof *cqe);
			dest->owner_sr_opcode = owner_bit |
				(dest->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/*
		 * Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx4_cq_set_ci(&cq->mcq);
	}
}

void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
{
	spin_lock_irq(&cq->lock);
	__mlx4_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}