1 /* 2 * Copyright (c) 2005, 2006, 2007, 2008, 2014 Mellanox Technologies. All rights reserved. 3 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. 4 * 5 * This software is available to you under a choice of one of two 6 * licenses. You may choose to be licensed under the terms of the GNU 7 * General Public License (GPL) Version 2, available from the file 8 * COPYING in the main directory of this source tree, or the 9 * OpenIB.org BSD license below: 10 * 11 * Redistribution and use in source and binary forms, with or 12 * without modification, are permitted provided that the following 13 * conditions are met: 14 * 15 * - Redistributions of source code must retain the above 16 * copyright notice, this list of conditions and the following 17 * disclaimer. 18 * 19 * - Redistributions in binary form must reproduce the above 20 * copyright notice, this list of conditions and the following 21 * disclaimer in the documentation and/or other materials 22 * provided with the distribution. 23 * 24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 31 * SOFTWARE. 32 */ 33 34 #include <linux/interrupt.h> 35 #include <linux/slab.h> 36 #include <linux/module.h> 37 #include <linux/mm.h> 38 #include <linux/dma-mapping.h> 39 #include <linux/hardirq.h> 40 41 #include <dev/mlx4/cmd.h> 42 43 #include "mlx4.h" 44 #include "fw.h" 45 46 enum { 47 MLX4_IRQNAME_SIZE = 32 48 }; 49 50 enum { 51 MLX4_NUM_ASYNC_EQE = 0x100, 52 MLX4_NUM_SPARE_EQE = 0x80, 53 MLX4_EQ_ENTRY_SIZE = 0x20 54 }; 55 56 #define MLX4_EQ_STATUS_OK ( 0 << 28) 57 #define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28) 58 #define MLX4_EQ_OWNER_SW ( 0 << 24) 59 #define MLX4_EQ_OWNER_HW ( 1 << 24) 60 #define MLX4_EQ_FLAG_EC ( 1 << 18) 61 #define MLX4_EQ_FLAG_OI ( 1 << 17) 62 #define MLX4_EQ_STATE_ARMED ( 9 << 8) 63 #define MLX4_EQ_STATE_FIRED (10 << 8) 64 #define MLX4_EQ_STATE_ALWAYS_ARMED (11 << 8) 65 66 #define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG) | \ 67 (1ull << MLX4_EVENT_TYPE_COMM_EST) | \ 68 (1ull << MLX4_EVENT_TYPE_SQ_DRAINED) | \ 69 (1ull << MLX4_EVENT_TYPE_CQ_ERROR) | \ 70 (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR) | \ 71 (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR) | \ 72 (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED) | \ 73 (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \ 74 (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR) | \ 75 (1ull << MLX4_EVENT_TYPE_PORT_CHANGE) | \ 76 (1ull << MLX4_EVENT_TYPE_ECC_DETECT) | \ 77 (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) | \ 78 (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \ 79 (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \ 80 (1ull << MLX4_EVENT_TYPE_CMD) | \ 81 (1ull << MLX4_EVENT_TYPE_OP_REQUIRED) | \ 82 (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL) | \ 83 (1ull << MLX4_EVENT_TYPE_FLR_EVENT) | \ 84 (1ull << MLX4_EVENT_TYPE_FATAL_WARNING)) 85 86 static u64 get_async_ev_mask(struct mlx4_dev *dev) 87 { 88 u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK; 89 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV) 90 async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT); 91 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT) 92 async_ev_mask |= (1ull << MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT); 93 94 return async_ev_mask; 95 } 96 97 static void eq_set_ci(struct mlx4_eq *eq, int req_not) 98 { 99 __raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) | 100 req_not << 31), 101 eq->doorbell); 102 /* We still want ordering, just not swabbing, so add a barrier */ 103 mb(); 104 } 105 106 static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry, u8 eqe_factor, 107 u8 eqe_size) 108 { 109 /* (entry & (eq->nent - 1)) gives us a cyclic array */ 110 unsigned long offset = (entry & (eq->nent - 1)) * eqe_size; 111 /* CX3 is capable of extending the EQE from 32 to 64 bytes with 112 * strides of 64B,128B and 256B. 113 * When 64B EQE is used, the first (in the lower addresses) 114 * 32 bytes in the 64 byte EQE are reserved and the next 32 bytes 115 * contain the legacy EQE information. 116 * In all other cases, the first 32B contains the legacy EQE info. 117 */ 118 return eq->page_list[offset / PAGE_SIZE].buf + (offset + (eqe_factor ? MLX4_EQ_ENTRY_SIZE : 0)) % PAGE_SIZE; 119 } 120 121 static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq, u8 eqe_factor, u8 size) 122 { 123 struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index, eqe_factor, size); 124 return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe; 125 } 126 127 static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq) 128 { 129 struct mlx4_eqe *eqe = 130 &slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)]; 131 return (!!(eqe->owner & 0x80) ^ 132 !!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ? 133 eqe : NULL; 134 } 135 136 void mlx4_gen_slave_eqe(struct work_struct *work) 137 { 138 struct mlx4_mfunc_master_ctx *master = 139 container_of(work, struct mlx4_mfunc_master_ctx, 140 slave_event_work); 141 struct mlx4_mfunc *mfunc = 142 container_of(master, struct mlx4_mfunc, master); 143 struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc); 144 struct mlx4_dev *dev = &priv->dev; 145 struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq; 146 struct mlx4_eqe *eqe; 147 u8 slave; 148 int i, phys_port, slave_port; 149 150 for (eqe = next_slave_event_eqe(slave_eq); eqe; 151 eqe = next_slave_event_eqe(slave_eq)) { 152 slave = eqe->slave_id; 153 154 if (eqe->type == MLX4_EVENT_TYPE_PORT_CHANGE && 155 eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN && 156 mlx4_is_bonded(dev)) { 157 struct mlx4_port_cap port_cap; 158 159 if (!mlx4_QUERY_PORT(dev, 1, &port_cap) && port_cap.link_state) 160 goto consume; 161 162 if (!mlx4_QUERY_PORT(dev, 2, &port_cap) && port_cap.link_state) 163 goto consume; 164 } 165 /* All active slaves need to receive the event */ 166 if (slave == ALL_SLAVES) { 167 for (i = 0; i <= dev->persist->num_vfs; i++) { 168 phys_port = 0; 169 if (eqe->type == MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT && 170 eqe->subtype == MLX4_DEV_PMC_SUBTYPE_PORT_INFO) { 171 phys_port = eqe->event.port_mgmt_change.port; 172 slave_port = mlx4_phys_to_slave_port(dev, i, phys_port); 173 if (slave_port < 0) /* VF doesn't have this port */ 174 continue; 175 eqe->event.port_mgmt_change.port = slave_port; 176 } 177 if (mlx4_GEN_EQE(dev, i, eqe)) 178 mlx4_warn(dev, "Failed to generate event for slave %d\n", 179 i); 180 if (phys_port) 181 eqe->event.port_mgmt_change.port = phys_port; 182 } 183 } else { 184 if (mlx4_GEN_EQE(dev, slave, eqe)) 185 mlx4_warn(dev, "Failed to generate event for slave %d\n", 186 slave); 187 } 188 consume: 189 ++slave_eq->cons; 190 } 191 } 192 193 194 static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe) 195 { 196 struct mlx4_priv *priv = mlx4_priv(dev); 197 struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq; 198 struct mlx4_eqe *s_eqe; 199 unsigned long flags; 200 201 spin_lock_irqsave(&slave_eq->event_lock, flags); 202 s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)]; 203 if ((!!(s_eqe->owner & 0x80)) ^ 204 (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) { 205 mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. No free EQE on slave events queue\n", 206 slave); 207 spin_unlock_irqrestore(&slave_eq->event_lock, flags); 208 return; 209 } 210 211 memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1); 212 s_eqe->slave_id = slave; 213 /* ensure all information is written before setting the ownersip bit */ 214 wmb(); 215 s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80; 216 ++slave_eq->prod; 217 218 queue_work(priv->mfunc.master.comm_wq, 219 &priv->mfunc.master.slave_event_work); 220 spin_unlock_irqrestore(&slave_eq->event_lock, flags); 221 } 222 223 static void mlx4_slave_event(struct mlx4_dev *dev, int slave, 224 struct mlx4_eqe *eqe) 225 { 226 struct mlx4_priv *priv = mlx4_priv(dev); 227 228 if (slave < 0 || slave > dev->persist->num_vfs || 229 slave == dev->caps.function || 230 !priv->mfunc.master.slave_state[slave].active) 231 return; 232 233 slave_event(dev, slave, eqe); 234 } 235 236 static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec) 237 { 238 int hint_err; 239 struct mlx4_dev *dev = &priv->dev; 240 struct mlx4_eq *eq = &priv->eq_table.eq[vec]; 241 242 hint_err = bind_irq_to_cpu(eq->irq, eq->affinity_cpu_id); 243 244 if (hint_err) 245 mlx4_warn(dev, "bind_irq_to_cpu failed, err %d\n", hint_err); 246 } 247 248 int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port) 249 { 250 struct mlx4_eqe eqe; 251 252 struct mlx4_priv *priv = mlx4_priv(dev); 253 struct mlx4_slave_state *s_slave = &priv->mfunc.master.slave_state[slave]; 254 255 if (!s_slave->active) 256 return 0; 257 258 memset(&eqe, 0, sizeof eqe); 259 260 eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT; 261 eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE; 262 eqe.event.port_mgmt_change.port = mlx4_phys_to_slave_port(dev, slave, port); 263 264 return mlx4_GEN_EQE(dev, slave, &eqe); 265 } 266 EXPORT_SYMBOL(mlx4_gen_pkey_eqe); 267 268 int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port) 269 { 270 struct mlx4_eqe eqe; 271 272 /*don't send if we don't have the that slave */ 273 if (dev->persist->num_vfs < slave) 274 return 0; 275 memset(&eqe, 0, sizeof eqe); 276 277 eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT; 278 eqe.subtype = MLX4_DEV_PMC_SUBTYPE_GUID_INFO; 279 eqe.event.port_mgmt_change.port = mlx4_phys_to_slave_port(dev, slave, port); 280 281 return mlx4_GEN_EQE(dev, slave, &eqe); 282 } 283 EXPORT_SYMBOL(mlx4_gen_guid_change_eqe); 284 285 int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port, 286 u8 port_subtype_change) 287 { 288 struct mlx4_eqe eqe; 289 u8 slave_port = mlx4_phys_to_slave_port(dev, slave, port); 290 291 /*don't send if we don't have the that slave */ 292 if (dev->persist->num_vfs < slave) 293 return 0; 294 memset(&eqe, 0, sizeof eqe); 295 296 eqe.type = MLX4_EVENT_TYPE_PORT_CHANGE; 297 eqe.subtype = port_subtype_change; 298 eqe.event.port_change.port = cpu_to_be32(slave_port << 28); 299 300 mlx4_dbg(dev, "%s: sending: %d to slave: %d on port: %d\n", __func__, 301 port_subtype_change, slave, port); 302 return mlx4_GEN_EQE(dev, slave, &eqe); 303 } 304 EXPORT_SYMBOL(mlx4_gen_port_state_change_eqe); 305 306 enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave, u8 port) 307 { 308 struct mlx4_priv *priv = mlx4_priv(dev); 309 struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state; 310 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); 311 312 if (slave >= dev->num_slaves || port > dev->caps.num_ports || 313 port <= 0 || !test_bit(port - 1, actv_ports.ports)) { 314 pr_err("%s: Error: asking for slave:%d, port:%d\n", 315 __func__, slave, port); 316 return SLAVE_PORT_DOWN; 317 } 318 return s_state[slave].port_state[port]; 319 } 320 EXPORT_SYMBOL(mlx4_get_slave_port_state); 321 322 static int mlx4_set_slave_port_state(struct mlx4_dev *dev, int slave, u8 port, 323 enum slave_port_state state) 324 { 325 struct mlx4_priv *priv = mlx4_priv(dev); 326 struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state; 327 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); 328 329 if (slave >= dev->num_slaves || port > dev->caps.num_ports || 330 port <= 0 || !test_bit(port - 1, actv_ports.ports)) { 331 pr_err("%s: Error: asking for slave:%d, port:%d\n", 332 __func__, slave, port); 333 return -1; 334 } 335 s_state[slave].port_state[port] = state; 336 337 return 0; 338 } 339 340 static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event) 341 { 342 int i; 343 enum slave_port_gen_event gen_event; 344 struct mlx4_slaves_pport slaves_pport = mlx4_phys_to_slaves_pport(dev, 345 port); 346 347 for (i = 0; i < dev->persist->num_vfs + 1; i++) 348 if (test_bit(i, slaves_pport.slaves)) 349 set_and_calc_slave_port_state(dev, i, port, 350 event, &gen_event); 351 } 352 /************************************************************************** 353 The function get as input the new event to that port, 354 and according to the prev state change the slave's port state. 355 The events are: 356 MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN, 357 MLX4_PORT_STATE_DEV_EVENT_PORT_UP 358 MLX4_PORT_STATE_IB_EVENT_GID_VALID 359 MLX4_PORT_STATE_IB_EVENT_GID_INVALID 360 ***************************************************************************/ 361 int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave, 362 u8 port, int event, 363 enum slave_port_gen_event *gen_event) 364 { 365 struct mlx4_priv *priv = mlx4_priv(dev); 366 struct mlx4_slave_state *ctx = NULL; 367 unsigned long flags; 368 int ret = -1; 369 struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave); 370 enum slave_port_state cur_state = 371 mlx4_get_slave_port_state(dev, slave, port); 372 373 *gen_event = SLAVE_PORT_GEN_EVENT_NONE; 374 375 if (slave >= dev->num_slaves || port > dev->caps.num_ports || 376 port <= 0 || !test_bit(port - 1, actv_ports.ports)) { 377 pr_err("%s: Error: asking for slave:%d, port:%d\n", 378 __func__, slave, port); 379 return ret; 380 } 381 382 ctx = &priv->mfunc.master.slave_state[slave]; 383 spin_lock_irqsave(&ctx->lock, flags); 384 385 switch (cur_state) { 386 case SLAVE_PORT_DOWN: 387 if (MLX4_PORT_STATE_DEV_EVENT_PORT_UP == event) 388 mlx4_set_slave_port_state(dev, slave, port, 389 SLAVE_PENDING_UP); 390 break; 391 case SLAVE_PENDING_UP: 392 if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event) 393 mlx4_set_slave_port_state(dev, slave, port, 394 SLAVE_PORT_DOWN); 395 else if (MLX4_PORT_STATE_IB_PORT_STATE_EVENT_GID_VALID == event) { 396 mlx4_set_slave_port_state(dev, slave, port, 397 SLAVE_PORT_UP); 398 *gen_event = SLAVE_PORT_GEN_EVENT_UP; 399 } 400 break; 401 case SLAVE_PORT_UP: 402 if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event) { 403 mlx4_set_slave_port_state(dev, slave, port, 404 SLAVE_PORT_DOWN); 405 *gen_event = SLAVE_PORT_GEN_EVENT_DOWN; 406 } else if (MLX4_PORT_STATE_IB_EVENT_GID_INVALID == 407 event) { 408 mlx4_set_slave_port_state(dev, slave, port, 409 SLAVE_PENDING_UP); 410 *gen_event = SLAVE_PORT_GEN_EVENT_DOWN; 411 } 412 break; 413 default: 414 pr_err("%s: BUG!!! UNKNOWN state: slave:%d, port:%d\n", 415 __func__, slave, port); 416 goto out; 417 } 418 ret = mlx4_get_slave_port_state(dev, slave, port); 419 420 out: 421 spin_unlock_irqrestore(&ctx->lock, flags); 422 return ret; 423 } 424 425 EXPORT_SYMBOL(set_and_calc_slave_port_state); 426 427 int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr) 428 { 429 struct mlx4_eqe eqe; 430 431 memset(&eqe, 0, sizeof eqe); 432 433 eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT; 434 eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PORT_INFO; 435 eqe.event.port_mgmt_change.port = port; 436 eqe.event.port_mgmt_change.params.port_info.changed_attr = 437 cpu_to_be32((u32) attr); 438 439 slave_event(dev, ALL_SLAVES, &eqe); 440 return 0; 441 } 442 EXPORT_SYMBOL(mlx4_gen_slaves_port_mgt_ev); 443 444 void mlx4_master_handle_slave_flr(struct work_struct *work) 445 { 446 struct mlx4_mfunc_master_ctx *master = 447 container_of(work, struct mlx4_mfunc_master_ctx, 448 slave_flr_event_work); 449 struct mlx4_mfunc *mfunc = 450 container_of(master, struct mlx4_mfunc, master); 451 struct mlx4_priv *priv = 452 container_of(mfunc, struct mlx4_priv, mfunc); 453 struct mlx4_dev *dev = &priv->dev; 454 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state; 455 int i; 456 int err; 457 unsigned long flags; 458 459 mlx4_dbg(dev, "mlx4_handle_slave_flr\n"); 460 461 for (i = 0 ; i < dev->num_slaves; i++) { 462 463 if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) { 464 mlx4_dbg(dev, "mlx4_handle_slave_flr: clean slave: %d\n", 465 i); 466 /* In case of 'Reset flow' FLR can be generated for 467 * a slave before mlx4_load_one is done. 468 * make sure interface is up before trying to delete 469 * slave resources which weren't allocated yet. 470 */ 471 if (dev->persist->interface_state & 472 MLX4_INTERFACE_STATE_UP) 473 mlx4_delete_all_resources_for_slave(dev, i); 474 /*return the slave to running mode*/ 475 spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags); 476 slave_state[i].last_cmd = MLX4_COMM_CMD_RESET; 477 slave_state[i].is_slave_going_down = 0; 478 spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags); 479 /*notify the FW:*/ 480 err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE, 481 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 482 if (err) 483 mlx4_warn(dev, "Failed to notify FW on FLR done (slave:%d)\n", 484 i); 485 } 486 } 487 } 488 489 static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) 490 { 491 struct mlx4_priv *priv = mlx4_priv(dev); 492 struct mlx4_eqe *eqe; 493 int cqn = -1; 494 int eqes_found = 0; 495 int set_ci = 0; 496 int port; 497 int slave = 0; 498 int ret; 499 u32 flr_slave; 500 u8 update_slave_state; 501 int i; 502 enum slave_port_gen_event gen_event; 503 unsigned long flags; 504 struct mlx4_vport_state *s_info; 505 int eqe_size = dev->caps.eqe_size; 506 507 while ((eqe = next_eqe_sw(eq, dev->caps.eqe_factor, eqe_size))) { 508 /* 509 * Make sure we read EQ entry contents after we've 510 * checked the ownership bit. 511 */ 512 rmb(); 513 514 switch (eqe->type) { 515 case MLX4_EVENT_TYPE_COMP: 516 cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff; 517 mlx4_cq_completion(dev, cqn); 518 break; 519 520 case MLX4_EVENT_TYPE_PATH_MIG: 521 case MLX4_EVENT_TYPE_COMM_EST: 522 case MLX4_EVENT_TYPE_SQ_DRAINED: 523 case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE: 524 case MLX4_EVENT_TYPE_WQ_CATAS_ERROR: 525 case MLX4_EVENT_TYPE_PATH_MIG_FAILED: 526 case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR: 527 case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR: 528 mlx4_dbg(dev, "event %d arrived\n", eqe->type); 529 if (mlx4_is_master(dev)) { 530 /* forward only to slave owning the QP */ 531 ret = mlx4_get_slave_from_resource_id(dev, 532 RES_QP, 533 be32_to_cpu(eqe->event.qp.qpn) 534 & 0xffffff, &slave); 535 if (ret && ret != -ENOENT) { 536 mlx4_dbg(dev, "QP event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n", 537 eqe->type, eqe->subtype, 538 eq->eqn, eq->cons_index, ret); 539 break; 540 } 541 542 if (!ret && slave != dev->caps.function) { 543 mlx4_slave_event(dev, slave, eqe); 544 break; 545 } 546 547 } 548 mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 549 0xffffff, eqe->type); 550 break; 551 552 case MLX4_EVENT_TYPE_SRQ_LIMIT: 553 mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n", 554 __func__); 555 case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR: 556 if (mlx4_is_master(dev)) { 557 /* forward only to slave owning the SRQ */ 558 ret = mlx4_get_slave_from_resource_id(dev, 559 RES_SRQ, 560 be32_to_cpu(eqe->event.srq.srqn) 561 & 0xffffff, 562 &slave); 563 if (ret && ret != -ENOENT) { 564 mlx4_warn(dev, "SRQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n", 565 eqe->type, eqe->subtype, 566 eq->eqn, eq->cons_index, ret); 567 break; 568 } 569 mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n", 570 __func__, slave, 571 be32_to_cpu(eqe->event.srq.srqn), 572 eqe->type, eqe->subtype); 573 574 if (!ret && slave != dev->caps.function) { 575 mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n", 576 __func__, eqe->type, 577 eqe->subtype, slave); 578 mlx4_slave_event(dev, slave, eqe); 579 break; 580 } 581 } 582 mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 583 0xffffff, eqe->type); 584 break; 585 586 case MLX4_EVENT_TYPE_CMD: 587 mlx4_cmd_event(dev, 588 be16_to_cpu(eqe->event.cmd.token), 589 eqe->event.cmd.status, 590 be64_to_cpu(eqe->event.cmd.out_param)); 591 break; 592 593 case MLX4_EVENT_TYPE_PORT_CHANGE: { 594 struct mlx4_slaves_pport slaves_port; 595 port = be32_to_cpu(eqe->event.port_change.port) >> 28; 596 slaves_port = mlx4_phys_to_slaves_pport(dev, port); 597 if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) { 598 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN, 599 port); 600 mlx4_priv(dev)->sense.do_sense_port[port] = 1; 601 if (!mlx4_is_master(dev)) 602 break; 603 for (i = 0; i < dev->persist->num_vfs + 1; 604 i++) { 605 int reported_port = mlx4_is_bonded(dev) ? 1 : mlx4_phys_to_slave_port(dev, i, port); 606 607 if (!test_bit(i, slaves_port.slaves) && !mlx4_is_bonded(dev)) 608 continue; 609 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) { 610 if (i == mlx4_master_func_num(dev)) 611 continue; 612 mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n", 613 __func__, i, port); 614 s_info = &priv->mfunc.master.vf_oper[i].vport[port].state; 615 if (0 /*IFLA_VF_LINK_STATE_AUTO == s_info->link_state*/) { 616 eqe->event.port_change.port = 617 cpu_to_be32( 618 (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF) 619 | (reported_port << 28)); 620 mlx4_slave_event(dev, i, eqe); 621 } 622 } else { /* IB port */ 623 set_and_calc_slave_port_state(dev, i, port, 624 MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN, 625 &gen_event); 626 /*we can be in pending state, then do not send port_down event*/ 627 if (SLAVE_PORT_GEN_EVENT_DOWN == gen_event) { 628 if (i == mlx4_master_func_num(dev)) 629 continue; 630 eqe->event.port_change.port = 631 cpu_to_be32( 632 (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF) 633 | (mlx4_phys_to_slave_port(dev, i, port) << 28)); 634 mlx4_slave_event(dev, i, eqe); 635 } 636 } 637 } 638 } else { 639 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP, port); 640 641 mlx4_priv(dev)->sense.do_sense_port[port] = 0; 642 643 if (!mlx4_is_master(dev)) 644 break; 645 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) 646 for (i = 0; 647 i < dev->persist->num_vfs + 1; 648 i++) { 649 int reported_port = mlx4_is_bonded(dev) ? 1 : mlx4_phys_to_slave_port(dev, i, port); 650 651 if (!test_bit(i, slaves_port.slaves) && !mlx4_is_bonded(dev)) 652 continue; 653 if (i == mlx4_master_func_num(dev)) 654 continue; 655 s_info = &priv->mfunc.master.vf_oper[i].vport[port].state; 656 if (0 /*IFLA_VF_LINK_STATE_AUTO == s_info->link_state*/) { 657 eqe->event.port_change.port = 658 cpu_to_be32( 659 (be32_to_cpu(eqe->event.port_change.port) & 0xFFFFFFF) 660 | (reported_port << 28)); 661 mlx4_slave_event(dev, i, eqe); 662 } 663 } 664 else /* IB port */ 665 /* port-up event will be sent to a slave when the 666 * slave's alias-guid is set. This is done in alias_GUID.c 667 */ 668 set_all_slave_state(dev, port, MLX4_DEV_EVENT_PORT_UP); 669 } 670 break; 671 } 672 673 case MLX4_EVENT_TYPE_CQ_ERROR: 674 mlx4_warn(dev, "CQ %s on CQN %06x\n", 675 eqe->event.cq_err.syndrome == 1 ? 676 "overrun" : "access violation", 677 be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff); 678 if (mlx4_is_master(dev)) { 679 ret = mlx4_get_slave_from_resource_id(dev, 680 RES_CQ, 681 be32_to_cpu(eqe->event.cq_err.cqn) 682 & 0xffffff, &slave); 683 if (ret && ret != -ENOENT) { 684 mlx4_dbg(dev, "CQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n", 685 eqe->type, eqe->subtype, 686 eq->eqn, eq->cons_index, ret); 687 break; 688 } 689 690 if (!ret && slave != dev->caps.function) { 691 mlx4_slave_event(dev, slave, eqe); 692 break; 693 } 694 } 695 mlx4_cq_event(dev, 696 be32_to_cpu(eqe->event.cq_err.cqn) 697 & 0xffffff, 698 eqe->type); 699 break; 700 701 case MLX4_EVENT_TYPE_EQ_OVERFLOW: 702 mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn); 703 break; 704 705 case MLX4_EVENT_TYPE_OP_REQUIRED: 706 atomic_inc(&priv->opreq_count); 707 /* FW commands can't be executed from interrupt context 708 * working in deferred task 709 */ 710 queue_work(mlx4_wq, &priv->opreq_task); 711 break; 712 713 case MLX4_EVENT_TYPE_COMM_CHANNEL: 714 if (!mlx4_is_master(dev)) { 715 mlx4_warn(dev, "Received comm channel event for non master device\n"); 716 break; 717 } 718 memcpy(&priv->mfunc.master.comm_arm_bit_vector, 719 eqe->event.comm_channel_arm.bit_vec, 720 sizeof eqe->event.comm_channel_arm.bit_vec); 721 queue_work(priv->mfunc.master.comm_wq, 722 &priv->mfunc.master.comm_work); 723 break; 724 725 case MLX4_EVENT_TYPE_FLR_EVENT: 726 flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id); 727 if (!mlx4_is_master(dev)) { 728 mlx4_warn(dev, "Non-master function received FLR event\n"); 729 break; 730 } 731 732 mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave); 733 734 if (flr_slave >= dev->num_slaves) { 735 mlx4_warn(dev, 736 "Got FLR for unknown function: %d\n", 737 flr_slave); 738 update_slave_state = 0; 739 } else 740 update_slave_state = 1; 741 742 spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags); 743 if (update_slave_state) { 744 priv->mfunc.master.slave_state[flr_slave].active = false; 745 priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR; 746 priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1; 747 } 748 spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags); 749 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, 750 flr_slave); 751 queue_work(priv->mfunc.master.comm_wq, 752 &priv->mfunc.master.slave_flr_event_work); 753 break; 754 755 case MLX4_EVENT_TYPE_FATAL_WARNING: 756 if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) { 757 if (mlx4_is_master(dev)) 758 for (i = 0; i < dev->num_slaves; i++) { 759 mlx4_dbg(dev, "%s: Sending MLX4_FATAL_WARNING_SUBTYPE_WARMING to slave: %d\n", 760 __func__, i); 761 if (i == dev->caps.function) 762 continue; 763 mlx4_slave_event(dev, i, eqe); 764 } 765 mlx4_err(dev, "Temperature Threshold was reached! Threshold: %d celsius degrees; Current Temperature: %d\n", 766 be16_to_cpu(eqe->event.warming.warning_threshold), 767 be16_to_cpu(eqe->event.warming.current_temperature)); 768 } else 769 mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), subtype %02x on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n", 770 eqe->type, eqe->subtype, eq->eqn, 771 eq->cons_index, eqe->owner, eq->nent, 772 eqe->slave_id, 773 !!(eqe->owner & 0x80) ^ 774 !!(eq->cons_index & eq->nent) ? "HW" : "SW"); 775 776 break; 777 778 case MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT: 779 mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_MGMT_CHANGE, 780 (unsigned long) eqe); 781 break; 782 783 case MLX4_EVENT_TYPE_RECOVERABLE_ERROR_EVENT: 784 switch (eqe->subtype) { 785 case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_BAD_CABLE: 786 mlx4_warn(dev, "Bad cable detected on port %u\n", 787 eqe->event.bad_cable.port); 788 break; 789 case MLX4_RECOVERABLE_ERROR_EVENT_SUBTYPE_UNSUPPORTED_CABLE: 790 mlx4_warn(dev, "Unsupported cable detected\n"); 791 break; 792 default: 793 mlx4_dbg(dev, 794 "Unhandled recoverable error event detected: %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, ownership=%s\n", 795 eqe->type, eqe->subtype, eq->eqn, 796 eq->cons_index, eqe->owner, eq->nent, 797 !!(eqe->owner & 0x80) ^ 798 !!(eq->cons_index & eq->nent) ? "HW" : "SW"); 799 break; 800 } 801 break; 802 803 case MLX4_EVENT_TYPE_EEC_CATAS_ERROR: 804 case MLX4_EVENT_TYPE_ECC_DETECT: 805 default: 806 mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n", 807 eqe->type, eqe->subtype, eq->eqn, 808 eq->cons_index, eqe->owner, eq->nent, 809 eqe->slave_id, 810 !!(eqe->owner & 0x80) ^ 811 !!(eq->cons_index & eq->nent) ? "HW" : "SW"); 812 break; 813 } 814 815 ++eq->cons_index; 816 eqes_found = 1; 817 ++set_ci; 818 819 /* 820 * The HCA will think the queue has overflowed if we 821 * don't tell it we've been processing events. We 822 * create our EQs with MLX4_NUM_SPARE_EQE extra 823 * entries, so we must update our consumer index at 824 * least that often. 825 */ 826 if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) { 827 eq_set_ci(eq, 0); 828 set_ci = 0; 829 } 830 } 831 832 eq_set_ci(eq, 1); 833 834 return eqes_found; 835 } 836 837 static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr) 838 { 839 struct mlx4_dev *dev = dev_ptr; 840 struct mlx4_priv *priv = mlx4_priv(dev); 841 int work = 0; 842 int i; 843 844 writel(priv->eq_table.clr_mask, priv->eq_table.clr_int); 845 846 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) 847 work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]); 848 849 return IRQ_RETVAL(work); 850 } 851 852 static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr) 853 { 854 struct mlx4_eq *eq = eq_ptr; 855 struct mlx4_dev *dev = eq->dev; 856 857 mlx4_eq_int(dev, eq); 858 859 /* MSI-X vectors always belong to us */ 860 return IRQ_HANDLED; 861 } 862 863 int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave, 864 struct mlx4_vhcr *vhcr, 865 struct mlx4_cmd_mailbox *inbox, 866 struct mlx4_cmd_mailbox *outbox, 867 struct mlx4_cmd_info *cmd) 868 { 869 struct mlx4_priv *priv = mlx4_priv(dev); 870 struct mlx4_slave_event_eq_info *event_eq = 871 priv->mfunc.master.slave_state[slave].event_eq; 872 u32 in_modifier = vhcr->in_modifier; 873 u32 eqn = in_modifier & 0x3FF; 874 u64 in_param = vhcr->in_param; 875 int err = 0; 876 int i; 877 878 if (slave == dev->caps.function) 879 err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn, 880 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B, 881 MLX4_CMD_NATIVE); 882 if (!err) 883 for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) 884 if (in_param & (1LL << i)) 885 event_eq[i].eqn = in_modifier >> 31 ? -1 : eqn; 886 887 return err; 888 } 889 890 static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap, 891 int eq_num) 892 { 893 return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num, 894 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B, 895 MLX4_CMD_WRAPPED); 896 } 897 898 static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 899 int eq_num) 900 { 901 return mlx4_cmd(dev, mailbox->dma, eq_num, 0, 902 MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A, 903 MLX4_CMD_WRAPPED); 904 } 905 906 static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, int eq_num) 907 { 908 return mlx4_cmd(dev, 0, eq_num, 1, MLX4_CMD_HW2SW_EQ, 909 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 910 } 911 912 static int mlx4_num_eq_uar(struct mlx4_dev *dev) 913 { 914 /* 915 * Each UAR holds 4 EQ doorbells. To figure out how many UARs 916 * we need to map, take the difference of highest index and 917 * the lowest index we'll use and add 1. 918 */ 919 return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 - 920 dev->caps.reserved_eqs / 4 + 1; 921 } 922 923 static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq) 924 { 925 struct mlx4_priv *priv = mlx4_priv(dev); 926 int index; 927 928 index = eq->eqn / 4 - dev->caps.reserved_eqs / 4; 929 930 if (!priv->eq_table.uar_map[index]) { 931 priv->eq_table.uar_map[index] = 932 ioremap(pci_resource_start(dev->persist->pdev, 2) + 933 ((eq->eqn / 4) << (dev->uar_page_shift)), 934 (1 << (dev->uar_page_shift))); 935 if (!priv->eq_table.uar_map[index]) { 936 mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n", 937 eq->eqn); 938 return NULL; 939 } 940 } 941 942 return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4); 943 } 944 945 static void mlx4_unmap_uar(struct mlx4_dev *dev) 946 { 947 struct mlx4_priv *priv = mlx4_priv(dev); 948 int i; 949 950 for (i = 0; i < mlx4_num_eq_uar(dev); ++i) 951 if (priv->eq_table.uar_map[i]) { 952 iounmap(priv->eq_table.uar_map[i]); 953 priv->eq_table.uar_map[i] = NULL; 954 } 955 } 956 957 static int mlx4_create_eq(struct mlx4_dev *dev, int nent, 958 u8 intr, struct mlx4_eq *eq) 959 { 960 struct mlx4_priv *priv = mlx4_priv(dev); 961 struct mlx4_cmd_mailbox *mailbox; 962 struct mlx4_eq_context *eq_context; 963 int npages; 964 u64 *dma_list = NULL; 965 dma_addr_t t; 966 u64 mtt_addr; 967 int err = -ENOMEM; 968 int i; 969 970 eq->dev = dev; 971 eq->nent = roundup_pow_of_two(max(nent, 2)); 972 /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with 973 * strides of 64B,128B and 256B. 974 */ 975 npages = PAGE_ALIGN(eq->nent * dev->caps.eqe_size) / PAGE_SIZE; 976 977 eq->page_list = kmalloc(npages * sizeof *eq->page_list, 978 GFP_KERNEL); 979 if (!eq->page_list) 980 goto err_out; 981 982 for (i = 0; i < npages; ++i) 983 eq->page_list[i].buf = NULL; 984 985 dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); 986 if (!dma_list) 987 goto err_out_free; 988 989 mailbox = mlx4_alloc_cmd_mailbox(dev); 990 if (IS_ERR(mailbox)) 991 goto err_out_free; 992 eq_context = mailbox->buf; 993 994 for (i = 0; i < npages; ++i) { 995 eq->page_list[i].buf = dma_alloc_coherent(&dev->persist-> 996 pdev->dev, 997 PAGE_SIZE, &t, 998 GFP_KERNEL); 999 if (!eq->page_list[i].buf) 1000 goto err_out_free_pages; 1001 1002 dma_list[i] = t; 1003 eq->page_list[i].map = t; 1004 1005 memset(eq->page_list[i].buf, 0, PAGE_SIZE); 1006 } 1007 1008 eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap); 1009 if (eq->eqn == -1) 1010 goto err_out_free_pages; 1011 1012 eq->doorbell = mlx4_get_eq_uar(dev, eq); 1013 if (!eq->doorbell) { 1014 err = -ENOMEM; 1015 goto err_out_free_eq; 1016 } 1017 1018 err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt); 1019 if (err) 1020 goto err_out_free_eq; 1021 1022 err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list); 1023 if (err) 1024 goto err_out_free_mtt; 1025 1026 eq_context->flags = cpu_to_be32(MLX4_EQ_STATUS_OK | 1027 MLX4_EQ_STATE_ARMED); 1028 eq_context->log_eq_size = ilog2(eq->nent); 1029 eq_context->intr = intr; 1030 eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT; 1031 1032 mtt_addr = mlx4_mtt_addr(dev, &eq->mtt); 1033 eq_context->mtt_base_addr_h = mtt_addr >> 32; 1034 eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff); 1035 1036 err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn); 1037 if (err) { 1038 mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err); 1039 goto err_out_free_mtt; 1040 } 1041 1042 kfree(dma_list); 1043 mlx4_free_cmd_mailbox(dev, mailbox); 1044 1045 eq->cons_index = 0; 1046 1047 return err; 1048 1049 err_out_free_mtt: 1050 mlx4_mtt_cleanup(dev, &eq->mtt); 1051 1052 err_out_free_eq: 1053 mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR); 1054 1055 err_out_free_pages: 1056 for (i = 0; i < npages; ++i) 1057 if (eq->page_list[i].buf) 1058 dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE, 1059 eq->page_list[i].buf, 1060 eq->page_list[i].map); 1061 1062 mlx4_free_cmd_mailbox(dev, mailbox); 1063 1064 err_out_free: 1065 kfree(eq->page_list); 1066 kfree(dma_list); 1067 1068 err_out: 1069 return err; 1070 } 1071 1072 static void mlx4_free_eq(struct mlx4_dev *dev, 1073 struct mlx4_eq *eq) 1074 { 1075 struct mlx4_priv *priv = mlx4_priv(dev); 1076 int err; 1077 int i; 1078 /* CX3 is capable of extending the CQE/EQE from 32 to 64 bytes, with 1079 * strides of 64B,128B and 256B 1080 */ 1081 int npages = PAGE_ALIGN(dev->caps.eqe_size * eq->nent) / PAGE_SIZE; 1082 1083 err = mlx4_HW2SW_EQ(dev, eq->eqn); 1084 if (err) 1085 mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err); 1086 1087 synchronize_irq(eq->irq); 1088 1089 mlx4_mtt_cleanup(dev, &eq->mtt); 1090 for (i = 0; i < npages; ++i) 1091 dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE, 1092 eq->page_list[i].buf, 1093 eq->page_list[i].map); 1094 1095 kfree(eq->page_list); 1096 mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn, MLX4_USE_RR); 1097 } 1098 1099 static void mlx4_free_irqs(struct mlx4_dev *dev) 1100 { 1101 struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table; 1102 int i; 1103 1104 if (eq_table->have_irq) 1105 free_irq(dev->persist->pdev->irq, dev); 1106 1107 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) 1108 if (eq_table->eq[i].have_irq) { 1109 eq_table->eq[i].affinity_cpu_id = NOCPU; 1110 free_irq(eq_table->eq[i].irq, eq_table->eq + i); 1111 eq_table->eq[i].have_irq = 0; 1112 } 1113 1114 kfree(eq_table->irq_names); 1115 } 1116 1117 static int mlx4_map_clr_int(struct mlx4_dev *dev) 1118 { 1119 struct mlx4_priv *priv = mlx4_priv(dev); 1120 1121 priv->clr_base = ioremap(pci_resource_start(dev->persist->pdev, 1122 priv->fw.clr_int_bar) + 1123 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE); 1124 if (!priv->clr_base) { 1125 mlx4_err(dev, "Couldn't map interrupt clear register, aborting\n"); 1126 return -ENOMEM; 1127 } 1128 1129 return 0; 1130 } 1131 1132 static void mlx4_unmap_clr_int(struct mlx4_dev *dev) 1133 { 1134 struct mlx4_priv *priv = mlx4_priv(dev); 1135 1136 iounmap(priv->clr_base); 1137 } 1138 1139 int mlx4_alloc_eq_table(struct mlx4_dev *dev) 1140 { 1141 struct mlx4_priv *priv = mlx4_priv(dev); 1142 1143 priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs, 1144 sizeof *priv->eq_table.eq, GFP_KERNEL); 1145 if (!priv->eq_table.eq) 1146 return -ENOMEM; 1147 1148 return 0; 1149 } 1150 1151 void mlx4_free_eq_table(struct mlx4_dev *dev) 1152 { 1153 kfree(mlx4_priv(dev)->eq_table.eq); 1154 } 1155 1156 int mlx4_init_eq_table(struct mlx4_dev *dev) 1157 { 1158 struct mlx4_priv *priv = mlx4_priv(dev); 1159 int err; 1160 int i; 1161 1162 priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev), 1163 sizeof *priv->eq_table.uar_map, 1164 GFP_KERNEL); 1165 if (!priv->eq_table.uar_map) { 1166 err = -ENOMEM; 1167 goto err_out_free; 1168 } 1169 1170 err = mlx4_bitmap_init(&priv->eq_table.bitmap, 1171 roundup_pow_of_two(dev->caps.num_eqs), 1172 dev->caps.num_eqs - 1, 1173 dev->caps.reserved_eqs, 1174 roundup_pow_of_two(dev->caps.num_eqs) - 1175 dev->caps.num_eqs); 1176 if (err) 1177 goto err_out_free; 1178 1179 for (i = 0; i < mlx4_num_eq_uar(dev); ++i) 1180 priv->eq_table.uar_map[i] = NULL; 1181 1182 if (!mlx4_is_slave(dev)) { 1183 err = mlx4_map_clr_int(dev); 1184 if (err) 1185 goto err_out_bitmap; 1186 1187 priv->eq_table.clr_mask = 1188 swab32(1 << (priv->eq_table.inta_pin & 31)); 1189 priv->eq_table.clr_int = priv->clr_base + 1190 (priv->eq_table.inta_pin < 32 ? 4 : 0); 1191 } 1192 1193 priv->eq_table.irq_names = 1194 kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1), 1195 GFP_KERNEL); 1196 if (!priv->eq_table.irq_names) { 1197 err = -ENOMEM; 1198 goto err_out_clr_int; 1199 } 1200 1201 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) { 1202 if (i == MLX4_EQ_ASYNC) { 1203 err = mlx4_create_eq(dev, 1204 MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE, 1205 0, &priv->eq_table.eq[MLX4_EQ_ASYNC]); 1206 } else { 1207 struct mlx4_eq *eq = &priv->eq_table.eq[i]; 1208 #ifdef CONFIG_RFS_ACCEL 1209 int port = find_first_bit(eq->actv_ports.ports, 1210 dev->caps.num_ports) + 1; 1211 1212 if (port <= dev->caps.num_ports) { 1213 struct mlx4_port_info *info = 1214 &mlx4_priv(dev)->port[port]; 1215 1216 if (!info->rmap) { 1217 info->rmap = alloc_irq_cpu_rmap( 1218 mlx4_get_eqs_per_port(dev, port)); 1219 if (!info->rmap) { 1220 mlx4_warn(dev, "Failed to allocate cpu rmap\n"); 1221 err = -ENOMEM; 1222 goto err_out_unmap; 1223 } 1224 } 1225 1226 err = irq_cpu_rmap_add( 1227 info->rmap, eq->irq); 1228 if (err) 1229 mlx4_warn(dev, "Failed adding irq rmap\n"); 1230 } 1231 #endif 1232 err = mlx4_create_eq(dev, dev->quotas.cq + 1233 MLX4_NUM_SPARE_EQE, 1234 (dev->flags & MLX4_FLAG_MSI_X) ? 1235 i + 1 - !!(i > MLX4_EQ_ASYNC) : 0, 1236 eq); 1237 } 1238 if (err) 1239 goto err_out_unmap; 1240 } 1241 1242 if (dev->flags & MLX4_FLAG_MSI_X) { 1243 const char *eq_name; 1244 1245 snprintf(priv->eq_table.irq_names + 1246 MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE, 1247 MLX4_IRQNAME_SIZE, 1248 "mlx4-async@pci:%s", 1249 pci_name(dev->persist->pdev)); 1250 eq_name = priv->eq_table.irq_names + 1251 MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE; 1252 1253 err = request_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq, 1254 mlx4_msi_x_interrupt, 0, eq_name, 1255 priv->eq_table.eq + MLX4_EQ_ASYNC); 1256 if (err) 1257 goto err_out_unmap; 1258 1259 priv->eq_table.eq[MLX4_EQ_ASYNC].have_irq = 1; 1260 } else { 1261 snprintf(priv->eq_table.irq_names, 1262 MLX4_IRQNAME_SIZE, 1263 DRV_NAME "@pci:%s", 1264 pci_name(dev->persist->pdev)); 1265 err = request_irq(dev->persist->pdev->irq, mlx4_interrupt, 1266 IRQF_SHARED, priv->eq_table.irq_names, dev); 1267 if (err) 1268 goto err_out_unmap; 1269 1270 priv->eq_table.have_irq = 1; 1271 } 1272 1273 err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, 1274 priv->eq_table.eq[MLX4_EQ_ASYNC].eqn); 1275 if (err) 1276 mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n", 1277 priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err); 1278 1279 /* arm ASYNC eq */ 1280 eq_set_ci(&priv->eq_table.eq[MLX4_EQ_ASYNC], 1); 1281 1282 return 0; 1283 1284 err_out_unmap: 1285 while (i > 0) 1286 mlx4_free_eq(dev, &priv->eq_table.eq[--i]); 1287 #ifdef CONFIG_RFS_ACCEL 1288 for (i = 1; i <= dev->caps.num_ports; i++) { 1289 if (mlx4_priv(dev)->port[i].rmap) { 1290 free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap); 1291 mlx4_priv(dev)->port[i].rmap = NULL; 1292 } 1293 } 1294 #endif 1295 mlx4_free_irqs(dev); 1296 1297 err_out_clr_int: 1298 if (!mlx4_is_slave(dev)) 1299 mlx4_unmap_clr_int(dev); 1300 1301 err_out_bitmap: 1302 mlx4_unmap_uar(dev); 1303 mlx4_bitmap_cleanup(&priv->eq_table.bitmap); 1304 1305 err_out_free: 1306 kfree(priv->eq_table.uar_map); 1307 1308 return err; 1309 } 1310 1311 void mlx4_cleanup_eq_table(struct mlx4_dev *dev) 1312 { 1313 struct mlx4_priv *priv = mlx4_priv(dev); 1314 int i; 1315 1316 mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1, 1317 priv->eq_table.eq[MLX4_EQ_ASYNC].eqn); 1318 1319 #ifdef CONFIG_RFS_ACCEL 1320 for (i = 1; i <= dev->caps.num_ports; i++) { 1321 if (mlx4_priv(dev)->port[i].rmap) { 1322 free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap); 1323 mlx4_priv(dev)->port[i].rmap = NULL; 1324 } 1325 } 1326 #endif 1327 mlx4_free_irqs(dev); 1328 1329 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) 1330 mlx4_free_eq(dev, &priv->eq_table.eq[i]); 1331 1332 if (!mlx4_is_slave(dev)) 1333 mlx4_unmap_clr_int(dev); 1334 1335 mlx4_unmap_uar(dev); 1336 mlx4_bitmap_cleanup(&priv->eq_table.bitmap); 1337 1338 kfree(priv->eq_table.uar_map); 1339 } 1340 1341 /* A test that verifies that we can accept interrupts 1342 * on the vector allocated for asynchronous events 1343 */ 1344 int mlx4_test_async(struct mlx4_dev *dev) 1345 { 1346 return mlx4_NOP(dev); 1347 } 1348 EXPORT_SYMBOL(mlx4_test_async); 1349 1350 /* A test that verifies that we can accept interrupts 1351 * on the given irq vector of the tested port. 1352 * Interrupts are checked using the NOP command. 1353 */ 1354 int mlx4_test_interrupt(struct mlx4_dev *dev, int vector) 1355 { 1356 struct mlx4_priv *priv = mlx4_priv(dev); 1357 int err; 1358 1359 /* Temporary use polling for command completions */ 1360 mlx4_cmd_use_polling(dev); 1361 1362 /* Map the new eq to handle all asynchronous events */ 1363 err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, 1364 priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn); 1365 if (err) { 1366 mlx4_warn(dev, "Failed mapping eq for interrupt test\n"); 1367 goto out; 1368 } 1369 1370 /* Go back to using events */ 1371 mlx4_cmd_use_events(dev); 1372 err = mlx4_NOP(dev); 1373 1374 /* Return to default */ 1375 mlx4_cmd_use_polling(dev); 1376 out: 1377 mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0, 1378 priv->eq_table.eq[MLX4_EQ_ASYNC].eqn); 1379 mlx4_cmd_use_events(dev); 1380 1381 return err; 1382 } 1383 EXPORT_SYMBOL(mlx4_test_interrupt); 1384 1385 bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector) 1386 { 1387 struct mlx4_priv *priv = mlx4_priv(dev); 1388 1389 vector = MLX4_CQ_TO_EQ_VECTOR(vector); 1390 if (vector < 0 || (vector >= dev->caps.num_comp_vectors + 1) || 1391 (vector == MLX4_EQ_ASYNC)) 1392 return false; 1393 1394 return test_bit(port - 1, priv->eq_table.eq[vector].actv_ports.ports); 1395 } 1396 EXPORT_SYMBOL(mlx4_is_eq_vector_valid); 1397 1398 u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port) 1399 { 1400 struct mlx4_priv *priv = mlx4_priv(dev); 1401 unsigned int i; 1402 unsigned int sum = 0; 1403 1404 for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) 1405 sum += !!test_bit(port - 1, 1406 priv->eq_table.eq[i].actv_ports.ports); 1407 1408 return sum; 1409 } 1410 EXPORT_SYMBOL(mlx4_get_eqs_per_port); 1411 1412 int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector) 1413 { 1414 struct mlx4_priv *priv = mlx4_priv(dev); 1415 1416 vector = MLX4_CQ_TO_EQ_VECTOR(vector); 1417 if (vector <= 0 || (vector >= dev->caps.num_comp_vectors + 1)) 1418 return -EINVAL; 1419 1420 return !!(bitmap_weight(priv->eq_table.eq[vector].actv_ports.ports, 1421 dev->caps.num_ports) > 1); 1422 } 1423 EXPORT_SYMBOL(mlx4_is_eq_shared); 1424 1425 int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector) 1426 { 1427 struct mlx4_priv *priv = mlx4_priv(dev); 1428 int err = 0, i = 0; 1429 u32 min_ref_count_val = (u32)-1; 1430 int requested_vector = MLX4_CQ_TO_EQ_VECTOR(*vector); 1431 int *prequested_vector = NULL; 1432 1433 1434 mutex_lock(&priv->msix_ctl.pool_lock); 1435 if (requested_vector < (dev->caps.num_comp_vectors + 1) && 1436 (requested_vector >= 0) && 1437 (requested_vector != MLX4_EQ_ASYNC)) { 1438 if (test_bit(port - 1, 1439 priv->eq_table.eq[requested_vector].actv_ports.ports)) { 1440 prequested_vector = &requested_vector; 1441 } else { 1442 struct mlx4_eq *eq; 1443 1444 for (i = 1; i < port; 1445 requested_vector += mlx4_get_eqs_per_port(dev, i++)) 1446 ; 1447 1448 eq = &priv->eq_table.eq[requested_vector]; 1449 if (requested_vector < dev->caps.num_comp_vectors + 1 && 1450 test_bit(port - 1, eq->actv_ports.ports)) { 1451 prequested_vector = &requested_vector; 1452 } 1453 } 1454 } 1455 1456 if (!prequested_vector) { 1457 requested_vector = -1; 1458 for (i = 0; min_ref_count_val && i < dev->caps.num_comp_vectors + 1; 1459 i++) { 1460 struct mlx4_eq *eq = &priv->eq_table.eq[i]; 1461 1462 if (min_ref_count_val > eq->ref_count && 1463 test_bit(port - 1, eq->actv_ports.ports)) { 1464 min_ref_count_val = eq->ref_count; 1465 requested_vector = i; 1466 } 1467 } 1468 1469 if (requested_vector < 0) { 1470 err = -ENOSPC; 1471 goto err_unlock; 1472 } 1473 1474 prequested_vector = &requested_vector; 1475 } 1476 1477 if (!test_bit(*prequested_vector, priv->msix_ctl.pool_bm) && 1478 dev->flags & MLX4_FLAG_MSI_X) { 1479 set_bit(*prequested_vector, priv->msix_ctl.pool_bm); 1480 snprintf(priv->eq_table.irq_names + 1481 *prequested_vector * MLX4_IRQNAME_SIZE, 1482 MLX4_IRQNAME_SIZE, "mlx4-%d@%s", 1483 *prequested_vector, dev_name(&dev->persist->pdev->dev)); 1484 1485 err = request_irq(priv->eq_table.eq[*prequested_vector].irq, 1486 mlx4_msi_x_interrupt, 0, 1487 &priv->eq_table.irq_names[*prequested_vector << 5], 1488 priv->eq_table.eq + *prequested_vector); 1489 1490 if (err) { 1491 clear_bit(*prequested_vector, priv->msix_ctl.pool_bm); 1492 *prequested_vector = -1; 1493 } else { 1494 mlx4_set_eq_affinity_hint(priv, *prequested_vector); 1495 eq_set_ci(&priv->eq_table.eq[*prequested_vector], 1); 1496 priv->eq_table.eq[*prequested_vector].have_irq = 1; 1497 } 1498 } 1499 1500 if (!err && *prequested_vector >= 0) 1501 priv->eq_table.eq[*prequested_vector].ref_count++; 1502 1503 err_unlock: 1504 mutex_unlock(&priv->msix_ctl.pool_lock); 1505 1506 if (!err && *prequested_vector >= 0) 1507 *vector = MLX4_EQ_TO_CQ_VECTOR(*prequested_vector); 1508 else 1509 *vector = 0; 1510 1511 return err; 1512 } 1513 EXPORT_SYMBOL(mlx4_assign_eq); 1514 1515 int mlx4_eq_get_irq(struct mlx4_dev *dev, int cq_vec) 1516 { 1517 struct mlx4_priv *priv = mlx4_priv(dev); 1518 1519 return priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq_vec)].irq; 1520 } 1521 EXPORT_SYMBOL(mlx4_eq_get_irq); 1522 1523 void mlx4_release_eq(struct mlx4_dev *dev, int vec) 1524 { 1525 struct mlx4_priv *priv = mlx4_priv(dev); 1526 int eq_vec = MLX4_CQ_TO_EQ_VECTOR(vec); 1527 1528 mutex_lock(&priv->msix_ctl.pool_lock); 1529 priv->eq_table.eq[eq_vec].ref_count--; 1530 1531 /* once we allocated EQ, we don't release it because it might be binded 1532 * to cpu_rmap. 1533 */ 1534 mutex_unlock(&priv->msix_ctl.pool_lock); 1535 } 1536 EXPORT_SYMBOL(mlx4_release_eq); 1537 1538 void 1539 mlx4_disable_interrupts(struct mlx4_dev *dev) 1540 { 1541 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); 1542 int i; 1543 1544 if (dev->flags & MLX4_FLAG_MSI_X) { 1545 for (i = 0; i < (dev->caps.num_comp_vectors + 1); ++i) 1546 disable_irq(priv->eq_table.eq[i].irq); 1547 } else { 1548 disable_irq(dev->persist->pdev->irq); 1549 } 1550 } 1551 EXPORT_SYMBOL(mlx4_disable_interrupts); 1552 1553 void 1554 mlx4_poll_interrupts(struct mlx4_dev *dev) 1555 { 1556 struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev); 1557 int i; 1558 1559 if (dev->flags & MLX4_FLAG_MSI_X) { 1560 for (i = 0; i < (dev->caps.num_comp_vectors + 1); ++i) { 1561 mlx4_msi_x_interrupt(priv->eq_table.eq[i].irq, 1562 priv->eq_table.eq + i); 1563 } 1564 } else { 1565 mlx4_interrupt(dev->persist->pdev->irq, dev); 1566 } 1567 } 1568 EXPORT_SYMBOL(mlx4_poll_interrupts); 1569