1 /*- 2 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 
 *
 * $FreeBSD$
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/io-mapping.h>
#include <linux/hardirq.h>
#include <linux/ktime.h>
#include <dev/mlx5/driver.h>

#include "mlx5_core.h"

/* Forward declarations for helpers defined later in this file. */
static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size);
static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg);
static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);

/* Command interface revision implemented by this driver. */
enum {
	CMD_IF_REV = 5,
};

/* Completion mode: poll the descriptor ownership bit, or wait for an EQE. */
enum {
	CMD_MODE_POLLING,
	CMD_MODE_EVENTS
};

/*
 * Cached command-message pool geometry.  LONG_LIST_SIZE is sized so a
 * single message can describe 2GB worth of pages (8 bytes per page
 * address) plus one data block; MED_LIST_SIZE covers one data block.
 */
enum {
	NUM_LONG_LISTS = 2,
	NUM_MED_LISTS = 64,
	LONG_LIST_SIZE = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 +
	    MLX5_CMD_DATA_BLOCK_SIZE,
	MED_LIST_SIZE = 16 + MLX5_CMD_DATA_BLOCK_SIZE,
};

/* Delivery status codes reported by the HCA for a command descriptor. */
enum {
	MLX5_CMD_DELIVERY_STAT_OK = 0x0,
	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1,
	MLX5_CMD_DELIVERY_STAT_TOK_ERR = 0x2,
	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR = 0x3,
	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR = 0x4,
	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR = 0x5,
	MLX5_CMD_DELIVERY_STAT_FW_ERR = 0x6,
	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR = 0x7,
	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR = 0x8,
	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR = 0x9,
	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10,
};

/*
 * Allocate and initialize a command work entry.
 *
 * Uses GFP_ATOMIC when a completion callback is supplied, since async
 * submissions may originate from atomic context.  Returns ERR_PTR(-ENOMEM)
 * on allocation failure.  The entry takes non-owning references to the
 * "in"/"out" messages and the caller's buffers.
 */
static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
					   struct mlx5_cmd_msg *in,
					   int uin_size,
					   struct mlx5_cmd_msg *out,
					   void *uout, int uout_size,
					   mlx5_cmd_cbk_t cbk,
					   void *context, int page_queue)
{
	gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
	struct mlx5_cmd_work_ent *ent;

	ent = kzalloc(sizeof(*ent), alloc_flags);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->in = in;
	ent->uin_size = uin_size;
	ent->out = out;
	ent->uout = uout;
	ent->uout_size = uout_size;
	ent->callback = cbk;
	ent->context = context;
	ent->cmd = cmd;
	ent->page_queue = page_queue;

	return ent;
}

/*
 * Return the next command token.  Tokens are a wrapping 8-bit counter;
 * zero is skipped so a valid descriptor never carries token 0.
 */
static u8 alloc_token(struct mlx5_cmd *cmd)
{
	u8 token;

	spin_lock(&cmd->token_lock);
	cmd->token++;
	if (cmd->token == 0)
		cmd->token++;
	token = cmd->token;
	spin_unlock(&cmd->token_lock);

	return token;
}

/*
 * Claim a command slot index for "ent" under alloc_lock.
 *
 * Page-queue commands use the dedicated last slot (index max_reg_cmds);
 * regular commands take the first free bit in the bitmask.  Returns the
 * slot index, or -1 if no slot is free or the device is not up.
 */
static int alloc_ent(struct mlx5_cmd_work_ent *ent)
{
	unsigned long flags;
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev =
		container_of(cmd, struct mlx5_core_dev, cmd);
	int ret = cmd->max_reg_cmds;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	if (!ent->page_queue) {
		ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
		if (ret >= cmd->max_reg_cmds)
			ret = -1;
	}

	/* Refuse new slots once the device has left the UP state. */
	if (dev->state != MLX5_DEVICE_STATE_UP)
		ret = -1;

	if (ret != -1) {
		ent->busy = 1;
		ent->idx = ret;
		clear_bit(ent->idx, &cmd->bitmask);
		cmd->ent_arr[ent->idx] = ent;
	}
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);

	return ret;
}

/* Release command slot "idx" back into the free bitmask. */
static void free_ent(struct mlx5_cmd *cmd, int idx)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	set_bit(idx, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}

/* Return the command-layout descriptor for slot "idx" in the command buffer. */
static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
	return cmd->cmd_buf + (idx << cmd->log_stride);
}

/* XOR all bytes of "buf"; used for the 8-bit command/mailbox signatures. */
static u8 xor8_buf(void *buf, int len)
{
	u8 *ptr = buf;
	u8 sum = 0;
	int i;

	for (i = 0; i < len; i++)
		sum ^= ptr[i];

	return sum;
}

/*
 * Verify both signatures of a mailbox block: the control signature
 * (everything except the data area and the final sig byte) and the
 * whole-block signature.  A valid XOR-checksummed block sums to 0xff.
 */
static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
	if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
		return -EINVAL;

	if (xor8_buf(block, sizeof(*block)) != 0xff)
		return -EINVAL;

	return 0;
}

/*
 * Stamp "token" into a mailbox block and, when checksumming is enabled,
 * compute its control and whole-block signatures.
 */
static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
			   int csum)
{
	block->token = token;
	if (csum) {
		block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
					    sizeof(block->data) - 2);
		block->sig = ~xor8_buf(block, sizeof(*block) - 1);
	}
}

/*
 * Sign every mailbox block in a command message chain, stopping at the
 * block whose "next" pointer is zero, then flush the message so the
 * device sees the signatures.
 */
static void
calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
{
	size_t i;

	for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) {
		struct mlx5_cmd_prot_block *block;

		block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE);

		/* compute signature */
		calc_block_sig(block, token, csum);

		/* check for last block */
		if (block->next == 0)
			break;
	}

	/* make sure data gets written to RAM */
	mlx5_fwp_flush(msg);
}

/* Sign the command descriptor and both its input and output mailbox chains. */
static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
	ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
	calc_chain_sig(ent->in, ent->token, csum);
	calc_chain_sig(ent->out, ent->token, csum);
}

/*
 * Busy-wait (with sleeps) for the HW ownership bit of "ent" to clear,
 * or for the device to leave the UP state.  Sets ent->ret to 0 on
 * completion or -ETIMEDOUT after the command timeout plus one second.
 *
 * NOTE(review): poll_end is declared int while jiffies arithmetic is
 * typically unsigned long — this relies on the linuxkpi tick type;
 * confirm against the linuxkpi jiffies definition.
 */
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_core_dev *dev = container_of(ent->cmd,
						 struct mlx5_core_dev, cmd);
	int poll_end = jiffies +
	    msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
	u8 own;

	do {
		own = ent->lay->status_own;
		if (!(own & CMD_OWNER_HW) ||
		    dev->state != MLX5_DEVICE_STATE_UP) {
			ent->ret = 0;
			return;
		}
		usleep_range(5000, 10000);
	} while (time_before(jiffies, poll_end));

	ent->ret = -ETIMEDOUT;
}

/* Free a command work entry after cancelling its pending timeout work. */
static void free_cmd(struct mlx5_cmd_work_ent *ent)
{
	cancel_delayed_work_sync(&ent->cb_timeout_work);
	kfree(ent);
}

/*
 * Verify the descriptor signature and every output mailbox block
 * signature for a completed command.  Returns 0 on success or -EINVAL
 * on the first mismatch.
 */
static int
verify_signature(struct
mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd_msg *msg = ent->out;
	size_t i;
	int err;
	u8 sig;

	/* Descriptor signature must XOR to 0xff. */
	sig = xor8_buf(ent->lay, sizeof(*ent->lay));
	if (sig != 0xff)
		return -EINVAL;

	/* Walk the output mailbox chain, validating each block. */
	for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) {
		struct mlx5_cmd_prot_block *block;

		block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE);

		/* compute signature */
		err = verify_block_sig(block);
		if (err != 0)
			return (err);

		/* check for last block */
		if (block->next == 0)
			break;
	}
	return (0);
}

/*
 * Debug hex-dump of "size" bytes at "buf", 16 bytes (four big-endian
 * dwords) per line, prefixed with the running "offset".  When data_only
 * is clear a trailing blank line separates dumps.
 */
static void dump_buf(void *buf, int size, int data_only, int offset)
{
	__be32 *p = buf;
	int i;

	for (i = 0; i < size; i += 16) {
		pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
			 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
			 be32_to_cpu(p[3]));
		p += 4;
		offset += 16;
	}
	if (!data_only)
		pr_debug("\n");
}

/* Driver-generated status/syndrome used when faking FW completions. */
enum {
	MLX5_DRIVER_STATUS_ABORTED = 0xfe,
	MLX5_DRIVER_SYND = 0xbadd00de,
};

/*
 * Decide how a command should complete when the device is in an internal
 * error state.  Teardown/destroy/dealloc-style commands are reported as
 * successful (the object is effectively gone); create/query-style
 * commands are aborted with a driver syndrome and -EIO.  Unknown opcodes
 * return -EINVAL.
 */
static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
				       u32 *synd, u8 *status)
{
	*synd = 0;
	*status = 0;

	switch (op) {
	/* Destroy/teardown commands: pretend success. */
	case MLX5_CMD_OP_TEARDOWN_HCA:
	case MLX5_CMD_OP_DISABLE_HCA:
	case MLX5_CMD_OP_MANAGE_PAGES:
	case MLX5_CMD_OP_DESTROY_MKEY:
	case MLX5_CMD_OP_DESTROY_EQ:
	case MLX5_CMD_OP_DESTROY_CQ:
	case MLX5_CMD_OP_DESTROY_QP:
	case MLX5_CMD_OP_DESTROY_PSV:
	case MLX5_CMD_OP_DESTROY_SRQ:
	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
	case MLX5_CMD_OP_DESTROY_DCT:
	case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
	case MLX5_CMD_OP_DEALLOC_PD:
	case MLX5_CMD_OP_DEALLOC_UAR:
	case MLX5_CMD_OP_DETACH_FROM_MCG:
	case MLX5_CMD_OP_DEALLOC_XRCD:
	case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_DESTROY_TIR:
	case MLX5_CMD_OP_DESTROY_SQ:
	case MLX5_CMD_OP_DESTROY_RQ:
	case MLX5_CMD_OP_DESTROY_RMP:
	case MLX5_CMD_OP_DESTROY_TIS:
	case MLX5_CMD_OP_DESTROY_RQT:
	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
	case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
	case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
		return MLX5_CMD_STAT_OK;

	/* Create/query commands: abort with a driver syndrome. */
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_INIT_HCA:
	case MLX5_CMD_OP_ENABLE_HCA:
	case MLX5_CMD_OP_QUERY_PAGES:
	case MLX5_CMD_OP_SET_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_SET_ISSI:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
	case MLX5_CMD_OP_PAGE_FAULT_RESUME:
	case MLX5_CMD_OP_CREATE_EQ:
	case MLX5_CMD_OP_QUERY_EQ:
	case MLX5_CMD_OP_GEN_EQE:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_SQD_RTS_QP:
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_CREATE_PSV:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_DRAIN_DCT:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_MODIFY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_SET_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_UAR:
	case MLX5_CMD_OP_CONFIG_INT_MODERATION:
	case MLX5_CMD_OP_ACCESS_REG:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_MAD_IFC:
	case MLX5_CMD_OP_QUERY_MAD_DEMUX:
	case MLX5_CMD_OP_SET_MAD_DEMUX:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_ALLOC_XRCD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_MODIFY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
		*status = MLX5_DRIVER_STATUS_ABORTED;
		*synd = MLX5_DRIVER_SYND;
		return -EIO;
	default:
		mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
		return -EINVAL;
	}
}

/* Map a command opcode to its printable name (for logging). */
const char *mlx5_command_str(int command)
{
#define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd

	switch (command) {
	MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP);
	MLX5_COMMAND_STR_CASE(SET_HCA_CAP);
	MLX5_COMMAND_STR_CASE(QUERY_ADAPTER);
	MLX5_COMMAND_STR_CASE(INIT_HCA);
	MLX5_COMMAND_STR_CASE(TEARDOWN_HCA);
	MLX5_COMMAND_STR_CASE(ENABLE_HCA);
	MLX5_COMMAND_STR_CASE(DISABLE_HCA);
	MLX5_COMMAND_STR_CASE(QUERY_PAGES);
	MLX5_COMMAND_STR_CASE(MANAGE_PAGES);
	MLX5_COMMAND_STR_CASE(QUERY_ISSI);
	MLX5_COMMAND_STR_CASE(SET_ISSI);
	MLX5_COMMAND_STR_CASE(CREATE_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_MKEY);
	MLX5_COMMAND_STR_CASE(DESTROY_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS);
	MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME);
	MLX5_COMMAND_STR_CASE(CREATE_EQ);
	MLX5_COMMAND_STR_CASE(DESTROY_EQ);
	MLX5_COMMAND_STR_CASE(QUERY_EQ);
	MLX5_COMMAND_STR_CASE(GEN_EQE);
	MLX5_COMMAND_STR_CASE(CREATE_CQ);
	MLX5_COMMAND_STR_CASE(DESTROY_CQ);
	MLX5_COMMAND_STR_CASE(QUERY_CQ);
	MLX5_COMMAND_STR_CASE(MODIFY_CQ);
	MLX5_COMMAND_STR_CASE(CREATE_QP);
	MLX5_COMMAND_STR_CASE(DESTROY_QP);
	MLX5_COMMAND_STR_CASE(RST2INIT_QP);
	MLX5_COMMAND_STR_CASE(INIT2RTR_QP);
	MLX5_COMMAND_STR_CASE(RTR2RTS_QP);
	MLX5_COMMAND_STR_CASE(RTS2RTS_QP);
	MLX5_COMMAND_STR_CASE(SQERR2RTS_QP);
	MLX5_COMMAND_STR_CASE(2ERR_QP);
	MLX5_COMMAND_STR_CASE(2RST_QP);
	MLX5_COMMAND_STR_CASE(QUERY_QP);
	MLX5_COMMAND_STR_CASE(SQD_RTS_QP);
	MLX5_COMMAND_STR_CASE(MAD_IFC);
	MLX5_COMMAND_STR_CASE(INIT2INIT_QP);
	MLX5_COMMAND_STR_CASE(CREATE_PSV);
	MLX5_COMMAND_STR_CASE(DESTROY_PSV);
	MLX5_COMMAND_STR_CASE(CREATE_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(CREATE_DCT);
	MLX5_COMMAND_STR_CASE(SET_DC_CNAK_TRACE);
	MLX5_COMMAND_STR_CASE(DESTROY_DCT);
	MLX5_COMMAND_STR_CASE(DRAIN_DCT);
	MLX5_COMMAND_STR_CASE(QUERY_DCT);
	MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER);
	MLX5_COMMAND_STR_CASE(SET_WOL_ROL);
	MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL);
	MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(ALLOC_PD);
	MLX5_COMMAND_STR_CASE(DEALLOC_PD);
	MLX5_COMMAND_STR_CASE(ALLOC_UAR);
	MLX5_COMMAND_STR_CASE(DEALLOC_UAR);
	MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION);
	MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG);
	MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG);
	MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG);
	MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(NOP);
	MLX5_COMMAND_STR_CASE(ALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(DEALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS);
	MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(CREATE_RMP);
	MLX5_COMMAND_STR_CASE(MODIFY_RMP);
	MLX5_COMMAND_STR_CASE(DESTROY_RMP);
	MLX5_COMMAND_STR_CASE(QUERY_RMP);
	MLX5_COMMAND_STR_CASE(CREATE_RQT);
	MLX5_COMMAND_STR_CASE(MODIFY_RQT);
	MLX5_COMMAND_STR_CASE(DESTROY_RQT);
	MLX5_COMMAND_STR_CASE(QUERY_RQT);
	MLX5_COMMAND_STR_CASE(ACCESS_REG);
	MLX5_COMMAND_STR_CASE(CREATE_SQ);
	MLX5_COMMAND_STR_CASE(MODIFY_SQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SQ);
	MLX5_COMMAND_STR_CASE(QUERY_SQ);
	MLX5_COMMAND_STR_CASE(CREATE_RQ);
	MLX5_COMMAND_STR_CASE(MODIFY_RQ);
	MLX5_COMMAND_STR_CASE(DESTROY_RQ);
	MLX5_COMMAND_STR_CASE(QUERY_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_TIR);
	MLX5_COMMAND_STR_CASE(MODIFY_TIR);
	MLX5_COMMAND_STR_CASE(DESTROY_TIR);
	MLX5_COMMAND_STR_CASE(QUERY_TIR);
	MLX5_COMMAND_STR_CASE(CREATE_TIS);
	MLX5_COMMAND_STR_CASE(MODIFY_TIS);
	MLX5_COMMAND_STR_CASE(DESTROY_TIS);
	MLX5_COMMAND_STR_CASE(QUERY_TIS);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(SET_DIAGNOSTICS);
	MLX5_COMMAND_STR_CASE(QUERY_DIAGNOSTICS);
	default: return "unknown command opcode";
	}
}

/*
 * Debug-dump a command: the descriptor (or its inline in/out area when
 * data-only dumping is enabled) followed by every mailbox block of the
 * selected message chain.  "input" selects ent->in vs ent->out.
 */
static void dump_command(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_work_ent *ent, int input)
{
	u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode);
	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
	size_t i;
	int data_only;
	int offset = 0;
	int msg_len = input ? ent->uin_size : ent->uout_size;
	int dump_len;

	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

	if (data_only)
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
				   "dump command data %s(0x%x) %s\n",
				   mlx5_command_str(op), op,
				   input ? "INPUT" : "OUTPUT");
	else
		mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
			      mlx5_command_str(op), op,
			      input ? "INPUT" : "OUTPUT");

	if (data_only) {
		if (input) {
			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
			offset += sizeof(ent->lay->in);
		} else {
			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
			offset += sizeof(ent->lay->out);
		}
	} else {
		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
		offset += sizeof(*ent->lay);
	}

	for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) {
		struct mlx5_cmd_prot_block *block;

		block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE);

		if (data_only) {
			/* Stop once the user-visible message length is covered. */
			if (offset >= msg_len)
				break;
			dump_len = min_t(int,
			    MLX5_CMD_DATA_BLOCK_SIZE, msg_len - offset);

			dump_buf(block->data, dump_len, 1, offset);
			offset += MLX5_CMD_DATA_BLOCK_SIZE;
		} else {
			mlx5_core_dbg(dev, "command block:\n");
			dump_buf(block, sizeof(*block), 0, offset);
			offset += sizeof(*block);
		}

		/* check for last block */
		if (block->next == 0)
			break;
	}

	if (data_only)
		pr_debug("\n");
}

/* Extract the 16-bit opcode from a command message's inline header. */
static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
	struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);

	return be16_to_cpu(hdr->opcode);
}

/*
 * Delayed-work handler fired when an async command exceeds its timeout.
 * Marks the entry timed out and forces completion of its slot; the
 * warning notes the command slot is effectively leaked.
 */
static void cb_timeout_handler(struct work_struct *work)
{
	struct delayed_work *dwork = container_of(work, struct delayed_work,
						  work);
	struct mlx5_cmd_work_ent *ent = container_of(dwork,
						     struct mlx5_cmd_work_ent,
						     cb_timeout_work);
	struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
						 cmd);

	ent->ret = -ETIMEDOUT;
	mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
		       mlx5_command_str(msg_to_opcode(ent->in)),
		       msg_to_opcode(ent->in));
	mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
}

/*
 * Fill a fake output header for a command that cannot reach the device
 * (internal error state).  Destroy/modify-style commands report success
 * (return 0); create/query-style commands get an internal-error status
 * and -ECANCELED; unknown opcodes return -EINVAL.
 */
static int set_internal_err_outbox(struct mlx5_core_dev *dev, u16 opcode,
				   struct mlx5_outbox_hdr *hdr)
{
	hdr->status = 0;
	hdr->syndrome = 0;

	switch (opcode) {
	/* Destroy/modify commands: report success. */
	case MLX5_CMD_OP_TEARDOWN_HCA:
	case MLX5_CMD_OP_DISABLE_HCA:
	case MLX5_CMD_OP_MANAGE_PAGES:
	case MLX5_CMD_OP_DESTROY_MKEY:
	case MLX5_CMD_OP_DESTROY_EQ:
	case MLX5_CMD_OP_DESTROY_CQ:
	case MLX5_CMD_OP_DESTROY_QP:
	case MLX5_CMD_OP_DESTROY_PSV:
	case MLX5_CMD_OP_DESTROY_SRQ:
	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
	case MLX5_CMD_OP_DESTROY_DCT:
	case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
	case MLX5_CMD_OP_DEALLOC_PD:
	case MLX5_CMD_OP_DEALLOC_UAR:
	case MLX5_CMD_OP_DETACH_FROM_MCG:
	case MLX5_CMD_OP_DEALLOC_XRCD:
	case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_DESTROY_LAG:
	case MLX5_CMD_OP_DESTROY_VPORT_LAG:
	case MLX5_CMD_OP_DESTROY_TIR:
	case MLX5_CMD_OP_DESTROY_SQ:
	case MLX5_CMD_OP_DESTROY_RQ:
	case MLX5_CMD_OP_DESTROY_RMP:
	case MLX5_CMD_OP_DESTROY_TIS:
	case MLX5_CMD_OP_DESTROY_RQT:
	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
	case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
	case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
	case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
	case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
	case MLX5_CMD_OP_MODIFY_VPORT_STATE:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_MODIFY_LAG:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
	case MLX5_CMD_OP_MODIFY_CONG_STATUS:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_OTHER_HCA_CAP:
	case MLX5_CMD_OP_ACCESS_REG:
	case MLX5_CMD_OP_DRAIN_DCT:
		return 0;

	/* Create/query commands: fail with an internal-error status. */
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_ALLOC_UAR:
	case MLX5_CMD_OP_ALLOC_XRCD:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_CONFIG_INT_MODERATION:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_CREATE_EQ:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_LAG:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_CREATE_PSV:
	case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_CREATE_VPORT_LAG:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_ENABLE_HCA:
	case MLX5_CMD_OP_GEN_EQE:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_INIT_HCA:
	case MLX5_CMD_OP_MAD_IFC:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_PAGE_FAULT_RESUME:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_QUERY_EQ:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_LAG:
	case MLX5_CMD_OP_QUERY_MAD_DEMUX:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_OTHER_HCA_CAP:
	case MLX5_CMD_OP_QUERY_PAGES:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SET_DC_CNAK_TRACE:
	case MLX5_CMD_OP_SET_HCA_CAP:
	case MLX5_CMD_OP_SET_ISSI:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_SET_MAD_DEMUX:
	case MLX5_CMD_OP_SET_ROCE_ADDRESS:
	case MLX5_CMD_OP_SQD_RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
		hdr->status = MLX5_CMD_STAT_INT_ERR;
		hdr->syndrome = 0xFFFFFFFF;
		return -ECANCELED;
	default:
		mlx5_core_err(dev, "Unknown FW command (%d)\n", opcode);
		return -EINVAL;
	}
}

/*
 * Complete a command work entry: fabricate an error outbox when the
 * device is down, update per-opcode statistics, deliver the result
 * either through the async callback (which also frees the messages and
 * the entry) or by signalling the synchronous waiter, and finally
 * release the submission semaphore.
 */
static void complete_command(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev,
						 cmd);
	mlx5_cmd_cbk_t callback;
	void *context;

	s64 ds;
	struct mlx5_cmd_stats *stats;
	unsigned long flags;
	int err;
	struct semaphore *sem;

	if (ent->page_queue)
		sem = &cmd->pages_sem;
	else
		sem = &cmd->sem;

	/* Device not up: synthesize the outbox and return value. */
	if (dev->state != MLX5_DEVICE_STATE_UP) {
		struct mlx5_outbox_hdr *out_hdr =
			(struct mlx5_outbox_hdr *)ent->out;
		struct mlx5_inbox_hdr *in_hdr =
			(struct mlx5_inbox_hdr *)(ent->in->first.data);
		u16 opcode = be16_to_cpu(in_hdr->opcode);

		ent->ret = set_internal_err_outbox(dev,
						   opcode,
						   out_hdr);
	}

	if (ent->callback) {
		/* Async path: record stats, copy out, free, then call back. */
		ds = ent->ts2 - ent->ts1;
		if (ent->op < ARRAY_SIZE(cmd->stats)) {
			stats = &cmd->stats[ent->op];
			spin_lock_irqsave(&stats->lock, flags);
			stats->sum += ds;
			++stats->n;
			spin_unlock_irqrestore(&stats->lock, flags);
		}

		callback = ent->callback;
		context = ent->context;
		err = ent->ret;
		if (!err)
			err = mlx5_copy_from_msg(ent->uout,
						 ent->out,
						 ent->uout_size);

		mlx5_free_cmd_msg(dev, ent->out);
		free_msg(dev, ent->in);

		err = err ?
	    err : ent->status;
		free_cmd(ent);
		callback(err, context);
	} else {
		/* Sync path: wake the waiter in wait_func(). */
		complete(&ent->done);
	}
	up(sem);
}

/*
 * Workqueue handler that actually submits a queued command: grabs the
 * (pages or regular) semaphore, claims a slot, builds the hardware
 * descriptor, signs it, and rings the doorbell.  In polling mode it
 * also polls for completion and drives the completion handler inline.
 */
static void cmd_work_handler(struct work_struct *work)
{
	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
	unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd_layout *lay;
	struct semaphore *sem;

	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
	down(sem);

	/* No slot (or device down): complete with the synthesized error. */
	if (alloc_ent(ent) < 0) {
		complete_command(ent);
		return;
	}

	ent->token = alloc_token(cmd);
	lay = get_inst(cmd, ent->idx);
	ent->lay = lay;
	memset(lay, 0, sizeof(*lay));
	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
	/* Opcode lives in the top 16 bits of the first inline dword. */
	ent->op = be32_to_cpu(lay->in[0]) >> 16;
	if (ent->in->numpages != 0)
		lay->in_ptr = cpu_to_be64(mlx5_fwp_get_dma(ent->in, 0));
	if (ent->out->numpages != 0)
		lay->out_ptr = cpu_to_be64(mlx5_fwp_get_dma(ent->out, 0));
	lay->inlen = cpu_to_be32(ent->uin_size);
	lay->outlen = cpu_to_be32(ent->uout_size);
	lay->type = MLX5_PCI_CMD_XPORT;
	lay->token = ent->token;
	lay->status_own = CMD_OWNER_HW;
	set_signature(ent, !cmd->checksum_disabled);
	dump_command(dev, ent, 1);
	ent->ts1 = ktime_get_ns();
	ent->busy = 0;
	if (ent->callback)
		schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);

	/* ring doorbell after the descriptor is valid */
	mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
	/* make sure data is written to RAM */
	mlx5_fwp_flush(cmd->cmd_page);
	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
	mmiowb();
	/* if not in polling don't use ent after this point*/
	if (cmd->mode == CMD_MODE_POLLING) {
		poll_timeout(ent);
		/* make sure we read the descriptor after ownership is SW */
		mlx5_cmd_comp_handler(dev, 1U << ent->idx);
	}
}

/* Translate an HCA delivery status code into a human-readable string. */
static const char *deliv_status_to_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:
		return "no errors";
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
		return "signature error";
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return "token error";
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
		return "bad block number";
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
		return "output pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return "input pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return "firmware internal error";
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
		return "command input length error";
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
		return "command ouput length error";
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return "reserved fields not cleared";
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
		return "bad command descriptor type";
	default:
		return "unknown status code";
	}
}

/*
 * Wait for a synchronous command to complete.  Polling mode waits
 * forever (poll_timeout bounds it); event mode waits with a timeout and
 * forces completion on expiry.  Returns the entry's final return code.
 */
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
	int timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd *cmd = &dev->cmd;
	int err;

	if (cmd->mode == CMD_MODE_POLLING) {
		wait_for_completion(&ent->done);
		err = ent->ret;
	} else if (!wait_for_completion_timeout(&ent->done, timeout)) {
		ent->ret = -ETIMEDOUT;
		mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
	}

	err = ent->ret;

	if (err == -ETIMEDOUT) {
		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
			       mlx5_command_str(msg_to_opcode(ent->in)),
			       msg_to_opcode(ent->in));
	}
	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
		      err, deliv_status_to_str(ent->status), ent->status);

	return err;
}

/* Accessor for the syndrome field of an output header. */
static __be32 *get_synd_ptr(struct mlx5_outbox_hdr *out)
{
	return &out->syndrome;
}

/* Accessor for the status field of an output header. */
static u8 *get_status_ptr(struct mlx5_outbox_hdr *out)
{
	return &out->status;
}

/* Notes:
 *	1. Callback functions may not sleep
 *	2. page queue commands do not support asynchrous completion
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
			   int uin_size,
			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
			   mlx5_cmd_cbk_t callback,
			   void *context, int page_queue, u8 *status)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	struct mlx5_cmd_stats *stats;
	int err = 0;
	s64 ds;
	u16 op;

	/* Page-queue commands must be synchronous (see notes above). */
	if (callback && page_queue)
		return -EINVAL;

	ent = alloc_cmd(cmd, in, uin_size, out, uout, uout_size, callback,
			context, page_queue);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	if (!callback)
		init_completion(&ent->done);

	INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
	INIT_WORK(&ent->work, cmd_work_handler);
	if (page_queue) {
		/* Page commands bypass the workqueue (may run from it). */
		cmd_work_handler(&ent->work);
	} else if (!queue_work(cmd->wq, &ent->work)) {
		mlx5_core_warn(dev, "failed to queue work\n");
		err = -ENOMEM;
		goto out_free;
	}

	if (callback)
		goto out;

	err = wait_func(dev, ent);
	if (err == -ETIMEDOUT)
		goto out;

	ds = ent->ts2 - ent->ts1;
	op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
	if (op < ARRAY_SIZE(cmd->stats)) {
		stats = &cmd->stats[op];
		spin_lock_irq(&stats->lock);
		stats->sum += ds;
		++stats->n;
1079 spin_unlock_irq(&stats->lock); 1080 } 1081 mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME, 1082 "fw exec time for %s is %lld nsec\n", 1083 mlx5_command_str(op), (long long)ds); 1084 *status = ent->status; 1085 free_cmd(ent); 1086 1087 return err; 1088 1089 out_free: 1090 free_cmd(ent); 1091 out: 1092 return err; 1093 } 1094 1095 static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, size_t size) 1096 { 1097 size_t delta; 1098 size_t i; 1099 1100 if (to == NULL || from == NULL) 1101 return (-ENOMEM); 1102 1103 delta = min_t(size_t, size, sizeof(to->first.data)); 1104 memcpy(to->first.data, from, delta); 1105 from = (char *)from + delta; 1106 size -= delta; 1107 1108 for (i = 0; size != 0; i++) { 1109 struct mlx5_cmd_prot_block *block; 1110 1111 block = mlx5_fwp_get_virt(to, i * MLX5_CMD_MBOX_SIZE); 1112 1113 delta = min_t(size_t, size, MLX5_CMD_DATA_BLOCK_SIZE); 1114 memcpy(block->data, from, delta); 1115 from = (char *)from + delta; 1116 size -= delta; 1117 } 1118 return (0); 1119 } 1120 1121 static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size) 1122 { 1123 size_t delta; 1124 size_t i; 1125 1126 if (to == NULL || from == NULL) 1127 return (-ENOMEM); 1128 1129 delta = min_t(size_t, size, sizeof(from->first.data)); 1130 memcpy(to, from->first.data, delta); 1131 to = (char *)to + delta; 1132 size -= delta; 1133 1134 for (i = 0; size != 0; i++) { 1135 struct mlx5_cmd_prot_block *block; 1136 1137 block = mlx5_fwp_get_virt(from, i * MLX5_CMD_MBOX_SIZE); 1138 1139 delta = min_t(size_t, size, MLX5_CMD_DATA_BLOCK_SIZE); 1140 memcpy(to, block->data, delta); 1141 to = (char *)to + delta; 1142 size -= delta; 1143 } 1144 return (0); 1145 } 1146 1147 static struct mlx5_cmd_msg * 1148 mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, gfp_t flags, size_t size) 1149 { 1150 struct mlx5_cmd_msg *msg; 1151 size_t blen; 1152 size_t n; 1153 size_t i; 1154 1155 blen = size - min_t(size_t, sizeof(msg->first.data), size); 1156 n = howmany(blen, 
MLX5_CMD_DATA_BLOCK_SIZE); 1157 1158 msg = mlx5_fwp_alloc(dev, flags, howmany(n, MLX5_NUM_CMDS_IN_ADAPTER_PAGE)); 1159 if (msg == NULL) 1160 return (ERR_PTR(-ENOMEM)); 1161 1162 for (i = 0; i != n; i++) { 1163 struct mlx5_cmd_prot_block *block; 1164 1165 block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE); 1166 1167 memset(block, 0, MLX5_CMD_MBOX_SIZE); 1168 1169 if (i != (n - 1)) { 1170 u64 dma = mlx5_fwp_get_dma(msg, (i + 1) * MLX5_CMD_MBOX_SIZE); 1171 block->next = cpu_to_be64(dma); 1172 } 1173 block->block_num = cpu_to_be32(i); 1174 } 1175 1176 /* make sure initial data is written to RAM */ 1177 mlx5_fwp_flush(msg); 1178 1179 return (msg); 1180 } 1181 1182 static void 1183 mlx5_free_cmd_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) 1184 { 1185 1186 mlx5_fwp_free(msg); 1187 } 1188 1189 static void set_wqname(struct mlx5_core_dev *dev) 1190 { 1191 struct mlx5_cmd *cmd = &dev->cmd; 1192 1193 snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s", 1194 dev_name(&dev->pdev->dev)); 1195 } 1196 1197 static void clean_debug_files(struct mlx5_core_dev *dev) 1198 { 1199 } 1200 1201 1202 static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode) 1203 { 1204 struct mlx5_cmd *cmd = &dev->cmd; 1205 int i; 1206 1207 for (i = 0; i < cmd->max_reg_cmds; i++) 1208 down(&cmd->sem); 1209 1210 down(&cmd->pages_sem); 1211 cmd->mode = mode; 1212 1213 up(&cmd->pages_sem); 1214 for (i = 0; i < cmd->max_reg_cmds; i++) 1215 up(&cmd->sem); 1216 } 1217 1218 void mlx5_cmd_use_events(struct mlx5_core_dev *dev) 1219 { 1220 mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS); 1221 } 1222 1223 void mlx5_cmd_use_polling(struct mlx5_core_dev *dev) 1224 { 1225 mlx5_cmd_change_mod(dev, CMD_MODE_POLLING); 1226 } 1227 1228 static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) 1229 { 1230 unsigned long flags; 1231 1232 if (msg->cache) { 1233 spin_lock_irqsave(&msg->cache->lock, flags); 1234 list_add_tail(&msg->list, &msg->cache->head); 1235 
spin_unlock_irqrestore(&msg->cache->lock, flags); 1236 } else { 1237 mlx5_free_cmd_msg(dev, msg); 1238 } 1239 } 1240 1241 void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u32 vector) 1242 { 1243 struct mlx5_cmd *cmd = &dev->cmd; 1244 struct mlx5_cmd_work_ent *ent; 1245 int i; 1246 1247 /* make sure data gets read from RAM */ 1248 mlx5_fwp_invalidate(cmd->cmd_page); 1249 1250 while (vector != 0) { 1251 i = ffs(vector) - 1; 1252 vector &= ~(1U << i); 1253 ent = cmd->ent_arr[i]; 1254 if (ent->callback) 1255 cancel_delayed_work(&ent->cb_timeout_work); 1256 ent->ts2 = ktime_get_ns(); 1257 memcpy(ent->out->first.data, ent->lay->out, 1258 sizeof(ent->lay->out)); 1259 /* make sure data gets read from RAM */ 1260 mlx5_fwp_invalidate(ent->out); 1261 dump_command(dev, ent, 0); 1262 if (!ent->ret) { 1263 if (!cmd->checksum_disabled) 1264 ent->ret = verify_signature(ent); 1265 else 1266 ent->ret = 0; 1267 ent->status = ent->lay->status_own >> 1; 1268 if (vector & MLX5_TRIGGERED_CMD_COMP) 1269 ent->status = MLX5_DRIVER_STATUS_ABORTED; 1270 else 1271 ent->status = ent->lay->status_own >> 1; 1272 1273 mlx5_core_dbg(dev, 1274 "FW command ret 0x%x, status %s(0x%x)\n", 1275 ent->ret, 1276 deliv_status_to_str(ent->status), 1277 ent->status); 1278 } 1279 free_ent(cmd, ent->idx); 1280 complete_command(ent); 1281 } 1282 } 1283 EXPORT_SYMBOL(mlx5_cmd_comp_handler); 1284 1285 static int status_to_err(u8 status) 1286 { 1287 return status ? 
-1 : 0; /* TBD more meaningful codes */ 1288 } 1289 1290 static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size, 1291 gfp_t gfp) 1292 { 1293 struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM); 1294 struct mlx5_cmd *cmd = &dev->cmd; 1295 struct cache_ent *ent = NULL; 1296 1297 if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE) 1298 ent = &cmd->cache.large; 1299 else if (in_size > 16 && in_size <= MED_LIST_SIZE) 1300 ent = &cmd->cache.med; 1301 1302 if (ent) { 1303 spin_lock_irq(&ent->lock); 1304 if (!list_empty(&ent->head)) { 1305 msg = list_entry(ent->head.next, struct mlx5_cmd_msg, 1306 list); 1307 list_del(&msg->list); 1308 } 1309 spin_unlock_irq(&ent->lock); 1310 } 1311 1312 if (IS_ERR(msg)) 1313 msg = mlx5_alloc_cmd_msg(dev, gfp, in_size); 1314 1315 return msg; 1316 } 1317 1318 static u16 opcode_from_in(struct mlx5_inbox_hdr *in) 1319 { 1320 return be16_to_cpu(in->opcode); 1321 } 1322 1323 static int is_manage_pages(struct mlx5_inbox_hdr *in) 1324 { 1325 return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES; 1326 } 1327 1328 static int cmd_exec_helper(struct mlx5_core_dev *dev, 1329 void *in, int in_size, 1330 void *out, int out_size, 1331 mlx5_cmd_cbk_t callback, void *context) 1332 { 1333 struct mlx5_cmd_msg *inb; 1334 struct mlx5_cmd_msg *outb; 1335 int pages_queue; 1336 const gfp_t gfp = GFP_KERNEL; 1337 int err; 1338 u8 status = 0; 1339 u32 drv_synd; 1340 1341 if (pci_channel_offline(dev->pdev) || 1342 dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { 1343 err = mlx5_internal_err_ret_value(dev, opcode_from_in(in), &drv_synd, &status); 1344 *get_synd_ptr(out) = cpu_to_be32(drv_synd); 1345 *get_status_ptr(out) = status; 1346 return err; 1347 } 1348 1349 pages_queue = is_manage_pages(in); 1350 1351 inb = alloc_msg(dev, in_size, gfp); 1352 if (IS_ERR(inb)) { 1353 err = PTR_ERR(inb); 1354 return err; 1355 } 1356 1357 err = mlx5_copy_to_msg(inb, in, in_size); 1358 if (err) { 1359 mlx5_core_warn(dev, "err %d\n", err); 1360 goto out_in; 
1361 } 1362 1363 outb = mlx5_alloc_cmd_msg(dev, gfp, out_size); 1364 if (IS_ERR(outb)) { 1365 err = PTR_ERR(outb); 1366 goto out_in; 1367 } 1368 1369 err = mlx5_cmd_invoke(dev, inb, in_size, outb, out, out_size, callback, 1370 context, pages_queue, &status); 1371 if (err) { 1372 if (err == -ETIMEDOUT) 1373 return err; 1374 goto out_out; 1375 } 1376 1377 mlx5_core_dbg(dev, "err %d, status %d\n", err, status); 1378 if (status) { 1379 err = status_to_err(status); 1380 goto out_out; 1381 } 1382 1383 if (callback) 1384 return err; 1385 1386 err = mlx5_copy_from_msg(out, outb, out_size); 1387 1388 out_out: 1389 mlx5_free_cmd_msg(dev, outb); 1390 1391 out_in: 1392 free_msg(dev, inb); 1393 return err; 1394 } 1395 1396 int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, 1397 int out_size) 1398 { 1399 return cmd_exec_helper(dev, in, in_size, out, out_size, NULL, NULL); 1400 } 1401 EXPORT_SYMBOL(mlx5_cmd_exec); 1402 1403 int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, 1404 void *out, int out_size, mlx5_cmd_cbk_t callback, 1405 void *context) 1406 { 1407 return cmd_exec_helper(dev, in, in_size, out, out_size, callback, context); 1408 } 1409 EXPORT_SYMBOL(mlx5_cmd_exec_cb); 1410 1411 static void destroy_msg_cache(struct mlx5_core_dev *dev) 1412 { 1413 struct mlx5_cmd *cmd = &dev->cmd; 1414 struct mlx5_cmd_msg *msg; 1415 struct mlx5_cmd_msg *n; 1416 1417 list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) { 1418 list_del(&msg->list); 1419 mlx5_free_cmd_msg(dev, msg); 1420 } 1421 1422 list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) { 1423 list_del(&msg->list); 1424 mlx5_free_cmd_msg(dev, msg); 1425 } 1426 } 1427 1428 static int create_msg_cache(struct mlx5_core_dev *dev) 1429 { 1430 struct mlx5_cmd *cmd = &dev->cmd; 1431 struct mlx5_cmd_msg *msg; 1432 int err; 1433 int i; 1434 1435 spin_lock_init(&cmd->cache.large.lock); 1436 INIT_LIST_HEAD(&cmd->cache.large.head); 1437 spin_lock_init(&cmd->cache.med.lock); 
1438 INIT_LIST_HEAD(&cmd->cache.med.head); 1439 1440 for (i = 0; i < NUM_LONG_LISTS; i++) { 1441 msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE); 1442 if (IS_ERR(msg)) { 1443 err = PTR_ERR(msg); 1444 goto ex_err; 1445 } 1446 msg->cache = &cmd->cache.large; 1447 list_add_tail(&msg->list, &cmd->cache.large.head); 1448 } 1449 1450 for (i = 0; i < NUM_MED_LISTS; i++) { 1451 msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE); 1452 if (IS_ERR(msg)) { 1453 err = PTR_ERR(msg); 1454 goto ex_err; 1455 } 1456 msg->cache = &cmd->cache.med; 1457 list_add_tail(&msg->list, &cmd->cache.med.head); 1458 } 1459 1460 return 0; 1461 1462 ex_err: 1463 destroy_msg_cache(dev); 1464 return err; 1465 } 1466 1467 static int 1468 alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) 1469 { 1470 int err; 1471 1472 sx_init(&cmd->dma_sx, "MLX5-DMA-SX"); 1473 mtx_init(&cmd->dma_mtx, "MLX5-DMA-MTX", NULL, MTX_DEF); 1474 cv_init(&cmd->dma_cv, "MLX5-DMA-CV"); 1475 1476 /* 1477 * Create global DMA descriptor tag for allocating 1478 * 4K firmware pages: 1479 */ 1480 err = -bus_dma_tag_create( 1481 bus_get_dma_tag(dev->pdev->dev.bsddev), 1482 MLX5_ADAPTER_PAGE_SIZE, /* alignment */ 1483 0, /* no boundary */ 1484 BUS_SPACE_MAXADDR, /* lowaddr */ 1485 BUS_SPACE_MAXADDR, /* highaddr */ 1486 NULL, NULL, /* filter, filterarg */ 1487 MLX5_ADAPTER_PAGE_SIZE, /* maxsize */ 1488 1, /* nsegments */ 1489 MLX5_ADAPTER_PAGE_SIZE, /* maxsegsize */ 1490 0, /* flags */ 1491 NULL, NULL, /* lockfunc, lockfuncarg */ 1492 &cmd->dma_tag); 1493 if (err != 0) 1494 goto failure_destroy_sx; 1495 1496 cmd->cmd_page = mlx5_fwp_alloc(dev, GFP_KERNEL, 1); 1497 if (cmd->cmd_page == NULL) { 1498 err = -ENOMEM; 1499 goto failure_alloc_page; 1500 } 1501 cmd->dma = mlx5_fwp_get_dma(cmd->cmd_page, 0); 1502 cmd->cmd_buf = mlx5_fwp_get_virt(cmd->cmd_page, 0); 1503 return (0); 1504 1505 failure_alloc_page: 1506 bus_dma_tag_destroy(cmd->dma_tag); 1507 1508 failure_destroy_sx: 1509 cv_destroy(&cmd->dma_cv); 1510 
mtx_destroy(&cmd->dma_mtx); 1511 sx_destroy(&cmd->dma_sx); 1512 return (err); 1513 } 1514 1515 static void 1516 free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) 1517 { 1518 1519 mlx5_fwp_free(cmd->cmd_page); 1520 bus_dma_tag_destroy(cmd->dma_tag); 1521 cv_destroy(&cmd->dma_cv); 1522 mtx_destroy(&cmd->dma_mtx); 1523 sx_destroy(&cmd->dma_sx); 1524 } 1525 1526 int mlx5_cmd_init(struct mlx5_core_dev *dev) 1527 { 1528 struct mlx5_cmd *cmd = &dev->cmd; 1529 u32 cmd_h, cmd_l; 1530 u16 cmd_if_rev; 1531 int err; 1532 int i; 1533 1534 memset(cmd, 0, sizeof(*cmd)); 1535 cmd_if_rev = cmdif_rev_get(dev); 1536 if (cmd_if_rev != CMD_IF_REV) { 1537 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""Driver cmdif rev(%d) differs from firmware's(%d)\n", CMD_IF_REV, cmd_if_rev); 1538 return -EINVAL; 1539 } 1540 1541 err = alloc_cmd_page(dev, cmd); 1542 if (err) 1543 goto err_free_pool; 1544 1545 cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff; 1546 cmd->log_sz = cmd_l >> 4 & 0xf; 1547 cmd->log_stride = cmd_l & 0xf; 1548 if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) { 1549 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""firmware reports too many outstanding commands %d\n", 1 << cmd->log_sz); 1550 err = -EINVAL; 1551 goto err_free_page; 1552 } 1553 1554 if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) { 1555 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""command queue size overflow\n"); 1556 err = -EINVAL; 1557 goto err_free_page; 1558 } 1559 1560 cmd->checksum_disabled = 1; 1561 cmd->max_reg_cmds = (1 << cmd->log_sz) - 1; 1562 cmd->bitmask = (1 << cmd->max_reg_cmds) - 1; 1563 1564 cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; 1565 if (cmd->cmdif_rev > CMD_IF_REV) { 1566 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""driver does not support command interface version. 
driver %d, firmware %d\n", CMD_IF_REV, cmd->cmdif_rev); 1567 err = -ENOTSUPP; 1568 goto err_free_page; 1569 } 1570 1571 spin_lock_init(&cmd->alloc_lock); 1572 spin_lock_init(&cmd->token_lock); 1573 for (i = 0; i < ARRAY_SIZE(cmd->stats); i++) 1574 spin_lock_init(&cmd->stats[i].lock); 1575 1576 sema_init(&cmd->sem, cmd->max_reg_cmds); 1577 sema_init(&cmd->pages_sem, 1); 1578 1579 cmd_h = (u32)((u64)(cmd->dma) >> 32); 1580 cmd_l = (u32)(cmd->dma); 1581 if (cmd_l & 0xfff) { 1582 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""invalid command queue address\n"); 1583 err = -ENOMEM; 1584 goto err_free_page; 1585 } 1586 1587 iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h); 1588 iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz); 1589 1590 /* Make sure firmware sees the complete address before we proceed */ 1591 wmb(); 1592 1593 mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma)); 1594 1595 cmd->mode = CMD_MODE_POLLING; 1596 1597 err = create_msg_cache(dev); 1598 if (err) { 1599 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""failed to create command cache\n"); 1600 goto err_free_page; 1601 } 1602 1603 set_wqname(dev); 1604 cmd->wq = create_singlethread_workqueue(cmd->wq_name); 1605 if (!cmd->wq) { 1606 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""failed to create command workqueue\n"); 1607 err = -ENOMEM; 1608 goto err_cache; 1609 } 1610 1611 return 0; 1612 1613 err_cache: 1614 destroy_msg_cache(dev); 1615 1616 err_free_page: 1617 free_cmd_page(dev, cmd); 1618 1619 err_free_pool: 1620 return err; 1621 } 1622 EXPORT_SYMBOL(mlx5_cmd_init); 1623 1624 void mlx5_cmd_cleanup(struct mlx5_core_dev *dev) 1625 { 1626 struct mlx5_cmd *cmd = &dev->cmd; 1627 1628 clean_debug_files(dev); 1629 destroy_workqueue(cmd->wq); 1630 destroy_msg_cache(dev); 1631 free_cmd_page(dev, cmd); 1632 } 1633 EXPORT_SYMBOL(mlx5_cmd_cleanup); 1634 1635 static const char *cmd_status_str(u8 status) 1636 { 1637 switch (status) { 1638 case MLX5_CMD_STAT_OK: 1639 return "OK"; 1640 
case MLX5_CMD_STAT_INT_ERR: 1641 return "internal error"; 1642 case MLX5_CMD_STAT_BAD_OP_ERR: 1643 return "bad operation"; 1644 case MLX5_CMD_STAT_BAD_PARAM_ERR: 1645 return "bad parameter"; 1646 case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: 1647 return "bad system state"; 1648 case MLX5_CMD_STAT_BAD_RES_ERR: 1649 return "bad resource"; 1650 case MLX5_CMD_STAT_RES_BUSY: 1651 return "resource busy"; 1652 case MLX5_CMD_STAT_LIM_ERR: 1653 return "limits exceeded"; 1654 case MLX5_CMD_STAT_BAD_RES_STATE_ERR: 1655 return "bad resource state"; 1656 case MLX5_CMD_STAT_IX_ERR: 1657 return "bad index"; 1658 case MLX5_CMD_STAT_NO_RES_ERR: 1659 return "no resources"; 1660 case MLX5_CMD_STAT_BAD_INP_LEN_ERR: 1661 return "bad input length"; 1662 case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: 1663 return "bad output length"; 1664 case MLX5_CMD_STAT_BAD_QP_STATE_ERR: 1665 return "bad QP state"; 1666 case MLX5_CMD_STAT_BAD_PKT_ERR: 1667 return "bad packet (discarded)"; 1668 case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: 1669 return "bad size too many outstanding CQEs"; 1670 default: 1671 return "unknown status"; 1672 } 1673 } 1674 1675 static int cmd_status_to_err_helper(u8 status) 1676 { 1677 switch (status) { 1678 case MLX5_CMD_STAT_OK: return 0; 1679 case MLX5_CMD_STAT_INT_ERR: return -EIO; 1680 case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL; 1681 case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL; 1682 case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO; 1683 case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL; 1684 case MLX5_CMD_STAT_RES_BUSY: return -EBUSY; 1685 case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM; 1686 case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL; 1687 case MLX5_CMD_STAT_IX_ERR: return -EINVAL; 1688 case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN; 1689 case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO; 1690 case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO; 1691 case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL; 1692 case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL; 1693 case 
MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL; 1694 default: return -EIO; 1695 } 1696 } 1697 1698 /* this will be available till all the commands use set/get macros */ 1699 int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr) 1700 { 1701 if (!hdr->status) 1702 return 0; 1703 1704 printf("mlx5_core: WARN: ""command failed, status %s(0x%x), syndrome 0x%x\n", cmd_status_str(hdr->status), hdr->status, be32_to_cpu(hdr->syndrome)); 1705 1706 return cmd_status_to_err_helper(hdr->status); 1707 } 1708 1709 int mlx5_cmd_status_to_err_v2(void *ptr) 1710 { 1711 u32 syndrome; 1712 u8 status; 1713 1714 status = be32_to_cpu(*(__be32 *)ptr) >> 24; 1715 if (!status) 1716 return 0; 1717 1718 syndrome = be32_to_cpu(*(__be32 *)(ptr + 4)); 1719 1720 printf("mlx5_core: WARN: ""command failed, status %s(0x%x), syndrome 0x%x\n", cmd_status_str(status), status, syndrome); 1721 1722 return cmd_status_to_err_helper(status); 1723 } 1724 1725