/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/io-mapping.h>
#include <linux/hardirq.h>
#include <linux/ktime.h>
#include <dev/mlx5/driver.h>

#include "mlx5_core.h"

static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size);
static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg);
static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);

enum {
	CMD_IF_REV = 5,
};

enum {
	CMD_MODE_POLLING,
	CMD_MODE_EVENTS
};

enum {
	NUM_LONG_LISTS = 2,
	NUM_MED_LISTS = 64,
	LONG_LIST_SIZE = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 +
		MLX5_CMD_DATA_BLOCK_SIZE,
	MED_LIST_SIZE = 16 + MLX5_CMD_DATA_BLOCK_SIZE,
};

enum {
	MLX5_CMD_DELIVERY_STAT_OK = 0x0,
	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1,
	MLX5_CMD_DELIVERY_STAT_TOK_ERR = 0x2,
	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR = 0x3,
	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR = 0x4,
	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR = 0x5,
	MLX5_CMD_DELIVERY_STAT_FW_ERR = 0x6,
	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR = 0x7,
	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR = 0x8,
	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR = 0x9,
	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10,
};

struct mlx5_ifc_mbox_out_bits {
	u8	status[0x8];
	u8	reserved_at_8[0x18];

	u8	syndrome[0x20];

	u8	reserved_at_40[0x40];
};

struct mlx5_ifc_mbox_in_bits {
	u8	opcode[0x10];
	u8	reserved_at_10[0x10];

	u8	reserved_at_20[0x10];
	u8	op_mod[0x10];

	u8	reserved_at_40[0x40];
};
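
/*
 * Allocate a command work entry. A caller passing a completion
 * callback may be running in a context that cannot sleep, so the
 * entry is allocated with GFP_ATOMIC in that case.
 */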
static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
					   struct mlx5_cmd_msg *in,
					   int uin_size,
					   struct mlx5_cmd_msg *out,
					   void *uout, int uout_size,
					   mlx5_cmd_cbk_t cbk,
					   void *context, int page_queue)
{
	gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
	struct mlx5_cmd_work_ent *ent;

	ent = kzalloc(sizeof(*ent), alloc_flags);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->in = in;
	ent->uin_size = uin_size;
	ent->out = out;
	ent->uout = uout;
	ent->uout_size = uout_size;
	ent->callback = cbk;
	ent->context = context;
	ent->cmd = cmd;
	ent->page_queue = page_queue;

	return ent;
}

static u8 alloc_token(struct mlx5_cmd *cmd)
{
	u8 token;

	spin_lock(&cmd->token_lock);
	cmd->token++;
	if (cmd->token == 0)
		cmd->token++;
	token = cmd->token;
	spin_unlock(&cmd->token_lock);

	return token;
}

static int alloc_ent(struct mlx5_cmd_work_ent *ent)
{
	unsigned long flags;
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev =
		container_of(cmd, struct mlx5_core_dev, cmd);
	int ret = cmd->max_reg_cmds;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	if (!ent->page_queue) {
		ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
		if (ret >= cmd->max_reg_cmds)
			ret = -1;
	}

	if (dev->state != MLX5_DEVICE_STATE_UP)
		ret = -1;

	if (ret != -1) {
		ent->busy = 1;
		ent->idx = ret;
		clear_bit(ent->idx, &cmd->bitmask);
		cmd->ent_arr[ent->idx] = ent;
	}
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);

	return ret;
}

static void free_ent(struct mlx5_cmd *cmd, int idx)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	set_bit(idx, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}

static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
	return cmd->cmd_buf + (idx << cmd->log_stride);
}

/*
 * The command interface protects the descriptor and every mailbox
 * block with an XOR-8 checksum: the signature byte is the complement
 * of the XOR of all other bytes, so XOR-ing the whole buffer,
 * signature included, must yield 0xff.
 */
static u8 xor8_buf(void *buf, int len)
{
	u8 *ptr = buf;
	u8 sum = 0;
	int i;

	for (i = 0; i < len; i++)
		sum ^= ptr[i];

	return sum;
}

static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
	if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
		return -EINVAL;

	if (xor8_buf(block, sizeof(*block)) != 0xff)
		return -EINVAL;

	return 0;
}

static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
			   int csum)
{
	block->token = token;
	if (csum) {
		block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
					    sizeof(block->data) - 2);
		block->sig = ~xor8_buf(block, sizeof(*block) - 1);
	}
}

static void
calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
{
	size_t i;

	for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) {
		struct mlx5_cmd_prot_block *block;

		block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE);

		/* compute signature */
		calc_block_sig(block, token, csum);

		/* check for last block */
		if (block->next == 0)
			break;
	}

	/* make sure data gets written to RAM */
	mlx5_fwp_flush(msg);
}

static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
	ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
	calc_chain_sig(ent->in, ent->token, csum);
	calc_chain_sig(ent->out, ent->token, csum);
}
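
/*
 * Poll the command slot until firmware hands ownership back to
 * software or the command timeout, plus a one second grace period,
 * expires. The loop also exits early when the device is no longer
 * in the UP state.
 */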
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_core_dev *dev = container_of(ent->cmd,
						 struct mlx5_core_dev, cmd);
	int poll_end = jiffies +
		msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
	u8 own;

	do {
		own = ent->lay->status_own;
		if (!(own & CMD_OWNER_HW) ||
		    dev->state != MLX5_DEVICE_STATE_UP) {
			ent->ret = 0;
			return;
		}
		usleep_range(5000, 10000);
	} while (time_before(jiffies, poll_end));

	ent->ret = -ETIMEDOUT;
}

static void free_cmd(struct mlx5_cmd_work_ent *ent)
{
	cancel_delayed_work_sync(&ent->cb_timeout_work);
	kfree(ent);
}

static int
verify_signature(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd_msg *msg = ent->out;
	size_t i;
	int err;
	u8 sig;

	sig = xor8_buf(ent->lay, sizeof(*ent->lay));
	if (sig != 0xff)
		return -EINVAL;

	for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) {
		struct mlx5_cmd_prot_block *block;

		block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE);

		/* verify signature */
		err = verify_block_sig(block);
		if (err != 0)
			return (err);

		/* check for last block */
		if (block->next == 0)
			break;
	}
	return (0);
}

static void dump_buf(void *buf, int size, int data_only, int offset)
{
	__be32 *p = buf;
	int i;

	for (i = 0; i < size; i += 16) {
		pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
			 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
			 be32_to_cpu(p[3]));
		p += 4;
		offset += 16;
	}
	if (!data_only)
		pr_debug("\n");
}

enum {
	MLX5_DRIVER_STATUS_ABORTED = 0xfe,
	MLX5_DRIVER_SYND = 0xbadd00de,
};

static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
				       u32 *synd, u8 *status)
{
	*synd = 0;
	*status = 0;

	switch (op) {
	/*
	 * Teardown and resource destruction commands are treated as
	 * having succeeded when the device is in internal error state:
	 */
	case MLX5_CMD_OP_TEARDOWN_HCA:
	case MLX5_CMD_OP_DISABLE_HCA:
	case MLX5_CMD_OP_MANAGE_PAGES:
	case MLX5_CMD_OP_DESTROY_MKEY:
	case MLX5_CMD_OP_DESTROY_EQ:
	case MLX5_CMD_OP_DESTROY_CQ:
	case MLX5_CMD_OP_DESTROY_QP:
	case MLX5_CMD_OP_DESTROY_PSV:
	case MLX5_CMD_OP_DESTROY_SRQ:
	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
	case MLX5_CMD_OP_DESTROY_DCT:
	case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
	case MLX5_CMD_OP_DEALLOC_PD:
	case MLX5_CMD_OP_DEALLOC_UAR:
	case MLX5_CMD_OP_DETACH_FROM_MCG:
	case MLX5_CMD_OP_DEALLOC_XRCD:
	case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_DESTROY_TIR:
	case MLX5_CMD_OP_DESTROY_SQ:
	case MLX5_CMD_OP_DESTROY_RQ:
	case MLX5_CMD_OP_DESTROY_RMP:
	case MLX5_CMD_OP_DESTROY_TIS:
	case MLX5_CMD_OP_DESTROY_RQT:
	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
	case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
	case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
		return MLX5_CMD_STAT_OK;
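
	/*
	 * Commands which create or query resources must fail when the
	 * device is in internal error state, so that callers do not
	 * act on stale data:
	 */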
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_INIT_HCA:
	case MLX5_CMD_OP_ENABLE_HCA:
	case MLX5_CMD_OP_QUERY_PAGES:
	case MLX5_CMD_OP_SET_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_SET_ISSI:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
	case MLX5_CMD_OP_PAGE_FAULT_RESUME:
	case MLX5_CMD_OP_CREATE_EQ:
	case MLX5_CMD_OP_QUERY_EQ:
	case MLX5_CMD_OP_GEN_EQE:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_SQD_RTS_QP:
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_CREATE_PSV:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_DRAIN_DCT:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_MODIFY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_SET_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_UAR:
	case MLX5_CMD_OP_CONFIG_INT_MODERATION:
	case MLX5_CMD_OP_ACCESS_REG:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_MAD_IFC:
	case MLX5_CMD_OP_QUERY_MAD_DEMUX:
	case MLX5_CMD_OP_SET_MAD_DEMUX:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_ALLOC_XRCD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_MODIFY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
		*status = MLX5_DRIVER_STATUS_ABORTED;
		*synd = MLX5_DRIVER_SYND;
		return -EIO;
	default:
		mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
		return -EINVAL;
	}
}

const char *mlx5_command_str(int command)
{
#define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd

	switch (command) {
	MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP);
	MLX5_COMMAND_STR_CASE(SET_HCA_CAP);
	MLX5_COMMAND_STR_CASE(QUERY_ADAPTER);
	MLX5_COMMAND_STR_CASE(INIT_HCA);
	MLX5_COMMAND_STR_CASE(TEARDOWN_HCA);
	MLX5_COMMAND_STR_CASE(ENABLE_HCA);
	MLX5_COMMAND_STR_CASE(DISABLE_HCA);
	MLX5_COMMAND_STR_CASE(QUERY_PAGES);
	MLX5_COMMAND_STR_CASE(MANAGE_PAGES);
	MLX5_COMMAND_STR_CASE(QUERY_ISSI);
	MLX5_COMMAND_STR_CASE(SET_ISSI);
	MLX5_COMMAND_STR_CASE(CREATE_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_MKEY);
	MLX5_COMMAND_STR_CASE(DESTROY_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS);
	MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME);
	MLX5_COMMAND_STR_CASE(CREATE_EQ);
	MLX5_COMMAND_STR_CASE(DESTROY_EQ);
	MLX5_COMMAND_STR_CASE(QUERY_EQ);
	MLX5_COMMAND_STR_CASE(GEN_EQE);
	MLX5_COMMAND_STR_CASE(CREATE_CQ);
	MLX5_COMMAND_STR_CASE(DESTROY_CQ);
	MLX5_COMMAND_STR_CASE(QUERY_CQ);
	MLX5_COMMAND_STR_CASE(MODIFY_CQ);
	MLX5_COMMAND_STR_CASE(CREATE_QP);
	MLX5_COMMAND_STR_CASE(DESTROY_QP);
	MLX5_COMMAND_STR_CASE(RST2INIT_QP);
	MLX5_COMMAND_STR_CASE(INIT2RTR_QP);
	MLX5_COMMAND_STR_CASE(RTR2RTS_QP);
	MLX5_COMMAND_STR_CASE(RTS2RTS_QP);
	MLX5_COMMAND_STR_CASE(SQERR2RTS_QP);
	MLX5_COMMAND_STR_CASE(2ERR_QP);
	MLX5_COMMAND_STR_CASE(2RST_QP);
	MLX5_COMMAND_STR_CASE(QUERY_QP);
	MLX5_COMMAND_STR_CASE(SQD_RTS_QP);
	MLX5_COMMAND_STR_CASE(MAD_IFC);
	MLX5_COMMAND_STR_CASE(INIT2INIT_QP);
	MLX5_COMMAND_STR_CASE(CREATE_PSV);
	MLX5_COMMAND_STR_CASE(DESTROY_PSV);
	MLX5_COMMAND_STR_CASE(CREATE_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(CREATE_DCT);
	MLX5_COMMAND_STR_CASE(SET_DC_CNAK_TRACE);
	MLX5_COMMAND_STR_CASE(DESTROY_DCT);
	MLX5_COMMAND_STR_CASE(DRAIN_DCT);
	MLX5_COMMAND_STR_CASE(QUERY_DCT);
	MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER);
	MLX5_COMMAND_STR_CASE(SET_WOL_ROL);
	MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL);
	MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(ALLOC_PD);
	MLX5_COMMAND_STR_CASE(DEALLOC_PD);
	MLX5_COMMAND_STR_CASE(ALLOC_UAR);
	MLX5_COMMAND_STR_CASE(DEALLOC_UAR);
	MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION);
	MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG);
	MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG);
	MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG);
	MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(NOP);
	MLX5_COMMAND_STR_CASE(ALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(DEALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS);
	MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(CREATE_RMP);
	MLX5_COMMAND_STR_CASE(MODIFY_RMP);
	MLX5_COMMAND_STR_CASE(DESTROY_RMP);
	MLX5_COMMAND_STR_CASE(QUERY_RMP);
	MLX5_COMMAND_STR_CASE(CREATE_RQT);
	MLX5_COMMAND_STR_CASE(MODIFY_RQT);
	MLX5_COMMAND_STR_CASE(DESTROY_RQT);
	MLX5_COMMAND_STR_CASE(QUERY_RQT);
	MLX5_COMMAND_STR_CASE(ACCESS_REG);
	MLX5_COMMAND_STR_CASE(CREATE_SQ);
	MLX5_COMMAND_STR_CASE(MODIFY_SQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SQ);
	MLX5_COMMAND_STR_CASE(QUERY_SQ);
	MLX5_COMMAND_STR_CASE(CREATE_RQ);
	MLX5_COMMAND_STR_CASE(MODIFY_RQ);
	MLX5_COMMAND_STR_CASE(DESTROY_RQ);
	MLX5_COMMAND_STR_CASE(QUERY_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_TIR);
	MLX5_COMMAND_STR_CASE(MODIFY_TIR);
	MLX5_COMMAND_STR_CASE(DESTROY_TIR);
	MLX5_COMMAND_STR_CASE(QUERY_TIR);
	MLX5_COMMAND_STR_CASE(CREATE_TIS);
	MLX5_COMMAND_STR_CASE(MODIFY_TIS);
	MLX5_COMMAND_STR_CASE(DESTROY_TIS);
	MLX5_COMMAND_STR_CASE(QUERY_TIS);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(SET_DIAGNOSTICS);
	MLX5_COMMAND_STR_CASE(QUERY_DIAGNOSTICS);
	default: return "unknown command opcode";
	}
}

static const char *cmd_status_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:
		return "OK";
	case MLX5_CMD_STAT_INT_ERR:
		return "internal error";
	case MLX5_CMD_STAT_BAD_OP_ERR:
		return "bad operation";
	case MLX5_CMD_STAT_BAD_PARAM_ERR:
		return "bad parameter";
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
		return "bad system state";
	case MLX5_CMD_STAT_BAD_RES_ERR:
		return "bad resource";
	case MLX5_CMD_STAT_RES_BUSY:
		return "resource busy";
	case MLX5_CMD_STAT_LIM_ERR:
		return "limits exceeded";
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
		return "bad resource state";
	case MLX5_CMD_STAT_IX_ERR:
		return "bad index";
	case MLX5_CMD_STAT_NO_RES_ERR:
		return "no resources";
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
		return "bad input length";
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
		return "bad output length";
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
		return "bad QP state";
	case MLX5_CMD_STAT_BAD_PKT_ERR:
		return "bad packet (discarded)";
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
		return "bad size too many outstanding CQEs";
	default:
		return "unknown status";
	}
}

static int cmd_status_to_err_helper(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK: return 0;
	case MLX5_CMD_STAT_INT_ERR: return -EIO;
	case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL;
	case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL;
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO;
	case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL;
	case MLX5_CMD_STAT_RES_BUSY: return -EBUSY;
	case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM;
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL;
	case MLX5_CMD_STAT_IX_ERR: return -EINVAL;
	case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN;
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO;
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO;
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL;
	case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL;
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL;
	default: return -EIO;
	}
}

void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome)
{
	*status = MLX5_GET(mbox_out, out, status);
	*syndrome = MLX5_GET(mbox_out, out, syndrome);
}

static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out)
{
	u32 syndrome;
	u8 status;
	u16 opcode;
	u16 op_mod;

	mlx5_cmd_mbox_status(out, &status, &syndrome);
	if (!status)
		return 0;

	opcode = MLX5_GET(mbox_in, in, opcode);
	op_mod = MLX5_GET(mbox_in, in, op_mod);

	mlx5_core_err(dev,
		      "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
		      mlx5_command_str(opcode),
		      opcode, op_mod,
		      cmd_status_str(status),
		      status,
		      syndrome);

	return cmd_status_to_err_helper(status);
}

static void dump_command(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_work_ent *ent, int input)
{
	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
	u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode);
	size_t i;
	int data_only;
	int offset = 0;
	int msg_len = input ? ent->uin_size : ent->uout_size;
	int dump_len;

	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

	if (data_only)
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
				   "dump command data %s(0x%x) %s\n",
				   mlx5_command_str(op), op,
				   input ? "INPUT" : "OUTPUT");
	else
		mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
			      mlx5_command_str(op), op,
			      input ? "INPUT" : "OUTPUT");

	if (data_only) {
		if (input) {
			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
			offset += sizeof(ent->lay->in);
		} else {
			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
			offset += sizeof(ent->lay->out);
		}
	} else {
		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
		offset += sizeof(*ent->lay);
	}

	for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) {
		struct mlx5_cmd_prot_block *block;

		block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE);

		if (data_only) {
			if (offset >= msg_len)
				break;
			dump_len = min_t(int,
			    MLX5_CMD_DATA_BLOCK_SIZE, msg_len - offset);

			dump_buf(block->data, dump_len, 1, offset);
			offset += MLX5_CMD_DATA_BLOCK_SIZE;
		} else {
			mlx5_core_dbg(dev, "command block:\n");
			dump_buf(block, sizeof(*block), 0, offset);
			offset += sizeof(*block);
		}

		/* check for last block */
		if (block->next == 0)
			break;
	}

	if (data_only)
		pr_debug("\n");
}

static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
	return MLX5_GET(mbox_in, in->first.data, opcode);
}
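
/*
 * Fired when an event mode command with a completion callback has not
 * completed within the command timeout: force a completion so the
 * entry is reclaimed, even though firmware may still own the slot.
 */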
static void cb_timeout_handler(struct work_struct *work)
{
	struct delayed_work *dwork = container_of(work, struct delayed_work,
						  work);
	struct mlx5_cmd_work_ent *ent = container_of(dwork,
						     struct mlx5_cmd_work_ent,
						     cb_timeout_work);
	struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
						 cmd);

	ent->ret = -ETIMEDOUT;
	mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
		       mlx5_command_str(msg_to_opcode(ent->in)),
		       msg_to_opcode(ent->in));
	mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
}

/*
 * Finish a command: override the result when the device is not UP,
 * then either free the entry and invoke the completion callback, or
 * wake the synchronous waiter, and finally release the semaphore slot.
 */
static void complete_command(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev,
						 cmd);
	mlx5_cmd_cbk_t callback;
	void *context;

	s64 ds;
	struct mlx5_cmd_stats *stats;
	unsigned long flags;
	int err;
	struct semaphore *sem;

	if (ent->page_queue)
		sem = &cmd->pages_sem;
	else
		sem = &cmd->sem;

	if (dev->state != MLX5_DEVICE_STATE_UP) {
		u8 status = 0;
		u32 drv_synd;

		ent->ret = mlx5_internal_err_ret_value(dev, msg_to_opcode(ent->in), &drv_synd, &status);
		MLX5_SET(mbox_out, ent->out, status, status);
		MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
	}

	if (ent->callback) {
		ds = ent->ts2 - ent->ts1;
		if (ent->op < ARRAY_SIZE(cmd->stats)) {
			stats = &cmd->stats[ent->op];
			spin_lock_irqsave(&stats->lock, flags);
			stats->sum += ds;
			++stats->n;
			spin_unlock_irqrestore(&stats->lock, flags);
		}

		callback = ent->callback;
		context = ent->context;
		err = ent->ret;
		if (!err) {
			err = mlx5_copy_from_msg(ent->uout,
						 ent->out,
						 ent->uout_size);
			err = err ? err : mlx5_cmd_check(dev,
							 ent->in->first.data,
							 ent->uout);
		}

		mlx5_free_cmd_msg(dev, ent->out);
		free_msg(dev, ent->in);

		err = err ? err : ent->status;
		free_cmd(ent);
		callback(err, context);
	} else {
		complete(&ent->done);
	}
	up(sem);
}
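
/*
 * Main submission path, run from the command workqueue: reserve a
 * slot, build the command descriptor, hand ownership to firmware and
 * ring the doorbell. In polling mode the completion is also driven
 * from here.
 */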
static void cmd_work_handler(struct work_struct *work)
{
	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
	unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd_layout *lay;
	struct semaphore *sem;

	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
	down(sem);

	if (alloc_ent(ent) < 0) {
		complete_command(ent);
		return;
	}

	ent->token = alloc_token(cmd);
	lay = get_inst(cmd, ent->idx);
	ent->lay = lay;
	memset(lay, 0, sizeof(*lay));
	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
	ent->op = be32_to_cpu(lay->in[0]) >> 16;
	if (ent->in->numpages != 0)
		lay->in_ptr = cpu_to_be64(mlx5_fwp_get_dma(ent->in, 0));
	if (ent->out->numpages != 0)
		lay->out_ptr = cpu_to_be64(mlx5_fwp_get_dma(ent->out, 0));
	lay->inlen = cpu_to_be32(ent->uin_size);
	lay->outlen = cpu_to_be32(ent->uout_size);
	lay->type = MLX5_PCI_CMD_XPORT;
	lay->token = ent->token;
	lay->status_own = CMD_OWNER_HW;
	set_signature(ent, !cmd->checksum_disabled);
	dump_command(dev, ent, 1);
	ent->ts1 = ktime_get_ns();
	ent->busy = 0;
	if (ent->callback)
		schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);

	/* ring doorbell after the descriptor is valid */
	mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
	/* make sure data is written to RAM */
	mlx5_fwp_flush(cmd->cmd_page);
	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
	mmiowb();
	/* if not in polling don't use ent after this point */
	if (cmd->mode == CMD_MODE_POLLING) {
		poll_timeout(ent);
		/* make sure we read the descriptor after ownership is SW */
		mlx5_cmd_comp_handler(dev, 1U << ent->idx);
	}
}

static const char *deliv_status_to_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:
		return "no errors";
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
		return "signature error";
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return "token error";
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
		return "bad block number";
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
		return "output pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return "input pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return "firmware internal error";
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
		return "command input length error";
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
		return "command output length error";
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return "reserved fields not cleared";
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
		return "bad command descriptor type";
	default:
		return "unknown status code";
	}
}
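
/*
 * Wait for a synchronous command to complete. In polling mode the
 * completion was already signalled from cmd_work_handler(); in event
 * mode a missing EQE within the timeout is turned into a forced
 * -ETIMEDOUT completion.
 */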
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
	int timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd *cmd = &dev->cmd;
	int err;

	if (cmd->mode == CMD_MODE_POLLING) {
		wait_for_completion(&ent->done);
	} else if (!wait_for_completion_timeout(&ent->done, timeout)) {
		ent->ret = -ETIMEDOUT;
		mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
	}

	err = ent->ret;

	if (err == -ETIMEDOUT) {
		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
			       mlx5_command_str(msg_to_opcode(ent->in)),
			       msg_to_opcode(ent->in));
	}
	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
		      err, deliv_status_to_str(ent->status), ent->status);

	return err;
}

/* Notes:
 * 1. Callback functions may not sleep
 * 2. page queue commands do not support asynchronous completion
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
			   int uin_size,
			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
			   mlx5_cmd_cbk_t callback,
			   void *context, int page_queue, u8 *status)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	struct mlx5_cmd_stats *stats;
	int err = 0;
	s64 ds;
	u16 op;

	if (callback && page_queue)
		return -EINVAL;

	ent = alloc_cmd(cmd, in, uin_size, out, uout, uout_size, callback,
			context, page_queue);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	if (!callback)
		init_completion(&ent->done);

	INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
	INIT_WORK(&ent->work, cmd_work_handler);
	if (page_queue) {
		cmd_work_handler(&ent->work);
	} else if (!queue_work(cmd->wq, &ent->work)) {
		mlx5_core_warn(dev, "failed to queue work\n");
		err = -ENOMEM;
		goto out_free;
	}

	if (callback)
		goto out;

	err = wait_func(dev, ent);
	if (err == -ETIMEDOUT)
		goto out;

	ds = ent->ts2 - ent->ts1;
	op = MLX5_GET(mbox_in, in->first.data, opcode);
	if (op < ARRAY_SIZE(cmd->stats)) {
		stats = &cmd->stats[op];
		spin_lock_irq(&stats->lock);
		stats->sum += ds;
		++stats->n;
		spin_unlock_irq(&stats->lock);
	}
	mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
			   "fw exec time for %s is %lld nsec\n",
			   mlx5_command_str(op), (long long)ds);
	*status = ent->status;
	free_cmd(ent);

	return err;

out_free:
	free_cmd(ent);
out:
	return err;
}

static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, size_t size)
{
	size_t delta;
	size_t i;

	if (to == NULL || from == NULL)
		return (-ENOMEM);

	delta = min_t(size_t, size, sizeof(to->first.data));
	memcpy(to->first.data, from, delta);
	from = (char *)from + delta;
	size -= delta;

	for (i = 0; size != 0; i++) {
		struct mlx5_cmd_prot_block *block;

		block = mlx5_fwp_get_virt(to, i * MLX5_CMD_MBOX_SIZE);

		delta = min_t(size_t, size, MLX5_CMD_DATA_BLOCK_SIZE);
		memcpy(block->data, from, delta);
		from = (char *)from + delta;
		size -= delta;
	}
	return (0);
}

static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
{
	size_t delta;
	size_t i;

	if (to == NULL || from == NULL)
		return (-ENOMEM);

	delta = min_t(size_t, size, sizeof(from->first.data));
	memcpy(to, from->first.data, delta);
	to = (char *)to + delta;
	size -= delta;

	for (i = 0; size != 0; i++) {
		struct mlx5_cmd_prot_block *block;

		block = mlx5_fwp_get_virt(from, i * MLX5_CMD_MBOX_SIZE);

		delta = min_t(size_t, size, MLX5_CMD_DATA_BLOCK_SIZE);
		memcpy(to, block->data, delta);
		to = (char *)to + delta;
		size -= delta;
	}
	return (0);
}
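
/*
 * Command messages keep the head of the message inline in the command
 * descriptor (msg->first.data); any remainder is carried in
 * MLX5_CMD_DATA_BLOCK_SIZE byte mailbox blocks which are chained
 * through big-endian DMA "next" pointers and numbered consecutively.
 */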
static struct mlx5_cmd_msg *
mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, gfp_t flags, size_t size)
{
	struct mlx5_cmd_msg *msg;
	size_t blen;
	size_t n;
	size_t i;

	blen = size - min_t(size_t, sizeof(msg->first.data), size);
	n = howmany(blen, MLX5_CMD_DATA_BLOCK_SIZE);

	msg = mlx5_fwp_alloc(dev, flags, howmany(n, MLX5_NUM_CMDS_IN_ADAPTER_PAGE));
	if (msg == NULL)
		return (ERR_PTR(-ENOMEM));

	for (i = 0; i != n; i++) {
		struct mlx5_cmd_prot_block *block;

		block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE);

		memset(block, 0, MLX5_CMD_MBOX_SIZE);

		if (i != (n - 1)) {
			u64 dma = mlx5_fwp_get_dma(msg, (i + 1) * MLX5_CMD_MBOX_SIZE);
			block->next = cpu_to_be64(dma);
		}
		block->block_num = cpu_to_be32(i);
	}

	/* make sure initial data is written to RAM */
	mlx5_fwp_flush(msg);

	return (msg);
}

static void
mlx5_free_cmd_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{

	mlx5_fwp_free(msg);
}

static void set_wqname(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
		 dev_name(&dev->pdev->dev));
}

static void clean_debug_files(struct mlx5_core_dev *dev)
{
}

static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);

	down(&cmd->pages_sem);
	cmd->mode = mode;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}

void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
	mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
}

void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
	mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
}

static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{
	unsigned long flags;

	if (msg->cache) {
		spin_lock_irqsave(&msg->cache->lock, flags);
		list_add_tail(&msg->list, &msg->cache->head);
		spin_unlock_irqrestore(&msg->cache->lock, flags);
	} else {
		mlx5_free_cmd_msg(dev, msg);
	}
}

void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u32 vector)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	int i;

	/* make sure data gets read from RAM */
	mlx5_fwp_invalidate(cmd->cmd_page);

	while (vector != 0) {
		i = ffs(vector) - 1;
		vector &= ~(1U << i);
		ent = cmd->ent_arr[i];
		if (ent->callback)
			cancel_delayed_work(&ent->cb_timeout_work);
		ent->ts2 = ktime_get_ns();
		memcpy(ent->out->first.data, ent->lay->out,
		       sizeof(ent->lay->out));
		/* make sure data gets read from RAM */
		mlx5_fwp_invalidate(ent->out);
		dump_command(dev, ent, 0);
		if (!ent->ret) {
			if (!cmd->checksum_disabled)
				ent->ret = verify_signature(ent);
			else
				ent->ret = 0;
			if (vector & MLX5_TRIGGERED_CMD_COMP)
				ent->status = MLX5_DRIVER_STATUS_ABORTED;
			else
				ent->status = ent->lay->status_own >> 1;

			mlx5_core_dbg(dev,
				      "FW command ret 0x%x, status %s(0x%x)\n",
				      ent->ret,
				      deliv_status_to_str(ent->status),
				      ent->status);
		}
		free_ent(cmd, ent->idx);
		complete_command(ent);
	}
}
EXPORT_SYMBOL(mlx5_cmd_comp_handler);

static int status_to_err(u8 status)
{
	return status ? -1 : 0; /* TBD more meaningful codes */
}
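
/*
 * Grab a preallocated command message from the medium or large cache
 * when the input size fits one of them; fall back to a fresh
 * allocation otherwise.
 */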
static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
				      gfp_t gfp)
{
	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
	struct mlx5_cmd *cmd = &dev->cmd;
	struct cache_ent *ent = NULL;

	if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE)
		ent = &cmd->cache.large;
	else if (in_size > 16 && in_size <= MED_LIST_SIZE)
		ent = &cmd->cache.med;

	if (ent) {
		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			msg = list_entry(ent->head.next, struct mlx5_cmd_msg,
					 list);
			list_del(&msg->list);
		}
		spin_unlock_irq(&ent->lock);
	}

	if (IS_ERR(msg))
		msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);

	return msg;
}

static int is_manage_pages(void *in)
{
	return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}

static int cmd_exec_helper(struct mlx5_core_dev *dev,
			   void *in, int in_size,
			   void *out, int out_size,
			   mlx5_cmd_cbk_t callback, void *context)
{
	struct mlx5_cmd_msg *inb;
	struct mlx5_cmd_msg *outb;
	int pages_queue;
	const gfp_t gfp = GFP_KERNEL;
	int err;
	u8 status = 0;
	u32 drv_synd;

	if (pci_channel_offline(dev->pdev) ||
	    dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		u16 opcode = MLX5_GET(mbox_in, in, opcode);
		err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status);
		MLX5_SET(mbox_out, out, status, status);
		MLX5_SET(mbox_out, out, syndrome, drv_synd);
		return err;
	}

	pages_queue = is_manage_pages(in);

	inb = alloc_msg(dev, in_size, gfp);
	if (IS_ERR(inb)) {
		err = PTR_ERR(inb);
		return err;
	}

	err = mlx5_copy_to_msg(inb, in, in_size);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto out_in;
	}

	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
	if (IS_ERR(outb)) {
		err = PTR_ERR(outb);
		goto out_in;
	}

	err = mlx5_cmd_invoke(dev, inb, in_size, outb, out, out_size, callback,
			      context, pages_queue, &status);
	if (err) {
		if (err == -ETIMEDOUT)
			return err;
		goto out_out;
	}

	mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
	if (status) {
		err = status_to_err(status);
		goto out_out;
	}

	if (callback)
		return err;

	err = mlx5_copy_from_msg(out, outb, out_size);

out_out:
	mlx5_free_cmd_msg(dev, outb);

out_in:
	free_msg(dev, inb);
	return err;
}
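
/*
 * Execute a firmware command synchronously and convert a non-zero
 * mailbox status into an errno value.
 */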
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size)
{
	int err;

	err = cmd_exec_helper(dev, in, in_size, out, out_size, NULL, NULL);
	return err ? : mlx5_cmd_check(dev, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec);

int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
		     void *out, int out_size, mlx5_cmd_cbk_t callback,
		     void *context)
{
	return cmd_exec_helper(dev, in, in_size, out, out_size, callback, context);
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);

static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	struct mlx5_cmd_msg *n;

	list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}

	list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}
}

static int create_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	int err;
	int i;

	spin_lock_init(&cmd->cache.large.lock);
	INIT_LIST_HEAD(&cmd->cache.large.head);
	spin_lock_init(&cmd->cache.med.lock);
	INIT_LIST_HEAD(&cmd->cache.med.head);

	for (i = 0; i < NUM_LONG_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.large;
		list_add_tail(&msg->list, &cmd->cache.large.head);
	}

	for (i = 0; i < NUM_MED_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.med;
		list_add_tail(&msg->list, &cmd->cache.med.head);
	}

	return 0;

ex_err:
	destroy_msg_cache(dev);
	return err;
}

static int
alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	int err;

	sx_init(&cmd->dma_sx, "MLX5-DMA-SX");
	mtx_init(&cmd->dma_mtx, "MLX5-DMA-MTX", NULL, MTX_DEF);
	cv_init(&cmd->dma_cv, "MLX5-DMA-CV");

	/*
	 * Create global DMA descriptor tag for allocating
	 * 4K firmware pages:
	 */
	err = -bus_dma_tag_create(
	    bus_get_dma_tag(dev->pdev->dev.bsddev),
	    MLX5_ADAPTER_PAGE_SIZE,	/* alignment */
	    0,				/* no boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MLX5_ADAPTER_PAGE_SIZE,	/* maxsize */
	    1,				/* nsegments */
	    MLX5_ADAPTER_PAGE_SIZE,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &cmd->dma_tag);
	if (err != 0)
		goto failure_destroy_sx;

	cmd->cmd_page = mlx5_fwp_alloc(dev, GFP_KERNEL, 1);
	if (cmd->cmd_page == NULL) {
		err = -ENOMEM;
		goto failure_alloc_page;
	}
	cmd->dma = mlx5_fwp_get_dma(cmd->cmd_page, 0);
	cmd->cmd_buf = mlx5_fwp_get_virt(cmd->cmd_page, 0);
	return (0);

failure_alloc_page:
	bus_dma_tag_destroy(cmd->dma_tag);

failure_destroy_sx:
	cv_destroy(&cmd->dma_cv);
	mtx_destroy(&cmd->dma_mtx);
	sx_destroy(&cmd->dma_sx);
	return (err);
}

static void
free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{

	mlx5_fwp_free(cmd->cmd_page);
	bus_dma_tag_destroy(cmd->dma_tag);
	cv_destroy(&cmd->dma_cv);
	mtx_destroy(&cmd->dma_mtx);
	sx_destroy(&cmd->dma_sx);
}
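
/*
 * Set up the command interface: check the command interface revision
 * against firmware, allocate the command queue page, program its DMA
 * address into the initialization segment and start out in polling
 * mode.
 */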
int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	u32 cmd_h, cmd_l;
	u16 cmd_if_rev;
	int err;
	int i;

	memset(cmd, 0, sizeof(*cmd));
	cmd_if_rev = cmdif_rev_get(dev);
	if (cmd_if_rev != CMD_IF_REV) {
		device_printf((&dev->pdev->dev)->bsddev,
		    "ERR: Driver cmdif rev(%d) differs from firmware's(%d)\n",
		    CMD_IF_REV, cmd_if_rev);
		return -EINVAL;
	}

	err = alloc_cmd_page(dev, cmd);
	if (err)
		goto err_free_pool;

	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->log_sz = cmd_l >> 4 & 0xf;
	cmd->log_stride = cmd_l & 0xf;
	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
		device_printf((&dev->pdev->dev)->bsddev,
		    "ERR: firmware reports too many outstanding commands %d\n",
		    1 << cmd->log_sz);
		err = -EINVAL;
		goto err_free_page;
	}

	if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
		device_printf((&dev->pdev->dev)->bsddev,
		    "ERR: command queue size overflow\n");
		err = -EINVAL;
		goto err_free_page;
	}

	cmd->checksum_disabled = 1;
	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
	cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;

	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
	if (cmd->cmdif_rev > CMD_IF_REV) {
		device_printf((&dev->pdev->dev)->bsddev,
		    "ERR: driver does not support command interface version. driver %d, firmware %d\n",
		    CMD_IF_REV, cmd->cmdif_rev);
		err = -ENOTSUPP;
		goto err_free_page;
	}

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);
	for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
		spin_lock_init(&cmd->stats[i].lock);

	sema_init(&cmd->sem, cmd->max_reg_cmds);
	sema_init(&cmd->pages_sem, 1);

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	if (cmd_l & 0xfff) {
		device_printf((&dev->pdev->dev)->bsddev,
		    "ERR: invalid command queue address\n");
		err = -ENOMEM;
		goto err_free_page;
	}

	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

	/* Make sure firmware sees the complete address before we proceed */
	wmb();

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	cmd->mode = CMD_MODE_POLLING;

	err = create_msg_cache(dev);
	if (err) {
		device_printf((&dev->pdev->dev)->bsddev,
		    "ERR: failed to create command cache\n");
		goto err_free_page;
	}

	set_wqname(dev);
	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
	if (!cmd->wq) {
		device_printf((&dev->pdev->dev)->bsddev,
		    "ERR: failed to create command workqueue\n");
		err = -ENOMEM;
		goto err_cache;
	}

	return 0;

err_cache:
	destroy_msg_cache(dev);

err_free_page:
	free_cmd_page(dev, cmd);

err_free_pool:
	return err;
}
EXPORT_SYMBOL(mlx5_cmd_init);

void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	clean_debug_files(dev);
	destroy_workqueue(cmd->wq);
	destroy_msg_cache(dev);
	free_cmd_page(dev, cmd);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);