/*-
 * Copyright (c) 2013-2019, Mellanox Technologies, Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/io-mapping.h>
#include <linux/hardirq.h>
#include <linux/ktime.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/cmd.h>

#include "mlx5_core.h"

static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size);
static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg);
static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);

enum {
	CMD_IF_REV = 5,
};

enum {
	NUM_LONG_LISTS = 2,
	NUM_MED_LISTS = 64,
	LONG_LIST_SIZE = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 +
		MLX5_CMD_DATA_BLOCK_SIZE,
	MED_LIST_SIZE = 16 + MLX5_CMD_DATA_BLOCK_SIZE,
};

enum {
	MLX5_CMD_DELIVERY_STAT_OK			= 0x0,
	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR		= 0x1,
	MLX5_CMD_DELIVERY_STAT_TOK_ERR			= 0x2,
	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR		= 0x3,
	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR	= 0x4,
	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR		= 0x5,
	MLX5_CMD_DELIVERY_STAT_FW_ERR			= 0x6,
	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR		= 0x7,
	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR		= 0x8,
	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR	= 0x9,
	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR		= 0x10,
};

struct mlx5_ifc_mbox_out_bits {
	u8	status[0x8];
	u8	reserved_at_8[0x18];

	u8	syndrome[0x20];

	u8	reserved_at_40[0x40];
};

struct mlx5_ifc_mbox_in_bits {
	u8	opcode[0x10];
	u8	reserved_at_10[0x10];

	u8	reserved_at_20[0x10];
	u8	op_mod[0x10];

	u8	reserved_at_40[0x40];
};

static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
					   struct mlx5_cmd_msg *in,
					   int uin_size,
					   struct mlx5_cmd_msg *out,
					   void *uout, int uout_size,
					   mlx5_cmd_cbk_t cbk,
					   void *context, int page_queue)
{
	gfp_t alloc_flags = cbk ?
GFP_ATOMIC : GFP_KERNEL; 103 struct mlx5_cmd_work_ent *ent; 104 105 ent = kzalloc(sizeof(*ent), alloc_flags); 106 if (!ent) 107 return ERR_PTR(-ENOMEM); 108 109 ent->in = in; 110 ent->uin_size = uin_size; 111 ent->out = out; 112 ent->uout = uout; 113 ent->uout_size = uout_size; 114 ent->callback = cbk; 115 ent->context = context; 116 ent->cmd = cmd; 117 ent->page_queue = page_queue; 118 119 return ent; 120 } 121 122 static u8 alloc_token(struct mlx5_cmd *cmd) 123 { 124 u8 token; 125 126 spin_lock(&cmd->token_lock); 127 cmd->token++; 128 if (cmd->token == 0) 129 cmd->token++; 130 token = cmd->token; 131 spin_unlock(&cmd->token_lock); 132 133 return token; 134 } 135 136 static int alloc_ent(struct mlx5_cmd_work_ent *ent) 137 { 138 unsigned long flags; 139 struct mlx5_cmd *cmd = ent->cmd; 140 struct mlx5_core_dev *dev = 141 container_of(cmd, struct mlx5_core_dev, cmd); 142 int ret = cmd->max_reg_cmds; 143 144 spin_lock_irqsave(&cmd->alloc_lock, flags); 145 if (!ent->page_queue) { 146 ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds); 147 if (ret >= cmd->max_reg_cmds) 148 ret = -1; 149 } 150 151 if (dev->state != MLX5_DEVICE_STATE_UP) 152 ret = -1; 153 154 if (ret != -1) { 155 ent->busy = 1; 156 ent->idx = ret; 157 clear_bit(ent->idx, &cmd->bitmask); 158 cmd->ent_mode[ent->idx] = 159 ent->polling ? MLX5_CMD_MODE_POLLING : MLX5_CMD_MODE_EVENTS; 160 cmd->ent_arr[ent->idx] = ent; 161 } 162 spin_unlock_irqrestore(&cmd->alloc_lock, flags); 163 164 return ret; 165 } 166 167 static void free_ent(struct mlx5_cmd *cmd, int idx) 168 { 169 unsigned long flags; 170 171 spin_lock_irqsave(&cmd->alloc_lock, flags); 172 cmd->ent_arr[idx] = NULL; /* safety clear */ 173 cmd->ent_mode[idx] = MLX5_CMD_MODE_POLLING; /* reset mode */ 174 set_bit(idx, &cmd->bitmask); 175 spin_unlock_irqrestore(&cmd->alloc_lock, flags); 176 } 177 178 static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx) 179 { 180 return cmd->cmd_buf + (idx << cmd->log_stride); 181 } 182 183 static u8 xor8_buf(void *buf, int len) 184 { 185 u8 *ptr = buf; 186 u8 sum = 0; 187 int i; 188 189 for (i = 0; i < len; i++) 190 sum ^= ptr[i]; 191 192 return sum; 193 } 194 195 static int verify_block_sig(struct mlx5_cmd_prot_block *block) 196 { 197 if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff) 198 return -EINVAL; 199 200 if (xor8_buf(block, sizeof(*block)) != 0xff) 201 return -EINVAL; 202 203 return 0; 204 } 205 206 static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token, 207 int csum) 208 { 209 block->token = token; 210 if (csum) { 211 block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - 212 sizeof(block->data) - 2); 213 block->sig = ~xor8_buf(block, sizeof(*block) - 1); 214 } 215 } 216 217 static void 218 calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum) 219 { 220 size_t i; 221 222 for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) { 223 struct mlx5_cmd_prot_block *block; 224 225 block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE); 226 227 /* compute signature */ 228 calc_block_sig(block, token, csum); 229 230 /* check for last block */ 231 if (block->next == 0) 232 break; 233 } 234 235 /* make sure data gets written to RAM */ 236 mlx5_fwp_flush(msg); 237 } 238 239 static void set_signature(struct mlx5_cmd_work_ent *ent, int csum) 240 { 241 ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay)); 242 calc_chain_sig(ent->in, ent->token, csum); 243 calc_chain_sig(ent->out, ent->token, csum); 244 } 245 246 static void poll_timeout(struct mlx5_cmd_work_ent 
*ent) 247 { 248 struct mlx5_core_dev *dev = container_of(ent->cmd, 249 struct mlx5_core_dev, cmd); 250 int poll_end = jiffies + 251 msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000); 252 u8 own; 253 254 do { 255 own = ent->lay->status_own; 256 if (!(own & CMD_OWNER_HW) || 257 dev->state != MLX5_DEVICE_STATE_UP) { 258 ent->ret = 0; 259 return; 260 } 261 usleep_range(5000, 10000); 262 } while (time_before(jiffies, poll_end)); 263 264 ent->ret = -ETIMEDOUT; 265 } 266 267 static void free_cmd(struct mlx5_cmd_work_ent *ent) 268 { 269 cancel_delayed_work_sync(&ent->cb_timeout_work); 270 kfree(ent); 271 } 272 273 static int 274 verify_signature(struct mlx5_cmd_work_ent *ent) 275 { 276 struct mlx5_cmd_msg *msg = ent->out; 277 size_t i; 278 int err; 279 u8 sig; 280 281 sig = xor8_buf(ent->lay, sizeof(*ent->lay)); 282 if (sig != 0xff) 283 return -EINVAL; 284 285 for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) { 286 struct mlx5_cmd_prot_block *block; 287 288 block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE); 289 290 /* compute signature */ 291 err = verify_block_sig(block); 292 if (err != 0) 293 return (err); 294 295 /* check for last block */ 296 if (block->next == 0) 297 break; 298 } 299 return (0); 300 } 301 302 static void dump_buf(void *buf, int size, int data_only, int offset) 303 { 304 __be32 *p = buf; 305 int i; 306 307 for (i = 0; i < size; i += 16) { 308 pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]), 309 be32_to_cpu(p[1]), be32_to_cpu(p[2]), 310 be32_to_cpu(p[3])); 311 p += 4; 312 offset += 16; 313 } 314 if (!data_only) 315 pr_debug("\n"); 316 } 317 318 enum { 319 MLX5_DRIVER_STATUS_ABORTED = 0xfe, 320 MLX5_DRIVER_SYND = 0xbadd00de, 321 }; 322 323 static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, 324 u32 *synd, u8 *status) 325 { 326 *synd = 0; 327 *status = 0; 328 329 switch (op) { 330 case MLX5_CMD_OP_TEARDOWN_HCA: 331 case MLX5_CMD_OP_DISABLE_HCA: 332 case MLX5_CMD_OP_MANAGE_PAGES: 333 case MLX5_CMD_OP_DESTROY_MKEY: 334 case MLX5_CMD_OP_DESTROY_EQ: 335 case MLX5_CMD_OP_DESTROY_CQ: 336 case MLX5_CMD_OP_DESTROY_QP: 337 case MLX5_CMD_OP_DESTROY_PSV: 338 case MLX5_CMD_OP_DESTROY_SRQ: 339 case MLX5_CMD_OP_DESTROY_XRC_SRQ: 340 case MLX5_CMD_OP_DESTROY_DCT: 341 case MLX5_CMD_OP_DEALLOC_Q_COUNTER: 342 case MLX5_CMD_OP_DEALLOC_PD: 343 case MLX5_CMD_OP_DEALLOC_UAR: 344 case MLX5_CMD_OP_DETACH_FROM_MCG: 345 case MLX5_CMD_OP_DEALLOC_XRCD: 346 case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN: 347 case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT: 348 case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY: 349 case MLX5_CMD_OP_DESTROY_TIR: 350 case MLX5_CMD_OP_DESTROY_SQ: 351 case MLX5_CMD_OP_DESTROY_RQ: 352 case MLX5_CMD_OP_DESTROY_RMP: 353 case MLX5_CMD_OP_DESTROY_TIS: 354 case MLX5_CMD_OP_DESTROY_RQT: 355 case MLX5_CMD_OP_DESTROY_FLOW_TABLE: 356 case MLX5_CMD_OP_DESTROY_FLOW_GROUP: 357 case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY: 358 case MLX5_CMD_OP_2ERR_QP: 359 case MLX5_CMD_OP_2RST_QP: 360 case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT: 361 case MLX5_CMD_OP_MODIFY_FLOW_TABLE: 362 case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY: 363 case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT: 364 return MLX5_CMD_STAT_OK; 365 366 case MLX5_CMD_OP_QUERY_HCA_CAP: 367 case MLX5_CMD_OP_QUERY_ADAPTER: 368 case MLX5_CMD_OP_INIT_HCA: 369 case MLX5_CMD_OP_ENABLE_HCA: 370 case MLX5_CMD_OP_QUERY_PAGES: 371 case MLX5_CMD_OP_SET_HCA_CAP: 372 case MLX5_CMD_OP_QUERY_ISSI: 373 case MLX5_CMD_OP_SET_ISSI: 374 case MLX5_CMD_OP_CREATE_MKEY: 375 case MLX5_CMD_OP_QUERY_MKEY: 376 case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS: 377 
case MLX5_CMD_OP_PAGE_FAULT_RESUME: 378 case MLX5_CMD_OP_CREATE_EQ: 379 case MLX5_CMD_OP_QUERY_EQ: 380 case MLX5_CMD_OP_GEN_EQE: 381 case MLX5_CMD_OP_CREATE_CQ: 382 case MLX5_CMD_OP_QUERY_CQ: 383 case MLX5_CMD_OP_MODIFY_CQ: 384 case MLX5_CMD_OP_CREATE_QP: 385 case MLX5_CMD_OP_RST2INIT_QP: 386 case MLX5_CMD_OP_INIT2RTR_QP: 387 case MLX5_CMD_OP_RTR2RTS_QP: 388 case MLX5_CMD_OP_RTS2RTS_QP: 389 case MLX5_CMD_OP_SQERR2RTS_QP: 390 case MLX5_CMD_OP_QUERY_QP: 391 case MLX5_CMD_OP_SQD_RTS_QP: 392 case MLX5_CMD_OP_INIT2INIT_QP: 393 case MLX5_CMD_OP_CREATE_PSV: 394 case MLX5_CMD_OP_CREATE_SRQ: 395 case MLX5_CMD_OP_QUERY_SRQ: 396 case MLX5_CMD_OP_ARM_RQ: 397 case MLX5_CMD_OP_CREATE_XRC_SRQ: 398 case MLX5_CMD_OP_QUERY_XRC_SRQ: 399 case MLX5_CMD_OP_ARM_XRC_SRQ: 400 case MLX5_CMD_OP_CREATE_DCT: 401 case MLX5_CMD_OP_DRAIN_DCT: 402 case MLX5_CMD_OP_QUERY_DCT: 403 case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION: 404 case MLX5_CMD_OP_QUERY_VPORT_STATE: 405 case MLX5_CMD_OP_MODIFY_VPORT_STATE: 406 case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT: 407 case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT: 408 case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT: 409 case MLX5_CMD_OP_QUERY_ROCE_ADDRESS: 410 case MLX5_CMD_OP_SET_ROCE_ADDRESS: 411 case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT: 412 case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT: 413 case MLX5_CMD_OP_QUERY_HCA_VPORT_GID: 414 case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY: 415 case MLX5_CMD_OP_QUERY_VNIC_ENV: 416 case MLX5_CMD_OP_QUERY_VPORT_COUNTER: 417 case MLX5_CMD_OP_ALLOC_Q_COUNTER: 418 case MLX5_CMD_OP_QUERY_Q_COUNTER: 419 case MLX5_CMD_OP_ALLOC_PD: 420 case MLX5_CMD_OP_ALLOC_UAR: 421 case MLX5_CMD_OP_CONFIG_INT_MODERATION: 422 case MLX5_CMD_OP_ACCESS_REG: 423 case MLX5_CMD_OP_ATTACH_TO_MCG: 424 case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG: 425 case MLX5_CMD_OP_MAD_IFC: 426 case MLX5_CMD_OP_QUERY_MAD_DEMUX: 427 case MLX5_CMD_OP_SET_MAD_DEMUX: 428 case MLX5_CMD_OP_NOP: 429 case MLX5_CMD_OP_ALLOC_XRCD: 430 case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN: 431 case MLX5_CMD_OP_QUERY_CONG_STATUS: 432 case MLX5_CMD_OP_MODIFY_CONG_STATUS: 433 case MLX5_CMD_OP_QUERY_CONG_PARAMS: 434 case MLX5_CMD_OP_MODIFY_CONG_PARAMS: 435 case MLX5_CMD_OP_QUERY_CONG_STATISTICS: 436 case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT: 437 case MLX5_CMD_OP_SET_L2_TABLE_ENTRY: 438 case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY: 439 case MLX5_CMD_OP_CREATE_TIR: 440 case MLX5_CMD_OP_MODIFY_TIR: 441 case MLX5_CMD_OP_QUERY_TIR: 442 case MLX5_CMD_OP_CREATE_SQ: 443 case MLX5_CMD_OP_MODIFY_SQ: 444 case MLX5_CMD_OP_QUERY_SQ: 445 case MLX5_CMD_OP_CREATE_RQ: 446 case MLX5_CMD_OP_MODIFY_RQ: 447 case MLX5_CMD_OP_QUERY_RQ: 448 case MLX5_CMD_OP_CREATE_RMP: 449 case MLX5_CMD_OP_MODIFY_RMP: 450 case MLX5_CMD_OP_QUERY_RMP: 451 case MLX5_CMD_OP_CREATE_TIS: 452 case MLX5_CMD_OP_MODIFY_TIS: 453 case MLX5_CMD_OP_QUERY_TIS: 454 case MLX5_CMD_OP_CREATE_RQT: 455 case MLX5_CMD_OP_MODIFY_RQT: 456 case MLX5_CMD_OP_QUERY_RQT: 457 case MLX5_CMD_OP_CREATE_FLOW_TABLE: 458 case MLX5_CMD_OP_QUERY_FLOW_TABLE: 459 case MLX5_CMD_OP_CREATE_FLOW_GROUP: 460 case MLX5_CMD_OP_QUERY_FLOW_GROUP: 461 case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY: 462 *status = MLX5_DRIVER_STATUS_ABORTED; 463 *synd = MLX5_DRIVER_SYND; 464 return -EIO; 465 default: 466 mlx5_core_err(dev, "Unknown FW command (%d)\n", op); 467 return -EINVAL; 468 } 469 } 470 471 const char *mlx5_command_str(int command) 472 { 473 #define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd 474 475 switch (command) { 476 MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP); 477 MLX5_COMMAND_STR_CASE(SET_HCA_CAP); 478 
MLX5_COMMAND_STR_CASE(QUERY_ADAPTER); 479 MLX5_COMMAND_STR_CASE(INIT_HCA); 480 MLX5_COMMAND_STR_CASE(TEARDOWN_HCA); 481 MLX5_COMMAND_STR_CASE(ENABLE_HCA); 482 MLX5_COMMAND_STR_CASE(DISABLE_HCA); 483 MLX5_COMMAND_STR_CASE(QUERY_PAGES); 484 MLX5_COMMAND_STR_CASE(MANAGE_PAGES); 485 MLX5_COMMAND_STR_CASE(QUERY_ISSI); 486 MLX5_COMMAND_STR_CASE(SET_ISSI); 487 MLX5_COMMAND_STR_CASE(CREATE_MKEY); 488 MLX5_COMMAND_STR_CASE(QUERY_MKEY); 489 MLX5_COMMAND_STR_CASE(DESTROY_MKEY); 490 MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS); 491 MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME); 492 MLX5_COMMAND_STR_CASE(CREATE_EQ); 493 MLX5_COMMAND_STR_CASE(DESTROY_EQ); 494 MLX5_COMMAND_STR_CASE(QUERY_EQ); 495 MLX5_COMMAND_STR_CASE(GEN_EQE); 496 MLX5_COMMAND_STR_CASE(CREATE_CQ); 497 MLX5_COMMAND_STR_CASE(DESTROY_CQ); 498 MLX5_COMMAND_STR_CASE(QUERY_CQ); 499 MLX5_COMMAND_STR_CASE(MODIFY_CQ); 500 MLX5_COMMAND_STR_CASE(CREATE_QP); 501 MLX5_COMMAND_STR_CASE(DESTROY_QP); 502 MLX5_COMMAND_STR_CASE(RST2INIT_QP); 503 MLX5_COMMAND_STR_CASE(INIT2RTR_QP); 504 MLX5_COMMAND_STR_CASE(RTR2RTS_QP); 505 MLX5_COMMAND_STR_CASE(RTS2RTS_QP); 506 MLX5_COMMAND_STR_CASE(SQERR2RTS_QP); 507 MLX5_COMMAND_STR_CASE(2ERR_QP); 508 MLX5_COMMAND_STR_CASE(2RST_QP); 509 MLX5_COMMAND_STR_CASE(QUERY_QP); 510 MLX5_COMMAND_STR_CASE(SQD_RTS_QP); 511 MLX5_COMMAND_STR_CASE(MAD_IFC); 512 MLX5_COMMAND_STR_CASE(INIT2INIT_QP); 513 MLX5_COMMAND_STR_CASE(CREATE_PSV); 514 MLX5_COMMAND_STR_CASE(DESTROY_PSV); 515 MLX5_COMMAND_STR_CASE(CREATE_SRQ); 516 MLX5_COMMAND_STR_CASE(DESTROY_SRQ); 517 MLX5_COMMAND_STR_CASE(QUERY_SRQ); 518 MLX5_COMMAND_STR_CASE(ARM_RQ); 519 MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ); 520 MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ); 521 MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ); 522 MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ); 523 MLX5_COMMAND_STR_CASE(CREATE_DCT); 524 MLX5_COMMAND_STR_CASE(SET_DC_CNAK_TRACE); 525 MLX5_COMMAND_STR_CASE(DESTROY_DCT); 526 MLX5_COMMAND_STR_CASE(DRAIN_DCT); 527 MLX5_COMMAND_STR_CASE(QUERY_DCT); 528 MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION); 529 MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE); 530 MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE); 531 MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT); 532 MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT); 533 MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT); 534 MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT); 535 MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS); 536 MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS); 537 MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT); 538 MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT); 539 MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID); 540 MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY); 541 MLX5_COMMAND_STR_CASE(QUERY_VNIC_ENV); 542 MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER); 543 MLX5_COMMAND_STR_CASE(SET_WOL_ROL); 544 MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL); 545 MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER); 546 MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER); 547 MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER); 548 MLX5_COMMAND_STR_CASE(ALLOC_PD); 549 MLX5_COMMAND_STR_CASE(DEALLOC_PD); 550 MLX5_COMMAND_STR_CASE(ALLOC_UAR); 551 MLX5_COMMAND_STR_CASE(DEALLOC_UAR); 552 MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION); 553 MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG); 554 MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG); 555 MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG); 556 MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX); 557 MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX); 558 MLX5_COMMAND_STR_CASE(NOP); 559 MLX5_COMMAND_STR_CASE(ALLOC_XRCD); 560 MLX5_COMMAND_STR_CASE(DEALLOC_XRCD); 561 MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN); 562 
MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN); 563 MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS); 564 MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS); 565 MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS); 566 MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS); 567 MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS); 568 MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT); 569 MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT); 570 MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY); 571 MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY); 572 MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY); 573 MLX5_COMMAND_STR_CASE(CREATE_RMP); 574 MLX5_COMMAND_STR_CASE(MODIFY_RMP); 575 MLX5_COMMAND_STR_CASE(DESTROY_RMP); 576 MLX5_COMMAND_STR_CASE(QUERY_RMP); 577 MLX5_COMMAND_STR_CASE(CREATE_RQT); 578 MLX5_COMMAND_STR_CASE(MODIFY_RQT); 579 MLX5_COMMAND_STR_CASE(DESTROY_RQT); 580 MLX5_COMMAND_STR_CASE(QUERY_RQT); 581 MLX5_COMMAND_STR_CASE(ACCESS_REG); 582 MLX5_COMMAND_STR_CASE(CREATE_SQ); 583 MLX5_COMMAND_STR_CASE(MODIFY_SQ); 584 MLX5_COMMAND_STR_CASE(DESTROY_SQ); 585 MLX5_COMMAND_STR_CASE(QUERY_SQ); 586 MLX5_COMMAND_STR_CASE(CREATE_RQ); 587 MLX5_COMMAND_STR_CASE(MODIFY_RQ); 588 MLX5_COMMAND_STR_CASE(DESTROY_RQ); 589 MLX5_COMMAND_STR_CASE(QUERY_RQ); 590 MLX5_COMMAND_STR_CASE(CREATE_TIR); 591 MLX5_COMMAND_STR_CASE(MODIFY_TIR); 592 MLX5_COMMAND_STR_CASE(DESTROY_TIR); 593 MLX5_COMMAND_STR_CASE(QUERY_TIR); 594 MLX5_COMMAND_STR_CASE(CREATE_TIS); 595 MLX5_COMMAND_STR_CASE(MODIFY_TIS); 596 MLX5_COMMAND_STR_CASE(DESTROY_TIS); 597 MLX5_COMMAND_STR_CASE(QUERY_TIS); 598 MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE); 599 MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE); 600 MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE); 601 MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP); 602 MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP); 603 MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP); 604 MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY); 605 MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY); 606 MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY); 607 MLX5_COMMAND_STR_CASE(SET_DIAGNOSTICS); 608 MLX5_COMMAND_STR_CASE(QUERY_DIAGNOSTICS); 609 default: return "unknown command opcode"; 610 } 611 } 612 613 static const char *cmd_status_str(u8 status) 614 { 615 switch (status) { 616 case MLX5_CMD_STAT_OK: 617 return "OK"; 618 case MLX5_CMD_STAT_INT_ERR: 619 return "internal error"; 620 case MLX5_CMD_STAT_BAD_OP_ERR: 621 return "bad operation"; 622 case MLX5_CMD_STAT_BAD_PARAM_ERR: 623 return "bad parameter"; 624 case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: 625 return "bad system state"; 626 case MLX5_CMD_STAT_BAD_RES_ERR: 627 return "bad resource"; 628 case MLX5_CMD_STAT_RES_BUSY: 629 return "resource busy"; 630 case MLX5_CMD_STAT_LIM_ERR: 631 return "limits exceeded"; 632 case MLX5_CMD_STAT_BAD_RES_STATE_ERR: 633 return "bad resource state"; 634 case MLX5_CMD_STAT_IX_ERR: 635 return "bad index"; 636 case MLX5_CMD_STAT_NO_RES_ERR: 637 return "no resources"; 638 case MLX5_CMD_STAT_BAD_INP_LEN_ERR: 639 return "bad input length"; 640 case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: 641 return "bad output length"; 642 case MLX5_CMD_STAT_BAD_QP_STATE_ERR: 643 return "bad QP state"; 644 case MLX5_CMD_STAT_BAD_PKT_ERR: 645 return "bad packet (discarded)"; 646 case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: 647 return "bad size too many outstanding CQEs"; 648 default: 649 return "unknown status"; 650 } 651 } 652 653 static int cmd_status_to_err_helper(u8 status) 654 { 655 switch (status) { 656 case MLX5_CMD_STAT_OK: return 0; 657 case MLX5_CMD_STAT_INT_ERR: return -EIO; 658 case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL; 659 case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL; 660 case 
MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO; 661 case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL; 662 case MLX5_CMD_STAT_RES_BUSY: return -EBUSY; 663 case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM; 664 case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL; 665 case MLX5_CMD_STAT_IX_ERR: return -EINVAL; 666 case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN; 667 case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO; 668 case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO; 669 case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL; 670 case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL; 671 case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL; 672 default: return -EIO; 673 } 674 } 675 676 void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome) 677 { 678 *status = MLX5_GET(mbox_out, out, status); 679 *syndrome = MLX5_GET(mbox_out, out, syndrome); 680 } 681 682 static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out) 683 { 684 u32 syndrome; 685 u8 status; 686 u16 opcode; 687 u16 op_mod; 688 689 mlx5_cmd_mbox_status(out, &status, &syndrome); 690 if (!status) 691 return 0; 692 693 opcode = MLX5_GET(mbox_in, in, opcode); 694 op_mod = MLX5_GET(mbox_in, in, op_mod); 695 696 mlx5_core_err(dev, 697 "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n", 698 mlx5_command_str(opcode), 699 opcode, op_mod, 700 cmd_status_str(status), 701 status, 702 syndrome); 703 704 return cmd_status_to_err_helper(status); 705 } 706 707 static void dump_command(struct mlx5_core_dev *dev, 708 struct mlx5_cmd_work_ent *ent, int input) 709 { 710 struct mlx5_cmd_msg *msg = input ? ent->in : ent->out; 711 u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode); 712 size_t i; 713 int data_only; 714 int offset = 0; 715 int msg_len = input ? ent->uin_size : ent->uout_size; 716 int dump_len; 717 718 data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA)); 719 720 if (data_only) 721 mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA, 722 "dump command data %s(0x%x) %s\n", 723 mlx5_command_str(op), op, 724 input ? "INPUT" : "OUTPUT"); 725 else 726 mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n", 727 mlx5_command_str(op), op, 728 input ? 
"INPUT" : "OUTPUT"); 729 730 if (data_only) { 731 if (input) { 732 dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset); 733 offset += sizeof(ent->lay->in); 734 } else { 735 dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset); 736 offset += sizeof(ent->lay->out); 737 } 738 } else { 739 dump_buf(ent->lay, sizeof(*ent->lay), 0, offset); 740 offset += sizeof(*ent->lay); 741 } 742 743 for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) { 744 struct mlx5_cmd_prot_block *block; 745 746 block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE); 747 748 if (data_only) { 749 if (offset >= msg_len) 750 break; 751 dump_len = min_t(int, 752 MLX5_CMD_DATA_BLOCK_SIZE, msg_len - offset); 753 754 dump_buf(block->data, dump_len, 1, offset); 755 offset += MLX5_CMD_DATA_BLOCK_SIZE; 756 } else { 757 mlx5_core_dbg(dev, "command block:\n"); 758 dump_buf(block, sizeof(*block), 0, offset); 759 offset += sizeof(*block); 760 } 761 762 /* check for last block */ 763 if (block->next == 0) 764 break; 765 } 766 767 if (data_only) 768 pr_debug("\n"); 769 } 770 771 static u16 msg_to_opcode(struct mlx5_cmd_msg *in) 772 { 773 return MLX5_GET(mbox_in, in->first.data, opcode); 774 } 775 776 static void cb_timeout_handler(struct work_struct *work) 777 { 778 struct delayed_work *dwork = container_of(work, struct delayed_work, 779 work); 780 struct mlx5_cmd_work_ent *ent = container_of(dwork, 781 struct mlx5_cmd_work_ent, 782 cb_timeout_work); 783 struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev, 784 cmd); 785 786 ent->ret = -ETIMEDOUT; 787 mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n", 788 mlx5_command_str(msg_to_opcode(ent->in)), 789 msg_to_opcode(ent->in)); 790 mlx5_cmd_comp_handler(dev, 1UL << ent->idx, MLX5_CMD_MODE_EVENTS); 791 } 792 793 static void complete_command(struct mlx5_cmd_work_ent *ent) 794 { 795 struct mlx5_cmd *cmd = ent->cmd; 796 struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, 797 cmd); 798 mlx5_cmd_cbk_t callback; 799 void *context; 800 801 s64 ds; 802 struct mlx5_cmd_stats *stats; 803 unsigned long flags; 804 int err; 805 struct semaphore *sem; 806 807 if (ent->page_queue) 808 sem = &cmd->pages_sem; 809 else 810 sem = &cmd->sem; 811 812 if (dev->state != MLX5_DEVICE_STATE_UP) { 813 u8 status = 0; 814 u32 drv_synd; 815 816 ent->ret = mlx5_internal_err_ret_value(dev, msg_to_opcode(ent->in), &drv_synd, &status); 817 MLX5_SET(mbox_out, ent->out, status, status); 818 MLX5_SET(mbox_out, ent->out, syndrome, drv_synd); 819 } 820 821 if (ent->callback) { 822 ds = ent->ts2 - ent->ts1; 823 if (ent->op < ARRAY_SIZE(cmd->stats)) { 824 stats = &cmd->stats[ent->op]; 825 spin_lock_irqsave(&stats->lock, flags); 826 stats->sum += ds; 827 ++stats->n; 828 spin_unlock_irqrestore(&stats->lock, flags); 829 } 830 831 callback = ent->callback; 832 context = ent->context; 833 err = ent->ret; 834 if (!err) { 835 err = mlx5_copy_from_msg(ent->uout, 836 ent->out, 837 ent->uout_size); 838 err = err ? err : mlx5_cmd_check(dev, 839 ent->in->first.data, 840 ent->uout); 841 } 842 843 mlx5_free_cmd_msg(dev, ent->out); 844 free_msg(dev, ent->in); 845 846 err = err ? 
		    err : ent->status;
		free_cmd(ent);
		callback(err, context);
	} else {
		complete(&ent->done);
	}
	up(sem);
}

static void cmd_work_handler(struct work_struct *work)
{
	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
	unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd_layout *lay;
	struct semaphore *sem;
	bool poll_cmd = ent->polling;

	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
	down(sem);

	if (alloc_ent(ent) < 0) {
		complete_command(ent);
		return;
	}

	ent->token = alloc_token(cmd);
	lay = get_inst(cmd, ent->idx);
	ent->lay = lay;
	memset(lay, 0, sizeof(*lay));
	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
	ent->op = be32_to_cpu(lay->in[0]) >> 16;
	if (ent->in->numpages != 0)
		lay->in_ptr = cpu_to_be64(mlx5_fwp_get_dma(ent->in, 0));
	if (ent->out->numpages != 0)
		lay->out_ptr = cpu_to_be64(mlx5_fwp_get_dma(ent->out, 0));
	lay->inlen = cpu_to_be32(ent->uin_size);
	lay->outlen = cpu_to_be32(ent->uout_size);
	lay->type = MLX5_PCI_CMD_XPORT;
	lay->token = ent->token;
	lay->status_own = CMD_OWNER_HW;
	set_signature(ent, !cmd->checksum_disabled);
	dump_command(dev, ent, 1);
	ent->ts1 = ktime_get_ns();
	ent->busy = 0;
	if (ent->callback)
		schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);

	/* ring doorbell after the descriptor is valid */
	mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
	/* make sure data is written to RAM */
	mlx5_fwp_flush(cmd->cmd_page);
	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
	mmiowb();

	/* if not in polling don't use ent after this point */
	if (poll_cmd) {
		poll_timeout(ent);
		/* make sure we read the descriptor after ownership is SW */
		mlx5_cmd_comp_handler(dev, 1U << ent->idx, MLX5_CMD_MODE_POLLING);
	}
}

static const char *deliv_status_to_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:
		return "no errors";
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
		return "signature error";
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return "token error";
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
		return "bad block number";
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
		return "output pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return "input pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return "firmware internal error";
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
		return "command input length error";
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
		return "command output length error";
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return "reserved fields not cleared";
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
		return "bad command descriptor type";
	default:
		return "unknown status code";
	}
}
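
/*
 * wait_func() blocks until the command entry has been completed. In
 * polling mode the completion is generated by cmd_work_handler() itself
 * (via poll_timeout() and mlx5_cmd_comp_handler()), so the wait here is
 * unbounded. In event mode the wait is limited to MLX5_CMD_TIMEOUT_MSEC;
 * on expiry the entry is forcefully completed through
 * mlx5_cmd_comp_handler() and -ETIMEDOUT is returned to the caller.
 */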
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
	int timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	int err;

	if (ent->polling) {
		wait_for_completion(&ent->done);
	} else if (!wait_for_completion_timeout(&ent->done, timeout)) {
		ent->ret = -ETIMEDOUT;
		mlx5_cmd_comp_handler(dev, 1UL << ent->idx, MLX5_CMD_MODE_EVENTS);
	}

	err = ent->ret;

	if (err == -ETIMEDOUT) {
		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
			       mlx5_command_str(msg_to_opcode(ent->in)),
			       msg_to_opcode(ent->in));
	}
	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
		      err, deliv_status_to_str(ent->status), ent->status);

	return err;
}

/* Notes:
 * 1. Callback functions may not sleep
 * 2. page queue commands do not support asynchronous completion
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
			   int uin_size,
			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
			   mlx5_cmd_cbk_t callback,
			   void *context, int page_queue, u8 *status,
			   bool force_polling)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	struct mlx5_cmd_stats *stats;
	int err = 0;
	s64 ds;
	u16 op;

	if (callback && page_queue)
		return -EINVAL;

	ent = alloc_cmd(cmd, in, uin_size, out, uout, uout_size, callback,
			context, page_queue);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	ent->polling = force_polling || (cmd->mode == MLX5_CMD_MODE_POLLING);

	if (!callback)
		init_completion(&ent->done);

	INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
	INIT_WORK(&ent->work, cmd_work_handler);
	if (page_queue) {
		cmd_work_handler(&ent->work);
	} else if (!queue_work(dev->priv.health.wq_cmd, &ent->work)) {
		mlx5_core_warn(dev, "failed to queue work\n");
		err = -ENOMEM;
		goto out_free;
	}

	if (callback)
		goto out;

	err = wait_func(dev, ent);
	if (err == -ETIMEDOUT)
		goto out;

	ds = ent->ts2 - ent->ts1;
	op = MLX5_GET(mbox_in, in->first.data, opcode);
	if (op < ARRAY_SIZE(cmd->stats)) {
		stats = &cmd->stats[op];
		spin_lock_irq(&stats->lock);
		stats->sum += ds;
		++stats->n;
		spin_unlock_irq(&stats->lock);
	}
	mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
			   "fw exec time for %s is %lld nsec\n",
			   mlx5_command_str(op), (long long)ds);
	*status = ent->status;
	free_cmd(ent);

	return err;

out_free:
	free_cmd(ent);
out:
	return err;
}

static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, size_t size)
{
	size_t delta;
	size_t i;

	if (to == NULL || from == NULL)
		return (-ENOMEM);

	delta = min_t(size_t, size, sizeof(to->first.data));
	memcpy(to->first.data, from, delta);
	from = (char *)from + delta;
	size -= delta;

	for (i = 0; size != 0; i++) {
		struct mlx5_cmd_prot_block *block;

		block = mlx5_fwp_get_virt(to, i * MLX5_CMD_MBOX_SIZE);

		delta = min_t(size_t, size, MLX5_CMD_DATA_BLOCK_SIZE);
		memcpy(block->data, from, delta);
		from = (char *)from + delta;
		size -= delta;
	}
	return (0);
}

static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
{
	size_t delta;
	size_t i;

	if (to == NULL || from == NULL)
		return (-ENOMEM);

	delta = min_t(size_t, size, sizeof(from->first.data));
	memcpy(to, from->first.data, delta);
	to = (char *)to + delta;
	size -= delta;

	for (i = 0; size != 0; i++) {
		struct mlx5_cmd_prot_block *block;

		block = mlx5_fwp_get_virt(from, i * MLX5_CMD_MBOX_SIZE);

		delta = min_t(size_t, size, MLX5_CMD_DATA_BLOCK_SIZE);
		memcpy(to,
block->data, delta); 1082 to = (char *)to + delta; 1083 size -= delta; 1084 } 1085 return (0); 1086 } 1087 1088 static struct mlx5_cmd_msg * 1089 mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, gfp_t flags, size_t size) 1090 { 1091 struct mlx5_cmd_msg *msg; 1092 size_t blen; 1093 size_t n; 1094 size_t i; 1095 1096 blen = size - min_t(size_t, sizeof(msg->first.data), size); 1097 n = howmany(blen, MLX5_CMD_DATA_BLOCK_SIZE); 1098 1099 msg = mlx5_fwp_alloc(dev, flags, howmany(n, MLX5_NUM_CMDS_IN_ADAPTER_PAGE)); 1100 if (msg == NULL) 1101 return (ERR_PTR(-ENOMEM)); 1102 1103 for (i = 0; i != n; i++) { 1104 struct mlx5_cmd_prot_block *block; 1105 1106 block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE); 1107 1108 memset(block, 0, MLX5_CMD_MBOX_SIZE); 1109 1110 if (i != (n - 1)) { 1111 u64 dma = mlx5_fwp_get_dma(msg, (i + 1) * MLX5_CMD_MBOX_SIZE); 1112 block->next = cpu_to_be64(dma); 1113 } 1114 block->block_num = cpu_to_be32(i); 1115 } 1116 1117 /* make sure initial data is written to RAM */ 1118 mlx5_fwp_flush(msg); 1119 1120 return (msg); 1121 } 1122 1123 static void 1124 mlx5_free_cmd_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) 1125 { 1126 1127 mlx5_fwp_free(msg); 1128 } 1129 1130 static void clean_debug_files(struct mlx5_core_dev *dev) 1131 { 1132 } 1133 1134 1135 static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode) 1136 { 1137 struct mlx5_cmd *cmd = &dev->cmd; 1138 int i; 1139 1140 if (cmd->mode == mode) 1141 return; 1142 1143 for (i = 0; i < cmd->max_reg_cmds; i++) 1144 down(&cmd->sem); 1145 1146 down(&cmd->pages_sem); 1147 cmd->mode = mode; 1148 1149 up(&cmd->pages_sem); 1150 for (i = 0; i < cmd->max_reg_cmds; i++) 1151 up(&cmd->sem); 1152 } 1153 1154 void mlx5_cmd_use_events(struct mlx5_core_dev *dev) 1155 { 1156 mlx5_cmd_change_mod(dev, MLX5_CMD_MODE_EVENTS); 1157 } 1158 1159 void mlx5_cmd_use_polling(struct mlx5_core_dev *dev) 1160 { 1161 mlx5_cmd_change_mod(dev, MLX5_CMD_MODE_POLLING); 1162 } 1163 1164 static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) 1165 { 1166 unsigned long flags; 1167 1168 if (msg->cache) { 1169 spin_lock_irqsave(&msg->cache->lock, flags); 1170 list_add_tail(&msg->list, &msg->cache->head); 1171 spin_unlock_irqrestore(&msg->cache->lock, flags); 1172 } else { 1173 mlx5_free_cmd_msg(dev, msg); 1174 } 1175 } 1176 1177 void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vector_flags, 1178 enum mlx5_cmd_mode cmd_mode) 1179 { 1180 struct mlx5_cmd *cmd = &dev->cmd; 1181 struct mlx5_cmd_work_ent *ent; 1182 bool triggered = (vector_flags & MLX5_TRIGGERED_CMD_COMP) ? 
1 : 0; 1183 u32 vector = vector_flags; /* discard flags in the upper dword */ 1184 int i; 1185 1186 /* make sure data gets read from RAM */ 1187 mlx5_fwp_invalidate(cmd->cmd_page); 1188 1189 while (vector != 0) { 1190 i = ffs(vector) - 1; 1191 vector &= ~(1U << i); 1192 /* check command mode */ 1193 if (cmd->ent_mode[i] != cmd_mode) 1194 continue; 1195 ent = cmd->ent_arr[i]; 1196 /* check if command was already handled */ 1197 if (ent == NULL) 1198 continue; 1199 if (ent->callback) 1200 cancel_delayed_work(&ent->cb_timeout_work); 1201 ent->ts2 = ktime_get_ns(); 1202 memcpy(ent->out->first.data, ent->lay->out, 1203 sizeof(ent->lay->out)); 1204 /* make sure data gets read from RAM */ 1205 mlx5_fwp_invalidate(ent->out); 1206 dump_command(dev, ent, 0); 1207 if (!ent->ret) { 1208 if (!cmd->checksum_disabled) 1209 ent->ret = verify_signature(ent); 1210 else 1211 ent->ret = 0; 1212 1213 if (triggered) 1214 ent->status = MLX5_DRIVER_STATUS_ABORTED; 1215 else 1216 ent->status = ent->lay->status_own >> 1; 1217 1218 mlx5_core_dbg(dev, 1219 "FW command ret 0x%x, status %s(0x%x)\n", 1220 ent->ret, 1221 deliv_status_to_str(ent->status), 1222 ent->status); 1223 } 1224 free_ent(cmd, ent->idx); 1225 complete_command(ent); 1226 } 1227 } 1228 EXPORT_SYMBOL(mlx5_cmd_comp_handler); 1229 1230 static int status_to_err(u8 status) 1231 { 1232 return status ? -EIO : 0; /* TBD more meaningful codes */ 1233 } 1234 1235 static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size, 1236 gfp_t gfp) 1237 { 1238 struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM); 1239 struct mlx5_cmd *cmd = &dev->cmd; 1240 struct cache_ent *ent = NULL; 1241 1242 if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE) 1243 ent = &cmd->cache.large; 1244 else if (in_size > 16 && in_size <= MED_LIST_SIZE) 1245 ent = &cmd->cache.med; 1246 1247 if (ent) { 1248 spin_lock_irq(&ent->lock); 1249 if (!list_empty(&ent->head)) { 1250 msg = list_entry(ent->head.next, struct mlx5_cmd_msg, 1251 list); 1252 list_del(&msg->list); 1253 } 1254 spin_unlock_irq(&ent->lock); 1255 } 1256 1257 if (IS_ERR(msg)) 1258 msg = mlx5_alloc_cmd_msg(dev, gfp, in_size); 1259 1260 return msg; 1261 } 1262 1263 static int is_manage_pages(void *in) 1264 { 1265 return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES; 1266 } 1267 1268 static int cmd_exec_helper(struct mlx5_core_dev *dev, 1269 void *in, int in_size, 1270 void *out, int out_size, 1271 mlx5_cmd_cbk_t callback, void *context, 1272 bool force_polling) 1273 { 1274 struct mlx5_cmd_msg *inb; 1275 struct mlx5_cmd_msg *outb; 1276 int pages_queue; 1277 const gfp_t gfp = GFP_KERNEL; 1278 int err; 1279 u8 status = 0; 1280 u32 drv_synd; 1281 1282 if (pci_channel_offline(dev->pdev) || 1283 dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { 1284 u16 opcode = MLX5_GET(mbox_in, in, opcode); 1285 err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status); 1286 MLX5_SET(mbox_out, out, status, status); 1287 MLX5_SET(mbox_out, out, syndrome, drv_synd); 1288 return err; 1289 } 1290 1291 pages_queue = is_manage_pages(in); 1292 1293 inb = alloc_msg(dev, in_size, gfp); 1294 if (IS_ERR(inb)) { 1295 err = PTR_ERR(inb); 1296 return err; 1297 } 1298 1299 err = mlx5_copy_to_msg(inb, in, in_size); 1300 if (err) { 1301 mlx5_core_warn(dev, "err %d\n", err); 1302 goto out_in; 1303 } 1304 1305 outb = mlx5_alloc_cmd_msg(dev, gfp, out_size); 1306 if (IS_ERR(outb)) { 1307 err = PTR_ERR(outb); 1308 goto out_in; 1309 } 1310 1311 err = mlx5_cmd_invoke(dev, inb, in_size, outb, out, out_size, callback, 1312 context, pages_queue, 
&status, force_polling); 1313 if (err) { 1314 if (err == -ETIMEDOUT) 1315 return err; 1316 goto out_out; 1317 } 1318 1319 mlx5_core_dbg(dev, "err %d, status %d\n", err, status); 1320 if (status) { 1321 err = status_to_err(status); 1322 goto out_out; 1323 } 1324 1325 if (callback) 1326 return err; 1327 1328 err = mlx5_copy_from_msg(out, outb, out_size); 1329 1330 out_out: 1331 mlx5_free_cmd_msg(dev, outb); 1332 1333 out_in: 1334 free_msg(dev, inb); 1335 return err; 1336 } 1337 1338 int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, 1339 int out_size) 1340 { 1341 int err; 1342 1343 err = cmd_exec_helper(dev, in, in_size, out, out_size, NULL, NULL, false); 1344 return err ? : mlx5_cmd_check(dev, in, out); 1345 } 1346 EXPORT_SYMBOL(mlx5_cmd_exec); 1347 1348 int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, 1349 void *out, int out_size, mlx5_cmd_cbk_t callback, 1350 void *context) 1351 { 1352 return cmd_exec_helper(dev, in, in_size, out, out_size, callback, context, false); 1353 } 1354 EXPORT_SYMBOL(mlx5_cmd_exec_cb); 1355 1356 int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size, 1357 void *out, int out_size) 1358 { 1359 int err; 1360 1361 err = cmd_exec_helper(dev, in, in_size, out, out_size, NULL, NULL, true); 1362 return err ? : mlx5_cmd_check(dev, in, out); 1363 } 1364 EXPORT_SYMBOL(mlx5_cmd_exec_polling); 1365 1366 static void destroy_msg_cache(struct mlx5_core_dev *dev) 1367 { 1368 struct mlx5_cmd *cmd = &dev->cmd; 1369 struct mlx5_cmd_msg *msg; 1370 struct mlx5_cmd_msg *n; 1371 1372 list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) { 1373 list_del(&msg->list); 1374 mlx5_free_cmd_msg(dev, msg); 1375 } 1376 1377 list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) { 1378 list_del(&msg->list); 1379 mlx5_free_cmd_msg(dev, msg); 1380 } 1381 } 1382 1383 static int create_msg_cache(struct mlx5_core_dev *dev) 1384 { 1385 struct mlx5_cmd *cmd = &dev->cmd; 1386 struct mlx5_cmd_msg *msg; 1387 int err; 1388 int i; 1389 1390 spin_lock_init(&cmd->cache.large.lock); 1391 INIT_LIST_HEAD(&cmd->cache.large.head); 1392 spin_lock_init(&cmd->cache.med.lock); 1393 INIT_LIST_HEAD(&cmd->cache.med.head); 1394 1395 for (i = 0; i < NUM_LONG_LISTS; i++) { 1396 msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE); 1397 if (IS_ERR(msg)) { 1398 err = PTR_ERR(msg); 1399 goto ex_err; 1400 } 1401 msg->cache = &cmd->cache.large; 1402 list_add_tail(&msg->list, &cmd->cache.large.head); 1403 } 1404 1405 for (i = 0; i < NUM_MED_LISTS; i++) { 1406 msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE); 1407 if (IS_ERR(msg)) { 1408 err = PTR_ERR(msg); 1409 goto ex_err; 1410 } 1411 msg->cache = &cmd->cache.med; 1412 list_add_tail(&msg->list, &cmd->cache.med.head); 1413 } 1414 1415 return 0; 1416 1417 ex_err: 1418 destroy_msg_cache(dev); 1419 return err; 1420 } 1421 1422 static int 1423 alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) 1424 { 1425 int err; 1426 1427 sx_init(&cmd->dma_sx, "MLX5-DMA-SX"); 1428 mtx_init(&cmd->dma_mtx, "MLX5-DMA-MTX", NULL, MTX_DEF); 1429 cv_init(&cmd->dma_cv, "MLX5-DMA-CV"); 1430 1431 /* 1432 * Create global DMA descriptor tag for allocating 1433 * 4K firmware pages: 1434 */ 1435 err = -bus_dma_tag_create( 1436 bus_get_dma_tag(dev->pdev->dev.bsddev), 1437 MLX5_ADAPTER_PAGE_SIZE, /* alignment */ 1438 0, /* no boundary */ 1439 BUS_SPACE_MAXADDR, /* lowaddr */ 1440 BUS_SPACE_MAXADDR, /* highaddr */ 1441 NULL, NULL, /* filter, filterarg */ 1442 MLX5_ADAPTER_PAGE_SIZE, /* maxsize */ 1443 1, 
/* nsegments */ 1444 MLX5_ADAPTER_PAGE_SIZE, /* maxsegsize */ 1445 0, /* flags */ 1446 NULL, NULL, /* lockfunc, lockfuncarg */ 1447 &cmd->dma_tag); 1448 if (err != 0) 1449 goto failure_destroy_sx; 1450 1451 cmd->cmd_page = mlx5_fwp_alloc(dev, GFP_KERNEL, 1); 1452 if (cmd->cmd_page == NULL) { 1453 err = -ENOMEM; 1454 goto failure_alloc_page; 1455 } 1456 cmd->dma = mlx5_fwp_get_dma(cmd->cmd_page, 0); 1457 cmd->cmd_buf = mlx5_fwp_get_virt(cmd->cmd_page, 0); 1458 return (0); 1459 1460 failure_alloc_page: 1461 bus_dma_tag_destroy(cmd->dma_tag); 1462 1463 failure_destroy_sx: 1464 cv_destroy(&cmd->dma_cv); 1465 mtx_destroy(&cmd->dma_mtx); 1466 sx_destroy(&cmd->dma_sx); 1467 return (err); 1468 } 1469 1470 static void 1471 free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) 1472 { 1473 1474 mlx5_fwp_free(cmd->cmd_page); 1475 bus_dma_tag_destroy(cmd->dma_tag); 1476 cv_destroy(&cmd->dma_cv); 1477 mtx_destroy(&cmd->dma_mtx); 1478 sx_destroy(&cmd->dma_sx); 1479 } 1480 1481 int mlx5_cmd_init(struct mlx5_core_dev *dev) 1482 { 1483 struct mlx5_cmd *cmd = &dev->cmd; 1484 u32 cmd_h, cmd_l; 1485 u16 cmd_if_rev; 1486 int err; 1487 int i; 1488 1489 memset(cmd, 0, sizeof(*cmd)); 1490 cmd_if_rev = cmdif_rev_get(dev); 1491 if (cmd_if_rev != CMD_IF_REV) { 1492 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""Driver cmdif rev(%d) differs from firmware's(%d)\n", CMD_IF_REV, cmd_if_rev); 1493 return -EINVAL; 1494 } 1495 1496 err = alloc_cmd_page(dev, cmd); 1497 if (err) 1498 goto err_free_pool; 1499 1500 cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff; 1501 cmd->log_sz = cmd_l >> 4 & 0xf; 1502 cmd->log_stride = cmd_l & 0xf; 1503 if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) { 1504 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""firmware reports too many outstanding commands %d\n", 1 << cmd->log_sz); 1505 err = -EINVAL; 1506 goto err_free_page; 1507 } 1508 1509 if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) { 1510 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""command queue size overflow\n"); 1511 err = -EINVAL; 1512 goto err_free_page; 1513 } 1514 1515 cmd->checksum_disabled = 1; 1516 cmd->max_reg_cmds = (1 << cmd->log_sz) - 1; 1517 cmd->bitmask = (1 << cmd->max_reg_cmds) - 1; 1518 1519 cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; 1520 if (cmd->cmdif_rev > CMD_IF_REV) { 1521 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""driver does not support command interface version. 
driver %d, firmware %d\n", CMD_IF_REV, cmd->cmdif_rev); 1522 err = -ENOTSUPP; 1523 goto err_free_page; 1524 } 1525 1526 spin_lock_init(&cmd->alloc_lock); 1527 spin_lock_init(&cmd->token_lock); 1528 for (i = 0; i < ARRAY_SIZE(cmd->stats); i++) 1529 spin_lock_init(&cmd->stats[i].lock); 1530 1531 sema_init(&cmd->sem, cmd->max_reg_cmds); 1532 sema_init(&cmd->pages_sem, 1); 1533 1534 cmd_h = (u32)((u64)(cmd->dma) >> 32); 1535 cmd_l = (u32)(cmd->dma); 1536 if (cmd_l & 0xfff) { 1537 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""invalid command queue address\n"); 1538 err = -ENOMEM; 1539 goto err_free_page; 1540 } 1541 1542 iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h); 1543 iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz); 1544 1545 /* Make sure firmware sees the complete address before we proceed */ 1546 wmb(); 1547 1548 mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma)); 1549 1550 cmd->mode = MLX5_CMD_MODE_POLLING; 1551 1552 err = create_msg_cache(dev); 1553 if (err) { 1554 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""failed to create command cache\n"); 1555 goto err_free_page; 1556 } 1557 return 0; 1558 1559 err_free_page: 1560 free_cmd_page(dev, cmd); 1561 1562 err_free_pool: 1563 return err; 1564 } 1565 EXPORT_SYMBOL(mlx5_cmd_init); 1566 1567 void mlx5_cmd_cleanup(struct mlx5_core_dev *dev) 1568 { 1569 struct mlx5_cmd *cmd = &dev->cmd; 1570 1571 clean_debug_files(dev); 1572 flush_workqueue(dev->priv.health.wq_cmd); 1573 destroy_msg_cache(dev); 1574 free_cmd_page(dev, cmd); 1575 } 1576 EXPORT_SYMBOL(mlx5_cmd_cleanup); 1577 1578 int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev, 1579 bool reset, void *out, int out_size) 1580 { 1581 u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = { }; 1582 1583 MLX5_SET(query_cong_statistics_in, in, opcode, 1584 MLX5_CMD_OP_QUERY_CONG_STATISTICS); 1585 MLX5_SET(query_cong_statistics_in, in, clear, reset); 1586 return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size); 1587 } 1588 EXPORT_SYMBOL(mlx5_cmd_query_cong_counter); 1589 1590 int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point, 1591 void *out, int out_size) 1592 { 1593 u32 in[MLX5_ST_SZ_DW(query_cong_params_in)] = { }; 1594 1595 MLX5_SET(query_cong_params_in, in, opcode, 1596 MLX5_CMD_OP_QUERY_CONG_PARAMS); 1597 MLX5_SET(query_cong_params_in, in, cong_protocol, cong_point); 1598 1599 return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size); 1600 } 1601 EXPORT_SYMBOL(mlx5_cmd_query_cong_params); 1602 1603 int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *dev, 1604 void *in, int in_size) 1605 { 1606 u32 out[MLX5_ST_SZ_DW(modify_cong_params_out)] = { }; 1607 1608 return mlx5_cmd_exec(dev, in, in_size, out, sizeof(out)); 1609 } 1610 EXPORT_SYMBOL(mlx5_cmd_modify_cong_params); 1611
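
/*
 * Example (sketch): the exported helpers above show the typical calling
 * pattern for mlx5_cmd_exec(). A caller builds the input mailbox with the
 * MLX5_SET() macros over the mlx5_ifc layout, passes stack buffers sized
 * with MLX5_ST_SZ_DW(), and parses the result with MLX5_GET(). Assuming
 * the nop_in/nop_out layouts from mlx5_ifc, a minimal NOP command could
 * be issued roughly as follows:
 *
 *	u32 in[MLX5_ST_SZ_DW(nop_in)] = {};
 *	u32 out[MLX5_ST_SZ_DW(nop_out)] = {};
 *	int err;
 *
 *	MLX5_SET(nop_in, in, opcode, MLX5_CMD_OP_NOP);
 *	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 */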