1 /*- 2 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 
24 * 25 * $FreeBSD$ 26 */ 27 28 #include <linux/module.h> 29 #include <linux/errno.h> 30 #include <linux/pci.h> 31 #include <linux/dma-mapping.h> 32 #include <linux/slab.h> 33 #include <linux/delay.h> 34 #include <linux/random.h> 35 #include <linux/io-mapping.h> 36 #include <linux/hardirq.h> 37 #include <linux/ktime.h> 38 #include <dev/mlx5/driver.h> 39 40 #include "mlx5_core.h" 41 42 static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size); 43 static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev, 44 struct mlx5_cmd_msg *msg); 45 static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg); 46 47 enum { 48 CMD_IF_REV = 5, 49 }; 50 51 enum { 52 CMD_MODE_POLLING, 53 CMD_MODE_EVENTS 54 }; 55 56 enum { 57 NUM_LONG_LISTS = 2, 58 NUM_MED_LISTS = 64, 59 LONG_LIST_SIZE = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 + 60 MLX5_CMD_DATA_BLOCK_SIZE, 61 MED_LIST_SIZE = 16 + MLX5_CMD_DATA_BLOCK_SIZE, 62 }; 63 64 enum { 65 MLX5_CMD_DELIVERY_STAT_OK = 0x0, 66 MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1, 67 MLX5_CMD_DELIVERY_STAT_TOK_ERR = 0x2, 68 MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR = 0x3, 69 MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR = 0x4, 70 MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR = 0x5, 71 MLX5_CMD_DELIVERY_STAT_FW_ERR = 0x6, 72 MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR = 0x7, 73 MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR = 0x8, 74 MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR = 0x9, 75 MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10, 76 }; 77 78 static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd, 79 struct mlx5_cmd_msg *in, 80 int uin_size, 81 struct mlx5_cmd_msg *out, 82 void *uout, int uout_size, 83 mlx5_cmd_cbk_t cbk, 84 void *context, int page_queue) 85 { 86 gfp_t alloc_flags = cbk ? 
GFP_ATOMIC : GFP_KERNEL; 87 struct mlx5_cmd_work_ent *ent; 88 89 ent = kzalloc(sizeof(*ent), alloc_flags); 90 if (!ent) 91 return ERR_PTR(-ENOMEM); 92 93 ent->in = in; 94 ent->uin_size = uin_size; 95 ent->out = out; 96 ent->uout = uout; 97 ent->uout_size = uout_size; 98 ent->callback = cbk; 99 ent->context = context; 100 ent->cmd = cmd; 101 ent->page_queue = page_queue; 102 103 return ent; 104 } 105 106 static u8 alloc_token(struct mlx5_cmd *cmd) 107 { 108 u8 token; 109 110 spin_lock(&cmd->token_lock); 111 cmd->token++; 112 if (cmd->token == 0) 113 cmd->token++; 114 token = cmd->token; 115 spin_unlock(&cmd->token_lock); 116 117 return token; 118 } 119 120 static int alloc_ent(struct mlx5_cmd_work_ent *ent) 121 { 122 unsigned long flags; 123 struct mlx5_cmd *cmd = ent->cmd; 124 struct mlx5_core_dev *dev = 125 container_of(cmd, struct mlx5_core_dev, cmd); 126 int ret = cmd->max_reg_cmds; 127 128 spin_lock_irqsave(&cmd->alloc_lock, flags); 129 if (!ent->page_queue) { 130 ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds); 131 if (ret >= cmd->max_reg_cmds) 132 ret = -1; 133 } 134 135 if (dev->state != MLX5_DEVICE_STATE_UP) 136 ret = -1; 137 138 if (ret != -1) { 139 ent->busy = 1; 140 ent->idx = ret; 141 clear_bit(ent->idx, &cmd->bitmask); 142 cmd->ent_arr[ent->idx] = ent; 143 } 144 spin_unlock_irqrestore(&cmd->alloc_lock, flags); 145 146 return ret; 147 } 148 149 static void free_ent(struct mlx5_cmd *cmd, int idx) 150 { 151 unsigned long flags; 152 153 spin_lock_irqsave(&cmd->alloc_lock, flags); 154 set_bit(idx, &cmd->bitmask); 155 spin_unlock_irqrestore(&cmd->alloc_lock, flags); 156 } 157 158 static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx) 159 { 160 return cmd->cmd_buf + (idx << cmd->log_stride); 161 } 162 163 static u8 xor8_buf(void *buf, int len) 164 { 165 u8 *ptr = buf; 166 u8 sum = 0; 167 int i; 168 169 for (i = 0; i < len; i++) 170 sum ^= ptr[i]; 171 172 return sum; 173 } 174 175 static int verify_block_sig(struct mlx5_cmd_prot_block 
*block) 176 { 177 if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff) 178 return -EINVAL; 179 180 if (xor8_buf(block, sizeof(*block)) != 0xff) 181 return -EINVAL; 182 183 return 0; 184 } 185 186 static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token, 187 int csum) 188 { 189 block->token = token; 190 if (csum) { 191 block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - 192 sizeof(block->data) - 2); 193 block->sig = ~xor8_buf(block, sizeof(*block) - 1); 194 } 195 } 196 197 static void 198 calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum) 199 { 200 size_t i; 201 202 for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) { 203 struct mlx5_cmd_prot_block *block; 204 205 block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE); 206 207 /* compute signature */ 208 calc_block_sig(block, token, csum); 209 210 /* check for last block */ 211 if (block->next == 0) 212 break; 213 } 214 215 /* make sure data gets written to RAM */ 216 mlx5_fwp_flush(msg); 217 } 218 219 static void set_signature(struct mlx5_cmd_work_ent *ent, int csum) 220 { 221 ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay)); 222 calc_chain_sig(ent->in, ent->token, csum); 223 calc_chain_sig(ent->out, ent->token, csum); 224 } 225 226 static void poll_timeout(struct mlx5_cmd_work_ent *ent) 227 { 228 struct mlx5_core_dev *dev = container_of(ent->cmd, 229 struct mlx5_core_dev, cmd); 230 int poll_end = jiffies + 231 msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000); 232 u8 own; 233 234 do { 235 own = ent->lay->status_own; 236 if (!(own & CMD_OWNER_HW) || 237 dev->state != MLX5_DEVICE_STATE_UP) { 238 ent->ret = 0; 239 return; 240 } 241 usleep_range(5000, 10000); 242 } while (time_before(jiffies, poll_end)); 243 244 ent->ret = -ETIMEDOUT; 245 } 246 247 static void free_cmd(struct mlx5_cmd_work_ent *ent) 248 { 249 cancel_delayed_work_sync(&ent->cb_timeout_work); 250 kfree(ent); 251 } 252 253 static int 254 verify_signature(struct 
mlx5_cmd_work_ent *ent) 255 { 256 struct mlx5_cmd_msg *msg = ent->out; 257 size_t i; 258 int err; 259 u8 sig; 260 261 sig = xor8_buf(ent->lay, sizeof(*ent->lay)); 262 if (sig != 0xff) 263 return -EINVAL; 264 265 for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) { 266 struct mlx5_cmd_prot_block *block; 267 268 block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE); 269 270 /* compute signature */ 271 err = verify_block_sig(block); 272 if (err != 0) 273 return (err); 274 275 /* check for last block */ 276 if (block->next == 0) 277 break; 278 } 279 return (0); 280 } 281 282 static void dump_buf(void *buf, int size, int data_only, int offset) 283 { 284 __be32 *p = buf; 285 int i; 286 287 for (i = 0; i < size; i += 16) { 288 pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]), 289 be32_to_cpu(p[1]), be32_to_cpu(p[2]), 290 be32_to_cpu(p[3])); 291 p += 4; 292 offset += 16; 293 } 294 if (!data_only) 295 pr_debug("\n"); 296 } 297 298 const char *mlx5_command_str(int command) 299 { 300 #define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd 301 302 switch (command) { 303 MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP); 304 MLX5_COMMAND_STR_CASE(SET_HCA_CAP); 305 MLX5_COMMAND_STR_CASE(QUERY_ADAPTER); 306 MLX5_COMMAND_STR_CASE(INIT_HCA); 307 MLX5_COMMAND_STR_CASE(TEARDOWN_HCA); 308 MLX5_COMMAND_STR_CASE(ENABLE_HCA); 309 MLX5_COMMAND_STR_CASE(DISABLE_HCA); 310 MLX5_COMMAND_STR_CASE(QUERY_PAGES); 311 MLX5_COMMAND_STR_CASE(MANAGE_PAGES); 312 MLX5_COMMAND_STR_CASE(QUERY_ISSI); 313 MLX5_COMMAND_STR_CASE(SET_ISSI); 314 MLX5_COMMAND_STR_CASE(CREATE_MKEY); 315 MLX5_COMMAND_STR_CASE(QUERY_MKEY); 316 MLX5_COMMAND_STR_CASE(DESTROY_MKEY); 317 MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS); 318 MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME); 319 MLX5_COMMAND_STR_CASE(CREATE_EQ); 320 MLX5_COMMAND_STR_CASE(DESTROY_EQ); 321 MLX5_COMMAND_STR_CASE(QUERY_EQ); 322 MLX5_COMMAND_STR_CASE(GEN_EQE); 323 MLX5_COMMAND_STR_CASE(CREATE_CQ); 324 
MLX5_COMMAND_STR_CASE(DESTROY_CQ); 325 MLX5_COMMAND_STR_CASE(QUERY_CQ); 326 MLX5_COMMAND_STR_CASE(MODIFY_CQ); 327 MLX5_COMMAND_STR_CASE(CREATE_QP); 328 MLX5_COMMAND_STR_CASE(DESTROY_QP); 329 MLX5_COMMAND_STR_CASE(RST2INIT_QP); 330 MLX5_COMMAND_STR_CASE(INIT2RTR_QP); 331 MLX5_COMMAND_STR_CASE(RTR2RTS_QP); 332 MLX5_COMMAND_STR_CASE(RTS2RTS_QP); 333 MLX5_COMMAND_STR_CASE(SQERR2RTS_QP); 334 MLX5_COMMAND_STR_CASE(2ERR_QP); 335 MLX5_COMMAND_STR_CASE(2RST_QP); 336 MLX5_COMMAND_STR_CASE(QUERY_QP); 337 MLX5_COMMAND_STR_CASE(SQD_RTS_QP); 338 MLX5_COMMAND_STR_CASE(MAD_IFC); 339 MLX5_COMMAND_STR_CASE(INIT2INIT_QP); 340 MLX5_COMMAND_STR_CASE(CREATE_PSV); 341 MLX5_COMMAND_STR_CASE(DESTROY_PSV); 342 MLX5_COMMAND_STR_CASE(CREATE_SRQ); 343 MLX5_COMMAND_STR_CASE(DESTROY_SRQ); 344 MLX5_COMMAND_STR_CASE(QUERY_SRQ); 345 MLX5_COMMAND_STR_CASE(ARM_RQ); 346 MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ); 347 MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ); 348 MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ); 349 MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ); 350 MLX5_COMMAND_STR_CASE(CREATE_DCT); 351 MLX5_COMMAND_STR_CASE(SET_DC_CNAK_TRACE); 352 MLX5_COMMAND_STR_CASE(DESTROY_DCT); 353 MLX5_COMMAND_STR_CASE(DRAIN_DCT); 354 MLX5_COMMAND_STR_CASE(QUERY_DCT); 355 MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION); 356 MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE); 357 MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE); 358 MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT); 359 MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT); 360 MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT); 361 MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT); 362 MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS); 363 MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS); 364 MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT); 365 MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT); 366 MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID); 367 MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY); 368 MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER); 369 MLX5_COMMAND_STR_CASE(SET_WOL_ROL); 370 MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL); 371 
MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER); 372 MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER); 373 MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER); 374 MLX5_COMMAND_STR_CASE(ALLOC_PD); 375 MLX5_COMMAND_STR_CASE(DEALLOC_PD); 376 MLX5_COMMAND_STR_CASE(ALLOC_UAR); 377 MLX5_COMMAND_STR_CASE(DEALLOC_UAR); 378 MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION); 379 MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG); 380 MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG); 381 MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG); 382 MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX); 383 MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX); 384 MLX5_COMMAND_STR_CASE(NOP); 385 MLX5_COMMAND_STR_CASE(ALLOC_XRCD); 386 MLX5_COMMAND_STR_CASE(DEALLOC_XRCD); 387 MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN); 388 MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN); 389 MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS); 390 MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS); 391 MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS); 392 MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS); 393 MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS); 394 MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT); 395 MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT); 396 MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY); 397 MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY); 398 MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY); 399 MLX5_COMMAND_STR_CASE(CREATE_RMP); 400 MLX5_COMMAND_STR_CASE(MODIFY_RMP); 401 MLX5_COMMAND_STR_CASE(DESTROY_RMP); 402 MLX5_COMMAND_STR_CASE(QUERY_RMP); 403 MLX5_COMMAND_STR_CASE(CREATE_RQT); 404 MLX5_COMMAND_STR_CASE(MODIFY_RQT); 405 MLX5_COMMAND_STR_CASE(DESTROY_RQT); 406 MLX5_COMMAND_STR_CASE(QUERY_RQT); 407 MLX5_COMMAND_STR_CASE(ACCESS_REG); 408 MLX5_COMMAND_STR_CASE(CREATE_SQ); 409 MLX5_COMMAND_STR_CASE(MODIFY_SQ); 410 MLX5_COMMAND_STR_CASE(DESTROY_SQ); 411 MLX5_COMMAND_STR_CASE(QUERY_SQ); 412 MLX5_COMMAND_STR_CASE(CREATE_RQ); 413 MLX5_COMMAND_STR_CASE(MODIFY_RQ); 414 MLX5_COMMAND_STR_CASE(DESTROY_RQ); 415 MLX5_COMMAND_STR_CASE(QUERY_RQ); 416 MLX5_COMMAND_STR_CASE(CREATE_TIR); 417 MLX5_COMMAND_STR_CASE(MODIFY_TIR); 418 
MLX5_COMMAND_STR_CASE(DESTROY_TIR); 419 MLX5_COMMAND_STR_CASE(QUERY_TIR); 420 MLX5_COMMAND_STR_CASE(CREATE_TIS); 421 MLX5_COMMAND_STR_CASE(MODIFY_TIS); 422 MLX5_COMMAND_STR_CASE(DESTROY_TIS); 423 MLX5_COMMAND_STR_CASE(QUERY_TIS); 424 MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE); 425 MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE); 426 MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE); 427 MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP); 428 MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP); 429 MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP); 430 MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY); 431 MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY); 432 MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY); 433 MLX5_COMMAND_STR_CASE(SET_DIAGNOSTICS); 434 MLX5_COMMAND_STR_CASE(QUERY_DIAGNOSTICS); 435 default: return "unknown command opcode"; 436 } 437 } 438 439 static void dump_command(struct mlx5_core_dev *dev, 440 struct mlx5_cmd_work_ent *ent, int input) 441 { 442 u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode); 443 struct mlx5_cmd_msg *msg = input ? ent->in : ent->out; 444 size_t i; 445 int data_only; 446 int offset = 0; 447 int msg_len = input ? ent->uin_size : ent->uout_size; 448 int dump_len; 449 450 data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA)); 451 452 if (data_only) 453 mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA, 454 "dump command data %s(0x%x) %s\n", 455 mlx5_command_str(op), op, 456 input ? "INPUT" : "OUTPUT"); 457 else 458 mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n", 459 mlx5_command_str(op), op, 460 input ? 
"INPUT" : "OUTPUT"); 461 462 if (data_only) { 463 if (input) { 464 dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset); 465 offset += sizeof(ent->lay->in); 466 } else { 467 dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset); 468 offset += sizeof(ent->lay->out); 469 } 470 } else { 471 dump_buf(ent->lay, sizeof(*ent->lay), 0, offset); 472 offset += sizeof(*ent->lay); 473 } 474 475 for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) { 476 struct mlx5_cmd_prot_block *block; 477 478 block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE); 479 480 if (data_only) { 481 if (offset >= msg_len) 482 break; 483 dump_len = min_t(int, 484 MLX5_CMD_DATA_BLOCK_SIZE, msg_len - offset); 485 486 dump_buf(block->data, dump_len, 1, offset); 487 offset += MLX5_CMD_DATA_BLOCK_SIZE; 488 } else { 489 mlx5_core_dbg(dev, "command block:\n"); 490 dump_buf(block, sizeof(*block), 0, offset); 491 offset += sizeof(*block); 492 } 493 494 /* check for last block */ 495 if (block->next == 0) 496 break; 497 } 498 499 if (data_only) 500 pr_debug("\n"); 501 } 502 503 static u16 msg_to_opcode(struct mlx5_cmd_msg *in) 504 { 505 struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data); 506 507 return be16_to_cpu(hdr->opcode); 508 } 509 510 static void cb_timeout_handler(struct work_struct *work) 511 { 512 struct delayed_work *dwork = container_of(work, struct delayed_work, 513 work); 514 struct mlx5_cmd_work_ent *ent = container_of(dwork, 515 struct mlx5_cmd_work_ent, 516 cb_timeout_work); 517 struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev, 518 cmd); 519 520 ent->ret = -ETIMEDOUT; 521 mlx5_core_warn(dev, "%s(0x%x) timeout. 
Will cause a leak of a command resource\n", 522 mlx5_command_str(msg_to_opcode(ent->in)), 523 msg_to_opcode(ent->in)); 524 mlx5_cmd_comp_handler(dev, 1UL << ent->idx); 525 } 526 527 static int set_internal_err_outbox(struct mlx5_core_dev *dev, u16 opcode, 528 struct mlx5_outbox_hdr *hdr) 529 { 530 hdr->status = 0; 531 hdr->syndrome = 0; 532 533 switch (opcode) { 534 case MLX5_CMD_OP_TEARDOWN_HCA: 535 case MLX5_CMD_OP_DISABLE_HCA: 536 case MLX5_CMD_OP_MANAGE_PAGES: 537 case MLX5_CMD_OP_DESTROY_MKEY: 538 case MLX5_CMD_OP_DESTROY_EQ: 539 case MLX5_CMD_OP_DESTROY_CQ: 540 case MLX5_CMD_OP_DESTROY_QP: 541 case MLX5_CMD_OP_DESTROY_PSV: 542 case MLX5_CMD_OP_DESTROY_SRQ: 543 case MLX5_CMD_OP_DESTROY_XRC_SRQ: 544 case MLX5_CMD_OP_DESTROY_DCT: 545 case MLX5_CMD_OP_DEALLOC_Q_COUNTER: 546 case MLX5_CMD_OP_DEALLOC_PD: 547 case MLX5_CMD_OP_DEALLOC_UAR: 548 case MLX5_CMD_OP_DETACH_FROM_MCG: 549 case MLX5_CMD_OP_DEALLOC_XRCD: 550 case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN: 551 case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT: 552 case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY: 553 case MLX5_CMD_OP_DESTROY_LAG: 554 case MLX5_CMD_OP_DESTROY_VPORT_LAG: 555 case MLX5_CMD_OP_DESTROY_TIR: 556 case MLX5_CMD_OP_DESTROY_SQ: 557 case MLX5_CMD_OP_DESTROY_RQ: 558 case MLX5_CMD_OP_DESTROY_RMP: 559 case MLX5_CMD_OP_DESTROY_TIS: 560 case MLX5_CMD_OP_DESTROY_RQT: 561 case MLX5_CMD_OP_DESTROY_FLOW_TABLE: 562 case MLX5_CMD_OP_DESTROY_FLOW_GROUP: 563 case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY: 564 case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER: 565 case MLX5_CMD_OP_2ERR_QP: 566 case MLX5_CMD_OP_2RST_QP: 567 case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT: 568 case MLX5_CMD_OP_MODIFY_FLOW_TABLE: 569 case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY: 570 case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT: 571 case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER: 572 case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT: 573 case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT: 574 case MLX5_CMD_OP_MODIFY_VPORT_STATE: 575 case MLX5_CMD_OP_MODIFY_SQ: 576 case MLX5_CMD_OP_MODIFY_RQ: 577 case 
MLX5_CMD_OP_MODIFY_TIS: 578 case MLX5_CMD_OP_MODIFY_LAG: 579 case MLX5_CMD_OP_MODIFY_TIR: 580 case MLX5_CMD_OP_MODIFY_RMP: 581 case MLX5_CMD_OP_MODIFY_RQT: 582 case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT: 583 case MLX5_CMD_OP_MODIFY_CONG_PARAMS: 584 case MLX5_CMD_OP_MODIFY_CONG_STATUS: 585 case MLX5_CMD_OP_MODIFY_CQ: 586 case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT: 587 case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT: 588 case MLX5_CMD_OP_MODIFY_OTHER_HCA_CAP: 589 case MLX5_CMD_OP_ACCESS_REG: 590 case MLX5_CMD_OP_DRAIN_DCT: 591 return 0; 592 593 case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT: 594 case MLX5_CMD_OP_ALLOC_ENCAP_HEADER: 595 case MLX5_CMD_OP_ALLOC_FLOW_COUNTER: 596 case MLX5_CMD_OP_ALLOC_PD: 597 case MLX5_CMD_OP_ALLOC_Q_COUNTER: 598 case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN: 599 case MLX5_CMD_OP_ALLOC_UAR: 600 case MLX5_CMD_OP_ALLOC_XRCD: 601 case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION: 602 case MLX5_CMD_OP_ARM_RQ: 603 case MLX5_CMD_OP_ARM_XRC_SRQ: 604 case MLX5_CMD_OP_ATTACH_TO_MCG: 605 case MLX5_CMD_OP_CONFIG_INT_MODERATION: 606 case MLX5_CMD_OP_CREATE_CQ: 607 case MLX5_CMD_OP_CREATE_DCT: 608 case MLX5_CMD_OP_CREATE_EQ: 609 case MLX5_CMD_OP_CREATE_FLOW_GROUP: 610 case MLX5_CMD_OP_CREATE_FLOW_TABLE: 611 case MLX5_CMD_OP_CREATE_LAG: 612 case MLX5_CMD_OP_CREATE_MKEY: 613 case MLX5_CMD_OP_CREATE_PSV: 614 case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT: 615 case MLX5_CMD_OP_CREATE_QP: 616 case MLX5_CMD_OP_CREATE_RMP: 617 case MLX5_CMD_OP_CREATE_RQ: 618 case MLX5_CMD_OP_CREATE_RQT: 619 case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT: 620 case MLX5_CMD_OP_CREATE_SQ: 621 case MLX5_CMD_OP_CREATE_SRQ: 622 case MLX5_CMD_OP_CREATE_TIR: 623 case MLX5_CMD_OP_CREATE_TIS: 624 case MLX5_CMD_OP_CREATE_VPORT_LAG: 625 case MLX5_CMD_OP_CREATE_XRC_SRQ: 626 case MLX5_CMD_OP_ENABLE_HCA: 627 case MLX5_CMD_OP_GEN_EQE: 628 case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG: 629 case MLX5_CMD_OP_INIT2INIT_QP: 630 case MLX5_CMD_OP_INIT2RTR_QP: 631 case MLX5_CMD_OP_INIT_HCA: 632 case MLX5_CMD_OP_MAD_IFC: 633 case 
MLX5_CMD_OP_NOP: 634 case MLX5_CMD_OP_PAGE_FAULT_RESUME: 635 case MLX5_CMD_OP_QUERY_ADAPTER: 636 case MLX5_CMD_OP_QUERY_CONG_PARAMS: 637 case MLX5_CMD_OP_QUERY_CONG_STATISTICS: 638 case MLX5_CMD_OP_QUERY_CONG_STATUS: 639 case MLX5_CMD_OP_QUERY_CQ: 640 case MLX5_CMD_OP_QUERY_DCT: 641 case MLX5_CMD_OP_QUERY_EQ: 642 case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT: 643 case MLX5_CMD_OP_QUERY_FLOW_COUNTER: 644 case MLX5_CMD_OP_QUERY_FLOW_GROUP: 645 case MLX5_CMD_OP_QUERY_FLOW_TABLE: 646 case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY: 647 case MLX5_CMD_OP_QUERY_HCA_CAP: 648 case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT: 649 case MLX5_CMD_OP_QUERY_HCA_VPORT_GID: 650 case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY: 651 case MLX5_CMD_OP_QUERY_ISSI: 652 case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY: 653 case MLX5_CMD_OP_QUERY_LAG: 654 case MLX5_CMD_OP_QUERY_MAD_DEMUX: 655 case MLX5_CMD_OP_QUERY_MKEY: 656 case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT: 657 case MLX5_CMD_OP_QUERY_OTHER_HCA_CAP: 658 case MLX5_CMD_OP_QUERY_PAGES: 659 case MLX5_CMD_OP_QUERY_QP: 660 case MLX5_CMD_OP_QUERY_Q_COUNTER: 661 case MLX5_CMD_OP_QUERY_RMP: 662 case MLX5_CMD_OP_QUERY_ROCE_ADDRESS: 663 case MLX5_CMD_OP_QUERY_RQ: 664 case MLX5_CMD_OP_QUERY_RQT: 665 case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT: 666 case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS: 667 case MLX5_CMD_OP_QUERY_SQ: 668 case MLX5_CMD_OP_QUERY_SRQ: 669 case MLX5_CMD_OP_QUERY_TIR: 670 case MLX5_CMD_OP_QUERY_TIS: 671 case MLX5_CMD_OP_QUERY_VPORT_COUNTER: 672 case MLX5_CMD_OP_QUERY_VPORT_STATE: 673 case MLX5_CMD_OP_QUERY_XRC_SRQ: 674 case MLX5_CMD_OP_RST2INIT_QP: 675 case MLX5_CMD_OP_RTR2RTS_QP: 676 case MLX5_CMD_OP_RTS2RTS_QP: 677 case MLX5_CMD_OP_SET_DC_CNAK_TRACE: 678 case MLX5_CMD_OP_SET_HCA_CAP: 679 case MLX5_CMD_OP_SET_ISSI: 680 case MLX5_CMD_OP_SET_L2_TABLE_ENTRY: 681 case MLX5_CMD_OP_SET_MAD_DEMUX: 682 case MLX5_CMD_OP_SET_ROCE_ADDRESS: 683 case MLX5_CMD_OP_SQD_RTS_QP: 684 case MLX5_CMD_OP_SQERR2RTS_QP: 685 hdr->status = MLX5_CMD_STAT_INT_ERR; 686 hdr->syndrome = 0xFFFFFFFF; 687 
return -ECANCELED; 688 default: 689 mlx5_core_err(dev, "Unknown FW command (%d)\n", opcode); 690 return -EINVAL; 691 } 692 } 693 694 static void complete_command(struct mlx5_cmd_work_ent *ent) 695 { 696 struct mlx5_cmd *cmd = ent->cmd; 697 struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, 698 cmd); 699 mlx5_cmd_cbk_t callback; 700 void *context; 701 702 s64 ds; 703 struct mlx5_cmd_stats *stats; 704 unsigned long flags; 705 int err; 706 struct semaphore *sem; 707 708 if (ent->page_queue) 709 sem = &cmd->pages_sem; 710 else 711 sem = &cmd->sem; 712 713 if (dev->state != MLX5_DEVICE_STATE_UP) { 714 struct mlx5_outbox_hdr *out_hdr = 715 (struct mlx5_outbox_hdr *)ent->out; 716 struct mlx5_inbox_hdr *in_hdr = 717 (struct mlx5_inbox_hdr *)(ent->in->first.data); 718 u16 opcode = be16_to_cpu(in_hdr->opcode); 719 720 ent->ret = set_internal_err_outbox(dev, 721 opcode, 722 out_hdr); 723 } 724 725 if (ent->callback) { 726 ds = ent->ts2 - ent->ts1; 727 if (ent->op < ARRAY_SIZE(cmd->stats)) { 728 stats = &cmd->stats[ent->op]; 729 spin_lock_irqsave(&stats->lock, flags); 730 stats->sum += ds; 731 ++stats->n; 732 spin_unlock_irqrestore(&stats->lock, flags); 733 } 734 735 callback = ent->callback; 736 context = ent->context; 737 err = ent->ret; 738 if (!err) 739 err = mlx5_copy_from_msg(ent->uout, 740 ent->out, 741 ent->uout_size); 742 743 mlx5_free_cmd_msg(dev, ent->out); 744 free_msg(dev, ent->in); 745 746 free_cmd(ent); 747 callback(err, context); 748 } else { 749 complete(&ent->done); 750 } 751 up(sem); 752 } 753 754 static void cmd_work_handler(struct work_struct *work) 755 { 756 struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work); 757 struct mlx5_cmd *cmd = ent->cmd; 758 struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd); 759 unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC); 760 struct mlx5_cmd_layout *lay; 761 struct semaphore *sem; 762 763 sem = ent->page_queue ? 
&cmd->pages_sem : &cmd->sem; 764 down(sem); 765 766 if (alloc_ent(ent) < 0) { 767 complete_command(ent); 768 return; 769 } 770 771 ent->token = alloc_token(cmd); 772 lay = get_inst(cmd, ent->idx); 773 ent->lay = lay; 774 memset(lay, 0, sizeof(*lay)); 775 memcpy(lay->in, ent->in->first.data, sizeof(lay->in)); 776 ent->op = be32_to_cpu(lay->in[0]) >> 16; 777 if (ent->in->numpages != 0) 778 lay->in_ptr = cpu_to_be64(mlx5_fwp_get_dma(ent->in, 0)); 779 if (ent->out->numpages != 0) 780 lay->out_ptr = cpu_to_be64(mlx5_fwp_get_dma(ent->out, 0)); 781 lay->inlen = cpu_to_be32(ent->uin_size); 782 lay->outlen = cpu_to_be32(ent->uout_size); 783 lay->type = MLX5_PCI_CMD_XPORT; 784 lay->token = ent->token; 785 lay->status_own = CMD_OWNER_HW; 786 set_signature(ent, !cmd->checksum_disabled); 787 dump_command(dev, ent, 1); 788 ent->ts1 = ktime_get_ns(); 789 ent->busy = 0; 790 if (ent->callback) 791 schedule_delayed_work(&ent->cb_timeout_work, cb_timeout); 792 793 /* ring doorbell after the descriptor is valid */ 794 mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx); 795 /* make sure data is written to RAM */ 796 mlx5_fwp_flush(cmd->cmd_page); 797 iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell); 798 mmiowb(); 799 /* if not in polling don't use ent after this point*/ 800 if (cmd->mode == CMD_MODE_POLLING) { 801 poll_timeout(ent); 802 /* make sure we read the descriptor after ownership is SW */ 803 mlx5_cmd_comp_handler(dev, 1U << ent->idx); 804 } 805 } 806 807 static const char *deliv_status_to_str(u8 status) 808 { 809 switch (status) { 810 case MLX5_CMD_DELIVERY_STAT_OK: 811 return "no errors"; 812 case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR: 813 return "signature error"; 814 case MLX5_CMD_DELIVERY_STAT_TOK_ERR: 815 return "token error"; 816 case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR: 817 return "bad block number"; 818 case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR: 819 return "output pointer not aligned to block size"; 820 case 
MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR: 821 return "input pointer not aligned to block size"; 822 case MLX5_CMD_DELIVERY_STAT_FW_ERR: 823 return "firmware internal error"; 824 case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR: 825 return "command input length error"; 826 case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR: 827 return "command ouput length error"; 828 case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR: 829 return "reserved fields not cleared"; 830 case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR: 831 return "bad command descriptor type"; 832 default: 833 return "unknown status code"; 834 } 835 } 836 837 static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) 838 { 839 int timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC); 840 struct mlx5_cmd *cmd = &dev->cmd; 841 int err; 842 843 if (cmd->mode == CMD_MODE_POLLING) { 844 wait_for_completion(&ent->done); 845 err = ent->ret; 846 } else if (!wait_for_completion_timeout(&ent->done, timeout)) { 847 ent->ret = -ETIMEDOUT; 848 mlx5_cmd_comp_handler(dev, 1UL << ent->idx); 849 } 850 851 err = ent->ret; 852 853 if (err == -ETIMEDOUT) { 854 mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n", 855 mlx5_command_str(msg_to_opcode(ent->in)), 856 msg_to_opcode(ent->in)); 857 } 858 mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", 859 err, deliv_status_to_str(ent->status), ent->status); 860 861 return err; 862 } 863 864 /* Notes: 865 * 1. Callback functions may not sleep 866 * 2. 
page queue commands do not support asynchrous completion 867 */ 868 static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, 869 int uin_size, 870 struct mlx5_cmd_msg *out, void *uout, int uout_size, 871 mlx5_cmd_cbk_t callback, 872 void *context, int page_queue, u8 *status) 873 { 874 struct mlx5_cmd *cmd = &dev->cmd; 875 struct mlx5_cmd_work_ent *ent; 876 struct mlx5_cmd_stats *stats; 877 int err = 0; 878 s64 ds; 879 u16 op; 880 881 if (callback && page_queue) 882 return -EINVAL; 883 884 ent = alloc_cmd(cmd, in, uin_size, out, uout, uout_size, callback, 885 context, page_queue); 886 if (IS_ERR(ent)) 887 return PTR_ERR(ent); 888 889 if (!callback) 890 init_completion(&ent->done); 891 892 INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler); 893 INIT_WORK(&ent->work, cmd_work_handler); 894 if (page_queue) { 895 cmd_work_handler(&ent->work); 896 } else if (!queue_work(cmd->wq, &ent->work)) { 897 mlx5_core_warn(dev, "failed to queue work\n"); 898 err = -ENOMEM; 899 goto out_free; 900 } 901 902 if (callback) 903 goto out; 904 905 err = wait_func(dev, ent); 906 if (err == -ETIMEDOUT) 907 goto out; 908 909 ds = ent->ts2 - ent->ts1; 910 op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode); 911 if (op < ARRAY_SIZE(cmd->stats)) { 912 stats = &cmd->stats[op]; 913 spin_lock_irq(&stats->lock); 914 stats->sum += ds; 915 ++stats->n; 916 spin_unlock_irq(&stats->lock); 917 } 918 mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME, 919 "fw exec time for %s is %lld nsec\n", 920 mlx5_command_str(op), (long long)ds); 921 *status = ent->status; 922 free_cmd(ent); 923 924 return err; 925 926 out_free: 927 free_cmd(ent); 928 out: 929 return err; 930 } 931 932 static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, size_t size) 933 { 934 size_t delta; 935 size_t i; 936 937 if (to == NULL || from == NULL) 938 return (-ENOMEM); 939 940 delta = min_t(size_t, size, sizeof(to->first.data)); 941 memcpy(to->first.data, from, delta); 942 from = (char 
*)from + delta; 943 size -= delta; 944 945 for (i = 0; size != 0; i++) { 946 struct mlx5_cmd_prot_block *block; 947 948 block = mlx5_fwp_get_virt(to, i * MLX5_CMD_MBOX_SIZE); 949 950 delta = min_t(size_t, size, MLX5_CMD_DATA_BLOCK_SIZE); 951 memcpy(block->data, from, delta); 952 from = (char *)from + delta; 953 size -= delta; 954 } 955 return (0); 956 } 957 958 static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size) 959 { 960 size_t delta; 961 size_t i; 962 963 if (to == NULL || from == NULL) 964 return (-ENOMEM); 965 966 delta = min_t(size_t, size, sizeof(from->first.data)); 967 memcpy(to, from->first.data, delta); 968 to = (char *)to + delta; 969 size -= delta; 970 971 for (i = 0; size != 0; i++) { 972 struct mlx5_cmd_prot_block *block; 973 974 block = mlx5_fwp_get_virt(from, i * MLX5_CMD_MBOX_SIZE); 975 976 delta = min_t(size_t, size, MLX5_CMD_DATA_BLOCK_SIZE); 977 memcpy(to, block->data, delta); 978 to = (char *)to + delta; 979 size -= delta; 980 } 981 return (0); 982 } 983 984 static struct mlx5_cmd_msg * 985 mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, gfp_t flags, size_t size) 986 { 987 struct mlx5_cmd_msg *msg; 988 size_t blen; 989 size_t n; 990 size_t i; 991 992 blen = size - min_t(size_t, sizeof(msg->first.data), size); 993 n = howmany(blen, MLX5_CMD_DATA_BLOCK_SIZE); 994 995 msg = mlx5_fwp_alloc(dev, flags, howmany(n, MLX5_NUM_CMDS_IN_ADAPTER_PAGE)); 996 if (msg == NULL) 997 return (ERR_PTR(-ENOMEM)); 998 999 for (i = 0; i != n; i++) { 1000 struct mlx5_cmd_prot_block *block; 1001 1002 block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE); 1003 1004 memset(block, 0, MLX5_CMD_MBOX_SIZE); 1005 1006 if (i != (n - 1)) { 1007 u64 dma = mlx5_fwp_get_dma(msg, (i + 1) * MLX5_CMD_MBOX_SIZE); 1008 block->next = cpu_to_be64(dma); 1009 } 1010 block->block_num = cpu_to_be32(i); 1011 } 1012 1013 /* make sure initial data is written to RAM */ 1014 mlx5_fwp_flush(msg); 1015 1016 return (msg); 1017 } 1018 1019 static void 1020 
mlx5_free_cmd_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) 1021 { 1022 1023 mlx5_fwp_free(msg); 1024 } 1025 1026 static void set_wqname(struct mlx5_core_dev *dev) 1027 { 1028 struct mlx5_cmd *cmd = &dev->cmd; 1029 1030 snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s", 1031 dev_name(&dev->pdev->dev)); 1032 } 1033 1034 static void clean_debug_files(struct mlx5_core_dev *dev) 1035 { 1036 } 1037 1038 1039 static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode) 1040 { 1041 struct mlx5_cmd *cmd = &dev->cmd; 1042 int i; 1043 1044 for (i = 0; i < cmd->max_reg_cmds; i++) 1045 down(&cmd->sem); 1046 1047 down(&cmd->pages_sem); 1048 cmd->mode = mode; 1049 1050 up(&cmd->pages_sem); 1051 for (i = 0; i < cmd->max_reg_cmds; i++) 1052 up(&cmd->sem); 1053 } 1054 1055 void mlx5_cmd_use_events(struct mlx5_core_dev *dev) 1056 { 1057 mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS); 1058 } 1059 1060 void mlx5_cmd_use_polling(struct mlx5_core_dev *dev) 1061 { 1062 mlx5_cmd_change_mod(dev, CMD_MODE_POLLING); 1063 } 1064 1065 static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) 1066 { 1067 unsigned long flags; 1068 1069 if (msg->cache) { 1070 spin_lock_irqsave(&msg->cache->lock, flags); 1071 list_add_tail(&msg->list, &msg->cache->head); 1072 spin_unlock_irqrestore(&msg->cache->lock, flags); 1073 } else { 1074 mlx5_free_cmd_msg(dev, msg); 1075 } 1076 } 1077 1078 void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u32 vector) 1079 { 1080 struct mlx5_cmd *cmd = &dev->cmd; 1081 struct mlx5_cmd_work_ent *ent; 1082 int i; 1083 1084 /* make sure data gets read from RAM */ 1085 mlx5_fwp_invalidate(cmd->cmd_page); 1086 1087 while (vector != 0) { 1088 i = ffs(vector) - 1; 1089 vector &= ~(1U << i); 1090 ent = cmd->ent_arr[i]; 1091 if (ent->callback) 1092 cancel_delayed_work(&ent->cb_timeout_work); 1093 ent->ts2 = ktime_get_ns(); 1094 memcpy(ent->out->first.data, ent->lay->out, 1095 sizeof(ent->lay->out)); 1096 /* make sure data gets read from RAM */ 
		mlx5_fwp_invalidate(ent->out);
		dump_command(dev, ent, 0);
		if (!ent->ret) {
			/* optionally verify the signature written by FW */
			if (!cmd->checksum_disabled)
				ent->ret = verify_signature(ent);
			else
				ent->ret = 0;
			/* status lives in the upper 7 bits of status_own */
			ent->status = ent->lay->status_own >> 1;

			mlx5_core_dbg(dev,
				      "FW command ret 0x%x, status %s(0x%x)\n",
				      ent->ret,
				      deliv_status_to_str(ent->status),
				      ent->status);
		}
		free_ent(cmd, ent->idx);
		complete_command(ent);
	}
}
EXPORT_SYMBOL(mlx5_cmd_comp_handler);

/*
 * Force completion of every command the driver still considers outstanding
 * (e.g. after a firmware/PCI error, when no EQE will ever arrive).
 *
 * The outstanding set is the complement of the free-slot bitmask, limited
 * to the 1 << log_sz slots the queue actually has.
 */
void mlx5_trigger_cmd_completions(struct mlx5_core_dev *dev)
{
	unsigned long vector;
	int i = 0;
	unsigned long flags;

	/* no command EQ handler may still be running for these entries */
	synchronize_irq(dev->priv.eq_table.cmd_eq.irqn);
	spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
	vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);

	if (!vector)
		return;

	for (i = 0; i < (1 << dev->cmd.log_sz); i++) {
		struct mlx5_cmd_work_ent *ent = dev->cmd.ent_arr[i];

		if (!test_bit(i, &vector))
			continue;

		/* wait for the entry to stop being actively used */
		while (ent->busy)
			usleep_range(1000, 1100);
		free_ent(&dev->cmd, i);
		complete_command(ent);
	}
}
EXPORT_SYMBOL(mlx5_trigger_cmd_completions);

/* Collapse a delivery status into a generic errno-style result. */
static int status_to_err(u8 status)
{
	return status ?
	    -1 : 0; /* TBD more meaningful codes */
}

/*
 * Get an input message buffer for a command of @in_size bytes.
 *
 * Buffers between 17 bytes and MED_LIST_SIZE come from the medium cache,
 * buffers up to LONG_LIST_SIZE from the large cache; anything else (or a
 * cache miss) falls back to a fresh mlx5_alloc_cmd_msg() allocation.
 * Returns an ERR_PTR on allocation failure.
 */
static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
				      gfp_t gfp)
{
	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
	struct mlx5_cmd *cmd = &dev->cmd;
	struct cache_ent *ent = NULL;

	if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE)
		ent = &cmd->cache.large;
	else if (in_size > 16 && in_size <= MED_LIST_SIZE)
		ent = &cmd->cache.med;

	if (ent) {
		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			msg = list_entry(ent->head.next, struct mlx5_cmd_msg,
					 list);
			list_del(&msg->list);
		}
		spin_unlock_irq(&ent->lock);
	}

	/* msg is still ERR_PTR(-ENOMEM) if no cached buffer was found */
	if (IS_ERR(msg))
		msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);

	return msg;
}

/* MANAGE_PAGES commands go through the dedicated pages queue slot. */
static int is_manage_pages(struct mlx5_inbox_hdr *in)
{
	return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}

/*
 * Common path for mlx5_cmd_exec() and mlx5_cmd_exec_cb(): wrap @in/@out in
 * firmware message buffers, invoke the command, and translate the returned
 * status into an error code.
 */
static int cmd_exec_helper(struct mlx5_core_dev *dev,
			   void *in, int in_size,
			   void *out, int out_size,
			   mlx5_cmd_cbk_t callback, void *context)
{
	struct mlx5_cmd_msg *inb;
	struct mlx5_cmd_msg *outb;
	int pages_queue;
	const gfp_t gfp = GFP_KERNEL;
	int err;
	u8 status = 0;

	pages_queue = is_manage_pages(in);

	inb = alloc_msg(dev, in_size, gfp);
	if (IS_ERR(inb)) {
		err = PTR_ERR(inb);
		return err;
	}

	err = mlx5_copy_to_msg(inb, in, in_size);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto out_in;
	}

	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
	if (IS_ERR(outb)) {
		err = PTR_ERR(outb);
		goto out_in;
	}

	err = mlx5_cmd_invoke(dev, inb, in_size, outb, out, out_size, callback,
			      context, pages_queue, &status);
	if (err) {
		/*
		 * On timeout the buffers are deliberately NOT freed here:
		 * firmware may still DMA into them.
		 * NOTE(review): confirm ownership is reclaimed on the
		 * eventual completion/teardown path.
		 */
		if (err == -ETIMEDOUT)
			return err;
		goto out_out;
	}

	mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
	if (status) {
		err = status_to_err(status);
		goto out_out;
	}

	/*
	 * Async path: the callback owns copying/freeing of the buffers.
	 * NOTE(review): presumably freed on the completion path — confirm.
	 */
	if (callback)
		return err;

	err = mlx5_copy_from_msg(out, outb, out_size);

out_out:
	mlx5_free_cmd_msg(dev, outb);

out_in:
	free_msg(dev, inb);
	return err;
}

/*
 * Execute a firmware command synchronously.
 * Returns 0 on success or a negative error code.
 */
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size)
{
	return cmd_exec_helper(dev, in, in_size, out, out_size, NULL, NULL);
}
EXPORT_SYMBOL(mlx5_cmd_exec);

/*
 * Execute a firmware command asynchronously; @callback runs with @context
 * when the command completes.
 */
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
		     void *out, int out_size, mlx5_cmd_cbk_t callback,
		     void *context)
{
	return cmd_exec_helper(dev, in, in_size, out, out_size, callback, context);
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);

/*
 * Free every message buffer held in the large and medium caches.
 * Runs lockless: only called from init-failure and cleanup paths where no
 * commands are in flight.
 */
static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	struct mlx5_cmd_msg *n;

	list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}

	list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}
}

/*
 * Pre-populate the two message caches (NUM_LONG_LISTS large buffers,
 * NUM_MED_LISTS medium buffers). On any allocation failure the partially
 * built caches are torn down and the error returned.
 */
static int create_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	int err;
	int i;

	spin_lock_init(&cmd->cache.large.lock);
	INIT_LIST_HEAD(&cmd->cache.large.head);
	spin_lock_init(&cmd->cache.med.lock);
	INIT_LIST_HEAD(&cmd->cache.med.head);

	for (i = 0; i < NUM_LONG_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.large;
		list_add_tail(&msg->list, &cmd->cache.large.head);
	}

	for (i = 0; i < NUM_MED_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL,
					 MED_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.med;
		list_add_tail(&msg->list, &cmd->cache.med.head);
	}

	return 0;

ex_err:
	destroy_msg_cache(dev);
	return err;
}

/*
 * Allocate the command-queue DMA page and the synchronization primitives
 * and bus_dma tag used for firmware page allocations.
 *
 * Returns 0 on success or a negative (Linux-style) error code; note the
 * sign flip on bus_dma_tag_create(), which returns positive FreeBSD errnos.
 */
static int
alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	int err;

	sx_init(&cmd->dma_sx, "MLX5-DMA-SX");
	mtx_init(&cmd->dma_mtx, "MLX5-DMA-MTX", NULL, MTX_DEF);
	cv_init(&cmd->dma_cv, "MLX5-DMA-CV");

	/*
	 * Create global DMA descriptor tag for allocating
	 * 4K firmware pages:
	 */
	err = -bus_dma_tag_create(
	    bus_get_dma_tag(dev->pdev->dev.bsddev),
	    MLX5_ADAPTER_PAGE_SIZE,	/* alignment */
	    0,				/* no boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MLX5_ADAPTER_PAGE_SIZE,	/* maxsize */
	    1,				/* nsegments */
	    MLX5_ADAPTER_PAGE_SIZE,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &cmd->dma_tag);
	if (err != 0)
		goto failure_destroy_sx;

	cmd->cmd_page = mlx5_fwp_alloc(dev, GFP_KERNEL, 1);
	if (cmd->cmd_page == NULL) {
		err = -ENOMEM;
		goto failure_alloc_page;
	}
	/* cache the bus and CPU addresses of the command queue page */
	cmd->dma = mlx5_fwp_get_dma(cmd->cmd_page, 0);
	cmd->cmd_buf = mlx5_fwp_get_virt(cmd->cmd_page, 0);
	return (0);

failure_alloc_page:
	bus_dma_tag_destroy(cmd->dma_tag);

failure_destroy_sx:
	cv_destroy(&cmd->dma_cv);
	mtx_destroy(&cmd->dma_mtx);
	sx_destroy(&cmd->dma_sx);
	return (err);
}

/* Tear down everything alloc_cmd_page() created, in reverse order. */
static void
free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{

	mlx5_fwp_free(cmd->cmd_page);
	bus_dma_tag_destroy(cmd->dma_tag);
	cv_destroy(&cmd->dma_cv);
	mtx_destroy(&cmd->dma_mtx);
	sx_destroy(&cmd->dma_sx);
}

/*
 * Initialize the command interface: verify the command-interface revision,
 * allocate the command queue page, program its address into the HCA init
 * segment, and create the message caches and completion workqueue.
 */
int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	u32 cmd_h, cmd_l;
	u16 cmd_if_rev;
	int err;
	int i;

	/* the driver speaks exactly one command-interface revision */
	cmd_if_rev = cmdif_rev_get(dev);
	if (cmd_if_rev != CMD_IF_REV) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""Driver cmdif rev(%d) differs from firmware's(%d)\n", CMD_IF_REV, cmd_if_rev);
		return -EINVAL;
	}

	err = alloc_cmd_page(dev, cmd);
	if (err)
		goto err_free_pool;

	/* low byte of cmdq_addr_l_sz encodes log2 queue size and stride */
	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->log_sz = cmd_l >> 4 & 0xf;
	cmd->log_stride = cmd_l & 0xf;
	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""firmware reports too many outstanding commands %d\n", 1 << cmd->log_sz);
		err = -EINVAL;
		goto err_free_page;
	}

	/* the whole queue must fit in one adapter page */
	if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""command queue size overflow\n");
		err = -EINVAL;
		goto err_free_page;
	}

	cmd->checksum_disabled = 1;
	/*
	 * One slot is held back from regular commands.
	 * NOTE(review): inferred to be the dedicated pages-queue slot
	 * (see the separate pages semaphore) — confirm.
	 */
	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
	cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;

	/* firmware may implement a newer interface than we support */
	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
	if (cmd->cmdif_rev > CMD_IF_REV) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""driver does not support command interface version. 
driver %d, firmware %d\n", CMD_IF_REV, cmd->cmdif_rev);
		err = -ENOTSUPP;
		goto err_free_page;
	}

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);
	for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
		spin_lock_init(&cmd->stats[i].lock);

	/* regular commands share max_reg_cmds slots; pages get one slot */
	sema_init(&cmd->sem, cmd->max_reg_cmds);
	sema_init(&cmd->pages_sem, 1);

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	/* the low 12 bits of cmdq_addr_l_sz are the size/stride field */
	if (cmd_l & 0xfff) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""invalid command queue address\n");
		err = -ENOMEM;
		goto err_free_page;
	}

	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

	/* Make sure firmware sees the complete address before we proceed */
	wmb();

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	cmd->mode = CMD_MODE_POLLING;

	err = create_msg_cache(dev);
	if (err) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""failed to create command cache\n");
		goto err_free_page;
	}

	set_wqname(dev);
	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
	if (!cmd->wq) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""failed to create command workqueue\n");
		err = -ENOMEM;
		goto err_cache;
	}

	return 0;

err_cache:
	destroy_msg_cache(dev);

err_free_page:
	free_cmd_page(dev, cmd);

err_free_pool:
	return err;
}
EXPORT_SYMBOL(mlx5_cmd_init);

/* Undo mlx5_cmd_init() in reverse order of construction. */
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	clean_debug_files(dev);
	destroy_workqueue(cmd->wq);
	destroy_msg_cache(dev);
	free_cmd_page(dev, cmd);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);

/* Map a firmware command status byte to a human-readable string. */
static const char *cmd_status_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:
		return "OK";
	case MLX5_CMD_STAT_INT_ERR:
		return "internal error";
	case MLX5_CMD_STAT_BAD_OP_ERR:
		return "bad operation";
	case MLX5_CMD_STAT_BAD_PARAM_ERR:
		return "bad parameter";
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
		return "bad system state";
	case MLX5_CMD_STAT_BAD_RES_ERR:
		return "bad resource";
	case MLX5_CMD_STAT_RES_BUSY:
		return "resource busy";
	case MLX5_CMD_STAT_LIM_ERR:
		return "limits exceeded";
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
		return "bad resource state";
	case MLX5_CMD_STAT_IX_ERR:
		return "bad index";
	case MLX5_CMD_STAT_NO_RES_ERR:
		return "no resources";
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
		return "bad input length";
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
		return "bad output length";
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
		return "bad QP state";
	case MLX5_CMD_STAT_BAD_PKT_ERR:
		return "bad packet (discarded)";
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
		return "bad size too many outstanding CQEs";
	default:
		return "unknown status";
	}
}

/* Translate a firmware command status byte to a negative errno value. */
static int cmd_status_to_err_helper(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:				return 0;
	case MLX5_CMD_STAT_INT_ERR:			return -EIO;
	case MLX5_CMD_STAT_BAD_OP_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_PARAM_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_RES_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_RES_BUSY:			return -EBUSY;
	case MLX5_CMD_STAT_LIM_ERR:			return -ENOMEM;
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_IX_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_NO_RES_ERR:			return -EAGAIN;
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_PKT_ERR:			return -EINVAL;
	case
MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL; 1539 default: return -EIO; 1540 } 1541 } 1542 1543 /* this will be available till all the commands use set/get macros */ 1544 int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr) 1545 { 1546 if (!hdr->status) 1547 return 0; 1548 1549 printf("mlx5_core: WARN: ""command failed, status %s(0x%x), syndrome 0x%x\n", cmd_status_str(hdr->status), hdr->status, be32_to_cpu(hdr->syndrome)); 1550 1551 return cmd_status_to_err_helper(hdr->status); 1552 } 1553 1554 int mlx5_cmd_status_to_err_v2(void *ptr) 1555 { 1556 u32 syndrome; 1557 u8 status; 1558 1559 status = be32_to_cpu(*(__be32 *)ptr) >> 24; 1560 if (!status) 1561 return 0; 1562 1563 syndrome = be32_to_cpu(*(__be32 *)(ptr + 4)); 1564 1565 printf("mlx5_core: WARN: ""command failed, status %s(0x%x), syndrome 0x%x\n", cmd_status_str(status), status, syndrome); 1566 1567 return cmd_status_to_err_helper(status); 1568 } 1569 1570