/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/io-mapping.h>
#include <linux/hardirq.h>
#include <linux/ktime.h>
#include <dev/mlx5/driver.h>

#include "mlx5_core.h"

static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size);
static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg);
static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);

enum {
	CMD_IF_REV = 5,
};

enum {
	CMD_MODE_POLLING,
	CMD_MODE_EVENTS
};

enum {
	NUM_LONG_LISTS = 2,
	NUM_MED_LISTS = 64,
	LONG_LIST_SIZE = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 +
		MLX5_CMD_DATA_BLOCK_SIZE,
	MED_LIST_SIZE = 16 + MLX5_CMD_DATA_BLOCK_SIZE,
};

enum {
	MLX5_CMD_DELIVERY_STAT_OK = 0x0,
	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1,
	MLX5_CMD_DELIVERY_STAT_TOK_ERR = 0x2,
	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR = 0x3,
	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR = 0x4,
	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR = 0x5,
	MLX5_CMD_DELIVERY_STAT_FW_ERR = 0x6,
	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR = 0x7,
	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR = 0x8,
	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR = 0x9,
	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10,
};

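/*
 * Allocate and initialize a command work entry.  GFP_ATOMIC is used
 * when a completion callback is supplied, since asynchronous commands
 * may be issued from contexts that cannot sleep.
 */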
static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
					   struct mlx5_cmd_msg *in,
					   int uin_size,
					   struct mlx5_cmd_msg *out,
					   void *uout, int uout_size,
					   mlx5_cmd_cbk_t cbk,
					   void *context, int page_queue)
{
	gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
	struct mlx5_cmd_work_ent *ent;

	ent = kzalloc(sizeof(*ent), alloc_flags);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->in = in;
	ent->uin_size = uin_size;
	ent->out = out;
	ent->uout = uout;
	ent->uout_size = uout_size;
	ent->callback = cbk;
	ent->context = context;
	ent->cmd = cmd;
	ent->page_queue = page_queue;

	return ent;
}

static u8 alloc_token(struct mlx5_cmd *cmd)
{
	u8 token;

	spin_lock(&cmd->token_lock);
	cmd->token++;
	if (cmd->token == 0)
		cmd->token++;
	token = cmd->token;
	spin_unlock(&cmd->token_lock);

	return token;
}

static int alloc_ent(struct mlx5_cmd_work_ent *ent)
{
	unsigned long flags;
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev =
		container_of(cmd, struct mlx5_core_dev, cmd);
	int ret = cmd->max_reg_cmds;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	if (!ent->page_queue) {
		ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
		if (ret >= cmd->max_reg_cmds)
			ret = -1;
	}

	if (dev->state != MLX5_DEVICE_STATE_UP)
		ret = -1;

	if (ret != -1) {
		ent->busy = 1;
		ent->idx = ret;
		clear_bit(ent->idx, &cmd->bitmask);
		cmd->ent_arr[ent->idx] = ent;
	}
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);

	return ret;
}

static void free_ent(struct mlx5_cmd *cmd, int idx)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	set_bit(idx, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}

static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
	return cmd->cmd_buf + (idx << cmd->log_stride);
}

static u8 xor8_buf(void *buf, int len)
{
	u8 *ptr = buf;
	u8 sum = 0;
	int i;

	for (i = 0; i < len; i++)
		sum ^= ptr[i];

	return sum;
}

static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
	if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
		return -EINVAL;

	if (xor8_buf(block, sizeof(*block)) != 0xff)
		return -EINVAL;

	return 0;
}

static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
			   int csum)
{
	block->token = token;
	if (csum) {
		block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
					    sizeof(block->data) - 2);
		block->sig = ~xor8_buf(block, sizeof(*block) - 1);
	}
}

static void
calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
{
	size_t i;

	for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) {
		struct mlx5_cmd_prot_block *block;

		block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE);

		/* compute signature */
		calc_block_sig(block, token, csum);

		/* check for last block */
		if (block->next == 0)
			break;
	}

	/* make sure data gets written to RAM */
	mlx5_fwp_flush(msg);
}

static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
	ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
	calc_chain_sig(ent->in, ent->token, csum);
	calc_chain_sig(ent->out, ent->token, csum);
}

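/*
 * Busy-wait for the firmware to clear the HW ownership bit in the
 * command layout, allowing it one extra second beyond the nominal
 * command timeout.  Bails out early if the device leaves the UP state.
 */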
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_core_dev *dev = container_of(ent->cmd,
						 struct mlx5_core_dev, cmd);
	int poll_end = jiffies +
	    msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
	u8 own;

	do {
		own = ent->lay->status_own;
		if (!(own & CMD_OWNER_HW) ||
		    dev->state != MLX5_DEVICE_STATE_UP) {
			ent->ret = 0;
			return;
		}
		usleep_range(5000, 10000);
	} while (time_before(jiffies, poll_end));

	ent->ret = -ETIMEDOUT;
}

static void free_cmd(struct mlx5_cmd_work_ent *ent)
{
	kfree(ent);
}

static int
verify_signature(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd_msg *msg = ent->out;
	size_t i;
	int err;
	u8 sig;

	sig = xor8_buf(ent->lay, sizeof(*ent->lay));
	if (sig != 0xff)
		return -EINVAL;

	for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) {
		struct mlx5_cmd_prot_block *block;

		block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE);

		/* verify signature */
		err = verify_block_sig(block);
		if (err != 0)
			return (err);

		/* check for last block */
		if (block->next == 0)
			break;
	}
	return (0);
}

static void dump_buf(void *buf, int size, int data_only, int offset)
{
	__be32 *p = buf;
	int i;

	for (i = 0; i < size; i += 16) {
		pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
			 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
			 be32_to_cpu(p[3]));
		p += 4;
		offset += 16;
	}
	if (!data_only)
		pr_debug("\n");
}

const char *mlx5_command_str(int command)
{
#define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd

	switch (command) {
	MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP);
	MLX5_COMMAND_STR_CASE(SET_HCA_CAP);
	MLX5_COMMAND_STR_CASE(QUERY_ADAPTER);
	MLX5_COMMAND_STR_CASE(INIT_HCA);
	MLX5_COMMAND_STR_CASE(TEARDOWN_HCA);
	MLX5_COMMAND_STR_CASE(ENABLE_HCA);
	MLX5_COMMAND_STR_CASE(DISABLE_HCA);
	MLX5_COMMAND_STR_CASE(QUERY_PAGES);
	MLX5_COMMAND_STR_CASE(MANAGE_PAGES);
	MLX5_COMMAND_STR_CASE(QUERY_ISSI);
	MLX5_COMMAND_STR_CASE(SET_ISSI);
	MLX5_COMMAND_STR_CASE(CREATE_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_MKEY);
	MLX5_COMMAND_STR_CASE(DESTROY_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS);
	MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME);
	MLX5_COMMAND_STR_CASE(CREATE_EQ);
	MLX5_COMMAND_STR_CASE(DESTROY_EQ);
	MLX5_COMMAND_STR_CASE(QUERY_EQ);
	MLX5_COMMAND_STR_CASE(GEN_EQE);
	MLX5_COMMAND_STR_CASE(CREATE_CQ);
	MLX5_COMMAND_STR_CASE(DESTROY_CQ);
	MLX5_COMMAND_STR_CASE(QUERY_CQ);
	MLX5_COMMAND_STR_CASE(MODIFY_CQ);
	MLX5_COMMAND_STR_CASE(CREATE_QP);
	MLX5_COMMAND_STR_CASE(DESTROY_QP);
	MLX5_COMMAND_STR_CASE(RST2INIT_QP);
	MLX5_COMMAND_STR_CASE(INIT2RTR_QP);
	MLX5_COMMAND_STR_CASE(RTR2RTS_QP);
	MLX5_COMMAND_STR_CASE(RTS2RTS_QP);
	MLX5_COMMAND_STR_CASE(SQERR2RTS_QP);
	MLX5_COMMAND_STR_CASE(2ERR_QP);
	MLX5_COMMAND_STR_CASE(2RST_QP);
	MLX5_COMMAND_STR_CASE(QUERY_QP);
	MLX5_COMMAND_STR_CASE(SQD_RTS_QP);
	MLX5_COMMAND_STR_CASE(MAD_IFC);
	MLX5_COMMAND_STR_CASE(INIT2INIT_QP);
	MLX5_COMMAND_STR_CASE(CREATE_PSV);
	MLX5_COMMAND_STR_CASE(DESTROY_PSV);
	MLX5_COMMAND_STR_CASE(CREATE_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(CREATE_DCT);
	MLX5_COMMAND_STR_CASE(SET_DC_CNAK_TRACE);
	MLX5_COMMAND_STR_CASE(DESTROY_DCT);
	MLX5_COMMAND_STR_CASE(DRAIN_DCT);
	MLX5_COMMAND_STR_CASE(QUERY_DCT);
	MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER);
	MLX5_COMMAND_STR_CASE(SET_WOL_ROL);
	MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL);
	MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(ALLOC_PD);
	MLX5_COMMAND_STR_CASE(DEALLOC_PD);
	MLX5_COMMAND_STR_CASE(ALLOC_UAR);
	MLX5_COMMAND_STR_CASE(DEALLOC_UAR);
	MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION);
	MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG);
	MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG);
	MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG);
	MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(NOP);
	MLX5_COMMAND_STR_CASE(ALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(DEALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS);
	MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(CREATE_RMP);
	MLX5_COMMAND_STR_CASE(MODIFY_RMP);
	MLX5_COMMAND_STR_CASE(DESTROY_RMP);
	MLX5_COMMAND_STR_CASE(QUERY_RMP);
	MLX5_COMMAND_STR_CASE(CREATE_RQT);
	MLX5_COMMAND_STR_CASE(MODIFY_RQT);
	MLX5_COMMAND_STR_CASE(DESTROY_RQT);
	MLX5_COMMAND_STR_CASE(QUERY_RQT);
	MLX5_COMMAND_STR_CASE(ACCESS_REG);
	MLX5_COMMAND_STR_CASE(CREATE_SQ);
	MLX5_COMMAND_STR_CASE(MODIFY_SQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SQ);
	MLX5_COMMAND_STR_CASE(QUERY_SQ);
	MLX5_COMMAND_STR_CASE(CREATE_RQ);
	MLX5_COMMAND_STR_CASE(MODIFY_RQ);
	MLX5_COMMAND_STR_CASE(DESTROY_RQ);
	MLX5_COMMAND_STR_CASE(QUERY_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_TIR);
	MLX5_COMMAND_STR_CASE(MODIFY_TIR);
	MLX5_COMMAND_STR_CASE(DESTROY_TIR);
	MLX5_COMMAND_STR_CASE(QUERY_TIR);
	MLX5_COMMAND_STR_CASE(CREATE_TIS);
	MLX5_COMMAND_STR_CASE(MODIFY_TIS);
	MLX5_COMMAND_STR_CASE(DESTROY_TIS);
	MLX5_COMMAND_STR_CASE(QUERY_TIS);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(SET_DIAGNOSTICS);
	MLX5_COMMAND_STR_CASE(QUERY_DIAGNOSTICS);
	default: return "unknown command opcode";
	}
}

static void dump_command(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_work_ent *ent, int input)
{
	u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode);
	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
	size_t i;
	int data_only;
	int offset = 0;
	int msg_len = input ? ent->uin_size : ent->uout_size;
	int dump_len;

	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

	if (data_only)
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
				   "dump command data %s(0x%x) %s\n",
				   mlx5_command_str(op), op,
				   input ? "INPUT" : "OUTPUT");
	else
		mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
			      mlx5_command_str(op), op,
			      input ? "INPUT" : "OUTPUT");

	if (data_only) {
		if (input) {
			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
			offset += sizeof(ent->lay->in);
		} else {
			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
			offset += sizeof(ent->lay->out);
		}
	} else {
		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
		offset += sizeof(*ent->lay);
	}

	for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) {
		struct mlx5_cmd_prot_block *block;

		block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE);

		if (data_only) {
			if (offset >= msg_len)
				break;
			dump_len = min_t(int,
			    MLX5_CMD_DATA_BLOCK_SIZE, msg_len - offset);

			dump_buf(block->data, dump_len, 1, offset);
			offset += MLX5_CMD_DATA_BLOCK_SIZE;
		} else {
			mlx5_core_dbg(dev, "command block:\n");
			dump_buf(block, sizeof(*block), 0, offset);
			offset += sizeof(*block);
		}

		/* check for last block */
		if (block->next == 0)
			break;
	}

	if (data_only)
		pr_debug("\n");
}

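/*
 * Fabricate an outbox for a command completed while the device is not
 * UP.  Teardown/destroy-style commands are completed successfully so
 * that cleanup can make progress; commands that would return data get
 * an internal-error status instead.
 */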
static int set_internal_err_outbox(struct mlx5_core_dev *dev, u16 opcode,
				   struct mlx5_outbox_hdr *hdr)
{
	hdr->status = 0;
	hdr->syndrome = 0;

	switch (opcode) {
	case MLX5_CMD_OP_TEARDOWN_HCA:
	case MLX5_CMD_OP_DISABLE_HCA:
	case MLX5_CMD_OP_MANAGE_PAGES:
	case MLX5_CMD_OP_DESTROY_MKEY:
	case MLX5_CMD_OP_DESTROY_EQ:
	case MLX5_CMD_OP_DESTROY_CQ:
	case MLX5_CMD_OP_DESTROY_QP:
	case MLX5_CMD_OP_DESTROY_PSV:
	case MLX5_CMD_OP_DESTROY_SRQ:
	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
	case MLX5_CMD_OP_DESTROY_DCT:
	case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
	case MLX5_CMD_OP_DEALLOC_PD:
	case MLX5_CMD_OP_DEALLOC_UAR:
	case MLX5_CMD_OP_DETACH_FROM_MCG:
	case MLX5_CMD_OP_DEALLOC_XRCD:
	case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_DESTROY_LAG:
	case MLX5_CMD_OP_DESTROY_VPORT_LAG:
	case MLX5_CMD_OP_DESTROY_TIR:
	case MLX5_CMD_OP_DESTROY_SQ:
	case MLX5_CMD_OP_DESTROY_RQ:
	case MLX5_CMD_OP_DESTROY_RMP:
	case MLX5_CMD_OP_DESTROY_TIS:
	case MLX5_CMD_OP_DESTROY_RQT:
	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
	case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
	case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
	case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
	case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
	case MLX5_CMD_OP_MODIFY_VPORT_STATE:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_MODIFY_LAG:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
	case MLX5_CMD_OP_MODIFY_CONG_STATUS:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_OTHER_HCA_CAP:
	case MLX5_CMD_OP_ACCESS_REG:
	case MLX5_CMD_OP_DRAIN_DCT:
		return 0;

	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_ALLOC_UAR:
	case MLX5_CMD_OP_ALLOC_XRCD:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_CONFIG_INT_MODERATION:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_CREATE_EQ:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_LAG:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_CREATE_PSV:
	case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_CREATE_VPORT_LAG:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_ENABLE_HCA:
	case MLX5_CMD_OP_GEN_EQE:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_INIT_HCA:
	case MLX5_CMD_OP_MAD_IFC:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_PAGE_FAULT_RESUME:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_QUERY_EQ:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_LAG:
	case MLX5_CMD_OP_QUERY_MAD_DEMUX:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_OTHER_HCA_CAP:
	case MLX5_CMD_OP_QUERY_PAGES:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SET_DC_CNAK_TRACE:
	case MLX5_CMD_OP_SET_HCA_CAP:
	case MLX5_CMD_OP_SET_ISSI:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_SET_MAD_DEMUX:
	case MLX5_CMD_OP_SET_ROCE_ADDRESS:
	case MLX5_CMD_OP_SQD_RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
		hdr->status = MLX5_CMD_STAT_INT_ERR;
		hdr->syndrome = 0xFFFFFFFF;
		return -ECANCELED;
	default:
		mlx5_core_err(dev, "Unknown FW command (%d)\n", opcode);
		return -EINVAL;
	}
}

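/*
 * Finish a command: record statistics, then either invoke the
 * asynchronous completion callback or wake the synchronous waiter,
 * and finally release the queue semaphore.
 */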
static void complete_command(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev,
						 cmd);
	mlx5_cmd_cbk_t callback;
	void *context;

	s64 ds;
	struct mlx5_cmd_stats *stats;
	unsigned long flags;
	int err;
	struct semaphore *sem;

	if (ent->page_queue)
		sem = &cmd->pages_sem;
	else
		sem = &cmd->sem;

	if (dev->state != MLX5_DEVICE_STATE_UP) {
		struct mlx5_outbox_hdr *out_hdr =
			(struct mlx5_outbox_hdr *)ent->out;
		struct mlx5_inbox_hdr *in_hdr =
			(struct mlx5_inbox_hdr *)(ent->in->first.data);
		u16 opcode = be16_to_cpu(in_hdr->opcode);

		ent->ret = set_internal_err_outbox(dev,
						   opcode,
						   out_hdr);
	}

	if (ent->callback) {
		ds = ent->ts2 - ent->ts1;
		if (ent->op < ARRAY_SIZE(cmd->stats)) {
			stats = &cmd->stats[ent->op];
			spin_lock_irqsave(&stats->lock, flags);
			stats->sum += ds;
			++stats->n;
			spin_unlock_irqrestore(&stats->lock, flags);
		}

		callback = ent->callback;
		context = ent->context;
		err = ent->ret;
		if (!err)
			err = mlx5_copy_from_msg(ent->uout,
						 ent->out,
						 ent->uout_size);

		mlx5_free_cmd_msg(dev, ent->out);
		free_msg(dev, ent->in);

		free_cmd(ent);
		callback(err, context);
	} else {
		complete(&ent->done);
	}
	up(sem);
}

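/*
 * Fill in the command layout for an allocated entry, compute the
 * signatures and ring the doorbell.  In polling mode the completion
 * is processed synchronously from here.
 */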
static void cmd_work_handler(struct work_struct *work)
{
	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
	struct mlx5_cmd_layout *lay;
	struct semaphore *sem;

	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
	down(sem);

	if (alloc_ent(ent) < 0) {
		complete_command(ent);
		return;
	}

	ent->token = alloc_token(cmd);
	lay = get_inst(cmd, ent->idx);
	ent->lay = lay;
	memset(lay, 0, sizeof(*lay));
	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
	ent->op = be32_to_cpu(lay->in[0]) >> 16;
	if (ent->in->numpages != 0)
		lay->in_ptr = cpu_to_be64(mlx5_fwp_get_dma(ent->in, 0));
	if (ent->out->numpages != 0)
		lay->out_ptr = cpu_to_be64(mlx5_fwp_get_dma(ent->out, 0));
	lay->inlen = cpu_to_be32(ent->uin_size);
	lay->outlen = cpu_to_be32(ent->uout_size);
	lay->type = MLX5_PCI_CMD_XPORT;
	lay->token = ent->token;
	lay->status_own = CMD_OWNER_HW;
	set_signature(ent, !cmd->checksum_disabled);
	dump_command(dev, ent, 1);
	ent->ts1 = ktime_get_ns();
	ent->busy = 0;
	/* ring doorbell after the descriptor is valid */
	mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
	/* make sure data is written to RAM */
	mlx5_fwp_flush(cmd->cmd_page);
	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
	mmiowb();
	/* if not in polling mode, don't use ent after this point */
	if (cmd->mode == CMD_MODE_POLLING) {
		poll_timeout(ent);
		/* make sure we read the descriptor after ownership is SW */
		mlx5_cmd_comp_handler(dev, 1U << ent->idx);
	}
}

static const char *deliv_status_to_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:
		return "no errors";
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
		return "signature error";
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return "token error";
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
		return "bad block number";
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
		return "output pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return "input pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return "firmware internal error";
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
		return "command input length error";
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
		return "command output length error";
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return "reserved fields not cleared";
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
		return "bad command descriptor type";
	default:
		return "unknown status code";
	}
}

static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
	struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);

	return be16_to_cpu(hdr->opcode);
}

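/*
 * Wait for a synchronous command to complete.  In events mode a
 * timeout forces completion processing of the entry's slot so the
 * waiter is not left hanging.
 */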
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
	int timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd *cmd = &dev->cmd;
	int err;

	if (cmd->mode == CMD_MODE_POLLING) {
		wait_for_completion(&ent->done);
		err = ent->ret;
	} else if (!wait_for_completion_timeout(&ent->done, timeout)) {
		ent->ret = -ETIMEDOUT;
		mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
	}

	err = ent->ret;

	if (err == -ETIMEDOUT) {
		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
			       mlx5_command_str(msg_to_opcode(ent->in)),
			       msg_to_opcode(ent->in));
	}
	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
		      err, deliv_status_to_str(ent->status), ent->status);

	return err;
}

/* Notes:
 * 1. Callback functions may not sleep
 * 2. Page queue commands do not support asynchronous completion
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
			   int uin_size,
			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
			   mlx5_cmd_cbk_t callback,
			   void *context, int page_queue, u8 *status)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	struct mlx5_cmd_stats *stats;
	int err = 0;
	s64 ds;
	u16 op;

	if (callback && page_queue)
		return -EINVAL;

	ent = alloc_cmd(cmd, in, uin_size, out, uout, uout_size, callback,
			context, page_queue);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	if (!callback)
		init_completion(&ent->done);

	INIT_WORK(&ent->work, cmd_work_handler);
	if (page_queue) {
		cmd_work_handler(&ent->work);
	} else if (!queue_work(cmd->wq, &ent->work)) {
		mlx5_core_warn(dev, "failed to queue work\n");
		err = -ENOMEM;
		goto out_free;
	}

	if (callback)
		goto out;

	err = wait_func(dev, ent);
	if (err == -ETIMEDOUT)
		goto out;

	ds = ent->ts2 - ent->ts1;
	op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
	if (op < ARRAY_SIZE(cmd->stats)) {
		stats = &cmd->stats[op];
		spin_lock_irq(&stats->lock);
		stats->sum += ds;
		++stats->n;
		spin_unlock_irq(&stats->lock);
	}
	mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
			   "fw exec time for %s is %lld nsec\n",
			   mlx5_command_str(op), (long long)ds);
	*status = ent->status;
	free_cmd(ent);

	return err;

out_free:
	free_cmd(ent);
out:
	return err;
}

static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, size_t size)
{
	size_t delta;
	size_t i;

	if (to == NULL || from == NULL)
		return (-ENOMEM);

	delta = min_t(size_t, size, sizeof(to->first.data));
	memcpy(to->first.data, from, delta);
	from = (char *)from + delta;
	size -= delta;

	for (i = 0; size != 0; i++) {
		struct mlx5_cmd_prot_block *block;

		block = mlx5_fwp_get_virt(to, i * MLX5_CMD_MBOX_SIZE);

		delta = min_t(size_t, size, MLX5_CMD_DATA_BLOCK_SIZE);
		memcpy(block->data, from, delta);
		from = (char *)from + delta;
		size -= delta;
	}
	return (0);
}

static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
{
	size_t delta;
	size_t i;

	if (to == NULL || from == NULL)
		return (-ENOMEM);

	delta = min_t(size_t, size, sizeof(from->first.data));
	memcpy(to, from->first.data, delta);
	to = (char *)to + delta;
	size -= delta;

	for (i = 0; size != 0; i++) {
		struct mlx5_cmd_prot_block *block;

		block = mlx5_fwp_get_virt(from, i * MLX5_CMD_MBOX_SIZE);

		delta = min_t(size_t, size, MLX5_CMD_DATA_BLOCK_SIZE);
		memcpy(to, block->data, delta);
		to = (char *)to + delta;
		size -= delta;
	}
	return (0);
}

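/*
 * Allocate a command message backed by firmware pages.  The leading
 * bytes of a message (msg->first.data) travel inline in the command
 * layout; the remainder is carried in protection blocks chained
 * through their "next" DMA pointers.
 */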
static struct mlx5_cmd_msg *
mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, gfp_t flags, size_t size)
{
	struct mlx5_cmd_msg *msg;
	size_t blen;
	size_t n;
	size_t i;

	blen = size - min_t(size_t, sizeof(msg->first.data), size);
	n = howmany(blen, MLX5_CMD_DATA_BLOCK_SIZE);

	msg = mlx5_fwp_alloc(dev, flags, howmany(n, MLX5_NUM_CMDS_IN_ADAPTER_PAGE));
	if (msg == NULL)
		return (ERR_PTR(-ENOMEM));

	for (i = 0; i != n; i++) {
		struct mlx5_cmd_prot_block *block;

		block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE);

		memset(block, 0, MLX5_CMD_MBOX_SIZE);

		if (i != (n - 1)) {
			u64 dma = mlx5_fwp_get_dma(msg, (i + 1) * MLX5_CMD_MBOX_SIZE);
			block->next = cpu_to_be64(dma);
		}
		block->block_num = cpu_to_be32(i);
	}

	/* make sure initial data is written to RAM */
	mlx5_fwp_flush(msg);

	return (msg);
}

static void
mlx5_free_cmd_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{

	mlx5_fwp_free(msg);
}

static void set_wqname(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
		 dev_name(&dev->pdev->dev));
}

static void clean_debug_files(struct mlx5_core_dev *dev)
{
}

static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);

	down(&cmd->pages_sem);
	cmd->mode = mode;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}

void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
	mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
}

void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
	mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
}

static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{
	unsigned long flags;

	if (msg->cache) {
		spin_lock_irqsave(&msg->cache->lock, flags);
		list_add_tail(&msg->list, &msg->cache->head);
		spin_unlock_irqrestore(&msg->cache->lock, flags);
	} else {
		mlx5_free_cmd_msg(dev, msg);
	}
}

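/*
 * Command completion handler; "vector" is a bitmask of completed
 * command slots.  Invoked from the command EQ in events mode, or
 * directly after poll_timeout() in polling mode.
 */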
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u32 vector)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	int i;

	/* make sure data gets read from RAM */
	mlx5_fwp_invalidate(cmd->cmd_page);

	while (vector != 0) {
		i = ffs(vector) - 1;
		vector &= ~(1U << i);
		ent = cmd->ent_arr[i];
		ent->ts2 = ktime_get_ns();
		memcpy(ent->out->first.data, ent->lay->out,
		       sizeof(ent->lay->out));
		/* make sure data gets read from RAM */
		mlx5_fwp_invalidate(ent->out);
		dump_command(dev, ent, 0);
		if (!ent->ret) {
			if (!cmd->checksum_disabled)
				ent->ret = verify_signature(ent);
			else
				ent->ret = 0;
			ent->status = ent->lay->status_own >> 1;

			mlx5_core_dbg(dev,
			    "FW command ret 0x%x, status %s(0x%x)\n",
			    ent->ret,
			    deliv_status_to_str(ent->status),
			    ent->status);
		}
		free_ent(cmd, ent->idx);
		complete_command(ent);
	}
}
EXPORT_SYMBOL(mlx5_cmd_comp_handler);

void mlx5_trigger_cmd_completions(struct mlx5_core_dev *dev)
{
	unsigned long vector;
	int i = 0;
	unsigned long flags;

	synchronize_irq(dev->priv.eq_table.cmd_eq.irqn);
	spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
	vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);

	if (!vector)
		return;

	for (i = 0; i < (1 << dev->cmd.log_sz); i++) {
		struct mlx5_cmd_work_ent *ent = dev->cmd.ent_arr[i];

		if (!test_bit(i, &vector))
			continue;

		while (ent->busy)
			usleep_range(1000, 1100);
		free_ent(&dev->cmd, i);
		complete_command(ent);
	}
}
EXPORT_SYMBOL(mlx5_trigger_cmd_completions);

static int status_to_err(u8 status)
{
	return status ? -1 : 0; /* TBD more meaningful codes */
}

static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
				      gfp_t gfp)
{
	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
	struct mlx5_cmd *cmd = &dev->cmd;
	struct cache_ent *ent = NULL;

	if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE)
		ent = &cmd->cache.large;
	else if (in_size > 16 && in_size <= MED_LIST_SIZE)
		ent = &cmd->cache.med;

	if (ent) {
		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			msg = list_entry(ent->head.next, struct mlx5_cmd_msg,
					 list);
			list_del(&msg->list);
		}
		spin_unlock_irq(&ent->lock);
	}

	if (IS_ERR(msg))
		msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);

	return msg;
}

static int is_manage_pages(struct mlx5_inbox_hdr *in)
{
	return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}

static int cmd_exec_helper(struct mlx5_core_dev *dev,
			   void *in, int in_size,
			   void *out, int out_size,
			   mlx5_cmd_cbk_t callback, void *context)
{
	struct mlx5_cmd_msg *inb;
	struct mlx5_cmd_msg *outb;
	int pages_queue;
	const gfp_t gfp = GFP_KERNEL;
	int err;
	u8 status = 0;

	pages_queue = is_manage_pages(in);

	inb = alloc_msg(dev, in_size, gfp);
	if (IS_ERR(inb)) {
		err = PTR_ERR(inb);
		return err;
	}

	err = mlx5_copy_to_msg(inb, in, in_size);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto out_in;
	}

	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
	if (IS_ERR(outb)) {
		err = PTR_ERR(outb);
		goto out_in;
	}

	err = mlx5_cmd_invoke(dev, inb, in_size, outb, out, out_size, callback,
			      context, pages_queue, &status);
	if (err) {
		if (err == -ETIMEDOUT)
			return err;
		goto out_out;
	}

	mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
	if (status) {
		err = status_to_err(status);
		goto out_out;
	}

	if (callback)
		return err;

	err = mlx5_copy_from_msg(out, outb, out_size);

out_out:
	mlx5_free_cmd_msg(dev, outb);

out_in:
	free_msg(dev, inb);
	return err;
}

int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size)
{
	return cmd_exec_helper(dev, in, in_size, out, out_size, NULL, NULL);
}
EXPORT_SYMBOL(mlx5_cmd_exec);

int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
		     void *out, int out_size, mlx5_cmd_cbk_t callback,
		     void *context)
{
	return cmd_exec_helper(dev, in, in_size, out, out_size, callback, context);
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);

static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	struct mlx5_cmd_msg *n;

	list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}

	list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}
}

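/*
 * Pre-allocate message buffers in two size classes (large and medium)
 * so that most commands avoid a fresh DMA allocation on the fast
 * path; alloc_msg() draws from these lists first.
 */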
static int create_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	int err;
	int i;

	spin_lock_init(&cmd->cache.large.lock);
	INIT_LIST_HEAD(&cmd->cache.large.head);
	spin_lock_init(&cmd->cache.med.lock);
	INIT_LIST_HEAD(&cmd->cache.med.head);

	for (i = 0; i < NUM_LONG_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.large;
		list_add_tail(&msg->list, &cmd->cache.large.head);
	}

	for (i = 0; i < NUM_MED_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.med;
		list_add_tail(&msg->list, &cmd->cache.med.head);
	}

	return 0;

ex_err:
	destroy_msg_cache(dev);
	return err;
}

static int
alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	int err;

	sx_init(&cmd->dma_sx, "MLX5-DMA-SX");
	mtx_init(&cmd->dma_mtx, "MLX5-DMA-MTX", NULL, MTX_DEF);
	cv_init(&cmd->dma_cv, "MLX5-DMA-CV");

	/*
	 * Create global DMA descriptor tag for allocating
	 * 4K firmware pages:
	 */
	err = -bus_dma_tag_create(
	    bus_get_dma_tag(dev->pdev->dev.bsddev),
	    MLX5_ADAPTER_PAGE_SIZE,	/* alignment */
	    0,				/* no boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MLX5_ADAPTER_PAGE_SIZE,	/* maxsize */
	    1,				/* nsegments */
	    MLX5_ADAPTER_PAGE_SIZE,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &cmd->dma_tag);
	if (err != 0)
		goto failure_destroy_sx;

	cmd->cmd_page = mlx5_fwp_alloc(dev, GFP_KERNEL, 1);
	if (cmd->cmd_page == NULL) {
		err = -ENOMEM;
		goto failure_alloc_page;
	}
	cmd->dma = mlx5_fwp_get_dma(cmd->cmd_page, 0);
	cmd->cmd_buf = mlx5_fwp_get_virt(cmd->cmd_page, 0);
	return (0);

failure_alloc_page:
	bus_dma_tag_destroy(cmd->dma_tag);

failure_destroy_sx:
	cv_destroy(&cmd->dma_cv);
	mtx_destroy(&cmd->dma_mtx);
	sx_destroy(&cmd->dma_sx);
	return (err);
}

static void
free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{

	mlx5_fwp_free(cmd->cmd_page);
	bus_dma_tag_destroy(cmd->dma_tag);
	cv_destroy(&cmd->dma_cv);
	mtx_destroy(&cmd->dma_mtx);
	sx_destroy(&cmd->dma_sx);
}

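/*
 * Read the command queue geometry from the initialization segment,
 * program the queue base address into the device, and set up the
 * message cache and work queue.  The interface starts out in polling
 * mode until event queues become available.
 */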
int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	u32 cmd_h, cmd_l;
	u16 cmd_if_rev;
	int err;
	int i;

	cmd_if_rev = cmdif_rev_get(dev);
	if (cmd_if_rev != CMD_IF_REV) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""Driver cmdif rev(%d) differs from firmware's(%d)\n", CMD_IF_REV, cmd_if_rev);
		return -EINVAL;
	}

	err = alloc_cmd_page(dev, cmd);
	if (err)
		goto err_free_pool;

	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->log_sz = cmd_l >> 4 & 0xf;
	cmd->log_stride = cmd_l & 0xf;
	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""firmware reports too many outstanding commands %d\n", 1 << cmd->log_sz);
		err = -EINVAL;
		goto err_free_page;
	}

	if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""command queue size overflow\n");
		err = -EINVAL;
		goto err_free_page;
	}

	cmd->checksum_disabled = 1;
	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
	cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;

	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
	if (cmd->cmdif_rev > CMD_IF_REV) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""driver does not support command interface version. driver %d, firmware %d\n", CMD_IF_REV, cmd->cmdif_rev);
		err = -ENOTSUPP;
		goto err_free_page;
	}

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);
	for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
		spin_lock_init(&cmd->stats[i].lock);

	sema_init(&cmd->sem, cmd->max_reg_cmds);
	sema_init(&cmd->pages_sem, 1);

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	if (cmd_l & 0xfff) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""invalid command queue address\n");
		err = -ENOMEM;
		goto err_free_page;
	}

	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

	/* Make sure firmware sees the complete address before we proceed */
	wmb();

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	cmd->mode = CMD_MODE_POLLING;

	err = create_msg_cache(dev);
	if (err) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""failed to create command cache\n");
		goto err_free_page;
	}

	set_wqname(dev);
	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
	if (!cmd->wq) {
		device_printf((&dev->pdev->dev)->bsddev, "ERR: ""failed to create command workqueue\n");
		err = -ENOMEM;
		goto err_cache;
	}

	return 0;

err_cache:
	destroy_msg_cache(dev);

err_free_page:
	free_cmd_page(dev, cmd);

err_free_pool:
	return err;
}
EXPORT_SYMBOL(mlx5_cmd_init);

void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	clean_debug_files(dev);
	destroy_workqueue(cmd->wq);
	destroy_msg_cache(dev);
	free_cmd_page(dev, cmd);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);

static const char *cmd_status_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:
		return "OK";
	case MLX5_CMD_STAT_INT_ERR:
		return "internal error";
	case MLX5_CMD_STAT_BAD_OP_ERR:
		return "bad operation";
	case MLX5_CMD_STAT_BAD_PARAM_ERR:
		return "bad parameter";
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
		return "bad system state";
	case MLX5_CMD_STAT_BAD_RES_ERR:
		return "bad resource";
	case MLX5_CMD_STAT_RES_BUSY:
		return "resource busy";
	case MLX5_CMD_STAT_LIM_ERR:
		return "limits exceeded";
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
		return "bad resource state";
	case MLX5_CMD_STAT_IX_ERR:
		return "bad index";
	case MLX5_CMD_STAT_NO_RES_ERR:
		return "no resources";
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
		return "bad input length";
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
		return "bad output length";
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
		return "bad QP state";
	case MLX5_CMD_STAT_BAD_PKT_ERR:
		return "bad packet (discarded)";
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
		return "bad size too many outstanding CQEs";
	default:
		return "unknown status";
	}
}

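/* Translate a firmware command status code into an errno value. */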
static int cmd_status_to_err_helper(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK: return 0;
	case MLX5_CMD_STAT_INT_ERR: return -EIO;
	case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL;
	case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL;
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO;
	case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL;
	case MLX5_CMD_STAT_RES_BUSY: return -EBUSY;
	case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM;
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL;
	case MLX5_CMD_STAT_IX_ERR: return -EINVAL;
	case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN;
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO;
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO;
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL;
	case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL;
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL;
	default: return -EIO;
	}
}

/* This will remain available until all commands use the set/get macros. */
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
{
	if (!hdr->status)
		return 0;

	printf("mlx5_core: WARN: ""command failed, status %s(0x%x), syndrome 0x%x\n", cmd_status_str(hdr->status), hdr->status, be32_to_cpu(hdr->syndrome));

	return cmd_status_to_err_helper(hdr->status);
}

int mlx5_cmd_status_to_err_v2(void *ptr)
{
	u32 syndrome;
	u8 status;

	status = be32_to_cpu(*(__be32 *)ptr) >> 24;
	if (!status)
		return 0;

	syndrome = be32_to_cpu(*(__be32 *)(ptr + 4));

	printf("mlx5_core: WARN: ""command failed, status %s(0x%x), syndrome 0x%x\n", cmd_status_str(status), status, syndrome);

	return cmd_status_to_err_helper(status);
}