1 /*- 2 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 24 * 25 * $FreeBSD$ 26 */ 27 28 #include <linux/module.h> 29 #include <linux/errno.h> 30 #include <linux/pci.h> 31 #include <linux/dma-mapping.h> 32 #include <linux/slab.h> 33 #include <linux/delay.h> 34 #include <linux/random.h> 35 #include <linux/io-mapping.h> 36 #include <linux/hardirq.h> 37 #include <linux/ktime.h> 38 #include <dev/mlx5/driver.h> 39 40 #include "mlx5_core.h" 41 42 static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size); 43 static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev, 44 struct mlx5_cmd_msg *msg); 45 static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg); 46 47 enum { 48 CMD_IF_REV = 5, 49 }; 50 51 enum { 52 CMD_MODE_POLLING, 53 CMD_MODE_EVENTS 54 }; 55 56 enum { 57 NUM_LONG_LISTS = 2, 58 NUM_MED_LISTS = 64, 59 LONG_LIST_SIZE = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 + 60 MLX5_CMD_DATA_BLOCK_SIZE, 61 MED_LIST_SIZE = 16 + MLX5_CMD_DATA_BLOCK_SIZE, 62 }; 63 64 enum { 65 MLX5_CMD_DELIVERY_STAT_OK = 0x0, 66 MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1, 67 MLX5_CMD_DELIVERY_STAT_TOK_ERR = 0x2, 68 MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR = 0x3, 69 MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR = 0x4, 70 MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR = 0x5, 71 MLX5_CMD_DELIVERY_STAT_FW_ERR = 0x6, 72 MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR = 0x7, 73 MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR = 0x8, 74 MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR = 0x9, 75 MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10, 76 }; 77 78 static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd, 79 struct mlx5_cmd_msg *in, 80 int uin_size, 81 struct mlx5_cmd_msg *out, 82 void *uout, int uout_size, 83 mlx5_cmd_cbk_t cbk, 84 void *context, int page_queue) 85 { 86 gfp_t alloc_flags = cbk ? 
GFP_ATOMIC : GFP_KERNEL; 87 struct mlx5_cmd_work_ent *ent; 88 89 ent = kzalloc(sizeof(*ent), alloc_flags); 90 if (!ent) 91 return ERR_PTR(-ENOMEM); 92 93 ent->in = in; 94 ent->uin_size = uin_size; 95 ent->out = out; 96 ent->uout = uout; 97 ent->uout_size = uout_size; 98 ent->callback = cbk; 99 ent->context = context; 100 ent->cmd = cmd; 101 ent->page_queue = page_queue; 102 103 return ent; 104 } 105 106 static u8 alloc_token(struct mlx5_cmd *cmd) 107 { 108 u8 token; 109 110 spin_lock(&cmd->token_lock); 111 cmd->token++; 112 if (cmd->token == 0) 113 cmd->token++; 114 token = cmd->token; 115 spin_unlock(&cmd->token_lock); 116 117 return token; 118 } 119 120 static int alloc_ent(struct mlx5_cmd_work_ent *ent) 121 { 122 unsigned long flags; 123 struct mlx5_cmd *cmd = ent->cmd; 124 struct mlx5_core_dev *dev = 125 container_of(cmd, struct mlx5_core_dev, cmd); 126 int ret = cmd->max_reg_cmds; 127 128 spin_lock_irqsave(&cmd->alloc_lock, flags); 129 if (!ent->page_queue) { 130 ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds); 131 if (ret >= cmd->max_reg_cmds) 132 ret = -1; 133 } 134 135 if (dev->state != MLX5_DEVICE_STATE_UP) 136 ret = -1; 137 138 if (ret != -1) { 139 ent->busy = 1; 140 ent->idx = ret; 141 clear_bit(ent->idx, &cmd->bitmask); 142 cmd->ent_arr[ent->idx] = ent; 143 } 144 spin_unlock_irqrestore(&cmd->alloc_lock, flags); 145 146 return ret; 147 } 148 149 static void free_ent(struct mlx5_cmd *cmd, int idx) 150 { 151 unsigned long flags; 152 153 spin_lock_irqsave(&cmd->alloc_lock, flags); 154 set_bit(idx, &cmd->bitmask); 155 spin_unlock_irqrestore(&cmd->alloc_lock, flags); 156 } 157 158 static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx) 159 { 160 return cmd->cmd_buf + (idx << cmd->log_stride); 161 } 162 163 static u8 xor8_buf(void *buf, int len) 164 { 165 u8 *ptr = buf; 166 u8 sum = 0; 167 int i; 168 169 for (i = 0; i < len; i++) 170 sum ^= ptr[i]; 171 172 return sum; 173 } 174 175 static int verify_block_sig(struct mlx5_cmd_prot_block *block) 176 { 177 if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff) 178 return -EINVAL; 179 180 if (xor8_buf(block, sizeof(*block)) != 0xff) 181 return -EINVAL; 182 183 return 0; 184 } 185 186 static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token, 187 int csum) 188 { 189 block->token = token; 190 if (csum) { 191 block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) - 192 sizeof(block->data) - 2); 193 block->sig = ~xor8_buf(block, sizeof(*block) - 1); 194 } 195 } 196 197 static void 198 calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum) 199 { 200 size_t i; 201 202 for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) { 203 struct mlx5_cmd_prot_block *block; 204 205 block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE); 206 207 /* compute signature */ 208 calc_block_sig(block, token, csum); 209 210 /* check for last block */ 211 if (block->next == 0) 212 break; 213 } 214 215 /* make sure data gets written to RAM */ 216 mlx5_fwp_flush(msg); 217 } 218 219 static void set_signature(struct mlx5_cmd_work_ent *ent, int csum) 220 { 221 ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay)); 222 calc_chain_sig(ent->in, ent->token, csum); 223 calc_chain_sig(ent->out, ent->token, csum); 224 } 225 226 static void poll_timeout(struct mlx5_cmd_work_ent *ent) 227 { 228 struct mlx5_core_dev *dev = container_of(ent->cmd, 229 struct mlx5_core_dev, cmd); 230 int poll_end = jiffies + 231 msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000); 232 u8 own; 233 234 do { 235 own = 
ent->lay->status_own; 236 if (!(own & CMD_OWNER_HW) || 237 dev->state != MLX5_DEVICE_STATE_UP) { 238 ent->ret = 0; 239 return; 240 } 241 usleep_range(5000, 10000); 242 } while (time_before(jiffies, poll_end)); 243 244 ent->ret = -ETIMEDOUT; 245 } 246 247 static void free_cmd(struct mlx5_cmd_work_ent *ent) 248 { 249 kfree(ent); 250 } 251 252 static int 253 verify_signature(struct mlx5_cmd_work_ent *ent) 254 { 255 struct mlx5_cmd_msg *msg = ent->out; 256 size_t i; 257 int err; 258 u8 sig; 259 260 sig = xor8_buf(ent->lay, sizeof(*ent->lay)); 261 if (sig != 0xff) 262 return -EINVAL; 263 264 for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) { 265 struct mlx5_cmd_prot_block *block; 266 267 block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE); 268 269 /* compute signature */ 270 err = verify_block_sig(block); 271 if (err != 0) 272 return (err); 273 274 /* check for last block */ 275 if (block->next == 0) 276 break; 277 } 278 return (0); 279 } 280 281 static void dump_buf(void *buf, int size, int data_only, int offset) 282 { 283 __be32 *p = buf; 284 int i; 285 286 for (i = 0; i < size; i += 16) { 287 pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]), 288 be32_to_cpu(p[1]), be32_to_cpu(p[2]), 289 be32_to_cpu(p[3])); 290 p += 4; 291 offset += 16; 292 } 293 if (!data_only) 294 pr_debug("\n"); 295 } 296 297 const char *mlx5_command_str(int command) 298 { 299 #define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd 300 301 switch (command) { 302 MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP); 303 MLX5_COMMAND_STR_CASE(SET_HCA_CAP); 304 MLX5_COMMAND_STR_CASE(QUERY_ADAPTER); 305 MLX5_COMMAND_STR_CASE(INIT_HCA); 306 MLX5_COMMAND_STR_CASE(TEARDOWN_HCA); 307 MLX5_COMMAND_STR_CASE(ENABLE_HCA); 308 MLX5_COMMAND_STR_CASE(DISABLE_HCA); 309 MLX5_COMMAND_STR_CASE(QUERY_PAGES); 310 MLX5_COMMAND_STR_CASE(MANAGE_PAGES); 311 MLX5_COMMAND_STR_CASE(QUERY_ISSI); 312 MLX5_COMMAND_STR_CASE(SET_ISSI); 313 MLX5_COMMAND_STR_CASE(CREATE_MKEY); 314 MLX5_COMMAND_STR_CASE(QUERY_MKEY); 315 MLX5_COMMAND_STR_CASE(DESTROY_MKEY); 316 MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS); 317 MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME); 318 MLX5_COMMAND_STR_CASE(CREATE_EQ); 319 MLX5_COMMAND_STR_CASE(DESTROY_EQ); 320 MLX5_COMMAND_STR_CASE(QUERY_EQ); 321 MLX5_COMMAND_STR_CASE(GEN_EQE); 322 MLX5_COMMAND_STR_CASE(CREATE_CQ); 323 MLX5_COMMAND_STR_CASE(DESTROY_CQ); 324 MLX5_COMMAND_STR_CASE(QUERY_CQ); 325 MLX5_COMMAND_STR_CASE(MODIFY_CQ); 326 MLX5_COMMAND_STR_CASE(CREATE_QP); 327 MLX5_COMMAND_STR_CASE(DESTROY_QP); 328 MLX5_COMMAND_STR_CASE(RST2INIT_QP); 329 MLX5_COMMAND_STR_CASE(INIT2RTR_QP); 330 MLX5_COMMAND_STR_CASE(RTR2RTS_QP); 331 MLX5_COMMAND_STR_CASE(RTS2RTS_QP); 332 MLX5_COMMAND_STR_CASE(SQERR2RTS_QP); 333 MLX5_COMMAND_STR_CASE(2ERR_QP); 334 MLX5_COMMAND_STR_CASE(2RST_QP); 335 MLX5_COMMAND_STR_CASE(QUERY_QP); 336 MLX5_COMMAND_STR_CASE(SQD_RTS_QP); 337 MLX5_COMMAND_STR_CASE(MAD_IFC); 338 MLX5_COMMAND_STR_CASE(INIT2INIT_QP); 339 MLX5_COMMAND_STR_CASE(CREATE_PSV); 340 MLX5_COMMAND_STR_CASE(DESTROY_PSV); 341 MLX5_COMMAND_STR_CASE(CREATE_SRQ); 342 MLX5_COMMAND_STR_CASE(DESTROY_SRQ); 343 MLX5_COMMAND_STR_CASE(QUERY_SRQ); 344 MLX5_COMMAND_STR_CASE(ARM_RQ); 345 MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ); 346 MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ); 347 MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ); 348 MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ); 349 MLX5_COMMAND_STR_CASE(CREATE_DCT); 350 MLX5_COMMAND_STR_CASE(SET_DC_CNAK_TRACE); 351 MLX5_COMMAND_STR_CASE(DESTROY_DCT); 352 MLX5_COMMAND_STR_CASE(DRAIN_DCT); 353 
MLX5_COMMAND_STR_CASE(QUERY_DCT); 354 MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION); 355 MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE); 356 MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE); 357 MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT); 358 MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT); 359 MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT); 360 MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT); 361 MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS); 362 MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS); 363 MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT); 364 MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT); 365 MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID); 366 MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY); 367 MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER); 368 MLX5_COMMAND_STR_CASE(SET_WOL_ROL); 369 MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL); 370 MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER); 371 MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER); 372 MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER); 373 MLX5_COMMAND_STR_CASE(ALLOC_PD); 374 MLX5_COMMAND_STR_CASE(DEALLOC_PD); 375 MLX5_COMMAND_STR_CASE(ALLOC_UAR); 376 MLX5_COMMAND_STR_CASE(DEALLOC_UAR); 377 MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION); 378 MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG); 379 MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG); 380 MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG); 381 MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX); 382 MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX); 383 MLX5_COMMAND_STR_CASE(NOP); 384 MLX5_COMMAND_STR_CASE(ALLOC_XRCD); 385 MLX5_COMMAND_STR_CASE(DEALLOC_XRCD); 386 MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN); 387 MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN); 388 MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS); 389 MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS); 390 MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS); 391 MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS); 392 MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS); 393 MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT); 394 MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT); 395 MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY); 396 MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY); 397 MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY); 398 MLX5_COMMAND_STR_CASE(CREATE_RMP); 399 MLX5_COMMAND_STR_CASE(MODIFY_RMP); 400 MLX5_COMMAND_STR_CASE(DESTROY_RMP); 401 MLX5_COMMAND_STR_CASE(QUERY_RMP); 402 MLX5_COMMAND_STR_CASE(CREATE_RQT); 403 MLX5_COMMAND_STR_CASE(MODIFY_RQT); 404 MLX5_COMMAND_STR_CASE(DESTROY_RQT); 405 MLX5_COMMAND_STR_CASE(QUERY_RQT); 406 MLX5_COMMAND_STR_CASE(ACCESS_REG); 407 MLX5_COMMAND_STR_CASE(CREATE_SQ); 408 MLX5_COMMAND_STR_CASE(MODIFY_SQ); 409 MLX5_COMMAND_STR_CASE(DESTROY_SQ); 410 MLX5_COMMAND_STR_CASE(QUERY_SQ); 411 MLX5_COMMAND_STR_CASE(CREATE_RQ); 412 MLX5_COMMAND_STR_CASE(MODIFY_RQ); 413 MLX5_COMMAND_STR_CASE(DESTROY_RQ); 414 MLX5_COMMAND_STR_CASE(QUERY_RQ); 415 MLX5_COMMAND_STR_CASE(CREATE_TIR); 416 MLX5_COMMAND_STR_CASE(MODIFY_TIR); 417 MLX5_COMMAND_STR_CASE(DESTROY_TIR); 418 MLX5_COMMAND_STR_CASE(QUERY_TIR); 419 MLX5_COMMAND_STR_CASE(CREATE_TIS); 420 MLX5_COMMAND_STR_CASE(MODIFY_TIS); 421 MLX5_COMMAND_STR_CASE(DESTROY_TIS); 422 MLX5_COMMAND_STR_CASE(QUERY_TIS); 423 MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE); 424 MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE); 425 MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE); 426 MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP); 427 MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP); 428 MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP); 429 MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY); 430 MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY); 431 MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY); 432 MLX5_COMMAND_STR_CASE(SET_DIAGNOSTICS); 433 
MLX5_COMMAND_STR_CASE(QUERY_DIAGNOSTICS); 434 default: return "unknown command opcode"; 435 } 436 } 437 438 static void dump_command(struct mlx5_core_dev *dev, 439 struct mlx5_cmd_work_ent *ent, int input) 440 { 441 u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode); 442 struct mlx5_cmd_msg *msg = input ? ent->in : ent->out; 443 size_t i; 444 int data_only; 445 int offset = 0; 446 int msg_len = input ? ent->uin_size : ent->uout_size; 447 int dump_len; 448 449 data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA)); 450 451 if (data_only) 452 mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA, 453 "dump command data %s(0x%x) %s\n", 454 mlx5_command_str(op), op, 455 input ? "INPUT" : "OUTPUT"); 456 else 457 mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n", 458 mlx5_command_str(op), op, 459 input ? "INPUT" : "OUTPUT"); 460 461 if (data_only) { 462 if (input) { 463 dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset); 464 offset += sizeof(ent->lay->in); 465 } else { 466 dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset); 467 offset += sizeof(ent->lay->out); 468 } 469 } else { 470 dump_buf(ent->lay, sizeof(*ent->lay), 0, offset); 471 offset += sizeof(*ent->lay); 472 } 473 474 for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) { 475 struct mlx5_cmd_prot_block *block; 476 477 block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE); 478 479 if (data_only) { 480 if (offset >= msg_len) 481 break; 482 dump_len = min_t(int, 483 MLX5_CMD_DATA_BLOCK_SIZE, msg_len - offset); 484 485 dump_buf(block->data, dump_len, 1, offset); 486 offset += MLX5_CMD_DATA_BLOCK_SIZE; 487 } else { 488 mlx5_core_dbg(dev, "command block:\n"); 489 dump_buf(block, sizeof(*block), 0, offset); 490 offset += sizeof(*block); 491 } 492 493 /* check for last block */ 494 if (block->next == 0) 495 break; 496 } 497 498 if (data_only) 499 pr_debug("\n"); 500 } 501 502 static int set_internal_err_outbox(struct mlx5_core_dev *dev, u16 opcode, 503 struct mlx5_outbox_hdr *hdr) 504 { 505 hdr->status = 0; 506 hdr->syndrome = 0; 507 508 switch (opcode) { 509 case MLX5_CMD_OP_TEARDOWN_HCA: 510 case MLX5_CMD_OP_DISABLE_HCA: 511 case MLX5_CMD_OP_MANAGE_PAGES: 512 case MLX5_CMD_OP_DESTROY_MKEY: 513 case MLX5_CMD_OP_DESTROY_EQ: 514 case MLX5_CMD_OP_DESTROY_CQ: 515 case MLX5_CMD_OP_DESTROY_QP: 516 case MLX5_CMD_OP_DESTROY_PSV: 517 case MLX5_CMD_OP_DESTROY_SRQ: 518 case MLX5_CMD_OP_DESTROY_XRC_SRQ: 519 case MLX5_CMD_OP_DESTROY_DCT: 520 case MLX5_CMD_OP_DEALLOC_Q_COUNTER: 521 case MLX5_CMD_OP_DEALLOC_PD: 522 case MLX5_CMD_OP_DEALLOC_UAR: 523 case MLX5_CMD_OP_DETACH_FROM_MCG: 524 case MLX5_CMD_OP_DEALLOC_XRCD: 525 case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN: 526 case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT: 527 case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY: 528 case MLX5_CMD_OP_DESTROY_LAG: 529 case MLX5_CMD_OP_DESTROY_VPORT_LAG: 530 case MLX5_CMD_OP_DESTROY_TIR: 531 case MLX5_CMD_OP_DESTROY_SQ: 532 case MLX5_CMD_OP_DESTROY_RQ: 533 case MLX5_CMD_OP_DESTROY_RMP: 534 case MLX5_CMD_OP_DESTROY_TIS: 535 case MLX5_CMD_OP_DESTROY_RQT: 536 case MLX5_CMD_OP_DESTROY_FLOW_TABLE: 537 case MLX5_CMD_OP_DESTROY_FLOW_GROUP: 538 case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY: 539 case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER: 540 case MLX5_CMD_OP_2ERR_QP: 541 case MLX5_CMD_OP_2RST_QP: 542 case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT: 543 case MLX5_CMD_OP_MODIFY_FLOW_TABLE: 544 case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY: 545 case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT: 546 case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER: 547 case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT: 548 case 
MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT: 549 case MLX5_CMD_OP_MODIFY_VPORT_STATE: 550 case MLX5_CMD_OP_MODIFY_SQ: 551 case MLX5_CMD_OP_MODIFY_RQ: 552 case MLX5_CMD_OP_MODIFY_TIS: 553 case MLX5_CMD_OP_MODIFY_LAG: 554 case MLX5_CMD_OP_MODIFY_TIR: 555 case MLX5_CMD_OP_MODIFY_RMP: 556 case MLX5_CMD_OP_MODIFY_RQT: 557 case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT: 558 case MLX5_CMD_OP_MODIFY_CONG_PARAMS: 559 case MLX5_CMD_OP_MODIFY_CONG_STATUS: 560 case MLX5_CMD_OP_MODIFY_CQ: 561 case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT: 562 case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT: 563 case MLX5_CMD_OP_MODIFY_OTHER_HCA_CAP: 564 case MLX5_CMD_OP_ACCESS_REG: 565 case MLX5_CMD_OP_DRAIN_DCT: 566 return 0; 567 568 case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT: 569 case MLX5_CMD_OP_ALLOC_ENCAP_HEADER: 570 case MLX5_CMD_OP_ALLOC_FLOW_COUNTER: 571 case MLX5_CMD_OP_ALLOC_PD: 572 case MLX5_CMD_OP_ALLOC_Q_COUNTER: 573 case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN: 574 case MLX5_CMD_OP_ALLOC_UAR: 575 case MLX5_CMD_OP_ALLOC_XRCD: 576 case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION: 577 case MLX5_CMD_OP_ARM_RQ: 578 case MLX5_CMD_OP_ARM_XRC_SRQ: 579 case MLX5_CMD_OP_ATTACH_TO_MCG: 580 case MLX5_CMD_OP_CONFIG_INT_MODERATION: 581 case MLX5_CMD_OP_CREATE_CQ: 582 case MLX5_CMD_OP_CREATE_DCT: 583 case MLX5_CMD_OP_CREATE_EQ: 584 case MLX5_CMD_OP_CREATE_FLOW_GROUP: 585 case MLX5_CMD_OP_CREATE_FLOW_TABLE: 586 case MLX5_CMD_OP_CREATE_LAG: 587 case MLX5_CMD_OP_CREATE_MKEY: 588 case MLX5_CMD_OP_CREATE_PSV: 589 case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT: 590 case MLX5_CMD_OP_CREATE_QP: 591 case MLX5_CMD_OP_CREATE_RMP: 592 case MLX5_CMD_OP_CREATE_RQ: 593 case MLX5_CMD_OP_CREATE_RQT: 594 case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT: 595 case MLX5_CMD_OP_CREATE_SQ: 596 case MLX5_CMD_OP_CREATE_SRQ: 597 case MLX5_CMD_OP_CREATE_TIR: 598 case MLX5_CMD_OP_CREATE_TIS: 599 case MLX5_CMD_OP_CREATE_VPORT_LAG: 600 case MLX5_CMD_OP_CREATE_XRC_SRQ: 601 case MLX5_CMD_OP_ENABLE_HCA: 602 case MLX5_CMD_OP_GEN_EQE: 603 case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG: 604 case MLX5_CMD_OP_INIT2INIT_QP: 605 case MLX5_CMD_OP_INIT2RTR_QP: 606 case MLX5_CMD_OP_INIT_HCA: 607 case MLX5_CMD_OP_MAD_IFC: 608 case MLX5_CMD_OP_NOP: 609 case MLX5_CMD_OP_PAGE_FAULT_RESUME: 610 case MLX5_CMD_OP_QUERY_ADAPTER: 611 case MLX5_CMD_OP_QUERY_CONG_PARAMS: 612 case MLX5_CMD_OP_QUERY_CONG_STATISTICS: 613 case MLX5_CMD_OP_QUERY_CONG_STATUS: 614 case MLX5_CMD_OP_QUERY_CQ: 615 case MLX5_CMD_OP_QUERY_DCT: 616 case MLX5_CMD_OP_QUERY_EQ: 617 case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT: 618 case MLX5_CMD_OP_QUERY_FLOW_COUNTER: 619 case MLX5_CMD_OP_QUERY_FLOW_GROUP: 620 case MLX5_CMD_OP_QUERY_FLOW_TABLE: 621 case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY: 622 case MLX5_CMD_OP_QUERY_HCA_CAP: 623 case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT: 624 case MLX5_CMD_OP_QUERY_HCA_VPORT_GID: 625 case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY: 626 case MLX5_CMD_OP_QUERY_ISSI: 627 case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY: 628 case MLX5_CMD_OP_QUERY_LAG: 629 case MLX5_CMD_OP_QUERY_MAD_DEMUX: 630 case MLX5_CMD_OP_QUERY_MKEY: 631 case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT: 632 case MLX5_CMD_OP_QUERY_OTHER_HCA_CAP: 633 case MLX5_CMD_OP_QUERY_PAGES: 634 case MLX5_CMD_OP_QUERY_QP: 635 case MLX5_CMD_OP_QUERY_Q_COUNTER: 636 case MLX5_CMD_OP_QUERY_RMP: 637 case MLX5_CMD_OP_QUERY_ROCE_ADDRESS: 638 case MLX5_CMD_OP_QUERY_RQ: 639 case MLX5_CMD_OP_QUERY_RQT: 640 case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT: 641 case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS: 642 case MLX5_CMD_OP_QUERY_SQ: 643 case MLX5_CMD_OP_QUERY_SRQ: 644 case MLX5_CMD_OP_QUERY_TIR: 645 case MLX5_CMD_OP_QUERY_TIS: 646 
case MLX5_CMD_OP_QUERY_VPORT_COUNTER: 647 case MLX5_CMD_OP_QUERY_VPORT_STATE: 648 case MLX5_CMD_OP_QUERY_XRC_SRQ: 649 case MLX5_CMD_OP_RST2INIT_QP: 650 case MLX5_CMD_OP_RTR2RTS_QP: 651 case MLX5_CMD_OP_RTS2RTS_QP: 652 case MLX5_CMD_OP_SET_DC_CNAK_TRACE: 653 case MLX5_CMD_OP_SET_HCA_CAP: 654 case MLX5_CMD_OP_SET_ISSI: 655 case MLX5_CMD_OP_SET_L2_TABLE_ENTRY: 656 case MLX5_CMD_OP_SET_MAD_DEMUX: 657 case MLX5_CMD_OP_SET_ROCE_ADDRESS: 658 case MLX5_CMD_OP_SQD_RTS_QP: 659 case MLX5_CMD_OP_SQERR2RTS_QP: 660 hdr->status = MLX5_CMD_STAT_INT_ERR; 661 hdr->syndrome = 0xFFFFFFFF; 662 return -ECANCELED; 663 default: 664 mlx5_core_err(dev, "Unknown FW command (%d)\n", opcode); 665 return -EINVAL; 666 } 667 } 668 669 static void complete_command(struct mlx5_cmd_work_ent *ent) 670 { 671 struct mlx5_cmd *cmd = ent->cmd; 672 struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, 673 cmd); 674 mlx5_cmd_cbk_t callback; 675 void *context; 676 677 s64 ds; 678 struct mlx5_cmd_stats *stats; 679 unsigned long flags; 680 int err; 681 struct semaphore *sem; 682 683 if (ent->page_queue) 684 sem = &cmd->pages_sem; 685 else 686 sem = &cmd->sem; 687 688 if (dev->state != MLX5_DEVICE_STATE_UP) { 689 struct mlx5_outbox_hdr *out_hdr = 690 (struct mlx5_outbox_hdr *)ent->out; 691 struct mlx5_inbox_hdr *in_hdr = 692 (struct mlx5_inbox_hdr *)(ent->in->first.data); 693 u16 opcode = be16_to_cpu(in_hdr->opcode); 694 695 ent->ret = set_internal_err_outbox(dev, 696 opcode, 697 out_hdr); 698 } 699 700 if (ent->callback) { 701 ds = ent->ts2 - ent->ts1; 702 if (ent->op < ARRAY_SIZE(cmd->stats)) { 703 stats = &cmd->stats[ent->op]; 704 spin_lock_irqsave(&stats->lock, flags); 705 stats->sum += ds; 706 ++stats->n; 707 spin_unlock_irqrestore(&stats->lock, flags); 708 } 709 710 callback = ent->callback; 711 context = ent->context; 712 err = ent->ret; 713 if (!err) 714 err = mlx5_copy_from_msg(ent->uout, 715 ent->out, 716 ent->uout_size); 717 718 mlx5_free_cmd_msg(dev, ent->out); 719 free_msg(dev, ent->in); 720 721 free_cmd(ent); 722 callback(err, context); 723 } else { 724 complete(&ent->done); 725 } 726 up(sem); 727 } 728 729 static void cmd_work_handler(struct work_struct *work) 730 { 731 struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work); 732 struct mlx5_cmd *cmd = ent->cmd; 733 struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd); 734 struct mlx5_cmd_layout *lay; 735 struct semaphore *sem; 736 737 sem = ent->page_queue ? 
&cmd->pages_sem : &cmd->sem; 738 if (cmd->moving_to_polling) { 739 mlx5_core_warn(dev, "not expecting command execution, ignoring...\n"); 740 return; 741 } 742 743 down(sem); 744 745 if (alloc_ent(ent) < 0) { 746 complete_command(ent); 747 return; 748 } 749 750 ent->token = alloc_token(cmd); 751 lay = get_inst(cmd, ent->idx); 752 ent->lay = lay; 753 memset(lay, 0, sizeof(*lay)); 754 memcpy(lay->in, ent->in->first.data, sizeof(lay->in)); 755 ent->op = be32_to_cpu(lay->in[0]) >> 16; 756 if (ent->in->numpages != 0) 757 lay->in_ptr = cpu_to_be64(mlx5_fwp_get_dma(ent->in, 0)); 758 if (ent->out->numpages != 0) 759 lay->out_ptr = cpu_to_be64(mlx5_fwp_get_dma(ent->out, 0)); 760 lay->inlen = cpu_to_be32(ent->uin_size); 761 lay->outlen = cpu_to_be32(ent->uout_size); 762 lay->type = MLX5_PCI_CMD_XPORT; 763 lay->token = ent->token; 764 lay->status_own = CMD_OWNER_HW; 765 set_signature(ent, !cmd->checksum_disabled); 766 dump_command(dev, ent, 1); 767 ent->ts1 = ktime_get_ns(); 768 ent->busy = 0; 769 /* ring doorbell after the descriptor is valid */ 770 mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx); 771 /* make sure data is written to RAM */ 772 mlx5_fwp_flush(cmd->cmd_page); 773 iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell); 774 mmiowb(); 775 /* if not in polling don't use ent after this point */ 776 if (cmd->mode == CMD_MODE_POLLING) { 777 poll_timeout(ent); 778 /* make sure we read the descriptor after ownership is SW */ 779 mlx5_cmd_comp_handler(dev, 1U << ent->idx); 780 } 781 } 782 783 static const char *deliv_status_to_str(u8 status) 784 { 785 switch (status) { 786 case MLX5_CMD_DELIVERY_STAT_OK: 787 return "no errors"; 788 case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR: 789 return "signature error"; 790 case MLX5_CMD_DELIVERY_STAT_TOK_ERR: 791 return "token error"; 792 case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR: 793 return "bad block number"; 794 case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR: 795 return "output pointer not aligned to block size"; 796 case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR: 797 return "input pointer not aligned to block size"; 798 case MLX5_CMD_DELIVERY_STAT_FW_ERR: 799 return "firmware internal error"; 800 case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR: 801 return "command input length error"; 802 case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR: 803 return "command output length error"; 804 case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR: 805 return "reserved fields not cleared"; 806 case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR: 807 return "bad command descriptor type"; 808 default: 809 return "unknown status code"; 810 } 811 } 812 813 static u16 msg_to_opcode(struct mlx5_cmd_msg *in) 814 { 815 struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data); 816 817 return be16_to_cpu(hdr->opcode); 818 } 819 820 static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) 821 { 822 int timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC); 823 struct mlx5_cmd *cmd = &dev->cmd; 824 int err; 825 826 if (cmd->mode == CMD_MODE_POLLING) { 827 wait_for_completion(&ent->done); 828 err = ent->ret; 829 } else { 830 if (!wait_for_completion_timeout(&ent->done, timeout)) 831 err = -ETIMEDOUT; 832 else 833 err = 0; 834 } 835 836 if (err == -ETIMEDOUT) { 837 mlx5_core_warn(dev, "%s(0x%x) timeout. 
Will cause a leak of a command resource\n", 838 mlx5_command_str(msg_to_opcode(ent->in)), 839 msg_to_opcode(ent->in)); 840 } 841 mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", 842 err, deliv_status_to_str(ent->status), ent->status); 843 844 return err; 845 } 846 847 /* Notes: 848 * 1. Callback functions may not sleep 849 * 2. page queue commands do not support asynchronous completion 850 */ 851 static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, 852 int uin_size, 853 struct mlx5_cmd_msg *out, void *uout, int uout_size, 854 mlx5_cmd_cbk_t callback, 855 void *context, int page_queue, u8 *status) 856 { 857 struct mlx5_cmd *cmd = &dev->cmd; 858 struct mlx5_cmd_work_ent *ent; 859 struct mlx5_cmd_stats *stats; 860 int err = 0; 861 s64 ds; 862 u16 op; 863 864 if (callback && page_queue) 865 return -EINVAL; 866 867 ent = alloc_cmd(cmd, in, uin_size, out, uout, uout_size, callback, 868 context, page_queue); 869 if (IS_ERR(ent)) 870 return PTR_ERR(ent); 871 872 if (!callback) 873 init_completion(&ent->done); 874 875 INIT_WORK(&ent->work, cmd_work_handler); 876 if (page_queue) { 877 cmd_work_handler(&ent->work); 878 } else if (!queue_work(cmd->wq, &ent->work)) { 879 mlx5_core_warn(dev, "failed to queue work\n"); 880 err = -ENOMEM; 881 goto out_free; 882 } 883 884 if (!callback) { 885 err = wait_func(dev, ent); 886 if (err == -ETIMEDOUT) 887 goto out; 888 889 ds = ent->ts2 - ent->ts1; 890 op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode); 891 if (op < ARRAY_SIZE(cmd->stats)) { 892 stats = &cmd->stats[op]; 893 spin_lock_irq(&stats->lock); 894 stats->sum += ds; 895 ++stats->n; 896 spin_unlock_irq(&stats->lock); 897 } 898 mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME, 899 "fw exec time for %s is %lld nsec\n", 900 mlx5_command_str(op), (long long)ds); 901 *status = ent->status; 902 free_cmd(ent); 903 } 904 905 return err; 906 907 out_free: 908 free_cmd(ent); 909 out: 910 return err; 911 } 912 913 static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, size_t size) 914 { 915 size_t delta; 916 size_t i; 917 918 if (to == NULL || from == NULL) 919 return (-ENOMEM); 920 921 delta = min_t(size_t, size, sizeof(to->first.data)); 922 memcpy(to->first.data, from, delta); 923 from = (char *)from + delta; 924 size -= delta; 925 926 for (i = 0; size != 0; i++) { 927 struct mlx5_cmd_prot_block *block; 928 929 block = mlx5_fwp_get_virt(to, i * MLX5_CMD_MBOX_SIZE); 930 931 delta = min_t(size_t, size, MLX5_CMD_DATA_BLOCK_SIZE); 932 memcpy(block->data, from, delta); 933 from = (char *)from + delta; 934 size -= delta; 935 } 936 return (0); 937 } 938 939 static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size) 940 { 941 size_t delta; 942 size_t i; 943 944 if (to == NULL || from == NULL) 945 return (-ENOMEM); 946 947 delta = min_t(size_t, size, sizeof(from->first.data)); 948 memcpy(to, from->first.data, delta); 949 to = (char *)to + delta; 950 size -= delta; 951 952 for (i = 0; size != 0; i++) { 953 struct mlx5_cmd_prot_block *block; 954 955 block = mlx5_fwp_get_virt(from, i * MLX5_CMD_MBOX_SIZE); 956 957 delta = min_t(size_t, size, MLX5_CMD_DATA_BLOCK_SIZE); 958 memcpy(to, block->data, delta); 959 to = (char *)to + delta; 960 size -= delta; 961 } 962 return (0); 963 } 964 965 static struct mlx5_cmd_msg * 966 mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, gfp_t flags, size_t size) 967 { 968 struct mlx5_cmd_msg *msg; 969 size_t blen; 970 size_t n; 971 size_t i; 972 973 blen = size - min_t(size_t, sizeof(msg->first.data), size); 974 n = howmany(blen, 
MLX5_CMD_DATA_BLOCK_SIZE); 975 976 msg = mlx5_fwp_alloc(dev, flags, howmany(n, MLX5_NUM_CMDS_IN_ADAPTER_PAGE)); 977 if (msg == NULL) 978 return (ERR_PTR(-ENOMEM)); 979 980 for (i = 0; i != n; i++) { 981 struct mlx5_cmd_prot_block *block; 982 983 block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE); 984 985 memset(block, 0, MLX5_CMD_MBOX_SIZE); 986 987 if (i != (n - 1)) { 988 u64 dma = mlx5_fwp_get_dma(msg, (i + 1) * MLX5_CMD_MBOX_SIZE); 989 block->next = cpu_to_be64(dma); 990 } 991 block->block_num = cpu_to_be32(i); 992 } 993 994 /* make sure initial data is written to RAM */ 995 mlx5_fwp_flush(msg); 996 997 return (msg); 998 } 999 1000 static void 1001 mlx5_free_cmd_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) 1002 { 1003 1004 mlx5_fwp_free(msg); 1005 } 1006 1007 static void set_wqname(struct mlx5_core_dev *dev) 1008 { 1009 struct mlx5_cmd *cmd = &dev->cmd; 1010 1011 snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s", 1012 dev_name(&dev->pdev->dev)); 1013 } 1014 1015 static void clean_debug_files(struct mlx5_core_dev *dev) 1016 { 1017 } 1018 1019 1020 void mlx5_cmd_use_events(struct mlx5_core_dev *dev) 1021 { 1022 struct mlx5_cmd *cmd = &dev->cmd; 1023 int i; 1024 1025 for (i = 0; i < cmd->max_reg_cmds; i++) 1026 down(&cmd->sem); 1027 1028 down(&cmd->pages_sem); 1029 1030 flush_workqueue(cmd->wq); 1031 1032 cmd->mode = CMD_MODE_EVENTS; 1033 1034 up(&cmd->pages_sem); 1035 for (i = 0; i < cmd->max_reg_cmds; i++) 1036 up(&cmd->sem); 1037 } 1038 1039 void mlx5_cmd_use_polling(struct mlx5_core_dev *dev) 1040 { 1041 struct mlx5_cmd *cmd = &dev->cmd; 1042 1043 synchronize_irq(dev->priv.eq_table.pages_eq.irqn); 1044 flush_workqueue(dev->priv.pg_wq); 1045 cmd->moving_to_polling = 1; 1046 flush_workqueue(cmd->wq); 1047 cmd->mode = CMD_MODE_POLLING; 1048 cmd->moving_to_polling = 0; 1049 } 1050 1051 static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) 1052 { 1053 unsigned long flags; 1054 1055 if (msg->cache) { 1056 spin_lock_irqsave(&msg->cache->lock, flags); 1057 list_add_tail(&msg->list, &msg->cache->head); 1058 spin_unlock_irqrestore(&msg->cache->lock, flags); 1059 } else { 1060 mlx5_free_cmd_msg(dev, msg); 1061 } 1062 } 1063 1064 void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u32 vector) 1065 { 1066 struct mlx5_cmd *cmd = &dev->cmd; 1067 struct mlx5_cmd_work_ent *ent; 1068 int i; 1069 1070 /* make sure data gets read from RAM */ 1071 mlx5_fwp_invalidate(cmd->cmd_page); 1072 1073 while (vector != 0) { 1074 i = ffs(vector) - 1; 1075 vector &= ~(1U << i); 1076 ent = cmd->ent_arr[i]; 1077 ent->ts2 = ktime_get_ns(); 1078 memcpy(ent->out->first.data, ent->lay->out, 1079 sizeof(ent->lay->out)); 1080 /* make sure data gets read from RAM */ 1081 mlx5_fwp_invalidate(ent->out); 1082 dump_command(dev, ent, 0); 1083 if (!ent->ret) { 1084 if (!cmd->checksum_disabled) 1085 ent->ret = verify_signature(ent); 1086 else 1087 ent->ret = 0; 1088 ent->status = ent->lay->status_own >> 1; 1089 1090 mlx5_core_dbg(dev, 1091 "FW command ret 0x%x, status %s(0x%x)\n", 1092 ent->ret, 1093 deliv_status_to_str(ent->status), 1094 ent->status); 1095 } 1096 free_ent(cmd, ent->idx); 1097 complete_command(ent); 1098 } 1099 } 1100 EXPORT_SYMBOL(mlx5_cmd_comp_handler); 1101 1102 void mlx5_trigger_cmd_completions(struct mlx5_core_dev *dev) 1103 { 1104 unsigned long vector; 1105 int i = 0; 1106 unsigned long flags; 1107 synchronize_irq(dev->priv.eq_table.cmd_eq.irqn); 1108 spin_lock_irqsave(&dev->cmd.alloc_lock, flags); 1109 vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) 
- 1); 1110 spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); 1111 1112 if (!vector) 1113 return; 1114 1115 for (i = 0; i < (1 << dev->cmd.log_sz); i++) { 1116 struct mlx5_cmd_work_ent *ent = dev->cmd.ent_arr[i]; 1117 1118 if (!test_bit(i, &vector)) 1119 continue; 1120 1121 while (ent->busy) 1122 usleep_range(1000, 1100); 1123 free_ent(&dev->cmd, i); 1124 complete_command(ent); 1125 } 1126 } 1127 EXPORT_SYMBOL(mlx5_trigger_cmd_completions); 1128 1129 static int status_to_err(u8 status) 1130 { 1131 return status ? -1 : 0; /* TBD more meaningful codes */ 1132 } 1133 1134 static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size, 1135 gfp_t gfp) 1136 { 1137 struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM); 1138 struct mlx5_cmd *cmd = &dev->cmd; 1139 struct cache_ent *ent = NULL; 1140 1141 if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE) 1142 ent = &cmd->cache.large; 1143 else if (in_size > 16 && in_size <= MED_LIST_SIZE) 1144 ent = &cmd->cache.med; 1145 1146 if (ent) { 1147 spin_lock_irq(&ent->lock); 1148 if (!list_empty(&ent->head)) { 1149 msg = list_entry(ent->head.next, struct mlx5_cmd_msg, 1150 list); 1151 list_del(&msg->list); 1152 } 1153 spin_unlock_irq(&ent->lock); 1154 } 1155 1156 if (IS_ERR(msg)) 1157 msg = mlx5_alloc_cmd_msg(dev, gfp, in_size); 1158 1159 return msg; 1160 } 1161 1162 static int is_manage_pages(struct mlx5_inbox_hdr *in) 1163 { 1164 return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES; 1165 } 1166 1167 static int cmd_exec_helper(struct mlx5_core_dev *dev, 1168 void *in, int in_size, 1169 void *out, int out_size, 1170 mlx5_cmd_cbk_t callback, void *context) 1171 { 1172 struct mlx5_cmd_msg *inb; 1173 struct mlx5_cmd_msg *outb; 1174 int pages_queue; 1175 const gfp_t gfp = GFP_KERNEL; 1176 int err; 1177 u8 status = 0; 1178 1179 pages_queue = is_manage_pages(in); 1180 1181 inb = alloc_msg(dev, in_size, gfp); 1182 if (IS_ERR(inb)) { 1183 err = PTR_ERR(inb); 1184 return err; 1185 } 1186 1187 err = mlx5_copy_to_msg(inb, in, in_size); 1188 if (err) { 1189 mlx5_core_warn(dev, "err %d\n", err); 1190 goto out_in; 1191 } 1192 1193 outb = mlx5_alloc_cmd_msg(dev, gfp, out_size); 1194 if (IS_ERR(outb)) { 1195 err = PTR_ERR(outb); 1196 goto out_in; 1197 } 1198 1199 err = mlx5_cmd_invoke(dev, inb, in_size, outb, out, out_size, callback, 1200 context, pages_queue, &status); 1201 if (err) { 1202 if (err == -ETIMEDOUT) 1203 return err; 1204 goto out_out; 1205 } 1206 1207 mlx5_core_dbg(dev, "err %d, status %d\n", err, status); 1208 if (status) { 1209 err = status_to_err(status); 1210 goto out_out; 1211 } 1212 1213 if (callback) 1214 return err; 1215 1216 err = mlx5_copy_from_msg(out, outb, out_size); 1217 1218 out_out: 1219 mlx5_free_cmd_msg(dev, outb); 1220 1221 out_in: 1222 free_msg(dev, inb); 1223 return err; 1224 } 1225 1226 int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, 1227 int out_size) 1228 { 1229 return cmd_exec_helper(dev, in, in_size, out, out_size, NULL, NULL); 1230 } 1231 EXPORT_SYMBOL(mlx5_cmd_exec); 1232 1233 int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, 1234 void *out, int out_size, mlx5_cmd_cbk_t callback, 1235 void *context) 1236 { 1237 return cmd_exec_helper(dev, in, in_size, out, out_size, callback, context); 1238 } 1239 EXPORT_SYMBOL(mlx5_cmd_exec_cb); 1240 1241 static void destroy_msg_cache(struct mlx5_core_dev *dev) 1242 { 1243 struct mlx5_cmd *cmd = &dev->cmd; 1244 struct mlx5_cmd_msg *msg; 1245 struct mlx5_cmd_msg *n; 1246 1247 list_for_each_entry_safe(msg, n, 
&cmd->cache.large.head, list) { 1248 list_del(&msg->list); 1249 mlx5_free_cmd_msg(dev, msg); 1250 } 1251 1252 list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) { 1253 list_del(&msg->list); 1254 mlx5_free_cmd_msg(dev, msg); 1255 } 1256 } 1257 1258 static int create_msg_cache(struct mlx5_core_dev *dev) 1259 { 1260 struct mlx5_cmd *cmd = &dev->cmd; 1261 struct mlx5_cmd_msg *msg; 1262 int err; 1263 int i; 1264 1265 spin_lock_init(&cmd->cache.large.lock); 1266 INIT_LIST_HEAD(&cmd->cache.large.head); 1267 spin_lock_init(&cmd->cache.med.lock); 1268 INIT_LIST_HEAD(&cmd->cache.med.head); 1269 1270 for (i = 0; i < NUM_LONG_LISTS; i++) { 1271 msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE); 1272 if (IS_ERR(msg)) { 1273 err = PTR_ERR(msg); 1274 goto ex_err; 1275 } 1276 msg->cache = &cmd->cache.large; 1277 list_add_tail(&msg->list, &cmd->cache.large.head); 1278 } 1279 1280 for (i = 0; i < NUM_MED_LISTS; i++) { 1281 msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE); 1282 if (IS_ERR(msg)) { 1283 err = PTR_ERR(msg); 1284 goto ex_err; 1285 } 1286 msg->cache = &cmd->cache.med; 1287 list_add_tail(&msg->list, &cmd->cache.med.head); 1288 } 1289 1290 return 0; 1291 1292 ex_err: 1293 destroy_msg_cache(dev); 1294 return err; 1295 } 1296 1297 static int 1298 alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) 1299 { 1300 int err; 1301 1302 sx_init(&cmd->dma_sx, "MLX5-DMA-SX"); 1303 mtx_init(&cmd->dma_mtx, "MLX5-DMA-MTX", NULL, MTX_DEF); 1304 cv_init(&cmd->dma_cv, "MLX5-DMA-CV"); 1305 1306 /* 1307 * Create global DMA descriptor tag for allocating 1308 * 4K firmware pages: 1309 */ 1310 err = -bus_dma_tag_create( 1311 bus_get_dma_tag(dev->pdev->dev.bsddev), 1312 MLX5_ADAPTER_PAGE_SIZE, /* alignment */ 1313 0, /* no boundary */ 1314 BUS_SPACE_MAXADDR, /* lowaddr */ 1315 BUS_SPACE_MAXADDR, /* highaddr */ 1316 NULL, NULL, /* filter, filterarg */ 1317 MLX5_ADAPTER_PAGE_SIZE, /* maxsize */ 1318 1, /* nsegments */ 1319 MLX5_ADAPTER_PAGE_SIZE, /* maxsegsize */ 1320 0, /* flags */ 1321 NULL, NULL, /* lockfunc, lockfuncarg */ 1322 &cmd->dma_tag); 1323 if (err != 0) 1324 goto failure_destroy_sx; 1325 1326 cmd->cmd_page = mlx5_fwp_alloc(dev, GFP_KERNEL, 1); 1327 if (cmd->cmd_page == NULL) { 1328 err = -ENOMEM; 1329 goto failure_alloc_page; 1330 } 1331 cmd->dma = mlx5_fwp_get_dma(cmd->cmd_page, 0); 1332 cmd->cmd_buf = mlx5_fwp_get_virt(cmd->cmd_page, 0); 1333 return (0); 1334 1335 failure_alloc_page: 1336 bus_dma_tag_destroy(cmd->dma_tag); 1337 1338 failure_destroy_sx: 1339 cv_destroy(&cmd->dma_cv); 1340 mtx_destroy(&cmd->dma_mtx); 1341 sx_destroy(&cmd->dma_sx); 1342 return (err); 1343 } 1344 1345 static void 1346 free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) 1347 { 1348 1349 mlx5_fwp_free(cmd->cmd_page); 1350 bus_dma_tag_destroy(cmd->dma_tag); 1351 cv_destroy(&cmd->dma_cv); 1352 mtx_destroy(&cmd->dma_mtx); 1353 sx_destroy(&cmd->dma_sx); 1354 } 1355 1356 int mlx5_cmd_init(struct mlx5_core_dev *dev) 1357 { 1358 struct mlx5_cmd *cmd = &dev->cmd; 1359 u32 cmd_h, cmd_l; 1360 u16 cmd_if_rev; 1361 int err; 1362 int i; 1363 1364 cmd_if_rev = cmdif_rev_get(dev); 1365 if (cmd_if_rev != CMD_IF_REV) { 1366 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""Driver cmdif rev(%d) differs from firmware's(%d)\n", CMD_IF_REV, cmd_if_rev); 1367 return -EINVAL; 1368 } 1369 1370 err = alloc_cmd_page(dev, cmd); 1371 if (err) 1372 goto err_free_pool; 1373 1374 cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff; 1375 cmd->log_sz = cmd_l >> 4 & 0xf; 1376 cmd->log_stride = cmd_l & 0xf; 1377 if (1 
<< cmd->log_sz > MLX5_MAX_COMMANDS) { 1378 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""firmware reports too many outstanding commands %d\n", 1 << cmd->log_sz); 1379 err = -EINVAL; 1380 goto err_free_page; 1381 } 1382 1383 if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) { 1384 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""command queue size overflow\n"); 1385 err = -EINVAL; 1386 goto err_free_page; 1387 } 1388 1389 cmd->checksum_disabled = 1; 1390 cmd->max_reg_cmds = (1 << cmd->log_sz) - 1; 1391 cmd->bitmask = (1 << cmd->max_reg_cmds) - 1; 1392 1393 cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; 1394 if (cmd->cmdif_rev > CMD_IF_REV) { 1395 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""driver does not support command interface version. driver %d, firmware %d\n", CMD_IF_REV, cmd->cmdif_rev); 1396 err = -ENOTSUPP; 1397 goto err_free_page; 1398 } 1399 1400 spin_lock_init(&cmd->alloc_lock); 1401 spin_lock_init(&cmd->token_lock); 1402 for (i = 0; i < ARRAY_SIZE(cmd->stats); i++) 1403 spin_lock_init(&cmd->stats[i].lock); 1404 1405 sema_init(&cmd->sem, cmd->max_reg_cmds); 1406 sema_init(&cmd->pages_sem, 1); 1407 1408 cmd_h = (u32)((u64)(cmd->dma) >> 32); 1409 cmd_l = (u32)(cmd->dma); 1410 if (cmd_l & 0xfff) { 1411 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""invalid command queue address\n"); 1412 err = -ENOMEM; 1413 goto err_free_page; 1414 } 1415 1416 iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h); 1417 iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz); 1418 1419 /* Make sure firmware sees the complete address before we proceed */ 1420 wmb(); 1421 1422 mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma)); 1423 1424 cmd->mode = CMD_MODE_POLLING; 1425 1426 err = create_msg_cache(dev); 1427 if (err) { 1428 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""failed to create command cache\n"); 1429 goto err_free_page; 1430 } 1431 1432 set_wqname(dev); 1433 cmd->wq = create_singlethread_workqueue(cmd->wq_name); 1434 if (!cmd->wq) { 1435 device_printf((&dev->pdev->dev)->bsddev, "ERR: ""failed to create command workqueue\n"); 1436 err = -ENOMEM; 1437 goto err_cache; 1438 } 1439 1440 return 0; 1441 1442 err_cache: 1443 destroy_msg_cache(dev); 1444 1445 err_free_page: 1446 free_cmd_page(dev, cmd); 1447 1448 err_free_pool: 1449 return err; 1450 } 1451 EXPORT_SYMBOL(mlx5_cmd_init); 1452 1453 void mlx5_cmd_cleanup(struct mlx5_core_dev *dev) 1454 { 1455 struct mlx5_cmd *cmd = &dev->cmd; 1456 1457 clean_debug_files(dev); 1458 destroy_workqueue(cmd->wq); 1459 destroy_msg_cache(dev); 1460 free_cmd_page(dev, cmd); 1461 } 1462 EXPORT_SYMBOL(mlx5_cmd_cleanup); 1463 1464 static const char *cmd_status_str(u8 status) 1465 { 1466 switch (status) { 1467 case MLX5_CMD_STAT_OK: 1468 return "OK"; 1469 case MLX5_CMD_STAT_INT_ERR: 1470 return "internal error"; 1471 case MLX5_CMD_STAT_BAD_OP_ERR: 1472 return "bad operation"; 1473 case MLX5_CMD_STAT_BAD_PARAM_ERR: 1474 return "bad parameter"; 1475 case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: 1476 return "bad system state"; 1477 case MLX5_CMD_STAT_BAD_RES_ERR: 1478 return "bad resource"; 1479 case MLX5_CMD_STAT_RES_BUSY: 1480 return "resource busy"; 1481 case MLX5_CMD_STAT_LIM_ERR: 1482 return "limits exceeded"; 1483 case MLX5_CMD_STAT_BAD_RES_STATE_ERR: 1484 return "bad resource state"; 1485 case MLX5_CMD_STAT_IX_ERR: 1486 return "bad index"; 1487 case MLX5_CMD_STAT_NO_RES_ERR: 1488 return "no resources"; 1489 case MLX5_CMD_STAT_BAD_INP_LEN_ERR: 1490 return "bad input length"; 1491 case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: 
1492 return "bad output length"; 1493 case MLX5_CMD_STAT_BAD_QP_STATE_ERR: 1494 return "bad QP state"; 1495 case MLX5_CMD_STAT_BAD_PKT_ERR: 1496 return "bad packet (discarded)"; 1497 case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: 1498 return "bad size too many outstanding CQEs"; 1499 default: 1500 return "unknown status"; 1501 } 1502 } 1503 1504 static int cmd_status_to_err_helper(u8 status) 1505 { 1506 switch (status) { 1507 case MLX5_CMD_STAT_OK: return 0; 1508 case MLX5_CMD_STAT_INT_ERR: return -EIO; 1509 case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL; 1510 case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL; 1511 case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO; 1512 case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL; 1513 case MLX5_CMD_STAT_RES_BUSY: return -EBUSY; 1514 case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM; 1515 case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL; 1516 case MLX5_CMD_STAT_IX_ERR: return -EINVAL; 1517 case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN; 1518 case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO; 1519 case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO; 1520 case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL; 1521 case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL; 1522 case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL; 1523 default: return -EIO; 1524 } 1525 } 1526 1527 /* this will be available till all the commands use set/get macros */ 1528 int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr) 1529 { 1530 if (!hdr->status) 1531 return 0; 1532 1533 printf("mlx5_core: WARN: ""command failed, status %s(0x%x), syndrome 0x%x\n", cmd_status_str(hdr->status), hdr->status, be32_to_cpu(hdr->syndrome)); 1534 1535 return cmd_status_to_err_helper(hdr->status); 1536 } 1537 1538 int mlx5_cmd_status_to_err_v2(void *ptr) 1539 { 1540 u32 syndrome; 1541 u8 status; 1542 1543 status = be32_to_cpu(*(__be32 *)ptr) >> 24; 1544 if (!status) 1545 return 0; 1546 1547 syndrome = be32_to_cpu(*(__be32 *)(ptr + 4)); 1548 1549 printf("mlx5_core: WARN: ""command failed, status %s(0x%x), syndrome 0x%x\n", cmd_status_str(status), status, syndrome); 1550 1551 return cmd_status_to_err_helper(status); 1552 } 1553 1554
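/*
 * Example usage (an illustrative sketch, not a compiled part of the driver):
 * a caller builds an inbox/outbox pair, sets the opcode, and hands both to
 * mlx5_cmd_exec(); the returned status is then decoded with
 * mlx5_cmd_status_to_err().  The NOP opcode and the 8-byte reserved padding
 * below are assumptions made for the sake of the example; real callers use
 * the command-specific mailbox layouts.
 *
 *	struct {
 *		struct mlx5_inbox_hdr	hdr;
 *		u8			rsvd[8];
 *	} in;
 *	struct {
 *		struct mlx5_outbox_hdr	hdr;
 *		u8			rsvd[8];
 *	} out;
 *	int err;
 *
 *	memset(&in, 0, sizeof(in));
 *	memset(&out, 0, sizeof(out));
 *	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_NOP);
 *
 *	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
 *	if (!err)
 *		err = mlx5_cmd_status_to_err(&out.hdr);
 *
 * Asynchronous execution goes through mlx5_cmd_exec_cb() instead; as noted
 * above mlx5_cmd_invoke(), the completion callback may not sleep and
 * page-queue (MANAGE_PAGES) commands do not support asynchronous completion.
 */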