1 /* 2 * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 33 #include <linux/highmem.h> 34 #include <linux/errno.h> 35 #include <linux/pci.h> 36 #include <linux/dma-mapping.h> 37 #include <linux/slab.h> 38 #include <linux/delay.h> 39 #include <linux/random.h> 40 #include <linux/mlx5/driver.h> 41 #include <linux/mlx5/eq.h> 42 #include <linux/debugfs.h> 43 44 #include "mlx5_core.h" 45 #include "lib/eq.h" 46 #include "lib/tout.h" 47 #define CREATE_TRACE_POINTS 48 #include "diag/cmd_tracepoint.h" 49 50 struct mlx5_ifc_mbox_out_bits { 51 u8 status[0x8]; 52 u8 reserved_at_8[0x18]; 53 54 u8 syndrome[0x20]; 55 56 u8 reserved_at_40[0x40]; 57 }; 58 59 struct mlx5_ifc_mbox_in_bits { 60 u8 opcode[0x10]; 61 u8 uid[0x10]; 62 63 u8 reserved_at_20[0x10]; 64 u8 op_mod[0x10]; 65 66 u8 reserved_at_40[0x40]; 67 }; 68 69 enum { 70 CMD_IF_REV = 5, 71 }; 72 73 enum { 74 CMD_MODE_POLLING, 75 CMD_MODE_EVENTS 76 }; 77 78 enum { 79 MLX5_CMD_DELIVERY_STAT_OK = 0x0, 80 MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1, 81 MLX5_CMD_DELIVERY_STAT_TOK_ERR = 0x2, 82 MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR = 0x3, 83 MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR = 0x4, 84 MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR = 0x5, 85 MLX5_CMD_DELIVERY_STAT_FW_ERR = 0x6, 86 MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR = 0x7, 87 MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR = 0x8, 88 MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR = 0x9, 89 MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10, 90 }; 91 92 static u16 in_to_opcode(void *in) 93 { 94 return MLX5_GET(mbox_in, in, opcode); 95 } 96 97 /* Returns true for opcodes that might be triggered very frequently and throttle 98 * the command interface. Limit their command slots usage. 99 */ 100 static bool mlx5_cmd_is_throttle_opcode(u16 op) 101 { 102 switch (op) { 103 case MLX5_CMD_OP_CREATE_GENERAL_OBJECT: 104 case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT: 105 case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT: 106 case MLX5_CMD_OP_QUERY_GENERAL_OBJECT: 107 case MLX5_CMD_OP_SYNC_CRYPTO: 108 return true; 109 } 110 return false; 111 } 112 113 static struct mlx5_cmd_work_ent * 114 cmd_alloc_ent(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in, 115 struct mlx5_cmd_msg *out, void *uout, int uout_size, 116 mlx5_cmd_cbk_t cbk, void *context, int page_queue) 117 { 118 gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL; 119 struct mlx5_cmd_work_ent *ent; 120 121 ent = kzalloc(sizeof(*ent), alloc_flags); 122 if (!ent) 123 return ERR_PTR(-ENOMEM); 124 125 ent->idx = -EINVAL; 126 ent->in = in; 127 ent->out = out; 128 ent->uout = uout; 129 ent->uout_size = uout_size; 130 ent->callback = cbk; 131 ent->context = context; 132 ent->cmd = cmd; 133 ent->page_queue = page_queue; 134 ent->op = in_to_opcode(in->first.data); 135 refcount_set(&ent->refcnt, 1); 136 137 return ent; 138 } 139 140 static void cmd_free_ent(struct mlx5_cmd_work_ent *ent) 141 { 142 kfree(ent); 143 } 144 145 static u8 alloc_token(struct mlx5_cmd *cmd) 146 { 147 u8 token; 148 149 spin_lock(&cmd->token_lock); 150 cmd->token++; 151 if (cmd->token == 0) 152 cmd->token++; 153 token = cmd->token; 154 spin_unlock(&cmd->token_lock); 155 156 return token; 157 } 158 159 static int cmd_alloc_index(struct mlx5_cmd *cmd) 160 { 161 unsigned long flags; 162 int ret; 163 164 spin_lock_irqsave(&cmd->alloc_lock, flags); 165 ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds); 166 if (ret < cmd->max_reg_cmds) 167 clear_bit(ret, &cmd->bitmask); 168 spin_unlock_irqrestore(&cmd->alloc_lock, flags); 169 170 return ret < cmd->max_reg_cmds ? ret : -ENOMEM; 171 } 172 173 static void cmd_free_index(struct mlx5_cmd *cmd, int idx) 174 { 175 lockdep_assert_held(&cmd->alloc_lock); 176 set_bit(idx, &cmd->bitmask); 177 } 178 179 static void cmd_ent_get(struct mlx5_cmd_work_ent *ent) 180 { 181 refcount_inc(&ent->refcnt); 182 } 183 184 static void cmd_ent_put(struct mlx5_cmd_work_ent *ent) 185 { 186 struct mlx5_cmd *cmd = ent->cmd; 187 unsigned long flags; 188 189 spin_lock_irqsave(&cmd->alloc_lock, flags); 190 if (!refcount_dec_and_test(&ent->refcnt)) 191 goto out; 192 193 if (ent->idx >= 0) { 194 cmd_free_index(cmd, ent->idx); 195 up(ent->page_queue ? &cmd->pages_sem : &cmd->sem); 196 } 197 198 cmd_free_ent(ent); 199 out: 200 spin_unlock_irqrestore(&cmd->alloc_lock, flags); 201 } 202 203 static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx) 204 { 205 return cmd->cmd_buf + (idx << cmd->log_stride); 206 } 207 208 static int mlx5_calc_cmd_blocks(struct mlx5_cmd_msg *msg) 209 { 210 int size = msg->len; 211 int blen = size - min_t(int, sizeof(msg->first.data), size); 212 213 return DIV_ROUND_UP(blen, MLX5_CMD_DATA_BLOCK_SIZE); 214 } 215 216 static u8 xor8_buf(void *buf, size_t offset, int len) 217 { 218 u8 *ptr = buf; 219 u8 sum = 0; 220 int i; 221 int end = len + offset; 222 223 for (i = offset; i < end; i++) 224 sum ^= ptr[i]; 225 226 return sum; 227 } 228 229 static int verify_block_sig(struct mlx5_cmd_prot_block *block) 230 { 231 size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0); 232 int xor_len = sizeof(*block) - sizeof(block->data) - 1; 233 234 if (xor8_buf(block, rsvd0_off, xor_len) != 0xff) 235 return -EHWPOISON; 236 237 if (xor8_buf(block, 0, sizeof(*block)) != 0xff) 238 return -EHWPOISON; 239 240 return 0; 241 } 242 243 static void calc_block_sig(struct mlx5_cmd_prot_block *block) 244 { 245 int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2; 246 size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0); 247 248 block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len); 249 block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1); 250 } 251 252 static void calc_chain_sig(struct mlx5_cmd_msg *msg) 253 { 254 struct mlx5_cmd_mailbox *next = msg->next; 255 int n = mlx5_calc_cmd_blocks(msg); 256 int i = 0; 257 258 for (i = 0; i < n && next; i++) { 259 calc_block_sig(next->buf); 260 next = next->next; 261 } 262 } 263 264 static void set_signature(struct mlx5_cmd_work_ent *ent, int csum) 265 { 266 ent->lay->sig = ~xor8_buf(ent->lay, 0, sizeof(*ent->lay)); 267 if (csum) { 268 calc_chain_sig(ent->in); 269 calc_chain_sig(ent->out); 270 } 271 } 272 273 static void poll_timeout(struct mlx5_cmd_work_ent *ent) 274 { 275 struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev, cmd); 276 u64 cmd_to_ms = mlx5_tout_ms(dev, CMD); 277 unsigned long poll_end; 278 u8 own; 279 280 poll_end = jiffies + msecs_to_jiffies(cmd_to_ms + 1000); 281 282 do { 283 own = READ_ONCE(ent->lay->status_own); 284 if (!(own & CMD_OWNER_HW)) { 285 ent->ret = 0; 286 return; 287 } 288 cond_resched(); 289 } while (time_before(jiffies, poll_end)); 290 291 ent->ret = -ETIMEDOUT; 292 } 293 294 static int verify_signature(struct mlx5_cmd_work_ent *ent) 295 { 296 struct mlx5_cmd_mailbox *next = ent->out->next; 297 int n = mlx5_calc_cmd_blocks(ent->out); 298 int err; 299 u8 sig; 300 int i = 0; 301 302 sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay)); 303 if (sig != 0xff) 304 return -EHWPOISON; 305 306 for (i = 0; i < n && next; i++) { 307 err = verify_block_sig(next->buf); 308 if (err) 309 return -EHWPOISON; 310 311 next = next->next; 312 } 313 314 return 0; 315 } 316 317 static void dump_buf(void *buf, int size, int data_only, int offset, int idx) 318 { 319 __be32 *p = buf; 320 int i; 321 322 for (i = 0; i < size; i += 16) { 323 pr_debug("cmd[%d]: %03x: %08x %08x %08x %08x\n", idx, offset, 324 be32_to_cpu(p[0]), be32_to_cpu(p[1]), 325 be32_to_cpu(p[2]), be32_to_cpu(p[3])); 326 p += 4; 327 offset += 16; 328 } 329 if (!data_only) 330 pr_debug("\n"); 331 } 332 333 static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, 334 u32 *synd, u8 *status) 335 { 336 *synd = 0; 337 *status = 0; 338 339 switch (op) { 340 case MLX5_CMD_OP_TEARDOWN_HCA: 341 case MLX5_CMD_OP_DISABLE_HCA: 342 case MLX5_CMD_OP_MANAGE_PAGES: 343 case MLX5_CMD_OP_DESTROY_MKEY: 344 case MLX5_CMD_OP_DESTROY_EQ: 345 case MLX5_CMD_OP_DESTROY_CQ: 346 case MLX5_CMD_OP_DESTROY_QP: 347 case MLX5_CMD_OP_DESTROY_PSV: 348 case MLX5_CMD_OP_DESTROY_SRQ: 349 case MLX5_CMD_OP_DESTROY_XRC_SRQ: 350 case MLX5_CMD_OP_DESTROY_XRQ: 351 case MLX5_CMD_OP_DESTROY_DCT: 352 case MLX5_CMD_OP_DEALLOC_Q_COUNTER: 353 case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT: 354 case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT: 355 case MLX5_CMD_OP_DEALLOC_PD: 356 case MLX5_CMD_OP_DEALLOC_UAR: 357 case MLX5_CMD_OP_DETACH_FROM_MCG: 358 case MLX5_CMD_OP_DEALLOC_XRCD: 359 case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN: 360 case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT: 361 case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY: 362 case MLX5_CMD_OP_DESTROY_LAG: 363 case MLX5_CMD_OP_DESTROY_VPORT_LAG: 364 case MLX5_CMD_OP_DESTROY_TIR: 365 case MLX5_CMD_OP_DESTROY_SQ: 366 case MLX5_CMD_OP_DESTROY_RQ: 367 case MLX5_CMD_OP_DESTROY_RMP: 368 case MLX5_CMD_OP_DESTROY_TIS: 369 case MLX5_CMD_OP_DESTROY_RQT: 370 case MLX5_CMD_OP_DESTROY_FLOW_TABLE: 371 case MLX5_CMD_OP_DESTROY_FLOW_GROUP: 372 case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY: 373 case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER: 374 case MLX5_CMD_OP_2ERR_QP: 375 case MLX5_CMD_OP_2RST_QP: 376 case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT: 377 case MLX5_CMD_OP_MODIFY_FLOW_TABLE: 378 case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY: 379 case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT: 380 case MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT: 381 case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT: 382 case MLX5_CMD_OP_FPGA_DESTROY_QP: 383 case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT: 384 case MLX5_CMD_OP_DEALLOC_MEMIC: 385 case MLX5_CMD_OP_PAGE_FAULT_RESUME: 386 case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS: 387 case MLX5_CMD_OP_DEALLOC_SF: 388 case MLX5_CMD_OP_DESTROY_UCTX: 389 case MLX5_CMD_OP_DESTROY_UMEM: 390 case MLX5_CMD_OP_MODIFY_RQT: 391 return MLX5_CMD_STAT_OK; 392 393 case MLX5_CMD_OP_QUERY_HCA_CAP: 394 case MLX5_CMD_OP_QUERY_ADAPTER: 395 case MLX5_CMD_OP_INIT_HCA: 396 case MLX5_CMD_OP_ENABLE_HCA: 397 case MLX5_CMD_OP_QUERY_PAGES: 398 case MLX5_CMD_OP_SET_HCA_CAP: 399 case MLX5_CMD_OP_QUERY_ISSI: 400 case MLX5_CMD_OP_SET_ISSI: 401 case MLX5_CMD_OP_CREATE_MKEY: 402 case MLX5_CMD_OP_QUERY_MKEY: 403 case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS: 404 case MLX5_CMD_OP_CREATE_EQ: 405 case MLX5_CMD_OP_QUERY_EQ: 406 case MLX5_CMD_OP_GEN_EQE: 407 case MLX5_CMD_OP_CREATE_CQ: 408 case MLX5_CMD_OP_QUERY_CQ: 409 case MLX5_CMD_OP_MODIFY_CQ: 410 case MLX5_CMD_OP_CREATE_QP: 411 case MLX5_CMD_OP_RST2INIT_QP: 412 case MLX5_CMD_OP_INIT2RTR_QP: 413 case MLX5_CMD_OP_RTR2RTS_QP: 414 case MLX5_CMD_OP_RTS2RTS_QP: 415 case MLX5_CMD_OP_SQERR2RTS_QP: 416 case MLX5_CMD_OP_QUERY_QP: 417 case MLX5_CMD_OP_SQD_RTS_QP: 418 case MLX5_CMD_OP_INIT2INIT_QP: 419 case MLX5_CMD_OP_CREATE_PSV: 420 case MLX5_CMD_OP_CREATE_SRQ: 421 case MLX5_CMD_OP_QUERY_SRQ: 422 case MLX5_CMD_OP_ARM_RQ: 423 case MLX5_CMD_OP_CREATE_XRC_SRQ: 424 case MLX5_CMD_OP_QUERY_XRC_SRQ: 425 case MLX5_CMD_OP_ARM_XRC_SRQ: 426 case MLX5_CMD_OP_CREATE_XRQ: 427 case MLX5_CMD_OP_QUERY_XRQ: 428 case MLX5_CMD_OP_ARM_XRQ: 429 case MLX5_CMD_OP_CREATE_DCT: 430 case MLX5_CMD_OP_DRAIN_DCT: 431 case MLX5_CMD_OP_QUERY_DCT: 432 case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION: 433 case MLX5_CMD_OP_QUERY_VPORT_STATE: 434 case MLX5_CMD_OP_MODIFY_VPORT_STATE: 435 case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT: 436 case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT: 437 case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT: 438 case MLX5_CMD_OP_QUERY_ROCE_ADDRESS: 439 case MLX5_CMD_OP_SET_ROCE_ADDRESS: 440 case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT: 441 case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT: 442 case MLX5_CMD_OP_QUERY_HCA_VPORT_GID: 443 case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY: 444 case MLX5_CMD_OP_QUERY_VNIC_ENV: 445 case MLX5_CMD_OP_QUERY_VPORT_COUNTER: 446 case MLX5_CMD_OP_ALLOC_Q_COUNTER: 447 case MLX5_CMD_OP_QUERY_Q_COUNTER: 448 case MLX5_CMD_OP_SET_MONITOR_COUNTER: 449 case MLX5_CMD_OP_ARM_MONITOR_COUNTER: 450 case MLX5_CMD_OP_SET_PP_RATE_LIMIT: 451 case MLX5_CMD_OP_QUERY_RATE_LIMIT: 452 case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT: 453 case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT: 454 case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT: 455 case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT: 456 case MLX5_CMD_OP_ALLOC_PD: 457 case MLX5_CMD_OP_ALLOC_UAR: 458 case MLX5_CMD_OP_CONFIG_INT_MODERATION: 459 case MLX5_CMD_OP_ACCESS_REG: 460 case MLX5_CMD_OP_ATTACH_TO_MCG: 461 case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG: 462 case MLX5_CMD_OP_MAD_IFC: 463 case MLX5_CMD_OP_QUERY_MAD_DEMUX: 464 case MLX5_CMD_OP_SET_MAD_DEMUX: 465 case MLX5_CMD_OP_NOP: 466 case MLX5_CMD_OP_ALLOC_XRCD: 467 case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN: 468 case MLX5_CMD_OP_QUERY_CONG_STATUS: 469 case MLX5_CMD_OP_MODIFY_CONG_STATUS: 470 case MLX5_CMD_OP_QUERY_CONG_PARAMS: 471 case MLX5_CMD_OP_MODIFY_CONG_PARAMS: 472 case MLX5_CMD_OP_QUERY_CONG_STATISTICS: 473 case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT: 474 case MLX5_CMD_OP_SET_L2_TABLE_ENTRY: 475 case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY: 476 case MLX5_CMD_OP_CREATE_LAG: 477 case MLX5_CMD_OP_MODIFY_LAG: 478 case MLX5_CMD_OP_QUERY_LAG: 479 case MLX5_CMD_OP_CREATE_VPORT_LAG: 480 case MLX5_CMD_OP_CREATE_TIR: 481 case MLX5_CMD_OP_MODIFY_TIR: 482 case MLX5_CMD_OP_QUERY_TIR: 483 case MLX5_CMD_OP_CREATE_SQ: 484 case MLX5_CMD_OP_MODIFY_SQ: 485 case MLX5_CMD_OP_QUERY_SQ: 486 case MLX5_CMD_OP_CREATE_RQ: 487 case MLX5_CMD_OP_MODIFY_RQ: 488 case MLX5_CMD_OP_QUERY_RQ: 489 case MLX5_CMD_OP_CREATE_RMP: 490 case MLX5_CMD_OP_MODIFY_RMP: 491 case MLX5_CMD_OP_QUERY_RMP: 492 case MLX5_CMD_OP_CREATE_TIS: 493 case MLX5_CMD_OP_MODIFY_TIS: 494 case MLX5_CMD_OP_QUERY_TIS: 495 case MLX5_CMD_OP_CREATE_RQT: 496 case MLX5_CMD_OP_QUERY_RQT: 497 498 case MLX5_CMD_OP_CREATE_FLOW_TABLE: 499 case MLX5_CMD_OP_QUERY_FLOW_TABLE: 500 case MLX5_CMD_OP_CREATE_FLOW_GROUP: 501 case MLX5_CMD_OP_QUERY_FLOW_GROUP: 502 case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY: 503 case MLX5_CMD_OP_ALLOC_FLOW_COUNTER: 504 case MLX5_CMD_OP_QUERY_FLOW_COUNTER: 505 case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT: 506 case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT: 507 case MLX5_CMD_OP_FPGA_CREATE_QP: 508 case MLX5_CMD_OP_FPGA_MODIFY_QP: 509 case MLX5_CMD_OP_FPGA_QUERY_QP: 510 case MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS: 511 case MLX5_CMD_OP_CREATE_GENERAL_OBJECT: 512 case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT: 513 case MLX5_CMD_OP_QUERY_GENERAL_OBJECT: 514 case MLX5_CMD_OP_CREATE_UCTX: 515 case MLX5_CMD_OP_CREATE_UMEM: 516 case MLX5_CMD_OP_ALLOC_MEMIC: 517 case MLX5_CMD_OP_MODIFY_XRQ: 518 case MLX5_CMD_OP_RELEASE_XRQ_ERROR: 519 case MLX5_CMD_OP_QUERY_VHCA_STATE: 520 case MLX5_CMD_OP_MODIFY_VHCA_STATE: 521 case MLX5_CMD_OP_ALLOC_SF: 522 case MLX5_CMD_OP_SUSPEND_VHCA: 523 case MLX5_CMD_OP_RESUME_VHCA: 524 case MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE: 525 case MLX5_CMD_OP_SAVE_VHCA_STATE: 526 case MLX5_CMD_OP_LOAD_VHCA_STATE: 527 case MLX5_CMD_OP_SYNC_CRYPTO: 528 *status = MLX5_DRIVER_STATUS_ABORTED; 529 *synd = MLX5_DRIVER_SYND; 530 return -ENOLINK; 531 default: 532 mlx5_core_err(dev, "Unknown FW command (%d)\n", op); 533 return -EINVAL; 534 } 535 } 536 537 const char *mlx5_command_str(int command) 538 { 539 #define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd 540 541 switch (command) { 542 MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP); 543 MLX5_COMMAND_STR_CASE(QUERY_ADAPTER); 544 MLX5_COMMAND_STR_CASE(INIT_HCA); 545 MLX5_COMMAND_STR_CASE(TEARDOWN_HCA); 546 MLX5_COMMAND_STR_CASE(ENABLE_HCA); 547 MLX5_COMMAND_STR_CASE(DISABLE_HCA); 548 MLX5_COMMAND_STR_CASE(QUERY_PAGES); 549 MLX5_COMMAND_STR_CASE(MANAGE_PAGES); 550 MLX5_COMMAND_STR_CASE(SET_HCA_CAP); 551 MLX5_COMMAND_STR_CASE(QUERY_ISSI); 552 MLX5_COMMAND_STR_CASE(SET_ISSI); 553 MLX5_COMMAND_STR_CASE(SET_DRIVER_VERSION); 554 MLX5_COMMAND_STR_CASE(CREATE_MKEY); 555 MLX5_COMMAND_STR_CASE(QUERY_MKEY); 556 MLX5_COMMAND_STR_CASE(DESTROY_MKEY); 557 MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS); 558 MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME); 559 MLX5_COMMAND_STR_CASE(CREATE_EQ); 560 MLX5_COMMAND_STR_CASE(DESTROY_EQ); 561 MLX5_COMMAND_STR_CASE(QUERY_EQ); 562 MLX5_COMMAND_STR_CASE(GEN_EQE); 563 MLX5_COMMAND_STR_CASE(CREATE_CQ); 564 MLX5_COMMAND_STR_CASE(DESTROY_CQ); 565 MLX5_COMMAND_STR_CASE(QUERY_CQ); 566 MLX5_COMMAND_STR_CASE(MODIFY_CQ); 567 MLX5_COMMAND_STR_CASE(CREATE_QP); 568 MLX5_COMMAND_STR_CASE(DESTROY_QP); 569 MLX5_COMMAND_STR_CASE(RST2INIT_QP); 570 MLX5_COMMAND_STR_CASE(INIT2RTR_QP); 571 MLX5_COMMAND_STR_CASE(RTR2RTS_QP); 572 MLX5_COMMAND_STR_CASE(RTS2RTS_QP); 573 MLX5_COMMAND_STR_CASE(SQERR2RTS_QP); 574 MLX5_COMMAND_STR_CASE(2ERR_QP); 575 MLX5_COMMAND_STR_CASE(2RST_QP); 576 MLX5_COMMAND_STR_CASE(QUERY_QP); 577 MLX5_COMMAND_STR_CASE(SQD_RTS_QP); 578 MLX5_COMMAND_STR_CASE(INIT2INIT_QP); 579 MLX5_COMMAND_STR_CASE(CREATE_PSV); 580 MLX5_COMMAND_STR_CASE(DESTROY_PSV); 581 MLX5_COMMAND_STR_CASE(CREATE_SRQ); 582 MLX5_COMMAND_STR_CASE(DESTROY_SRQ); 583 MLX5_COMMAND_STR_CASE(QUERY_SRQ); 584 MLX5_COMMAND_STR_CASE(ARM_RQ); 585 MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ); 586 MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ); 587 MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ); 588 MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ); 589 MLX5_COMMAND_STR_CASE(CREATE_DCT); 590 MLX5_COMMAND_STR_CASE(DESTROY_DCT); 591 MLX5_COMMAND_STR_CASE(DRAIN_DCT); 592 MLX5_COMMAND_STR_CASE(QUERY_DCT); 593 MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION); 594 MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE); 595 MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE); 596 MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT); 597 MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT); 598 MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT); 599 MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT); 600 MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS); 601 MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS); 602 MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT); 603 MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT); 604 MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID); 605 MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY); 606 MLX5_COMMAND_STR_CASE(QUERY_VNIC_ENV); 607 MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER); 608 MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER); 609 MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER); 610 MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER); 611 MLX5_COMMAND_STR_CASE(SET_MONITOR_COUNTER); 612 MLX5_COMMAND_STR_CASE(ARM_MONITOR_COUNTER); 613 MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT); 614 MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT); 615 MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT); 616 MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT); 617 MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT); 618 MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT); 619 MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT); 620 MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT); 621 MLX5_COMMAND_STR_CASE(ALLOC_PD); 622 MLX5_COMMAND_STR_CASE(DEALLOC_PD); 623 MLX5_COMMAND_STR_CASE(ALLOC_UAR); 624 MLX5_COMMAND_STR_CASE(DEALLOC_UAR); 625 MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION); 626 MLX5_COMMAND_STR_CASE(ACCESS_REG); 627 MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG); 628 MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG); 629 MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG); 630 MLX5_COMMAND_STR_CASE(MAD_IFC); 631 MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX); 632 MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX); 633 MLX5_COMMAND_STR_CASE(NOP); 634 MLX5_COMMAND_STR_CASE(ALLOC_XRCD); 635 MLX5_COMMAND_STR_CASE(DEALLOC_XRCD); 636 MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN); 637 MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN); 638 MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS); 639 MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS); 640 MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS); 641 MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS); 642 MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS); 643 MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT); 644 MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT); 645 MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY); 646 MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY); 647 MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY); 648 MLX5_COMMAND_STR_CASE(SET_WOL_ROL); 649 MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL); 650 MLX5_COMMAND_STR_CASE(CREATE_LAG); 651 MLX5_COMMAND_STR_CASE(MODIFY_LAG); 652 MLX5_COMMAND_STR_CASE(QUERY_LAG); 653 MLX5_COMMAND_STR_CASE(DESTROY_LAG); 654 MLX5_COMMAND_STR_CASE(CREATE_VPORT_LAG); 655 MLX5_COMMAND_STR_CASE(DESTROY_VPORT_LAG); 656 MLX5_COMMAND_STR_CASE(CREATE_TIR); 657 MLX5_COMMAND_STR_CASE(MODIFY_TIR); 658 MLX5_COMMAND_STR_CASE(DESTROY_TIR); 659 MLX5_COMMAND_STR_CASE(QUERY_TIR); 660 MLX5_COMMAND_STR_CASE(CREATE_SQ); 661 MLX5_COMMAND_STR_CASE(MODIFY_SQ); 662 MLX5_COMMAND_STR_CASE(DESTROY_SQ); 663 MLX5_COMMAND_STR_CASE(QUERY_SQ); 664 MLX5_COMMAND_STR_CASE(CREATE_RQ); 665 MLX5_COMMAND_STR_CASE(MODIFY_RQ); 666 MLX5_COMMAND_STR_CASE(DESTROY_RQ); 667 MLX5_COMMAND_STR_CASE(QUERY_RQ); 668 MLX5_COMMAND_STR_CASE(CREATE_RMP); 669 MLX5_COMMAND_STR_CASE(MODIFY_RMP); 670 MLX5_COMMAND_STR_CASE(DESTROY_RMP); 671 MLX5_COMMAND_STR_CASE(QUERY_RMP); 672 MLX5_COMMAND_STR_CASE(CREATE_TIS); 673 MLX5_COMMAND_STR_CASE(MODIFY_TIS); 674 MLX5_COMMAND_STR_CASE(DESTROY_TIS); 675 MLX5_COMMAND_STR_CASE(QUERY_TIS); 676 MLX5_COMMAND_STR_CASE(CREATE_RQT); 677 MLX5_COMMAND_STR_CASE(MODIFY_RQT); 678 MLX5_COMMAND_STR_CASE(DESTROY_RQT); 679 MLX5_COMMAND_STR_CASE(QUERY_RQT); 680 MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ROOT); 681 MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE); 682 MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE); 683 MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE); 684 MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP); 685 MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP); 686 MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP); 687 MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY); 688 MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY); 689 MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY); 690 MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER); 691 MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER); 692 MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER); 693 MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE); 694 MLX5_COMMAND_STR_CASE(ALLOC_PACKET_REFORMAT_CONTEXT); 695 MLX5_COMMAND_STR_CASE(DEALLOC_PACKET_REFORMAT_CONTEXT); 696 MLX5_COMMAND_STR_CASE(ALLOC_MODIFY_HEADER_CONTEXT); 697 MLX5_COMMAND_STR_CASE(DEALLOC_MODIFY_HEADER_CONTEXT); 698 MLX5_COMMAND_STR_CASE(FPGA_CREATE_QP); 699 MLX5_COMMAND_STR_CASE(FPGA_MODIFY_QP); 700 MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP); 701 MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP_COUNTERS); 702 MLX5_COMMAND_STR_CASE(FPGA_DESTROY_QP); 703 MLX5_COMMAND_STR_CASE(CREATE_XRQ); 704 MLX5_COMMAND_STR_CASE(DESTROY_XRQ); 705 MLX5_COMMAND_STR_CASE(QUERY_XRQ); 706 MLX5_COMMAND_STR_CASE(ARM_XRQ); 707 MLX5_COMMAND_STR_CASE(CREATE_GENERAL_OBJECT); 708 MLX5_COMMAND_STR_CASE(DESTROY_GENERAL_OBJECT); 709 MLX5_COMMAND_STR_CASE(MODIFY_GENERAL_OBJECT); 710 MLX5_COMMAND_STR_CASE(QUERY_GENERAL_OBJECT); 711 MLX5_COMMAND_STR_CASE(QUERY_MODIFY_HEADER_CONTEXT); 712 MLX5_COMMAND_STR_CASE(ALLOC_MEMIC); 713 MLX5_COMMAND_STR_CASE(DEALLOC_MEMIC); 714 MLX5_COMMAND_STR_CASE(QUERY_ESW_FUNCTIONS); 715 MLX5_COMMAND_STR_CASE(CREATE_UCTX); 716 MLX5_COMMAND_STR_CASE(DESTROY_UCTX); 717 MLX5_COMMAND_STR_CASE(CREATE_UMEM); 718 MLX5_COMMAND_STR_CASE(DESTROY_UMEM); 719 MLX5_COMMAND_STR_CASE(RELEASE_XRQ_ERROR); 720 MLX5_COMMAND_STR_CASE(MODIFY_XRQ); 721 MLX5_COMMAND_STR_CASE(QUERY_VHCA_STATE); 722 MLX5_COMMAND_STR_CASE(MODIFY_VHCA_STATE); 723 MLX5_COMMAND_STR_CASE(ALLOC_SF); 724 MLX5_COMMAND_STR_CASE(DEALLOC_SF); 725 MLX5_COMMAND_STR_CASE(SUSPEND_VHCA); 726 MLX5_COMMAND_STR_CASE(RESUME_VHCA); 727 MLX5_COMMAND_STR_CASE(QUERY_VHCA_MIGRATION_STATE); 728 MLX5_COMMAND_STR_CASE(SAVE_VHCA_STATE); 729 MLX5_COMMAND_STR_CASE(LOAD_VHCA_STATE); 730 MLX5_COMMAND_STR_CASE(SYNC_CRYPTO); 731 default: return "unknown command opcode"; 732 } 733 } 734 735 static const char *cmd_status_str(u8 status) 736 { 737 switch (status) { 738 case MLX5_CMD_STAT_OK: 739 return "OK"; 740 case MLX5_CMD_STAT_INT_ERR: 741 return "internal error"; 742 case MLX5_CMD_STAT_BAD_OP_ERR: 743 return "bad operation"; 744 case MLX5_CMD_STAT_BAD_PARAM_ERR: 745 return "bad parameter"; 746 case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: 747 return "bad system state"; 748 case MLX5_CMD_STAT_BAD_RES_ERR: 749 return "bad resource"; 750 case MLX5_CMD_STAT_RES_BUSY: 751 return "resource busy"; 752 case MLX5_CMD_STAT_LIM_ERR: 753 return "limits exceeded"; 754 case MLX5_CMD_STAT_BAD_RES_STATE_ERR: 755 return "bad resource state"; 756 case MLX5_CMD_STAT_IX_ERR: 757 return "bad index"; 758 case MLX5_CMD_STAT_NO_RES_ERR: 759 return "no resources"; 760 case MLX5_CMD_STAT_BAD_INP_LEN_ERR: 761 return "bad input length"; 762 case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: 763 return "bad output length"; 764 case MLX5_CMD_STAT_BAD_QP_STATE_ERR: 765 return "bad QP state"; 766 case MLX5_CMD_STAT_BAD_PKT_ERR: 767 return "bad packet (discarded)"; 768 case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: 769 return "bad size too many outstanding CQEs"; 770 default: 771 return "unknown status"; 772 } 773 } 774 775 static int cmd_status_to_err(u8 status) 776 { 777 switch (status) { 778 case MLX5_CMD_STAT_OK: return 0; 779 case MLX5_CMD_STAT_INT_ERR: return -EIO; 780 case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL; 781 case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL; 782 case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO; 783 case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL; 784 case MLX5_CMD_STAT_RES_BUSY: return -EBUSY; 785 case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM; 786 case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL; 787 case MLX5_CMD_STAT_IX_ERR: return -EINVAL; 788 case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN; 789 case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO; 790 case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO; 791 case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL; 792 case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL; 793 case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL; 794 default: return -EIO; 795 } 796 } 797 798 void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out) 799 { 800 u32 syndrome = MLX5_GET(mbox_out, out, syndrome); 801 u8 status = MLX5_GET(mbox_out, out, status); 802 803 mlx5_core_err_rl(dev, 804 "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x), err(%d)\n", 805 mlx5_command_str(opcode), opcode, op_mod, 806 cmd_status_str(status), status, syndrome, cmd_status_to_err(status)); 807 } 808 EXPORT_SYMBOL(mlx5_cmd_out_err); 809 810 static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out) 811 { 812 u16 opcode, op_mod; 813 u16 uid; 814 815 opcode = in_to_opcode(in); 816 op_mod = MLX5_GET(mbox_in, in, op_mod); 817 uid = MLX5_GET(mbox_in, in, uid); 818 819 if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY && 820 opcode != MLX5_CMD_OP_CREATE_UCTX) 821 mlx5_cmd_out_err(dev, opcode, op_mod, out); 822 } 823 824 int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out) 825 { 826 /* aborted due to PCI error or via reset flow mlx5_cmd_trigger_completions() */ 827 if (err == -ENXIO) { 828 u16 opcode = in_to_opcode(in); 829 u32 syndrome; 830 u8 status; 831 832 /* PCI Error, emulate command return status, for smooth reset */ 833 err = mlx5_internal_err_ret_value(dev, opcode, &syndrome, &status); 834 MLX5_SET(mbox_out, out, status, status); 835 MLX5_SET(mbox_out, out, syndrome, syndrome); 836 if (!err) 837 return 0; 838 } 839 840 /* driver or FW delivery error */ 841 if (err != -EREMOTEIO && err) 842 return err; 843 844 /* check outbox status */ 845 err = cmd_status_to_err(MLX5_GET(mbox_out, out, status)); 846 if (err) 847 cmd_status_print(dev, in, out); 848 849 return err; 850 } 851 EXPORT_SYMBOL(mlx5_cmd_check); 852 853 static void dump_command(struct mlx5_core_dev *dev, 854 struct mlx5_cmd_work_ent *ent, int input) 855 { 856 struct mlx5_cmd_msg *msg = input ? ent->in : ent->out; 857 struct mlx5_cmd_mailbox *next = msg->next; 858 int n = mlx5_calc_cmd_blocks(msg); 859 u16 op = ent->op; 860 int data_only; 861 u32 offset = 0; 862 int dump_len; 863 int i; 864 865 mlx5_core_dbg(dev, "cmd[%d]: start dump\n", ent->idx); 866 data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA)); 867 868 if (data_only) 869 mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA, 870 "cmd[%d]: dump command data %s(0x%x) %s\n", 871 ent->idx, mlx5_command_str(op), op, 872 input ? "INPUT" : "OUTPUT"); 873 else 874 mlx5_core_dbg(dev, "cmd[%d]: dump command %s(0x%x) %s\n", 875 ent->idx, mlx5_command_str(op), op, 876 input ? "INPUT" : "OUTPUT"); 877 878 if (data_only) { 879 if (input) { 880 dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset, ent->idx); 881 offset += sizeof(ent->lay->in); 882 } else { 883 dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset, ent->idx); 884 offset += sizeof(ent->lay->out); 885 } 886 } else { 887 dump_buf(ent->lay, sizeof(*ent->lay), 0, offset, ent->idx); 888 offset += sizeof(*ent->lay); 889 } 890 891 for (i = 0; i < n && next; i++) { 892 if (data_only) { 893 dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset); 894 dump_buf(next->buf, dump_len, 1, offset, ent->idx); 895 offset += MLX5_CMD_DATA_BLOCK_SIZE; 896 } else { 897 mlx5_core_dbg(dev, "cmd[%d]: command block:\n", ent->idx); 898 dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset, 899 ent->idx); 900 offset += sizeof(struct mlx5_cmd_prot_block); 901 } 902 next = next->next; 903 } 904 905 if (data_only) 906 pr_debug("\n"); 907 908 mlx5_core_dbg(dev, "cmd[%d]: end dump\n", ent->idx); 909 } 910 911 static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced); 912 913 static void cb_timeout_handler(struct work_struct *work) 914 { 915 struct delayed_work *dwork = container_of(work, struct delayed_work, 916 work); 917 struct mlx5_cmd_work_ent *ent = container_of(dwork, 918 struct mlx5_cmd_work_ent, 919 cb_timeout_work); 920 struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev, 921 cmd); 922 923 mlx5_cmd_eq_recover(dev); 924 925 /* Maybe got handled by eq recover ? */ 926 if (!test_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state)) { 927 mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, recovered after timeout\n", ent->idx, 928 mlx5_command_str(ent->op), ent->op); 929 goto out; /* phew, already handled */ 930 } 931 932 ent->ret = -ETIMEDOUT; 933 mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, timeout. Will cause a leak of a command resource\n", 934 ent->idx, mlx5_command_str(ent->op), ent->op); 935 mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true); 936 937 out: 938 cmd_ent_put(ent); /* for the cmd_ent_get() took on schedule delayed work */ 939 } 940 941 static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg); 942 static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev, 943 struct mlx5_cmd_msg *msg); 944 945 static bool opcode_allowed(struct mlx5_cmd *cmd, u16 opcode) 946 { 947 if (cmd->allowed_opcode == CMD_ALLOWED_OPCODE_ALL) 948 return true; 949 950 return cmd->allowed_opcode == opcode; 951 } 952 953 bool mlx5_cmd_is_down(struct mlx5_core_dev *dev) 954 { 955 return pci_channel_offline(dev->pdev) || 956 dev->cmd.state != MLX5_CMDIF_STATE_UP || 957 dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR; 958 } 959 960 static void cmd_work_handler(struct work_struct *work) 961 { 962 struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work); 963 struct mlx5_cmd *cmd = ent->cmd; 964 bool poll_cmd = ent->polling; 965 struct mlx5_cmd_layout *lay; 966 struct mlx5_core_dev *dev; 967 unsigned long cb_timeout; 968 struct semaphore *sem; 969 unsigned long flags; 970 int alloc_ret; 971 int cmd_mode; 972 973 dev = container_of(cmd, struct mlx5_core_dev, cmd); 974 cb_timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD)); 975 976 complete(&ent->handling); 977 sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; 978 down(sem); 979 if (!ent->page_queue) { 980 alloc_ret = cmd_alloc_index(cmd); 981 if (alloc_ret < 0) { 982 mlx5_core_err_rl(dev, "failed to allocate command entry\n"); 983 if (ent->callback) { 984 ent->callback(-EAGAIN, ent->context); 985 mlx5_free_cmd_msg(dev, ent->out); 986 free_msg(dev, ent->in); 987 cmd_ent_put(ent); 988 } else { 989 ent->ret = -EAGAIN; 990 complete(&ent->done); 991 } 992 up(sem); 993 return; 994 } 995 ent->idx = alloc_ret; 996 } else { 997 ent->idx = cmd->max_reg_cmds; 998 spin_lock_irqsave(&cmd->alloc_lock, flags); 999 clear_bit(ent->idx, &cmd->bitmask); 1000 spin_unlock_irqrestore(&cmd->alloc_lock, flags); 1001 } 1002 1003 cmd->ent_arr[ent->idx] = ent; 1004 lay = get_inst(cmd, ent->idx); 1005 ent->lay = lay; 1006 memset(lay, 0, sizeof(*lay)); 1007 memcpy(lay->in, ent->in->first.data, sizeof(lay->in)); 1008 if (ent->in->next) 1009 lay->in_ptr = cpu_to_be64(ent->in->next->dma); 1010 lay->inlen = cpu_to_be32(ent->in->len); 1011 if (ent->out->next) 1012 lay->out_ptr = cpu_to_be64(ent->out->next->dma); 1013 lay->outlen = cpu_to_be32(ent->out->len); 1014 lay->type = MLX5_PCI_CMD_XPORT; 1015 lay->token = ent->token; 1016 lay->status_own = CMD_OWNER_HW; 1017 set_signature(ent, !cmd->checksum_disabled); 1018 dump_command(dev, ent, 1); 1019 ent->ts1 = ktime_get_ns(); 1020 cmd_mode = cmd->mode; 1021 1022 if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, cb_timeout)) 1023 cmd_ent_get(ent); 1024 set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state); 1025 1026 cmd_ent_get(ent); /* for the _real_ FW event on completion */ 1027 /* Skip sending command to fw if internal error */ 1028 if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, ent->op)) { 1029 ent->ret = -ENXIO; 1030 mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true); 1031 return; 1032 } 1033 1034 /* ring doorbell after the descriptor is valid */ 1035 mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx); 1036 wmb(); 1037 iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell); 1038 /* if not in polling don't use ent after this point */ 1039 if (cmd_mode == CMD_MODE_POLLING || poll_cmd) { 1040 poll_timeout(ent); 1041 /* make sure we read the descriptor after ownership is SW */ 1042 rmb(); 1043 mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, (ent->ret == -ETIMEDOUT)); 1044 } 1045 } 1046 1047 static int deliv_status_to_err(u8 status) 1048 { 1049 switch (status) { 1050 case MLX5_CMD_DELIVERY_STAT_OK: 1051 case MLX5_DRIVER_STATUS_ABORTED: 1052 return 0; 1053 case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR: 1054 case MLX5_CMD_DELIVERY_STAT_TOK_ERR: 1055 return -EBADR; 1056 case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR: 1057 case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR: 1058 case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR: 1059 return -EFAULT; /* Bad address */ 1060 case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR: 1061 case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR: 1062 case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR: 1063 case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR: 1064 return -ENOMSG; 1065 case MLX5_CMD_DELIVERY_STAT_FW_ERR: 1066 return -EIO; 1067 default: 1068 return -EINVAL; 1069 } 1070 } 1071 1072 static const char *deliv_status_to_str(u8 status) 1073 { 1074 switch (status) { 1075 case MLX5_CMD_DELIVERY_STAT_OK: 1076 return "no errors"; 1077 case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR: 1078 return "signature error"; 1079 case MLX5_CMD_DELIVERY_STAT_TOK_ERR: 1080 return "token error"; 1081 case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR: 1082 return "bad block number"; 1083 case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR: 1084 return "output pointer not aligned to block size"; 1085 case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR: 1086 return "input pointer not aligned to block size"; 1087 case MLX5_CMD_DELIVERY_STAT_FW_ERR: 1088 return "firmware internal error"; 1089 case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR: 1090 return "command input length error"; 1091 case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR: 1092 return "command output length error"; 1093 case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR: 1094 return "reserved fields not cleared"; 1095 case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR: 1096 return "bad command descriptor type"; 1097 default: 1098 return "unknown status code"; 1099 } 1100 } 1101 1102 enum { 1103 MLX5_CMD_TIMEOUT_RECOVER_MSEC = 5 * 1000, 1104 }; 1105 1106 static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev, 1107 struct mlx5_cmd_work_ent *ent) 1108 { 1109 unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_RECOVER_MSEC); 1110 1111 mlx5_cmd_eq_recover(dev); 1112 1113 /* Re-wait on the ent->done after executing the recovery flow. If the 1114 * recovery flow (or any other recovery flow running simultaneously) 1115 * has recovered an EQE, it should cause the entry to be completed by 1116 * the command interface. 1117 */ 1118 if (wait_for_completion_timeout(&ent->done, timeout)) { 1119 mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) recovered after timeout\n", ent->idx, 1120 mlx5_command_str(ent->op), ent->op); 1121 return; 1122 } 1123 1124 mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) No done completion\n", ent->idx, 1125 mlx5_command_str(ent->op), ent->op); 1126 1127 ent->ret = -ETIMEDOUT; 1128 mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true); 1129 } 1130 1131 static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) 1132 { 1133 unsigned long timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD)); 1134 struct mlx5_cmd *cmd = &dev->cmd; 1135 int err; 1136 1137 if (!wait_for_completion_timeout(&ent->handling, timeout) && 1138 cancel_work_sync(&ent->work)) { 1139 ent->ret = -ECANCELED; 1140 goto out_err; 1141 } 1142 if (cmd->mode == CMD_MODE_POLLING || ent->polling) 1143 wait_for_completion(&ent->done); 1144 else if (!wait_for_completion_timeout(&ent->done, timeout)) 1145 wait_func_handle_exec_timeout(dev, ent); 1146 1147 out_err: 1148 err = ent->ret; 1149 1150 if (err == -ETIMEDOUT) { 1151 mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n", 1152 mlx5_command_str(ent->op), ent->op); 1153 } else if (err == -ECANCELED) { 1154 mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n", 1155 mlx5_command_str(ent->op), ent->op); 1156 } 1157 mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", 1158 err, deliv_status_to_str(ent->status), ent->status); 1159 1160 return err; 1161 } 1162 1163 /* Notes: 1164 * 1. Callback functions may not sleep 1165 * 2. page queue commands do not support asynchrous completion 1166 * 1167 * return value in case (!callback): 1168 * ret < 0 : Command execution couldn't be submitted by driver 1169 * ret > 0 : Command execution couldn't be performed by firmware 1170 * ret == 0: Command was executed by FW, Caller must check FW outbox status. 1171 * 1172 * return value in case (callback): 1173 * ret < 0 : Command execution couldn't be submitted by driver 1174 * ret == 0: Command will be submitted to FW for execution 1175 * and the callback will be called for further status updates 1176 */ 1177 static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in, 1178 struct mlx5_cmd_msg *out, void *uout, int uout_size, 1179 mlx5_cmd_cbk_t callback, 1180 void *context, int page_queue, 1181 u8 token, bool force_polling) 1182 { 1183 struct mlx5_cmd *cmd = &dev->cmd; 1184 struct mlx5_cmd_work_ent *ent; 1185 struct mlx5_cmd_stats *stats; 1186 u8 status = 0; 1187 int err = 0; 1188 s64 ds; 1189 1190 if (callback && page_queue) 1191 return -EINVAL; 1192 1193 ent = cmd_alloc_ent(cmd, in, out, uout, uout_size, 1194 callback, context, page_queue); 1195 if (IS_ERR(ent)) 1196 return PTR_ERR(ent); 1197 1198 /* put for this ent is when consumed, depending on the use case 1199 * 1) (!callback) blocking flow: by caller after wait_func completes 1200 * 2) (callback) flow: by mlx5_cmd_comp_handler() when ent is handled 1201 */ 1202 1203 ent->token = token; 1204 ent->polling = force_polling; 1205 1206 init_completion(&ent->handling); 1207 if (!callback) 1208 init_completion(&ent->done); 1209 1210 INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler); 1211 INIT_WORK(&ent->work, cmd_work_handler); 1212 if (page_queue) { 1213 cmd_work_handler(&ent->work); 1214 } else if (!queue_work(cmd->wq, &ent->work)) { 1215 mlx5_core_warn(dev, "failed to queue work\n"); 1216 err = -EALREADY; 1217 goto out_free; 1218 } 1219 1220 if (callback) 1221 return 0; /* mlx5_cmd_comp_handler() will put(ent) */ 1222 1223 err = wait_func(dev, ent); 1224 if (err == -ETIMEDOUT || err == -ECANCELED) 1225 goto out_free; 1226 1227 ds = ent->ts2 - ent->ts1; 1228 if (ent->op < MLX5_CMD_OP_MAX) { 1229 stats = &cmd->stats[ent->op]; 1230 spin_lock_irq(&stats->lock); 1231 stats->sum += ds; 1232 ++stats->n; 1233 spin_unlock_irq(&stats->lock); 1234 } 1235 mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME, 1236 "fw exec time for %s is %lld nsec\n", 1237 mlx5_command_str(ent->op), ds); 1238 1239 out_free: 1240 status = ent->status; 1241 cmd_ent_put(ent); 1242 return err ? : status; 1243 } 1244 1245 static ssize_t dbg_write(struct file *filp, const char __user *buf, 1246 size_t count, loff_t *pos) 1247 { 1248 struct mlx5_core_dev *dev = filp->private_data; 1249 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; 1250 char lbuf[3]; 1251 int err; 1252 1253 if (!dbg->in_msg || !dbg->out_msg) 1254 return -ENOMEM; 1255 1256 if (count < sizeof(lbuf) - 1) 1257 return -EINVAL; 1258 1259 if (copy_from_user(lbuf, buf, sizeof(lbuf) - 1)) 1260 return -EFAULT; 1261 1262 lbuf[sizeof(lbuf) - 1] = 0; 1263 1264 if (strcmp(lbuf, "go")) 1265 return -EINVAL; 1266 1267 err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen); 1268 1269 return err ? err : count; 1270 } 1271 1272 static const struct file_operations fops = { 1273 .owner = THIS_MODULE, 1274 .open = simple_open, 1275 .write = dbg_write, 1276 }; 1277 1278 static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size, 1279 u8 token) 1280 { 1281 struct mlx5_cmd_prot_block *block; 1282 struct mlx5_cmd_mailbox *next; 1283 int copy; 1284 1285 if (!to || !from) 1286 return -ENOMEM; 1287 1288 copy = min_t(int, size, sizeof(to->first.data)); 1289 memcpy(to->first.data, from, copy); 1290 size -= copy; 1291 from += copy; 1292 1293 next = to->next; 1294 while (size) { 1295 if (!next) { 1296 /* this is a BUG */ 1297 return -ENOMEM; 1298 } 1299 1300 copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE); 1301 block = next->buf; 1302 memcpy(block->data, from, copy); 1303 from += copy; 1304 size -= copy; 1305 block->token = token; 1306 next = next->next; 1307 } 1308 1309 return 0; 1310 } 1311 1312 static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size) 1313 { 1314 struct mlx5_cmd_prot_block *block; 1315 struct mlx5_cmd_mailbox *next; 1316 int copy; 1317 1318 if (!to || !from) 1319 return -ENOMEM; 1320 1321 copy = min_t(int, size, sizeof(from->first.data)); 1322 memcpy(to, from->first.data, copy); 1323 size -= copy; 1324 to += copy; 1325 1326 next = from->next; 1327 while (size) { 1328 if (!next) { 1329 /* this is a BUG */ 1330 return -ENOMEM; 1331 } 1332 1333 copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE); 1334 block = next->buf; 1335 1336 memcpy(to, block->data, copy); 1337 to += copy; 1338 size -= copy; 1339 next = next->next; 1340 } 1341 1342 return 0; 1343 } 1344 1345 static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev, 1346 gfp_t flags) 1347 { 1348 struct mlx5_cmd_mailbox *mailbox; 1349 1350 mailbox = kmalloc(sizeof(*mailbox), flags); 1351 if (!mailbox) 1352 return ERR_PTR(-ENOMEM); 1353 1354 mailbox->buf = dma_pool_zalloc(dev->cmd.pool, flags, 1355 &mailbox->dma); 1356 if (!mailbox->buf) { 1357 mlx5_core_dbg(dev, "failed allocation\n"); 1358 kfree(mailbox); 1359 return ERR_PTR(-ENOMEM); 1360 } 1361 mailbox->next = NULL; 1362 1363 return mailbox; 1364 } 1365 1366 static void free_cmd_box(struct mlx5_core_dev *dev, 1367 struct mlx5_cmd_mailbox *mailbox) 1368 { 1369 dma_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma); 1370 kfree(mailbox); 1371 } 1372 1373 static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, 1374 gfp_t flags, int size, 1375 u8 token) 1376 { 1377 struct mlx5_cmd_mailbox *tmp, *head = NULL; 1378 struct mlx5_cmd_prot_block *block; 1379 struct mlx5_cmd_msg *msg; 1380 int err; 1381 int n; 1382 int i; 1383 1384 msg = kzalloc(sizeof(*msg), flags); 1385 if (!msg) 1386 return ERR_PTR(-ENOMEM); 1387 1388 msg->len = size; 1389 n = mlx5_calc_cmd_blocks(msg); 1390 1391 for (i = 0; i < n; i++) { 1392 tmp = alloc_cmd_box(dev, flags); 1393 if (IS_ERR(tmp)) { 1394 mlx5_core_warn(dev, "failed allocating block\n"); 1395 err = PTR_ERR(tmp); 1396 goto err_alloc; 1397 } 1398 1399 block = tmp->buf; 1400 tmp->next = head; 1401 block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0); 1402 block->block_num = cpu_to_be32(n - i - 1); 1403 block->token = token; 1404 head = tmp; 1405 } 1406 msg->next = head; 1407 return msg; 1408 1409 err_alloc: 1410 while (head) { 1411 tmp = head->next; 1412 free_cmd_box(dev, head); 1413 head = tmp; 1414 } 1415 kfree(msg); 1416 1417 return ERR_PTR(err); 1418 } 1419 1420 static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev, 1421 struct mlx5_cmd_msg *msg) 1422 { 1423 struct mlx5_cmd_mailbox *head = msg->next; 1424 struct mlx5_cmd_mailbox *next; 1425 1426 while (head) { 1427 next = head->next; 1428 free_cmd_box(dev, head); 1429 head = next; 1430 } 1431 kfree(msg); 1432 } 1433 1434 static ssize_t data_write(struct file *filp, const char __user *buf, 1435 size_t count, loff_t *pos) 1436 { 1437 struct mlx5_core_dev *dev = filp->private_data; 1438 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; 1439 void *ptr; 1440 1441 if (*pos != 0) 1442 return -EINVAL; 1443 1444 kfree(dbg->in_msg); 1445 dbg->in_msg = NULL; 1446 dbg->inlen = 0; 1447 ptr = memdup_user(buf, count); 1448 if (IS_ERR(ptr)) 1449 return PTR_ERR(ptr); 1450 dbg->in_msg = ptr; 1451 dbg->inlen = count; 1452 1453 *pos = count; 1454 1455 return count; 1456 } 1457 1458 static ssize_t data_read(struct file *filp, char __user *buf, size_t count, 1459 loff_t *pos) 1460 { 1461 struct mlx5_core_dev *dev = filp->private_data; 1462 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; 1463 1464 if (!dbg->out_msg) 1465 return -ENOMEM; 1466 1467 return simple_read_from_buffer(buf, count, pos, dbg->out_msg, 1468 dbg->outlen); 1469 } 1470 1471 static const struct file_operations dfops = { 1472 .owner = THIS_MODULE, 1473 .open = simple_open, 1474 .write = data_write, 1475 .read = data_read, 1476 }; 1477 1478 static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count, 1479 loff_t *pos) 1480 { 1481 struct mlx5_core_dev *dev = filp->private_data; 1482 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; 1483 char outlen[8]; 1484 int err; 1485 1486 err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen); 1487 if (err < 0) 1488 return err; 1489 1490 return simple_read_from_buffer(buf, count, pos, outlen, err); 1491 } 1492 1493 static ssize_t outlen_write(struct file *filp, const char __user *buf, 1494 size_t count, loff_t *pos) 1495 { 1496 struct mlx5_core_dev *dev = filp->private_data; 1497 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; 1498 char outlen_str[8] = {0}; 1499 int outlen; 1500 void *ptr; 1501 int err; 1502 1503 if (*pos != 0 || count > 6) 1504 return -EINVAL; 1505 1506 kfree(dbg->out_msg); 1507 dbg->out_msg = NULL; 1508 dbg->outlen = 0; 1509 1510 if (copy_from_user(outlen_str, buf, count)) 1511 return -EFAULT; 1512 1513 err = sscanf(outlen_str, "%d", &outlen); 1514 if (err != 1) 1515 return -EINVAL; 1516 1517 ptr = kzalloc(outlen, GFP_KERNEL); 1518 if (!ptr) 1519 return -ENOMEM; 1520 1521 dbg->out_msg = ptr; 1522 dbg->outlen = outlen; 1523 1524 *pos = count; 1525 1526 return count; 1527 } 1528 1529 static const struct file_operations olfops = { 1530 .owner = THIS_MODULE, 1531 .open = simple_open, 1532 .write = outlen_write, 1533 .read = outlen_read, 1534 }; 1535 1536 static void set_wqname(struct mlx5_core_dev *dev) 1537 { 1538 struct mlx5_cmd *cmd = &dev->cmd; 1539 1540 snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s", 1541 dev_name(dev->device)); 1542 } 1543 1544 static void clean_debug_files(struct mlx5_core_dev *dev) 1545 { 1546 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; 1547 1548 if (!mlx5_debugfs_root) 1549 return; 1550 1551 mlx5_cmdif_debugfs_cleanup(dev); 1552 debugfs_remove_recursive(dbg->dbg_root); 1553 } 1554 1555 static void create_debugfs_files(struct mlx5_core_dev *dev) 1556 { 1557 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; 1558 1559 dbg->dbg_root = debugfs_create_dir("cmd", mlx5_debugfs_get_dev_root(dev)); 1560 1561 debugfs_create_file("in", 0400, dbg->dbg_root, dev, &dfops); 1562 debugfs_create_file("out", 0200, dbg->dbg_root, dev, &dfops); 1563 debugfs_create_file("out_len", 0600, dbg->dbg_root, dev, &olfops); 1564 debugfs_create_u8("status", 0600, dbg->dbg_root, &dbg->status); 1565 debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops); 1566 1567 mlx5_cmdif_debugfs_init(dev); 1568 } 1569 1570 void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode) 1571 { 1572 struct mlx5_cmd *cmd = &dev->cmd; 1573 int i; 1574 1575 for (i = 0; i < cmd->max_reg_cmds; i++) 1576 down(&cmd->sem); 1577 down(&cmd->pages_sem); 1578 1579 cmd->allowed_opcode = opcode; 1580 1581 up(&cmd->pages_sem); 1582 for (i = 0; i < cmd->max_reg_cmds; i++) 1583 up(&cmd->sem); 1584 } 1585 1586 static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode) 1587 { 1588 struct mlx5_cmd *cmd = &dev->cmd; 1589 int i; 1590 1591 for (i = 0; i < cmd->max_reg_cmds; i++) 1592 down(&cmd->sem); 1593 down(&cmd->pages_sem); 1594 1595 cmd->mode = mode; 1596 1597 up(&cmd->pages_sem); 1598 for (i = 0; i < cmd->max_reg_cmds; i++) 1599 up(&cmd->sem); 1600 } 1601 1602 static int cmd_comp_notifier(struct notifier_block *nb, 1603 unsigned long type, void *data) 1604 { 1605 struct mlx5_core_dev *dev; 1606 struct mlx5_cmd *cmd; 1607 struct mlx5_eqe *eqe; 1608 1609 cmd = mlx5_nb_cof(nb, struct mlx5_cmd, nb); 1610 dev = container_of(cmd, struct mlx5_core_dev, cmd); 1611 eqe = data; 1612 1613 mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false); 1614 1615 return NOTIFY_OK; 1616 } 1617 void mlx5_cmd_use_events(struct mlx5_core_dev *dev) 1618 { 1619 MLX5_NB_INIT(&dev->cmd.nb, cmd_comp_notifier, CMD); 1620 mlx5_eq_notifier_register(dev, &dev->cmd.nb); 1621 mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS); 1622 } 1623 1624 void mlx5_cmd_use_polling(struct mlx5_core_dev *dev) 1625 { 1626 mlx5_cmd_change_mod(dev, CMD_MODE_POLLING); 1627 mlx5_eq_notifier_unregister(dev, &dev->cmd.nb); 1628 } 1629 1630 static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) 1631 { 1632 unsigned long flags; 1633 1634 if (msg->parent) { 1635 spin_lock_irqsave(&msg->parent->lock, flags); 1636 list_add_tail(&msg->list, &msg->parent->head); 1637 spin_unlock_irqrestore(&msg->parent->lock, flags); 1638 } else { 1639 mlx5_free_cmd_msg(dev, msg); 1640 } 1641 } 1642 1643 static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced) 1644 { 1645 struct mlx5_cmd *cmd = &dev->cmd; 1646 struct mlx5_cmd_work_ent *ent; 1647 mlx5_cmd_cbk_t callback; 1648 void *context; 1649 int err; 1650 int i; 1651 s64 ds; 1652 struct mlx5_cmd_stats *stats; 1653 unsigned long flags; 1654 unsigned long vector; 1655 1656 /* there can be at most 32 command queues */ 1657 vector = vec & 0xffffffff; 1658 for (i = 0; i < (1 << cmd->log_sz); i++) { 1659 if (test_bit(i, &vector)) { 1660 ent = cmd->ent_arr[i]; 1661 1662 /* if we already completed the command, ignore it */ 1663 if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, 1664 &ent->state)) { 1665 /* only real completion can free the cmd slot */ 1666 if (!forced) { 1667 mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n", 1668 ent->idx); 1669 cmd_ent_put(ent); 1670 } 1671 continue; 1672 } 1673 1674 if (ent->callback && cancel_delayed_work(&ent->cb_timeout_work)) 1675 cmd_ent_put(ent); /* timeout work was canceled */ 1676 1677 if (!forced || /* Real FW completion */ 1678 mlx5_cmd_is_down(dev) || /* No real FW completion is expected */ 1679 !opcode_allowed(cmd, ent->op)) 1680 cmd_ent_put(ent); 1681 1682 ent->ts2 = ktime_get_ns(); 1683 memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out)); 1684 dump_command(dev, ent, 0); 1685 1686 if (vec & MLX5_TRIGGERED_CMD_COMP) 1687 ent->ret = -ENXIO; 1688 1689 if (!ent->ret) { /* Command completed by FW */ 1690 if (!cmd->checksum_disabled) 1691 ent->ret = verify_signature(ent); 1692 1693 ent->status = ent->lay->status_own >> 1; 1694 1695 mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n", 1696 ent->ret, deliv_status_to_str(ent->status), ent->status); 1697 } 1698 1699 if (ent->callback) { 1700 ds = ent->ts2 - ent->ts1; 1701 if (ent->op < MLX5_CMD_OP_MAX) { 1702 stats = &cmd->stats[ent->op]; 1703 spin_lock_irqsave(&stats->lock, flags); 1704 stats->sum += ds; 1705 ++stats->n; 1706 spin_unlock_irqrestore(&stats->lock, flags); 1707 } 1708 1709 callback = ent->callback; 1710 context = ent->context; 1711 err = ent->ret ? : ent->status; 1712 if (err > 0) /* Failed in FW, command didn't execute */ 1713 err = deliv_status_to_err(err); 1714 1715 if (!err) 1716 err = mlx5_copy_from_msg(ent->uout, 1717 ent->out, 1718 ent->uout_size); 1719 1720 mlx5_free_cmd_msg(dev, ent->out); 1721 free_msg(dev, ent->in); 1722 1723 /* final consumer is done, release ent */ 1724 cmd_ent_put(ent); 1725 callback(err, context); 1726 } else { 1727 /* release wait_func() so mlx5_cmd_invoke() 1728 * can make the final ent_put() 1729 */ 1730 complete(&ent->done); 1731 } 1732 } 1733 } 1734 } 1735 1736 static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev) 1737 { 1738 struct mlx5_cmd *cmd = &dev->cmd; 1739 unsigned long bitmask; 1740 unsigned long flags; 1741 u64 vector; 1742 int i; 1743 1744 /* wait for pending handlers to complete */ 1745 mlx5_eq_synchronize_cmd_irq(dev); 1746 spin_lock_irqsave(&dev->cmd.alloc_lock, flags); 1747 vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1); 1748 if (!vector) 1749 goto no_trig; 1750 1751 bitmask = vector; 1752 /* we must increment the allocated entries refcount before triggering the completions 1753 * to guarantee pending commands will not get freed in the meanwhile. 1754 * For that reason, it also has to be done inside the alloc_lock. 1755 */ 1756 for_each_set_bit(i, &bitmask, (1 << cmd->log_sz)) 1757 cmd_ent_get(cmd->ent_arr[i]); 1758 vector |= MLX5_TRIGGERED_CMD_COMP; 1759 spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); 1760 1761 mlx5_core_dbg(dev, "vector 0x%llx\n", vector); 1762 mlx5_cmd_comp_handler(dev, vector, true); 1763 for_each_set_bit(i, &bitmask, (1 << cmd->log_sz)) 1764 cmd_ent_put(cmd->ent_arr[i]); 1765 return; 1766 1767 no_trig: 1768 spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); 1769 } 1770 1771 void mlx5_cmd_flush(struct mlx5_core_dev *dev) 1772 { 1773 struct mlx5_cmd *cmd = &dev->cmd; 1774 int i; 1775 1776 for (i = 0; i < cmd->max_reg_cmds; i++) { 1777 while (down_trylock(&cmd->sem)) { 1778 mlx5_cmd_trigger_completions(dev); 1779 cond_resched(); 1780 } 1781 } 1782 1783 while (down_trylock(&cmd->pages_sem)) { 1784 mlx5_cmd_trigger_completions(dev); 1785 cond_resched(); 1786 } 1787 1788 /* Unlock cmdif */ 1789 up(&cmd->pages_sem); 1790 for (i = 0; i < cmd->max_reg_cmds; i++) 1791 up(&cmd->sem); 1792 } 1793 1794 static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size, 1795 gfp_t gfp) 1796 { 1797 struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM); 1798 struct cmd_msg_cache *ch = NULL; 1799 struct mlx5_cmd *cmd = &dev->cmd; 1800 int i; 1801 1802 if (in_size <= 16) 1803 goto cache_miss; 1804 1805 for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) { 1806 ch = &cmd->cache[i]; 1807 if (in_size > ch->max_inbox_size) 1808 continue; 1809 spin_lock_irq(&ch->lock); 1810 if (list_empty(&ch->head)) { 1811 spin_unlock_irq(&ch->lock); 1812 continue; 1813 } 1814 msg = list_entry(ch->head.next, typeof(*msg), list); 1815 /* For cached lists, we must explicitly state what is 1816 * the real size 1817 */ 1818 msg->len = in_size; 1819 list_del(&msg->list); 1820 spin_unlock_irq(&ch->lock); 1821 break; 1822 } 1823 1824 if (!IS_ERR(msg)) 1825 return msg; 1826 1827 cache_miss: 1828 msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0); 1829 return msg; 1830 } 1831 1832 static int is_manage_pages(void *in) 1833 { 1834 return in_to_opcode(in) == MLX5_CMD_OP_MANAGE_PAGES; 1835 } 1836 1837 /* Notes: 1838 * 1. Callback functions may not sleep 1839 * 2. Page queue commands do not support asynchrous completion 1840 */ 1841 static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, 1842 int out_size, mlx5_cmd_cbk_t callback, void *context, 1843 bool force_polling) 1844 { 1845 struct mlx5_cmd_msg *inb, *outb; 1846 u16 opcode = in_to_opcode(in); 1847 bool throttle_op; 1848 int pages_queue; 1849 gfp_t gfp; 1850 u8 token; 1851 int err; 1852 1853 if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode)) 1854 return -ENXIO; 1855 1856 throttle_op = mlx5_cmd_is_throttle_opcode(opcode); 1857 if (throttle_op) { 1858 /* atomic context may not sleep */ 1859 if (callback) 1860 return -EINVAL; 1861 down(&dev->cmd.throttle_sem); 1862 } 1863 1864 pages_queue = is_manage_pages(in); 1865 gfp = callback ? GFP_ATOMIC : GFP_KERNEL; 1866 1867 inb = alloc_msg(dev, in_size, gfp); 1868 if (IS_ERR(inb)) { 1869 err = PTR_ERR(inb); 1870 goto out_up; 1871 } 1872 1873 token = alloc_token(&dev->cmd); 1874 1875 err = mlx5_copy_to_msg(inb, in, in_size, token); 1876 if (err) { 1877 mlx5_core_warn(dev, "err %d\n", err); 1878 goto out_in; 1879 } 1880 1881 outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token); 1882 if (IS_ERR(outb)) { 1883 err = PTR_ERR(outb); 1884 goto out_in; 1885 } 1886 1887 err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context, 1888 pages_queue, token, force_polling); 1889 if (callback) 1890 return err; 1891 1892 if (err > 0) /* Failed in FW, command didn't execute */ 1893 err = deliv_status_to_err(err); 1894 1895 if (err) 1896 goto out_out; 1897 1898 /* command completed by FW */ 1899 err = mlx5_copy_from_msg(out, outb, out_size); 1900 out_out: 1901 mlx5_free_cmd_msg(dev, outb); 1902 out_in: 1903 free_msg(dev, inb); 1904 out_up: 1905 if (throttle_op) 1906 up(&dev->cmd.throttle_sem); 1907 return err; 1908 } 1909 1910 static void mlx5_cmd_err_trace(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out) 1911 { 1912 u32 syndrome = MLX5_GET(mbox_out, out, syndrome); 1913 u8 status = MLX5_GET(mbox_out, out, status); 1914 1915 trace_mlx5_cmd(mlx5_command_str(opcode), opcode, op_mod, 1916 cmd_status_str(status), status, syndrome, 1917 cmd_status_to_err(status)); 1918 } 1919 1920 static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status, 1921 u32 syndrome, int err) 1922 { 1923 struct mlx5_cmd_stats *stats; 1924 1925 if (!err) 1926 return; 1927 1928 stats = &dev->cmd.stats[opcode]; 1929 spin_lock_irq(&stats->lock); 1930 stats->failed++; 1931 if (err < 0) 1932 stats->last_failed_errno = -err; 1933 if (err == -EREMOTEIO) { 1934 stats->failed_mbox_status++; 1935 stats->last_failed_mbox_status = status; 1936 stats->last_failed_syndrome = syndrome; 1937 } 1938 spin_unlock_irq(&stats->lock); 1939 } 1940 1941 /* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */ 1942 static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, u16 op_mod, void *out) 1943 { 1944 u32 syndrome = MLX5_GET(mbox_out, out, syndrome); 1945 u8 status = MLX5_GET(mbox_out, out, status); 1946 1947 if (err == -EREMOTEIO) /* -EREMOTEIO is preserved */ 1948 err = -EIO; 1949 1950 if (!err && status != MLX5_CMD_STAT_OK) { 1951 err = -EREMOTEIO; 1952 mlx5_cmd_err_trace(dev, opcode, op_mod, out); 1953 } 1954 1955 cmd_status_log(dev, opcode, status, syndrome, err); 1956 return err; 1957 } 1958 1959 /** 1960 * mlx5_cmd_do - Executes a fw command, wait for completion. 1961 * Unlike mlx5_cmd_exec, this function will not translate or intercept 1962 * outbox.status and will return -EREMOTEIO when 1963 * outbox.status != MLX5_CMD_STAT_OK 1964 * 1965 * @dev: mlx5 core device 1966 * @in: inbox mlx5_ifc command buffer 1967 * @in_size: inbox buffer size 1968 * @out: outbox mlx5_ifc buffer 1969 * @out_size: outbox size 1970 * 1971 * @return: 1972 * -EREMOTEIO : Command executed by FW, outbox.status != MLX5_CMD_STAT_OK. 1973 * Caller must check FW outbox status. 1974 * 0 : Command execution successful, outbox.status == MLX5_CMD_STAT_OK. 1975 * < 0 : Command execution couldn't be performed by firmware or driver 1976 */ 1977 int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size) 1978 { 1979 int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false); 1980 u16 op_mod = MLX5_GET(mbox_in, in, op_mod); 1981 u16 opcode = in_to_opcode(in); 1982 1983 return cmd_status_err(dev, err, opcode, op_mod, out); 1984 } 1985 EXPORT_SYMBOL(mlx5_cmd_do); 1986 1987 /** 1988 * mlx5_cmd_exec - Executes a fw command, wait for completion 1989 * 1990 * @dev: mlx5 core device 1991 * @in: inbox mlx5_ifc command buffer 1992 * @in_size: inbox buffer size 1993 * @out: outbox mlx5_ifc buffer 1994 * @out_size: outbox size 1995 * 1996 * @return: 0 if no error, FW command execution was successful 1997 * and outbox status is ok. 1998 */ 1999 int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, 2000 int out_size) 2001 { 2002 int err = mlx5_cmd_do(dev, in, in_size, out, out_size); 2003 2004 return mlx5_cmd_check(dev, err, in, out); 2005 } 2006 EXPORT_SYMBOL(mlx5_cmd_exec); 2007 2008 /** 2009 * mlx5_cmd_exec_polling - Executes a fw command, poll for completion 2010 * Needed for driver force teardown, when command completion EQ 2011 * will not be available to complete the command 2012 * 2013 * @dev: mlx5 core device 2014 * @in: inbox mlx5_ifc command buffer 2015 * @in_size: inbox buffer size 2016 * @out: outbox mlx5_ifc buffer 2017 * @out_size: outbox size 2018 * 2019 * @return: 0 if no error, FW command execution was successful 2020 * and outbox status is ok. 2021 */ 2022 int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size, 2023 void *out, int out_size) 2024 { 2025 int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true); 2026 u16 op_mod = MLX5_GET(mbox_in, in, op_mod); 2027 u16 opcode = in_to_opcode(in); 2028 2029 err = cmd_status_err(dev, err, opcode, op_mod, out); 2030 return mlx5_cmd_check(dev, err, in, out); 2031 } 2032 EXPORT_SYMBOL(mlx5_cmd_exec_polling); 2033 2034 void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev, 2035 struct mlx5_async_ctx *ctx) 2036 { 2037 ctx->dev = dev; 2038 /* Starts at 1 to avoid doing wake_up if we are not cleaning up */ 2039 atomic_set(&ctx->num_inflight, 1); 2040 init_completion(&ctx->inflight_done); 2041 } 2042 EXPORT_SYMBOL(mlx5_cmd_init_async_ctx); 2043 2044 /** 2045 * mlx5_cmd_cleanup_async_ctx - Clean up an async_ctx 2046 * @ctx: The ctx to clean 2047 * 2048 * Upon return all callbacks given to mlx5_cmd_exec_cb() have been called. The 2049 * caller must ensure that mlx5_cmd_exec_cb() is not called during or after 2050 * the call mlx5_cleanup_async_ctx(). 2051 */ 2052 void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx) 2053 { 2054 if (!atomic_dec_and_test(&ctx->num_inflight)) 2055 wait_for_completion(&ctx->inflight_done); 2056 } 2057 EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx); 2058 2059 static void mlx5_cmd_exec_cb_handler(int status, void *_work) 2060 { 2061 struct mlx5_async_work *work = _work; 2062 struct mlx5_async_ctx *ctx; 2063 2064 ctx = work->ctx; 2065 status = cmd_status_err(ctx->dev, status, work->opcode, work->op_mod, work->out); 2066 work->user_callback(status, work); 2067 if (atomic_dec_and_test(&ctx->num_inflight)) 2068 complete(&ctx->inflight_done); 2069 } 2070 2071 int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, 2072 void *out, int out_size, mlx5_async_cbk_t callback, 2073 struct mlx5_async_work *work) 2074 { 2075 int ret; 2076 2077 work->ctx = ctx; 2078 work->user_callback = callback; 2079 work->opcode = in_to_opcode(in); 2080 work->op_mod = MLX5_GET(mbox_in, in, op_mod); 2081 work->out = out; 2082 if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight))) 2083 return -EIO; 2084 ret = cmd_exec(ctx->dev, in, in_size, out, out_size, 2085 mlx5_cmd_exec_cb_handler, work, false); 2086 if (ret && atomic_dec_and_test(&ctx->num_inflight)) 2087 complete(&ctx->inflight_done); 2088 2089 return ret; 2090 } 2091 EXPORT_SYMBOL(mlx5_cmd_exec_cb); 2092 2093 static void destroy_msg_cache(struct mlx5_core_dev *dev) 2094 { 2095 struct cmd_msg_cache *ch; 2096 struct mlx5_cmd_msg *msg; 2097 struct mlx5_cmd_msg *n; 2098 int i; 2099 2100 for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) { 2101 ch = &dev->cmd.cache[i]; 2102 list_for_each_entry_safe(msg, n, &ch->head, list) { 2103 list_del(&msg->list); 2104 mlx5_free_cmd_msg(dev, msg); 2105 } 2106 } 2107 } 2108 2109 static unsigned cmd_cache_num_ent[MLX5_NUM_COMMAND_CACHES] = { 2110 512, 32, 16, 8, 2 2111 }; 2112 2113 static unsigned cmd_cache_ent_size[MLX5_NUM_COMMAND_CACHES] = { 2114 16 + MLX5_CMD_DATA_BLOCK_SIZE, 2115 16 + MLX5_CMD_DATA_BLOCK_SIZE * 2, 2116 16 + MLX5_CMD_DATA_BLOCK_SIZE * 16, 2117 16 + MLX5_CMD_DATA_BLOCK_SIZE * 256, 2118 16 + MLX5_CMD_DATA_BLOCK_SIZE * 512, 2119 }; 2120 2121 static void create_msg_cache(struct mlx5_core_dev *dev) 2122 { 2123 struct mlx5_cmd *cmd = &dev->cmd; 2124 struct cmd_msg_cache *ch; 2125 struct mlx5_cmd_msg *msg; 2126 int i; 2127 int k; 2128 2129 /* Initialize and fill the caches with initial entries */ 2130 for (k = 0; k < MLX5_NUM_COMMAND_CACHES; k++) { 2131 ch = &cmd->cache[k]; 2132 spin_lock_init(&ch->lock); 2133 INIT_LIST_HEAD(&ch->head); 2134 ch->num_ent = cmd_cache_num_ent[k]; 2135 ch->max_inbox_size = cmd_cache_ent_size[k]; 2136 for (i = 0; i < ch->num_ent; i++) { 2137 msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL | __GFP_NOWARN, 2138 ch->max_inbox_size, 0); 2139 if (IS_ERR(msg)) 2140 break; 2141 msg->parent = ch; 2142 list_add_tail(&msg->list, &ch->head); 2143 } 2144 } 2145 } 2146 2147 static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) 2148 { 2149 cmd->cmd_alloc_buf = dma_alloc_coherent(mlx5_core_dma_dev(dev), MLX5_ADAPTER_PAGE_SIZE, 2150 &cmd->alloc_dma, GFP_KERNEL); 2151 if (!cmd->cmd_alloc_buf) 2152 return -ENOMEM; 2153 2154 /* make sure it is aligned to 4K */ 2155 if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) { 2156 cmd->cmd_buf = cmd->cmd_alloc_buf; 2157 cmd->dma = cmd->alloc_dma; 2158 cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE; 2159 return 0; 2160 } 2161 2162 dma_free_coherent(mlx5_core_dma_dev(dev), MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf, 2163 cmd->alloc_dma); 2164 cmd->cmd_alloc_buf = dma_alloc_coherent(mlx5_core_dma_dev(dev), 2165 2 * MLX5_ADAPTER_PAGE_SIZE - 1, 2166 &cmd->alloc_dma, GFP_KERNEL); 2167 if (!cmd->cmd_alloc_buf) 2168 return -ENOMEM; 2169 2170 cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE); 2171 cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE); 2172 cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1; 2173 return 0; 2174 } 2175 2176 static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) 2177 { 2178 dma_free_coherent(mlx5_core_dma_dev(dev), cmd->alloc_size, cmd->cmd_alloc_buf, 2179 cmd->alloc_dma); 2180 } 2181 2182 static u16 cmdif_rev(struct mlx5_core_dev *dev) 2183 { 2184 return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; 2185 } 2186 2187 int mlx5_cmd_init(struct mlx5_core_dev *dev) 2188 { 2189 int size = sizeof(struct mlx5_cmd_prot_block); 2190 int align = roundup_pow_of_two(size); 2191 struct mlx5_cmd *cmd = &dev->cmd; 2192 u32 cmd_h, cmd_l; 2193 u16 cmd_if_rev; 2194 int err; 2195 int i; 2196 2197 memset(cmd, 0, sizeof(*cmd)); 2198 cmd_if_rev = cmdif_rev(dev); 2199 if (cmd_if_rev != CMD_IF_REV) { 2200 mlx5_core_err(dev, 2201 "Driver cmdif rev(%d) differs from firmware's(%d)\n", 2202 CMD_IF_REV, cmd_if_rev); 2203 return -EINVAL; 2204 } 2205 2206 cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0); 2207 if (!cmd->pool) 2208 return -ENOMEM; 2209 2210 err = alloc_cmd_page(dev, cmd); 2211 if (err) 2212 goto err_free_pool; 2213 2214 cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff; 2215 cmd->log_sz = cmd_l >> 4 & 0xf; 2216 cmd->log_stride = cmd_l & 0xf; 2217 if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) { 2218 mlx5_core_err(dev, "firmware reports too many outstanding commands %d\n", 2219 1 << cmd->log_sz); 2220 err = -EINVAL; 2221 goto err_free_page; 2222 } 2223 2224 if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) { 2225 mlx5_core_err(dev, "command queue size overflow\n"); 2226 err = -EINVAL; 2227 goto err_free_page; 2228 } 2229 2230 cmd->state = MLX5_CMDIF_STATE_DOWN; 2231 cmd->checksum_disabled = 1; 2232 cmd->max_reg_cmds = (1 << cmd->log_sz) - 1; 2233 cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1; 2234 2235 cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16; 2236 if (cmd->cmdif_rev > CMD_IF_REV) { 2237 mlx5_core_err(dev, "driver does not support command interface version. driver %d, firmware %d\n", 2238 CMD_IF_REV, cmd->cmdif_rev); 2239 err = -EOPNOTSUPP; 2240 goto err_free_page; 2241 } 2242 2243 spin_lock_init(&cmd->alloc_lock); 2244 spin_lock_init(&cmd->token_lock); 2245 for (i = 0; i < MLX5_CMD_OP_MAX; i++) 2246 spin_lock_init(&cmd->stats[i].lock); 2247 2248 sema_init(&cmd->sem, cmd->max_reg_cmds); 2249 sema_init(&cmd->pages_sem, 1); 2250 sema_init(&cmd->throttle_sem, DIV_ROUND_UP(cmd->max_reg_cmds, 2)); 2251 2252 cmd_h = (u32)((u64)(cmd->dma) >> 32); 2253 cmd_l = (u32)(cmd->dma); 2254 if (cmd_l & 0xfff) { 2255 mlx5_core_err(dev, "invalid command queue address\n"); 2256 err = -ENOMEM; 2257 goto err_free_page; 2258 } 2259 2260 iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h); 2261 iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz); 2262 2263 /* Make sure firmware sees the complete address before we proceed */ 2264 wmb(); 2265 2266 mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma)); 2267 2268 cmd->mode = CMD_MODE_POLLING; 2269 cmd->allowed_opcode = CMD_ALLOWED_OPCODE_ALL; 2270 2271 create_msg_cache(dev); 2272 2273 set_wqname(dev); 2274 cmd->wq = create_singlethread_workqueue(cmd->wq_name); 2275 if (!cmd->wq) { 2276 mlx5_core_err(dev, "failed to create command workqueue\n"); 2277 err = -ENOMEM; 2278 goto err_cache; 2279 } 2280 2281 create_debugfs_files(dev); 2282 2283 return 0; 2284 2285 err_cache: 2286 destroy_msg_cache(dev); 2287 2288 err_free_page: 2289 free_cmd_page(dev, cmd); 2290 2291 err_free_pool: 2292 dma_pool_destroy(cmd->pool); 2293 return err; 2294 } 2295 2296 void mlx5_cmd_cleanup(struct mlx5_core_dev *dev) 2297 { 2298 struct mlx5_cmd *cmd = &dev->cmd; 2299 2300 clean_debug_files(dev); 2301 destroy_workqueue(cmd->wq); 2302 destroy_msg_cache(dev); 2303 free_cmd_page(dev, cmd); 2304 dma_pool_destroy(cmd->pool); 2305 } 2306 2307 void mlx5_cmd_set_state(struct mlx5_core_dev *dev, 2308 enum mlx5_cmdif_state cmdif_state) 2309 { 2310 dev->cmd.state = cmdif_state; 2311 } 2312