/*-
 * Copyright (c) 2013-2019, Mellanox Technologies, Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/io-mapping.h>
#include <linux/hardirq.h>
#include <linux/ktime.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/cmd.h>
#include <dev/mlx5/mlx5_core/mlx5_core.h>

static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size);
static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg);
static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);

enum {
	CMD_IF_REV = 5,
};

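/*
 * Two bins of pre-allocated command messages. LONG_LIST_SIZE is sized
 * so that a single command can carry the 8-byte page pointers for 2GB
 * worth of PAGE_SIZE pages, plus the 16 inline bytes and one extra
 * data block; MED_LIST_SIZE covers the 16 inline bytes plus a single
 * data block.
 */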
enum {
	NUM_LONG_LISTS = 2,
	NUM_MED_LISTS = 64,
	LONG_LIST_SIZE = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 +
	    MLX5_CMD_DATA_BLOCK_SIZE,
	MED_LIST_SIZE = 16 + MLX5_CMD_DATA_BLOCK_SIZE,
};

enum {
	MLX5_CMD_DELIVERY_STAT_OK = 0x0,
	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1,
	MLX5_CMD_DELIVERY_STAT_TOK_ERR = 0x2,
	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR = 0x3,
	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR = 0x4,
	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR = 0x5,
	MLX5_CMD_DELIVERY_STAT_FW_ERR = 0x6,
	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR = 0x7,
	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR = 0x8,
	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR = 0x9,
	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10,
};

struct mlx5_ifc_mbox_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];

	u8 syndrome[0x20];

	u8 reserved_at_40[0x40];
};

struct mlx5_ifc_mbox_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];

	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];

	u8 reserved_at_40[0x40];
};

static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
					   struct mlx5_cmd_msg *in,
					   int uin_size,
					   struct mlx5_cmd_msg *out,
					   void *uout, int uout_size,
					   mlx5_cmd_cbk_t cbk,
					   void *context, int page_queue)
{
	gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
	struct mlx5_cmd_work_ent *ent;

	ent = kzalloc(sizeof(*ent), alloc_flags);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->in = in;
	ent->uin_size = uin_size;
	ent->out = out;
	ent->uout = uout;
	ent->uout_size = uout_size;
	ent->callback = cbk;
	ent->context = context;
	ent->cmd = cmd;
	ent->page_queue = page_queue;

	return ent;
}

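/*
 * Command tokens are drawn from a wrapping 8-bit counter; zero is
 * skipped so that a valid command never carries a token of 0.
 */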
static u8 alloc_token(struct mlx5_cmd *cmd)
{
	u8 token;

	spin_lock(&cmd->token_lock);
	cmd->token++;
	if (cmd->token == 0)
		cmd->token++;
	token = cmd->token;
	spin_unlock(&cmd->token_lock);

	return token;
}

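/*
 * Reserve a command queue slot for "ent". Regular commands take a free
 * bit from the allocation bitmask; page-queue commands use the
 * dedicated last slot, cmd->max_reg_cmds. Returns the slot index, or
 * -1 if no slot is free or the device is not up.
 */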
static int alloc_ent(struct mlx5_cmd_work_ent *ent)
{
	unsigned long flags;
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev =
	    container_of(cmd, struct mlx5_core_dev, cmd);
	int ret = cmd->max_reg_cmds;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	if (!ent->page_queue) {
		ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
		if (ret >= cmd->max_reg_cmds)
			ret = -1;
	}

	if (dev->state != MLX5_DEVICE_STATE_UP)
		ret = -1;

	if (ret != -1) {
		ent->busy = 1;
		ent->idx = ret;
		clear_bit(ent->idx, &cmd->bitmask);
		cmd->ent_mode[ent->idx] =
		    ent->polling ? MLX5_CMD_MODE_POLLING : MLX5_CMD_MODE_EVENTS;
		cmd->ent_arr[ent->idx] = ent;
	}
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);

	return ret;
}

static void free_ent(struct mlx5_cmd *cmd, int idx)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	cmd->ent_arr[idx] = NULL;	/* safety clear */
	cmd->ent_mode[idx] = MLX5_CMD_MODE_POLLING;	/* reset mode */
	set_bit(idx, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}

static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
	return cmd->cmd_buf + (idx << cmd->log_stride);
}

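/*
 * Command descriptors and mailbox blocks are protected by a simple
 * XOR-8 checksum: the XOR of all bytes in a signed region, including
 * the signature byte itself, must equal 0xff.
 */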
static u8 xor8_buf(void *buf, int len)
{
	u8 *ptr = buf;
	u8 sum = 0;
	int i;

	for (i = 0; i < len; i++)
		sum ^= ptr[i];

	return sum;
}

static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
	if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
		return -EINVAL;

	if (xor8_buf(block, sizeof(*block)) != 0xff)
		return -EINVAL;

	return 0;
}

static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
			   int csum)
{
	block->token = token;
	if (csum) {
		block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
					    sizeof(block->data) - 2);
		block->sig = ~xor8_buf(block, sizeof(*block) - 1);
	}
}

static void
calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
{
	size_t i;

	for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) {
		struct mlx5_cmd_prot_block *block;

		block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE);

		/* compute signature */
		calc_block_sig(block, token, csum);

		/* check for last block */
		if (block->next == 0)
			break;
	}

	/* make sure data gets written to RAM */
	mlx5_fwp_flush(msg);
}

static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
	ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
	calc_chain_sig(ent->in, ent->token, csum);
	calc_chain_sig(ent->out, ent->token, csum);
}

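/*
 * Poll for firmware to hand the descriptor back to software by
 * clearing the ownership bit, sleeping between reads and allowing an
 * extra second of grace beyond the nominal command timeout.
 */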
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_core_dev *dev = container_of(ent->cmd,
	    struct mlx5_core_dev, cmd);
	int poll_end = jiffies +
	    msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
	u8 own;

	do {
		own = ent->lay->status_own;
		if (!(own & CMD_OWNER_HW) ||
		    dev->state != MLX5_DEVICE_STATE_UP) {
			ent->ret = 0;
			return;
		}
		usleep_range(5000, 10000);
	} while (time_before(jiffies, poll_end));

	ent->ret = -ETIMEDOUT;
}

static void free_cmd(struct mlx5_cmd_work_ent *ent)
{
	cancel_delayed_work_sync(&ent->cb_timeout_work);
	kfree(ent);
}

static int
verify_signature(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd_msg *msg = ent->out;
	size_t i;
	int err;
	u8 sig;

	sig = xor8_buf(ent->lay, sizeof(*ent->lay));
	if (sig != 0xff)
		return -EINVAL;

	for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) {
		struct mlx5_cmd_prot_block *block;

		block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE);

		/* verify signature */
		err = verify_block_sig(block);
		if (err != 0)
			return (err);

		/* check for last block */
		if (block->next == 0)
			break;
	}
	return (0);
}

static void dump_buf(void *buf, int size, int data_only, int offset)
{
	__be32 *p = buf;
	int i;

	for (i = 0; i < size; i += 16) {
		pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
			 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
			 be32_to_cpu(p[3]));
		p += 4;
		offset += 16;
	}
	if (!data_only)
		pr_debug("\n");
}

enum {
	MLX5_DRIVER_STATUS_ABORTED = 0xfe,
	MLX5_DRIVER_SYND = 0xbadd00de,
};

static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
				       u32 *synd, u8 *status)
{
	*synd = 0;
	*status = 0;

	switch (op) {
	case MLX5_CMD_OP_TEARDOWN_HCA:
	case MLX5_CMD_OP_DISABLE_HCA:
	case MLX5_CMD_OP_MANAGE_PAGES:
	case MLX5_CMD_OP_DESTROY_MKEY:
	case MLX5_CMD_OP_DESTROY_EQ:
	case MLX5_CMD_OP_DESTROY_CQ:
	case MLX5_CMD_OP_DESTROY_QP:
	case MLX5_CMD_OP_DESTROY_PSV:
	case MLX5_CMD_OP_DESTROY_SRQ:
	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
	case MLX5_CMD_OP_DESTROY_DCT:
	case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
	case MLX5_CMD_OP_DEALLOC_PD:
	case MLX5_CMD_OP_DEALLOC_UAR:
	case MLX5_CMD_OP_DETACH_FROM_MCG:
	case MLX5_CMD_OP_DEALLOC_XRCD:
	case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_DESTROY_TIR:
	case MLX5_CMD_OP_DESTROY_SQ:
	case MLX5_CMD_OP_DESTROY_RQ:
	case MLX5_CMD_OP_DESTROY_RMP:
	case MLX5_CMD_OP_DESTROY_TIS:
	case MLX5_CMD_OP_DESTROY_RQT:
	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
	case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
	case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
	case MLX5_CMD_OP_DESTROY_GENERAL_OBJ:
		return MLX5_CMD_STAT_OK;

	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_INIT_HCA:
	case MLX5_CMD_OP_ENABLE_HCA:
	case MLX5_CMD_OP_QUERY_PAGES:
	case MLX5_CMD_OP_SET_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_SET_ISSI:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
	case MLX5_CMD_OP_PAGE_FAULT_RESUME:
	case MLX5_CMD_OP_CREATE_EQ:
	case MLX5_CMD_OP_QUERY_EQ:
	case MLX5_CMD_OP_GEN_EQE:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_SQD_RTS_QP:
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_CREATE_PSV:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_DRAIN_DCT:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_MODIFY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_SET_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
	case MLX5_CMD_OP_QUERY_VNIC_ENV:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_UAR:
	case MLX5_CMD_OP_CONFIG_INT_MODERATION:
	case MLX5_CMD_OP_ACCESS_REG:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_MAD_IFC:
	case MLX5_CMD_OP_QUERY_MAD_DEMUX:
	case MLX5_CMD_OP_SET_MAD_DEMUX:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_ALLOC_XRCD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_MODIFY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_GENERAL_OBJ:
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJ:
	case MLX5_CMD_OP_QUERY_GENERAL_OBJ:
		*status = MLX5_DRIVER_STATUS_ABORTED;
		*synd = MLX5_DRIVER_SYND;
		return -EIO;
	default:
		mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
		return -EINVAL;
	}
}

const char *mlx5_command_str(int command)
{
#define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd

	switch (command) {
	MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP);
	MLX5_COMMAND_STR_CASE(SET_HCA_CAP);
	MLX5_COMMAND_STR_CASE(QUERY_ADAPTER);
	MLX5_COMMAND_STR_CASE(INIT_HCA);
	MLX5_COMMAND_STR_CASE(TEARDOWN_HCA);
	MLX5_COMMAND_STR_CASE(ENABLE_HCA);
	MLX5_COMMAND_STR_CASE(DISABLE_HCA);
	MLX5_COMMAND_STR_CASE(QUERY_PAGES);
	MLX5_COMMAND_STR_CASE(MANAGE_PAGES);
	MLX5_COMMAND_STR_CASE(QUERY_ISSI);
	MLX5_COMMAND_STR_CASE(SET_ISSI);
	MLX5_COMMAND_STR_CASE(CREATE_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_MKEY);
	MLX5_COMMAND_STR_CASE(DESTROY_MKEY);
	MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS);
	MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME);
	MLX5_COMMAND_STR_CASE(CREATE_EQ);
	MLX5_COMMAND_STR_CASE(DESTROY_EQ);
	MLX5_COMMAND_STR_CASE(QUERY_EQ);
	MLX5_COMMAND_STR_CASE(GEN_EQE);
	MLX5_COMMAND_STR_CASE(CREATE_CQ);
	MLX5_COMMAND_STR_CASE(DESTROY_CQ);
	MLX5_COMMAND_STR_CASE(QUERY_CQ);
	MLX5_COMMAND_STR_CASE(MODIFY_CQ);
	MLX5_COMMAND_STR_CASE(CREATE_QP);
	MLX5_COMMAND_STR_CASE(DESTROY_QP);
	MLX5_COMMAND_STR_CASE(RST2INIT_QP);
	MLX5_COMMAND_STR_CASE(INIT2RTR_QP);
	MLX5_COMMAND_STR_CASE(RTR2RTS_QP);
	MLX5_COMMAND_STR_CASE(RTS2RTS_QP);
	MLX5_COMMAND_STR_CASE(SQERR2RTS_QP);
	MLX5_COMMAND_STR_CASE(2ERR_QP);
	MLX5_COMMAND_STR_CASE(2RST_QP);
	MLX5_COMMAND_STR_CASE(QUERY_QP);
	MLX5_COMMAND_STR_CASE(SQD_RTS_QP);
	MLX5_COMMAND_STR_CASE(MAD_IFC);
	MLX5_COMMAND_STR_CASE(INIT2INIT_QP);
	MLX5_COMMAND_STR_CASE(CREATE_PSV);
	MLX5_COMMAND_STR_CASE(DESTROY_PSV);
	MLX5_COMMAND_STR_CASE(CREATE_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ);
	MLX5_COMMAND_STR_CASE(CREATE_DCT);
	MLX5_COMMAND_STR_CASE(SET_DC_CNAK_TRACE);
	MLX5_COMMAND_STR_CASE(DESTROY_DCT);
	MLX5_COMMAND_STR_CASE(DRAIN_DCT);
	MLX5_COMMAND_STR_CASE(QUERY_DCT);
	MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE);
	MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID);
	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY);
	MLX5_COMMAND_STR_CASE(QUERY_VNIC_ENV);
	MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER);
	MLX5_COMMAND_STR_CASE(SET_WOL_ROL);
	MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL);
	MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
	MLX5_COMMAND_STR_CASE(ALLOC_PD);
	MLX5_COMMAND_STR_CASE(DEALLOC_PD);
	MLX5_COMMAND_STR_CASE(ALLOC_UAR);
	MLX5_COMMAND_STR_CASE(DEALLOC_UAR);
	MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION);
	MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG);
	MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG);
	MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG);
	MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX);
	MLX5_COMMAND_STR_CASE(NOP);
	MLX5_COMMAND_STR_CASE(ALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(DEALLOC_XRCD);
	MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS);
	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS);
	MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT);
	MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(CREATE_RMP);
	MLX5_COMMAND_STR_CASE(MODIFY_RMP);
	MLX5_COMMAND_STR_CASE(DESTROY_RMP);
	MLX5_COMMAND_STR_CASE(QUERY_RMP);
	MLX5_COMMAND_STR_CASE(CREATE_RQT);
	MLX5_COMMAND_STR_CASE(MODIFY_RQT);
	MLX5_COMMAND_STR_CASE(DESTROY_RQT);
	MLX5_COMMAND_STR_CASE(QUERY_RQT);
	MLX5_COMMAND_STR_CASE(ACCESS_REG);
	MLX5_COMMAND_STR_CASE(CREATE_SQ);
	MLX5_COMMAND_STR_CASE(MODIFY_SQ);
	MLX5_COMMAND_STR_CASE(DESTROY_SQ);
	MLX5_COMMAND_STR_CASE(QUERY_SQ);
	MLX5_COMMAND_STR_CASE(CREATE_RQ);
	MLX5_COMMAND_STR_CASE(MODIFY_RQ);
	MLX5_COMMAND_STR_CASE(DESTROY_RQ);
	MLX5_COMMAND_STR_CASE(QUERY_RQ);
	MLX5_COMMAND_STR_CASE(CREATE_TIR);
	MLX5_COMMAND_STR_CASE(MODIFY_TIR);
	MLX5_COMMAND_STR_CASE(DESTROY_TIR);
	MLX5_COMMAND_STR_CASE(QUERY_TIR);
	MLX5_COMMAND_STR_CASE(CREATE_TIS);
	MLX5_COMMAND_STR_CASE(MODIFY_TIS);
	MLX5_COMMAND_STR_CASE(DESTROY_TIS);
	MLX5_COMMAND_STR_CASE(QUERY_TIS);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE);
	MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP);
	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY);
	MLX5_COMMAND_STR_CASE(SET_DIAGNOSTICS);
	MLX5_COMMAND_STR_CASE(QUERY_DIAGNOSTICS);
	MLX5_COMMAND_STR_CASE(CREATE_GENERAL_OBJ);
	MLX5_COMMAND_STR_CASE(MODIFY_GENERAL_OBJ);
	MLX5_COMMAND_STR_CASE(QUERY_GENERAL_OBJ);
	MLX5_COMMAND_STR_CASE(DESTROY_GENERAL_OBJ);
	default: return "unknown command opcode";
	}
}

static const char *cmd_status_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:
		return "OK";
	case MLX5_CMD_STAT_INT_ERR:
		return "internal error";
	case MLX5_CMD_STAT_BAD_OP_ERR:
		return "bad operation";
	case MLX5_CMD_STAT_BAD_PARAM_ERR:
		return "bad parameter";
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
		return "bad system state";
	case MLX5_CMD_STAT_BAD_RES_ERR:
		return "bad resource";
	case MLX5_CMD_STAT_RES_BUSY:
		return "resource busy";
	case MLX5_CMD_STAT_LIM_ERR:
		return "limits exceeded";
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
		return "bad resource state";
	case MLX5_CMD_STAT_IX_ERR:
		return "bad index";
	case MLX5_CMD_STAT_NO_RES_ERR:
		return "no resources";
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
		return "bad input length";
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
		return "bad output length";
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
		return "bad QP state";
	case MLX5_CMD_STAT_BAD_PKT_ERR:
		return "bad packet (discarded)";
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
		return "bad size too many outstanding CQEs";
	default:
		return "unknown status";
	}
}

static int cmd_status_to_err_helper(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK: return 0;
	case MLX5_CMD_STAT_INT_ERR: return -EIO;
	case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL;
	case MLX5_CMD_STAT_BAD_PARAM_ERR: return -EINVAL;
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO;
	case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL;
	case MLX5_CMD_STAT_RES_BUSY: return -EBUSY;
	case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM;
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL;
	case MLX5_CMD_STAT_IX_ERR: return -EINVAL;
	case MLX5_CMD_STAT_NO_RES_ERR: return -EAGAIN;
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR: return -EIO;
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR: return -EIO;
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR: return -EINVAL;
	case MLX5_CMD_STAT_BAD_PKT_ERR: return -EINVAL;
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR: return -EINVAL;
	default: return -EIO;
	}
}

void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome)
{
	*status = MLX5_GET(mbox_out, out, status);
	*syndrome = MLX5_GET(mbox_out, out, syndrome);
}

static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out)
{
	u32 syndrome;
	u8 status;
	u16 opcode;
	u16 op_mod;

	mlx5_cmd_mbox_status(out, &status, &syndrome);
	if (!status)
		return 0;

	opcode = MLX5_GET(mbox_in, in, opcode);
	op_mod = MLX5_GET(mbox_in, in, op_mod);

	mlx5_core_err(dev,
	    "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
	    mlx5_command_str(opcode),
	    opcode, op_mod,
	    cmd_status_str(status),
	    status,
	    syndrome);

	return cmd_status_to_err_helper(status);
}

static void dump_command(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_work_ent *ent, int input)
{
	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
	u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode);
	size_t i;
	int data_only;
	int offset = 0;
	int msg_len = input ? ent->uin_size : ent->uout_size;
	int dump_len;

	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

	if (data_only)
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
		    "dump command data %s(0x%x) %s\n",
		    mlx5_command_str(op), op,
		    input ? "INPUT" : "OUTPUT");
	else
		mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
		    mlx5_command_str(op), op,
		    input ? "INPUT" : "OUTPUT");

	if (data_only) {
		if (input) {
			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
			offset += sizeof(ent->lay->in);
		} else {
			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
			offset += sizeof(ent->lay->out);
		}
	} else {
		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
		offset += sizeof(*ent->lay);
	}

	for (i = 0; i != (msg->numpages * MLX5_NUM_CMDS_IN_ADAPTER_PAGE); i++) {
		struct mlx5_cmd_prot_block *block;

		block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE);

		if (data_only) {
			if (offset >= msg_len)
				break;
			dump_len = min_t(int,
			    MLX5_CMD_DATA_BLOCK_SIZE, msg_len - offset);

			dump_buf(block->data, dump_len, 1, offset);
			offset += MLX5_CMD_DATA_BLOCK_SIZE;
		} else {
			mlx5_core_dbg(dev, "command block:\n");
			dump_buf(block, sizeof(*block), 0, offset);
			offset += sizeof(*block);
		}

		/* check for last block */
		if (block->next == 0)
			break;
	}

	if (data_only)
		pr_debug("\n");
}

static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
	return MLX5_GET(mbox_in, in->first.data, opcode);
}

static void cb_timeout_handler(struct work_struct *work)
{
	struct delayed_work *dwork = container_of(work, struct delayed_work,
						  work);
	struct mlx5_cmd_work_ent *ent = container_of(dwork,
						     struct mlx5_cmd_work_ent,
						     cb_timeout_work);
	struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
						 cmd);

	ent->ret = -ETIMEDOUT;
	mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
	    mlx5_command_str(msg_to_opcode(ent->in)),
	    msg_to_opcode(ent->in));
	mlx5_cmd_comp_handler(dev, 1UL << ent->idx, MLX5_CMD_MODE_EVENTS);
}

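/*
 * Finish a command: if the device is no longer up, fake an
 * internal-error result; otherwise record statistics and propagate the
 * result to the caller, either by invoking the asynchronous callback
 * or by completing the synchronous waiter, then release the semaphore
 * slot.
 */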
static void complete_command(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev,
						 cmd);
	mlx5_cmd_cbk_t callback;
	void *context;

	s64 ds;
	struct mlx5_cmd_stats *stats;
	unsigned long flags;
	int err;
	struct semaphore *sem;

	if (ent->page_queue)
		sem = &cmd->pages_sem;
	else
		sem = &cmd->sem;

	if (dev->state != MLX5_DEVICE_STATE_UP) {
		u8 status = 0;
		u32 drv_synd;

		ent->ret = mlx5_internal_err_ret_value(dev, msg_to_opcode(ent->in), &drv_synd, &status);
		MLX5_SET(mbox_out, ent->out, status, status);
		MLX5_SET(mbox_out, ent->out, syndrome, drv_synd);
	}

	if (ent->callback) {
		ds = ent->ts2 - ent->ts1;
		if (ent->op < ARRAY_SIZE(cmd->stats)) {
			stats = &cmd->stats[ent->op];
			spin_lock_irqsave(&stats->lock, flags);
			stats->sum += ds;
			++stats->n;
			spin_unlock_irqrestore(&stats->lock, flags);
		}

		callback = ent->callback;
		context = ent->context;
		err = ent->ret;
		if (!err) {
			err = mlx5_copy_from_msg(ent->uout,
						 ent->out,
						 ent->uout_size);
			err = err ? err : mlx5_cmd_check(dev,
							 ent->in->first.data,
							 ent->uout);
		}

		mlx5_free_cmd_msg(dev, ent->out);
		free_msg(dev, ent->in);

		err = err ? err : ent->status;
		free_cmd(ent);
		callback(err, context);
	} else {
		complete(&ent->done);
	}
	up(sem);
}

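/*
 * Build the hardware descriptor for an entry and submit it: copy the
 * first 16 input bytes inline, point the descriptor at the input and
 * output mailbox chains, sign everything and ring the doorbell. In
 * polling mode the completion is reaped here as well.
 */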
static void cmd_work_handler(struct work_struct *work)
{
	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
	unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd_layout *lay;
	struct semaphore *sem;
	bool poll_cmd = ent->polling;

	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
	down(sem);

	if (alloc_ent(ent) < 0) {
		complete_command(ent);
		return;
	}

	ent->token = alloc_token(cmd);
	lay = get_inst(cmd, ent->idx);
	ent->lay = lay;
	memset(lay, 0, sizeof(*lay));
	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
	ent->op = be32_to_cpu(lay->in[0]) >> 16;
	if (ent->in->numpages != 0)
		lay->in_ptr = cpu_to_be64(mlx5_fwp_get_dma(ent->in, 0));
	if (ent->out->numpages != 0)
		lay->out_ptr = cpu_to_be64(mlx5_fwp_get_dma(ent->out, 0));
	lay->inlen = cpu_to_be32(ent->uin_size);
	lay->outlen = cpu_to_be32(ent->uout_size);
	lay->type = MLX5_PCI_CMD_XPORT;
	lay->token = ent->token;
	lay->status_own = CMD_OWNER_HW;
	set_signature(ent, !cmd->checksum_disabled);
	dump_command(dev, ent, 1);
	ent->ts1 = ktime_get_ns();
	ent->busy = 0;
	if (ent->callback)
		schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);

	/* ring doorbell after the descriptor is valid */
	mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
	/* make sure data is written to RAM */
	mlx5_fwp_flush(cmd->cmd_page);
	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
	mmiowb();

	/* if not in polling don't use ent after this point */
	if (poll_cmd) {
		poll_timeout(ent);
		/* make sure we read the descriptor after ownership is SW */
		mlx5_cmd_comp_handler(dev, 1U << ent->idx, MLX5_CMD_MODE_POLLING);
	}
}

static const char *deliv_status_to_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:
		return "no errors";
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
		return "signature error";
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return "token error";
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
		return "bad block number";
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
		return "output pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return "input pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return "firmware internal error";
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
		return "command input length error";
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
		return "command output length error";
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return "reserved fields not cleared";
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
		return "bad command descriptor type";
	default:
		return "unknown status code";
	}
}

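/*
 * Wait for a command posted in event mode to complete; commands being
 * polled wait without a timeout since poll_timeout() already bounds
 * them. A timeout marks the entry -ETIMEDOUT and forces completion
 * through mlx5_cmd_comp_handler().
 */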
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
	int timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	int err;

	if (ent->polling) {
		wait_for_completion(&ent->done);
	} else if (!wait_for_completion_timeout(&ent->done, timeout)) {
		ent->ret = -ETIMEDOUT;
		mlx5_cmd_comp_handler(dev, 1UL << ent->idx, MLX5_CMD_MODE_EVENTS);
	}

	err = ent->ret;

	if (err == -ETIMEDOUT) {
		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
		    mlx5_command_str(msg_to_opcode(ent->in)),
		    msg_to_opcode(ent->in));
	}
	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
	    err, deliv_status_to_str(ent->status), ent->status);

	return err;
}

/* Notes:
 * 1. Callback functions may not sleep
 * 2. Page queue commands do not support asynchronous completion
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
			   int uin_size,
			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
			   mlx5_cmd_cbk_t callback,
			   void *context, int page_queue, u8 *status,
			   bool force_polling)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	struct mlx5_cmd_stats *stats;
	int err = 0;
	s64 ds;
	u16 op;

	if (callback && page_queue)
		return -EINVAL;

	ent = alloc_cmd(cmd, in, uin_size, out, uout, uout_size, callback,
			context, page_queue);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	ent->polling = force_polling || (cmd->mode == MLX5_CMD_MODE_POLLING);

	if (!callback)
		init_completion(&ent->done);

	INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
	INIT_WORK(&ent->work, cmd_work_handler);
	if (page_queue) {
		cmd_work_handler(&ent->work);
	} else if (!queue_work(dev->priv.health.wq_cmd, &ent->work)) {
		mlx5_core_warn(dev, "failed to queue work\n");
		err = -ENOMEM;
		goto out_free;
	}

	if (callback)
		goto out;

	err = wait_func(dev, ent);
	if (err == -ETIMEDOUT)
		goto out;

	ds = ent->ts2 - ent->ts1;
	op = MLX5_GET(mbox_in, in->first.data, opcode);
	if (op < ARRAY_SIZE(cmd->stats)) {
		stats = &cmd->stats[op];
		spin_lock_irq(&stats->lock);
		stats->sum += ds;
		++stats->n;
		spin_unlock_irq(&stats->lock);
	}
	mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
	    "fw exec time for %s is %lld nsec\n",
	    mlx5_command_str(op), (long long)ds);
	*status = ent->status;
	free_cmd(ent);

	return err;

out_free:
	free_cmd(ent);
out:
	return err;
}

static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, size_t size)
{
	size_t delta;
	size_t i;

	if (to == NULL || from == NULL)
		return (-ENOMEM);

	delta = min_t(size_t, size, sizeof(to->first.data));
	memcpy(to->first.data, from, delta);
	from = (char *)from + delta;
	size -= delta;

	for (i = 0; size != 0; i++) {
		struct mlx5_cmd_prot_block *block;

		block = mlx5_fwp_get_virt(to, i * MLX5_CMD_MBOX_SIZE);

		delta = min_t(size_t, size, MLX5_CMD_DATA_BLOCK_SIZE);
		memcpy(block->data, from, delta);
		from = (char *)from + delta;
		size -= delta;
	}
	return (0);
}

static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
{
	size_t delta;
	size_t i;

	if (to == NULL || from == NULL)
		return (-ENOMEM);

	delta = min_t(size_t, size, sizeof(from->first.data));
	memcpy(to, from->first.data, delta);
	to = (char *)to + delta;
	size -= delta;

	for (i = 0; size != 0; i++) {
		struct mlx5_cmd_prot_block *block;

		block = mlx5_fwp_get_virt(from, i * MLX5_CMD_MBOX_SIZE);

		delta = min_t(size_t, size, MLX5_CMD_DATA_BLOCK_SIZE);
		memcpy(to, block->data, delta);
		to = (char *)to + delta;
		size -= delta;
	}
	return (0);
}

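/*
 * Allocate a command message large enough for "size" bytes: the first
 * sizeof(msg->first.data) bytes live inline in the descriptor, the
 * remainder is split into MLX5_CMD_DATA_BLOCK_SIZE blocks chained
 * through their "next" DMA pointers.
 */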
static struct mlx5_cmd_msg *
mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev, gfp_t flags, size_t size)
{
	struct mlx5_cmd_msg *msg;
	size_t blen;
	size_t n;
	size_t i;

	blen = size - min_t(size_t, sizeof(msg->first.data), size);
	n = howmany(blen, MLX5_CMD_DATA_BLOCK_SIZE);

	msg = mlx5_fwp_alloc(dev, flags, howmany(n, MLX5_NUM_CMDS_IN_ADAPTER_PAGE));
	if (msg == NULL)
		return (ERR_PTR(-ENOMEM));

	for (i = 0; i != n; i++) {
		struct mlx5_cmd_prot_block *block;

		block = mlx5_fwp_get_virt(msg, i * MLX5_CMD_MBOX_SIZE);

		if (i != (n - 1)) {
			memset(block, 0, MLX5_CMD_MBOX_SIZE);

			u64 dma = mlx5_fwp_get_dma(msg, (i + 1) * MLX5_CMD_MBOX_SIZE);
			block->next = cpu_to_be64(dma);
		} else {
			/* Zero the rest of the page to satisfy KMSAN. */
			memset(block, 0, MLX5_ADAPTER_PAGE_SIZE -
			    (i % MLX5_NUM_CMDS_IN_ADAPTER_PAGE) *
			    MLX5_CMD_MBOX_SIZE);
		}
		block->block_num = cpu_to_be32(i);
	}

	/* make sure initial data is written to RAM */
	mlx5_fwp_flush(msg);

	return (msg);
}

static void
mlx5_free_cmd_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{

	mlx5_fwp_free(msg);
}

static void clean_debug_files(struct mlx5_core_dev *dev)
{
}

static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	if (cmd->mode == mode)
		return;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);

	down(&cmd->pages_sem);
	cmd->mode = mode;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}

void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
	mlx5_cmd_change_mod(dev, MLX5_CMD_MODE_EVENTS);
}

void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
	mlx5_cmd_change_mod(dev, MLX5_CMD_MODE_POLLING);
}

static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{
	unsigned long flags;

	if (msg->cache) {
		spin_lock_irqsave(&msg->cache->lock, flags);
		list_add_tail(&msg->list, &msg->cache->head);
		spin_unlock_irqrestore(&msg->cache->lock, flags);
	} else {
		mlx5_free_cmd_msg(dev, msg);
	}
}

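/*
 * Completion handler shared by EQ events, polling and the timeout
 * path. The lower 32 bits of "vector_flags" select the command slots
 * to complete; MLX5_TRIGGERED_CMD_COMP in the upper bits marks
 * completions forced by the driver, which are reported as aborted.
 */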
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vector_flags,
			   enum mlx5_cmd_mode cmd_mode)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	bool triggered = (vector_flags & MLX5_TRIGGERED_CMD_COMP) ? 1 : 0;
	u32 vector = vector_flags; /* discard flags in the upper dword */
	int i;

	/* make sure data gets read from RAM */
	mlx5_fwp_invalidate(cmd->cmd_page);

	while (vector != 0) {
		i = ffs(vector) - 1;
		vector &= ~(1U << i);
		/* check command mode */
		if (cmd->ent_mode[i] != cmd_mode)
			continue;
		ent = cmd->ent_arr[i];
		/* check if command was already handled */
		if (ent == NULL)
			continue;
		if (ent->callback)
			cancel_delayed_work(&ent->cb_timeout_work);
		ent->ts2 = ktime_get_ns();
		memcpy(ent->out->first.data, ent->lay->out,
		       sizeof(ent->lay->out));
		/* make sure data gets read from RAM */
		mlx5_fwp_invalidate(ent->out);
		dump_command(dev, ent, 0);
		if (!ent->ret) {
			if (!cmd->checksum_disabled)
				ent->ret = verify_signature(ent);
			else
				ent->ret = 0;

			if (triggered)
				ent->status = MLX5_DRIVER_STATUS_ABORTED;
			else
				ent->status = ent->lay->status_own >> 1;

			mlx5_core_dbg(dev,
			    "FW command ret 0x%x, status %s(0x%x)\n",
			    ent->ret,
			    deliv_status_to_str(ent->status),
			    ent->status);
		}
		free_ent(cmd, ent->idx);
		complete_command(ent);
	}
}
EXPORT_SYMBOL(mlx5_cmd_comp_handler);

static int status_to_err(u8 status)
{
	return status ? -EIO : 0; /* TBD more meaningful codes */
}

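/*
 * Grab an input message from the medium or large cache when one of a
 * suitable size is available, otherwise fall back to a fresh
 * allocation.
 */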
static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
				      gfp_t gfp)
{
	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
	struct mlx5_cmd *cmd = &dev->cmd;
	struct cache_ent *ent = NULL;

	if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE)
		ent = &cmd->cache.large;
	else if (in_size > 16 && in_size <= MED_LIST_SIZE)
		ent = &cmd->cache.med;

	if (ent) {
		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			msg = list_entry(ent->head.next, struct mlx5_cmd_msg,
					 list);
			list_del(&msg->list);
		}
		spin_unlock_irq(&ent->lock);
	}

	if (IS_ERR(msg))
		msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);

	return msg;
}

static int is_manage_pages(void *in)
{
	return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}

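/*
 * Common entry point for command execution: wraps the caller's buffers
 * in command messages, dispatches through mlx5_cmd_invoke() and copies
 * the result back for synchronous callers. Commands fail immediately
 * with a driver-generated syndrome when the device is in internal
 * error.
 */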
static int cmd_exec_helper(struct mlx5_core_dev *dev,
			   void *in, int in_size,
			   void *out, int out_size,
			   mlx5_cmd_cbk_t callback, void *context,
			   bool force_polling)
{
	struct mlx5_cmd_msg *inb;
	struct mlx5_cmd_msg *outb;
	int pages_queue;
	const gfp_t gfp = GFP_KERNEL;
	int err;
	u8 status = 0;
	u32 drv_synd;

	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		u16 opcode = MLX5_GET(mbox_in, in, opcode);
		err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status);
		MLX5_SET(mbox_out, out, status, status);
		MLX5_SET(mbox_out, out, syndrome, drv_synd);
		return err;
	}

	pages_queue = is_manage_pages(in);

	inb = alloc_msg(dev, in_size, gfp);
	if (IS_ERR(inb)) {
		err = PTR_ERR(inb);
		return err;
	}

	err = mlx5_copy_to_msg(inb, in, in_size);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto out_in;
	}

	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
	if (IS_ERR(outb)) {
		err = PTR_ERR(outb);
		goto out_in;
	}

	err = mlx5_cmd_invoke(dev, inb, in_size, outb, out, out_size, callback,
			      context, pages_queue, &status, force_polling);
	if (err) {
		if (err == -ETIMEDOUT)
			return err;
		goto out_out;
	}

	mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
	if (status) {
		err = status_to_err(status);
		goto out_out;
	}

	if (callback)
		return err;

	err = mlx5_copy_from_msg(out, outb, out_size);

out_out:
	mlx5_free_cmd_msg(dev, outb);

out_in:
	free_msg(dev, inb);
	return err;
}

int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size)
{
	int err;

	err = cmd_exec_helper(dev, in, in_size, out, out_size, NULL, NULL, false);
	return err ? : mlx5_cmd_check(dev, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec);

void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
			     struct mlx5_async_ctx *ctx)
{
	ctx->dev = dev;
	/* Starts at 1 to avoid doing wake_up if we are not cleaning up */
	atomic_set(&ctx->num_inflight, 1);
	init_waitqueue_head(&ctx->wait);
}
EXPORT_SYMBOL(mlx5_cmd_init_async_ctx);

/**
 * mlx5_cmd_cleanup_async_ctx - Clean up an async_ctx
 * @ctx: The ctx to clean
 *
 * Upon return all callbacks given to mlx5_cmd_exec_cb() have been called. The
 * caller must ensure that mlx5_cmd_exec_cb() is not called during or after
 * the call to mlx5_cmd_cleanup_async_ctx().
 */
void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx)
{
	atomic_dec(&ctx->num_inflight);
	wait_event(ctx->wait, atomic_read(&ctx->num_inflight) == 0);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx);

static void mlx5_cmd_exec_cb_handler(int status, void *_work)
{
	struct mlx5_async_work *work = _work;
	struct mlx5_async_ctx *ctx = work->ctx;

	work->user_callback(status, work);
	if (atomic_dec_and_test(&ctx->num_inflight))
		wake_up(&ctx->wait);
}

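/*
 * Issue a command asynchronously against an async context. The context
 * counts in-flight commands so that mlx5_cmd_cleanup_async_ctx() can
 * wait for all callbacks to finish.
 *
 * A minimal usage sketch, assuming a caller-supplied my_callback of
 * type mlx5_async_cbk_t (error handling omitted):
 *
 *	struct mlx5_async_ctx ctx;
 *	struct mlx5_async_work work;
 *
 *	mlx5_cmd_init_async_ctx(dev, &ctx);
 *	mlx5_cmd_exec_cb(&ctx, in, sizeof(in), out, sizeof(out),
 *	    my_callback, &work);
 *	...
 *	mlx5_cmd_cleanup_async_ctx(&ctx);
 */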
int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
		     void *out, int out_size, mlx5_async_cbk_t callback,
		     struct mlx5_async_work *work)
{
	int ret;

	work->ctx = ctx;
	work->user_callback = callback;
	if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
		return -EIO;
	ret = cmd_exec_helper(ctx->dev, in, in_size, out, out_size,
			      mlx5_cmd_exec_cb_handler, work, false);
	if (ret && atomic_dec_and_test(&ctx->num_inflight))
		wake_up(&ctx->wait);

	return ret;
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);

int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
			  void *out, int out_size)
{
	int err;

	err = cmd_exec_helper(dev, in, in_size, out, out_size, NULL, NULL, true);
	return err ? : mlx5_cmd_check(dev, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec_polling);

static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	struct mlx5_cmd_msg *n;

	list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}

	list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}
}

static int create_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	int err;
	int i;

	spin_lock_init(&cmd->cache.large.lock);
	INIT_LIST_HEAD(&cmd->cache.large.head);
	spin_lock_init(&cmd->cache.med.lock);
	INIT_LIST_HEAD(&cmd->cache.med.head);

	for (i = 0; i < NUM_LONG_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.large;
		list_add_tail(&msg->list, &cmd->cache.large.head);
	}

	for (i = 0; i < NUM_MED_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.med;
		list_add_tail(&msg->list, &cmd->cache.med.head);
	}

	return 0;

ex_err:
	destroy_msg_cache(dev);
	return err;
}

static int
alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	int err;

	sx_init(&cmd->dma_sx, "MLX5-DMA-SX");
	mtx_init(&cmd->dma_mtx, "MLX5-DMA-MTX", NULL, MTX_DEF);
	cv_init(&cmd->dma_cv, "MLX5-DMA-CV");

	/*
	 * Create global DMA descriptor tag for allocating
	 * 4K firmware pages:
	 */
	err = -bus_dma_tag_create(
	    bus_get_dma_tag(dev->pdev->dev.bsddev),
	    MLX5_ADAPTER_PAGE_SIZE,	/* alignment */
	    0,				/* no boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MLX5_ADAPTER_PAGE_SIZE,	/* maxsize */
	    1,				/* nsegments */
	    MLX5_ADAPTER_PAGE_SIZE,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &cmd->dma_tag);
	if (err != 0)
		goto failure_destroy_sx;

	cmd->cmd_page = mlx5_fwp_alloc(dev, GFP_KERNEL, 1);
	if (cmd->cmd_page == NULL) {
		err = -ENOMEM;
		goto failure_alloc_page;
	}
	cmd->dma = mlx5_fwp_get_dma(cmd->cmd_page, 0);
	cmd->cmd_buf = mlx5_fwp_get_virt(cmd->cmd_page, 0);
	memset(cmd->cmd_buf, 0, MLX5_ADAPTER_PAGE_SIZE);
	return (0);

failure_alloc_page:
	bus_dma_tag_destroy(cmd->dma_tag);

failure_destroy_sx:
	cv_destroy(&cmd->dma_cv);
	mtx_destroy(&cmd->dma_mtx);
	sx_destroy(&cmd->dma_sx);
	return (err);
}

static void
free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{

	mlx5_fwp_free(cmd->cmd_page);
	bus_dma_tag_destroy(cmd->dma_tag);
	cv_destroy(&cmd->dma_cv);
	mtx_destroy(&cmd->dma_mtx);
	sx_destroy(&cmd->dma_sx);
}

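/*
 * Initialize the command interface: validate the firmware's command
 * interface revision, allocate the command queue page, read the queue
 * size and stride from the initialization segment, and hand the queue
 * address to firmware. The interface starts out in polling mode.
 */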
int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	u32 cmd_h, cmd_l;
	u16 cmd_if_rev;
	int err;
	int i;

	memset(cmd, 0, sizeof(*cmd));
	cmd_if_rev = cmdif_rev_get(dev);
	if (cmd_if_rev != CMD_IF_REV) {
		mlx5_core_err(dev,
		    "Driver cmdif rev(%d) differs from firmware's(%d)\n",
		    CMD_IF_REV, cmd_if_rev);
		return -EINVAL;
	}

	err = alloc_cmd_page(dev, cmd);
	if (err)
		goto err_free_pool;

	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->log_sz = cmd_l >> 4 & 0xf;
	cmd->log_stride = cmd_l & 0xf;
	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
		mlx5_core_err(dev,
		    "firmware reports too many outstanding commands %d\n",
		    1 << cmd->log_sz);
		err = -EINVAL;
		goto err_free_page;
	}

	if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
		mlx5_core_err(dev,
		    "command queue size overflow\n");
		err = -EINVAL;
		goto err_free_page;
	}

	cmd->checksum_disabled = 1;
	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
	cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;

	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
	if (cmd->cmdif_rev > CMD_IF_REV) {
		mlx5_core_err(dev,
		    "driver does not support command interface version. driver %d, firmware %d\n",
		    CMD_IF_REV, cmd->cmdif_rev);
		err = -ENOTSUPP;
		goto err_free_page;
	}

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);
	for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
		spin_lock_init(&cmd->stats[i].lock);

	sema_init(&cmd->sem, cmd->max_reg_cmds);
	sema_init(&cmd->pages_sem, 1);

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	if (cmd_l & 0xfff) {
		mlx5_core_err(dev, "invalid command queue address\n");
		err = -ENOMEM;
		goto err_free_page;
	}

	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

	/* Make sure firmware sees the complete address before we proceed */
	wmb();

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	cmd->mode = MLX5_CMD_MODE_POLLING;

	err = create_msg_cache(dev);
	if (err) {
		mlx5_core_err(dev, "failed to create command cache\n");
		goto err_free_page;
	}
	return 0;

err_free_page:
	free_cmd_page(dev, cmd);

err_free_pool:
	return err;
}
EXPORT_SYMBOL(mlx5_cmd_init);

void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	clean_debug_files(dev);
	flush_workqueue(dev->priv.health.wq_cmd);
	destroy_msg_cache(dev);
	free_cmd_page(dev, cmd);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);

int mlx5_cmd_query_cong_counter(struct mlx5_core_dev *dev,
				bool reset, void *out, int out_size)
{
	u32 in[MLX5_ST_SZ_DW(query_cong_statistics_in)] = { };

	MLX5_SET(query_cong_statistics_in, in, opcode,
		 MLX5_CMD_OP_QUERY_CONG_STATISTICS);
	MLX5_SET(query_cong_statistics_in, in, clear, reset);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
}
EXPORT_SYMBOL(mlx5_cmd_query_cong_counter);

int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
			       void *out, int out_size)
{
	u32 in[MLX5_ST_SZ_DW(query_cong_params_in)] = { };

	MLX5_SET(query_cong_params_in, in, opcode,
		 MLX5_CMD_OP_QUERY_CONG_PARAMS);
	MLX5_SET(query_cong_params_in, in, cong_protocol, cong_point);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
}
EXPORT_SYMBOL(mlx5_cmd_query_cong_params);

int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *dev,
				void *in, int in_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_cong_params_out)] = { };

	return mlx5_cmd_exec(dev, in, in_size, out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_cmd_modify_cong_params);

int mlx5_cmd_query_cong_status(struct mlx5_core_dev *dev, int cong_point,
			       int prio, void *out, int out_size)
{
	u32 in[MLX5_ST_SZ_DW(query_cong_status_in)] = { };

	MLX5_SET(query_cong_status_in, in, opcode,
		 MLX5_CMD_OP_QUERY_CONG_STATUS);
	MLX5_SET(query_cong_status_in, in, priority, prio);
	MLX5_SET(query_cong_status_in, in, cong_protocol, cong_point);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
}
EXPORT_SYMBOL(mlx5_cmd_query_cong_status);

int mlx5_cmd_modify_cong_status(struct mlx5_core_dev *dev,
				void *in, int in_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_cong_status_out)] = { };

	return mlx5_cmd_exec(dev, in, in_size, out, sizeof(out));
}
EXPORT_SYMBOL(mlx5_cmd_modify_cong_status);