xref: /freebsd/sys/dev/mlx5/mlx5_core/mlx5_fw.c (revision 95ee2897)
/*-
 * Copyright (c) 2013-2020, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <dev/mlx5/driver.h>
#include <linux/module.h>
#include <dev/mlx5/mlx5_core/mlx5_core.h>

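/*
 * Issue the QUERY_ADAPTER command.  "out" must hold at least "outlen"
 * bytes laid out as query_adapter_out.
 */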
static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out,
				  int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_adapter_in)];
	int err;

	memset(in, 0, sizeof(in));

	MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
	return err;
}

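/*
 * Cache the adapter's board ID (PSID) in dev->board_id.  Under the
 * LinuxKPI, kzalloc(GFP_KERNEL) may sleep and is not expected to fail,
 * so the allocation is not NULL-checked here.
 */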
int mlx5_query_board_id(struct mlx5_core_dev *dev)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
	int err;

	out = kzalloc(outlen, GFP_KERNEL);

	err = mlx5_cmd_query_adapter(dev, out, outlen);
	if (err)
		goto out_out;

	memcpy(dev->board_id,
	       MLX5_ADDR_OF(query_adapter_out, out,
			    query_adapter_struct.vsd_contd_psid),
	       MLX5_FLD_SZ_BYTES(query_adapter_out,
				 query_adapter_struct.vsd_contd_psid));

out_out:
	kfree(out);

	return err;
}

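/*
 * Report the IEEE OUI of the device vendor.  A caller sketch
 * (hypothetical, for illustration only):
 *
 *	u32 vid;
 *
 *	if (mlx5_core_query_vendor_id(mdev, &vid) == 0)
 *		printf("vendor OUI 0x%06x\n", vid);
 */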
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id)
{
	u32 *out;
	int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
	int err;

	out = kzalloc(outlen, GFP_KERNEL);

	err = mlx5_cmd_query_adapter(mdev, out, outlen);
	if (err)
		goto out_out;

	*vendor_id = MLX5_GET(query_adapter_out, out,
			      query_adapter_struct.ieee_vendor_id);

out_out:
	kfree(out);

	return err;
}
EXPORT_SYMBOL(mlx5_core_query_vendor_id);

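/*
 * Fetch the reserved lkey from QUERY_SPECIAL_CONTEXTS and cache it in
 * dev->special_contexts.
 */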
static int mlx5_core_query_special_contexts(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)];
	u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)];
	int err;

	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));

	MLX5_SET(query_special_contexts_in, in, opcode,
		 MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	dev->special_contexts.resd_lkey = MLX5_GET(query_special_contexts_out,
						   out, resd_lkey);

	return err;
}

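/*
 * Snapshot the QCAM, PCAM and MCAM capability-mask registers into
 * dev->caps so that MLX5_CAP_MCAM_REG() and friends can be answered
 * without further firmware round trips.
 */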
static int mlx5_get_qcam_reg(struct mlx5_core_dev *dev)
{
	return mlx5_query_qcam_reg(dev, dev->caps.qcam,
				   MLX5_QCAM_FEATURE_ENHANCED_FEATURES,
				   MLX5_QCAM_REGS_FIRST_128);
}

static int mlx5_get_pcam_reg(struct mlx5_core_dev *dev)
{
	return mlx5_query_pcam_reg(dev, dev->caps.pcam,
				   MLX5_PCAM_FEATURE_ENHANCED_FEATURES,
				   MLX5_PCAM_REGS_5000_TO_507F);
}

static int mlx5_get_mcam_reg(struct mlx5_core_dev *dev)
{
	return mlx5_query_mcam_reg(dev, dev->caps.mcam,
				   MLX5_MCAM_FEATURE_ENHANCED_FEATURES,
				   MLX5_MCAM_REGS_FIRST_128);
}

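/*
 * Query the general HCA capabilities first, then pull in every
 * dependent capability group the device advertises (offloads, ODP,
 * atomics, RoCE, flow tables, eswitch, QoS, TLS, ...), and finish by
 * caching the special contexts.
 */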
int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
{
	int err;

	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
	if (err)
		return err;

	if (MLX5_CAP_GEN(dev, eth_net_offloads)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, pg)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ODP);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, atomic)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, roce)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE);
		if (err)
			return err;
	}

	if ((MLX5_CAP_GEN(dev, port_type) ==
	    MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET &&
	    MLX5_CAP_GEN(dev, nic_flow_table)) ||
	    (MLX5_CAP_GEN(dev, port_type) == MLX5_CMD_HCA_CAP_PORT_TYPE_IB &&
	    MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, vport_group_manager)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, snapshot)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_SNAPSHOT);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_EOIB_OFFLOADS);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, debug)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_DEBUG);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, qos)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_QOS);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, qcam_reg)) {
		err = mlx5_get_qcam_reg(dev);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, mcam_reg)) {
		err = mlx5_get_mcam_reg(dev);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, pcam_reg)) {
		err = mlx5_get_pcam_reg(dev);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, tls_tx)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_TLS);
		if (err)
			return err;
	}

	if (MLX5_CAP_GEN(dev, event_cap)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_DEV_EVENT);
		if (err)
			return err;
	}

	err = mlx5_core_query_special_contexts(dev);
	if (err)
		return err;

	return 0;
}

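/* Ask the firmware to initialize the HCA (INIT_HCA). */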
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(init_hca_in)];
	u32 out[MLX5_ST_SZ_DW(init_hca_out)];

	memset(in, 0, sizeof(in));

	MLX5_SET(init_hca_in, in, opcode, MLX5_CMD_OP_INIT_HCA);

	memset(out, 0, sizeof(out));
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

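/* Graceful teardown: release all HCA resources held by the firmware. */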
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};

	MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

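/*
 * Forceful teardown: close the HCA even if resources are still in use.
 * Issued as a polling command since completion interrupts may no
 * longer be delivered at this point; returns -EOPNOTSUPP when the
 * firmware does not advertise force_teardown.
 */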
int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
{
	u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
	int force_state;
	int ret;

	if (!MLX5_CAP_GEN(dev, force_teardown)) {
		mlx5_core_dbg(dev, "force teardown is not supported in the firmware\n");
		return -EOPNOTSUPP;
	}

	MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
	MLX5_SET(teardown_hca_in, in, profile, MLX5_TEARDOWN_HCA_IN_PROFILE_FORCE_CLOSE);

	ret = mlx5_cmd_exec_polling(dev, in, sizeof(in), out, sizeof(out));
	if (ret)
		return ret;

	force_state = MLX5_GET(teardown_hca_out, out, state);
	if (force_state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) {
		mlx5_core_err(dev, "teardown with force mode failed\n");
		return -EIO;
	}

	return 0;
}

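/*
 * Fast teardown: ask the firmware to prepare for an abrupt shutdown,
 * then drive the NIC interface to the disabled state and poll up to
 * MLX5_FAST_TEARDOWN_WAIT_MS milliseconds for it to settle.
 */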
#define	MLX5_FAST_TEARDOWN_WAIT_MS 3000
int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
{
	int end, delay_ms = MLX5_FAST_TEARDOWN_WAIT_MS;
	u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {};
	u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {};
	int state;
	int ret;

	if (!MLX5_CAP_GEN(dev, fast_teardown)) {
		mlx5_core_dbg(dev, "fast teardown is not supported in the firmware\n");
		return -EOPNOTSUPP;
	}

	MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
	MLX5_SET(teardown_hca_in, in, profile,
		 MLX5_TEARDOWN_HCA_IN_PROFILE_PREPARE_FAST_TEARDOWN);

	ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (ret)
		return ret;

	state = MLX5_GET(teardown_hca_out, out, state);
	if (state == MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL) {
		mlx5_core_warn(dev, "teardown with fast mode failed\n");
		return -EIO;
	}

	mlx5_set_nic_state(dev, MLX5_NIC_IFC_DISABLED);

	/* Loop until the device state turns to disabled. */
	end = jiffies + msecs_to_jiffies(delay_ms);
	do {
		if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
			break;

		pause("W", 1);
	} while (!time_after(jiffies, end));

	if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
		mlx5_core_err(dev, "NIC IFC still %d after %dms.\n",
			mlx5_get_nic_state(dev), delay_ms);
		return -EIO;
	}
	return 0;
}

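/*
 * Enable or disable DC CNAK tracing, pointing the firmware at the
 * physical page "addr" where the trace is to be written.
 */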
int mlx5_core_set_dc_cnak_trace(struct mlx5_core_dev *dev, int enable,
				u64 addr)
{
	u32 in[MLX5_ST_SZ_DW(set_dc_cnak_trace_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(set_dc_cnak_trace_out)] = {0};
	__be64 be_addr;
	void *pas;

	MLX5_SET(set_dc_cnak_trace_in, in, opcode, MLX5_CMD_OP_SET_DC_CNAK_TRACE);
	MLX5_SET(set_dc_cnak_trace_in, in, enable, enable);
	pas = MLX5_ADDR_OF(set_dc_cnak_trace_in, in, pas);
	be_addr = cpu_to_be64(addr);
	memcpy(MLX5_ADDR_OF(cmd_pas, pas, pa_h), &be_addr, sizeof(be_addr));

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

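/*
 * Firmware update plumbing.  The generic mlxfw state machine drives a
 * flash through the management component registers: lock an update
 * handle (MCC), declare each component (MCC), stream its data in
 * blocks (MCDA), verify it (MCC), then activate the new image and
 * release the handle, cancelling on error.
 */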
enum mlxsw_reg_mcc_instruction {
	MLX5_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE = 0x01,
	MLX5_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE = 0x02,
	MLX5_REG_MCC_INSTRUCTION_UPDATE_COMPONENT = 0x03,
	MLX5_REG_MCC_INSTRUCTION_VERIFY_COMPONENT = 0x04,
	MLX5_REG_MCC_INSTRUCTION_ACTIVATE = 0x06,
	MLX5_REG_MCC_INSTRUCTION_CANCEL = 0x08,
};

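/* Program the MCC (Management Component Control) register. */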
static int mlx5_reg_mcc_set(struct mlx5_core_dev *dev,
			    enum mlxsw_reg_mcc_instruction instr,
			    u16 component_index, u32 update_handle,
			    u32 component_size)
{
	u32 out[MLX5_ST_SZ_DW(mcc_reg)];
	u32 in[MLX5_ST_SZ_DW(mcc_reg)];

	memset(in, 0, sizeof(in));

	MLX5_SET(mcc_reg, in, instruction, instr);
	MLX5_SET(mcc_reg, in, component_index, component_index);
	MLX5_SET(mcc_reg, in, update_handle, update_handle);
	MLX5_SET(mcc_reg, in, component_size, component_size);

	return mlx5_core_access_reg(dev, in, sizeof(in), out,
				    sizeof(out), MLX5_REG_MCC, 0, 1);
}

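/*
 * Read back the MCC register: the current update handle, last error
 * code and control (FSM) state.
 */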
static int mlx5_reg_mcc_query(struct mlx5_core_dev *dev,
			      u32 *update_handle, u8 *error_code,
			      u8 *control_state)
{
	u32 out[MLX5_ST_SZ_DW(mcc_reg)];
	u32 in[MLX5_ST_SZ_DW(mcc_reg)];
	int err;

	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));
	MLX5_SET(mcc_reg, in, update_handle, *update_handle);

	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
				   sizeof(out), MLX5_REG_MCC, 0, 0);
	if (err)
		goto out;

	*update_handle = MLX5_GET(mcc_reg, out, update_handle);
	*error_code = MLX5_GET(mcc_reg, out, error_code);
	*control_state = MLX5_GET(mcc_reg, out, control_state);

out:
	return err;
}

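/*
 * Write one block of component data through the MCDA (Management
 * Component Data Access) register.  "size" is expected to be a
 * multiple of four bytes; mlxfw issues suitably aligned blocks.
 */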
static int mlx5_reg_mcda_set(struct mlx5_core_dev *dev,
			     u32 update_handle,
			     u32 offset, u16 size,
			     u8 *data)
{
	int err, in_size = MLX5_ST_SZ_BYTES(mcda_reg) + size;
	u32 out[MLX5_ST_SZ_DW(mcda_reg)];
	int i, j, dw_size = size >> 2;
	__be32 data_element;
	u32 *in;

	in = kzalloc(in_size, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(mcda_reg, in, update_handle, update_handle);
	MLX5_SET(mcda_reg, in, offset, offset);
	MLX5_SET(mcda_reg, in, size, size);

	for (i = 0; i < dw_size; i++) {
		j = i * 4;
		data_element = htonl(*(u32 *)&data[j]);
		memcpy(MLX5_ADDR_OF(mcda_reg, in, data) + j, &data_element, 4);
	}

	err = mlx5_core_access_reg(dev, in, in_size, out,
				   sizeof(out), MLX5_REG_MCDA, 0, 1);
	kfree(in);
	return err;
}

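/*
 * Query the MCQI (Management Component Query Information) register for
 * a component's maximum size and the word size/maximum write size to
 * use with MCDA.  The capability payload follows the fixed mcqi_reg
 * header in the output, hence the dword offset.
 */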
static int mlx5_reg_mcqi_query(struct mlx5_core_dev *dev,
			       u16 component_index,
			       u32 *max_component_size,
			       u8 *log_mcda_word_size,
			       u16 *mcda_max_write_size)
{
	u32 out[MLX5_ST_SZ_DW(mcqi_reg) + MLX5_ST_SZ_DW(mcqi_cap)];
	int offset = MLX5_ST_SZ_DW(mcqi_reg);
	u32 in[MLX5_ST_SZ_DW(mcqi_reg)];
	int err;

	memset(in, 0, sizeof(in));
	memset(out, 0, sizeof(out));

	MLX5_SET(mcqi_reg, in, component_index, component_index);
	MLX5_SET(mcqi_reg, in, data_size, MLX5_ST_SZ_BYTES(mcqi_cap));

	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
				   sizeof(out), MLX5_REG_MCQI, 0, 0);
	if (err)
		goto out;

	*max_component_size = MLX5_GET(mcqi_cap, out + offset, max_component_size);
	*log_mcda_word_size = MLX5_GET(mcqi_cap, out + offset, log_mcda_word_size);
	*mcda_max_write_size = MLX5_GET(mcqi_cap, out + offset, mcda_max_write_size);

out:
	return err;
}

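/* Glue binding the generic mlxfw device to its mlx5_core_dev. */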
struct mlx5_mlxfw_dev {
	struct mlxfw_dev mlxfw_dev;
	struct mlx5_core_dev *mlx5_core_dev;
};

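/*
 * mlxfw_dev_ops callbacks: each one recovers the mlx5_core_dev via
 * container_of() and forwards to the MCQI/MCC/MCDA accessors above.
 */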
static int mlx5_component_query(struct mlxfw_dev *mlxfw_dev,
				u16 component_index, u32 *p_max_size,
				u8 *p_align_bits, u16 *p_max_write_size)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

	return mlx5_reg_mcqi_query(dev, component_index, p_max_size,
				   p_align_bits, p_max_write_size);
}

static int mlx5_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
	u8 control_state, error_code;
	int err;

	*fwhandle = 0;
	err = mlx5_reg_mcc_query(dev, fwhandle, &error_code, &control_state);
	if (err)
		return err;

	if (control_state != MLXFW_FSM_STATE_IDLE)
		return -EBUSY;

	return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
				0, *fwhandle, 0);
}

static int mlx5_fsm_component_update(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				     u16 component_index, u32 component_size)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

	return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
				component_index, fwhandle, component_size);
}

static int mlx5_fsm_block_download(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				   u8 *data, u16 size, u32 offset)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

	return mlx5_reg_mcda_set(dev, fwhandle, offset, size, data);
}

static int mlx5_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				     u16 component_index)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

	return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
				component_index, fwhandle, 0);
}

static int mlx5_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

	return mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_ACTIVATE, 0,
				fwhandle, 0);
}

static int mlx5_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
				enum mlxfw_fsm_state *fsm_state,
				enum mlxfw_fsm_state_err *fsm_state_err)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
	u8 control_state, error_code;
	int err;

	err = mlx5_reg_mcc_query(dev, &fwhandle, &error_code, &control_state);
	if (err)
		return err;

	*fsm_state = control_state;
	*fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
			       MLXFW_FSM_STATE_ERR_MAX);
	return 0;
}

static void mlx5_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

	mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_CANCEL, 0, fwhandle, 0);
}

static void mlx5_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
{
	struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
		container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
	struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;

	mlx5_reg_mcc_set(dev, MLX5_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
			 fwhandle, 0);
}

static const struct mlxfw_dev_ops mlx5_mlxfw_dev_ops = {
	.component_query	= mlx5_component_query,
	.fsm_lock		= mlx5_fsm_lock,
	.fsm_component_update	= mlx5_fsm_component_update,
	.fsm_block_download	= mlx5_fsm_block_download,
	.fsm_component_verify	= mlx5_fsm_component_verify,
	.fsm_activate		= mlx5_fsm_activate,
	.fsm_query_state	= mlx5_fsm_query_state,
	.fsm_cancel		= mlx5_fsm_cancel,
	.fsm_release		= mlx5_fsm_release
};

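/*
 * Flash a firmware image.  Requires MCQI/MCC/MCDA support in the
 * running firmware.  A minimal caller sketch (hypothetical; "image"
 * and "image_size" stand for a firmware image already in kernel
 * memory):
 *
 *	struct firmware fw = { .data = image, .size = image_size };
 *	int err = mlx5_firmware_flash(mdev, &fw);
 */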
int mlx5_firmware_flash(struct mlx5_core_dev *dev,
			const struct firmware *firmware)
{
	struct mlx5_mlxfw_dev mlx5_mlxfw_dev = {
		.mlxfw_dev = {
			.ops = &mlx5_mlxfw_dev_ops,
			.psid = dev->board_id,
			.psid_size = strlen(dev->board_id),
		},
		.mlx5_core_dev = dev
	};

	if (!MLX5_CAP_GEN(dev, mcam_reg)  ||
	    !MLX5_CAP_MCAM_REG(dev, mcqi) ||
	    !MLX5_CAP_MCAM_REG(dev, mcc)  ||
	    !MLX5_CAP_MCAM_REG(dev, mcda)) {
		pr_info("%s flashing isn't supported by the running FW\n", __func__);
		return -EOPNOTSUPP;
	}

	return mlxfw_firmware_flash(&mlx5_mlxfw_dev.mlxfw_dev, firmware);
}