1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Atlantic Network Driver
3  *
4  * Copyright (C) 2014-2019 aQuantia Corporation
5  * Copyright (C) 2019-2020 Marvell International Ltd.
6  */
7 
8 /* File hw_atl_utils.c: Definition of common functions for Atlantic hardware
9  * abstraction layer.
10  */
11 
12 #include "../aq_nic.h"
13 #include "../aq_hw_utils.h"
14 #include "hw_atl_utils.h"
15 #include "hw_atl_llh.h"
16 #include "hw_atl_llh_internal.h"
17 
18 #include <linux/random.h>
19 
20 #define HW_ATL_UCP_0X370_REG    0x0370U
21 
22 #define HW_ATL_MIF_CMD          0x0200U
23 #define HW_ATL_MIF_ADDR         0x0208U
24 #define HW_ATL_MIF_VAL          0x020CU
25 
26 #define HW_ATL_MPI_RPC_ADDR     0x0334U
27 #define HW_ATL_RPC_CONTROL_ADR  0x0338U
28 #define HW_ATL_RPC_STATE_ADR    0x033CU
29 
30 #define HW_ATL_MPI_FW_VERSION	0x18
31 #define HW_ATL_MPI_CONTROL_ADR  0x0368U
32 #define HW_ATL_MPI_STATE_ADR    0x036CU
33 
34 #define HW_ATL_MPI_STATE_MSK      0x00FFU
35 #define HW_ATL_MPI_STATE_SHIFT    0U
36 #define HW_ATL_MPI_SPEED_MSK      0x00FF0000U
37 #define HW_ATL_MPI_SPEED_SHIFT    16U
38 #define HW_ATL_MPI_DIRTY_WAKE_MSK 0x02000000U
39 
40 #define HW_ATL_MPI_DAISY_CHAIN_STATUS	0x704
41 #define HW_ATL_MPI_BOOT_EXIT_CODE	0x388
42 
43 #define HW_ATL_MAC_PHY_CONTROL	0x4000
44 #define HW_ATL_MAC_PHY_MPI_RESET_BIT 0x1D
45 
46 #define HW_ATL_FW_VER_1X 0x01050006U
47 #define HW_ATL_FW_VER_2X 0x02000000U
48 #define HW_ATL_FW_VER_3X 0x03000000U
49 #define HW_ATL_FW_VER_4X 0x04000000U
50 
51 #define FORCE_FLASHLESS 0
52 
53 enum mcp_area {
54 	MCP_AREA_CONFIG = 0x80000000,
55 	MCP_AREA_SETTINGS = 0x20000000,
56 };
57 
58 static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
59 				      enum hal_atl_utils_fw_state_e state);
60 static u32 hw_atl_utils_get_mpi_mbox_tid(struct aq_hw_s *self);
61 static u32 hw_atl_utils_mpi_get_state(struct aq_hw_s *self);
62 static u32 hw_atl_utils_mif_cmd_get(struct aq_hw_s *self);
63 static u32 hw_atl_utils_mif_addr_get(struct aq_hw_s *self);
64 static u32 hw_atl_utils_rpc_state_get(struct aq_hw_s *self);
65 static u32 aq_fw1x_rpc_get(struct aq_hw_s *self);
66 
hw_atl_utils_initfw(struct aq_hw_s * self,const struct aq_fw_ops ** fw_ops)67 int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
68 {
69 	int err = 0;
70 
71 	hw_atl_utils_hw_chip_features_init(self,
72 					   &self->chip_features);
73 
74 	self->fw_ver_actual = hw_atl_utils_get_fw_version(self);
75 
76 	if (hw_atl_utils_ver_match(HW_ATL_FW_VER_1X, self->fw_ver_actual)) {
77 		*fw_ops = &aq_fw_1x_ops;
78 	} else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_2X, self->fw_ver_actual)) {
79 		*fw_ops = &aq_fw_2x_ops;
80 	} else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_3X, self->fw_ver_actual)) {
81 		*fw_ops = &aq_fw_2x_ops;
82 	} else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_4X, self->fw_ver_actual)) {
83 		*fw_ops = &aq_fw_2x_ops;
84 	} else {
85 		aq_pr_err("Bad FW version detected: %x\n",
86 			  self->fw_ver_actual);
87 		return -EOPNOTSUPP;
88 	}
89 	self->aq_fw_ops = *fw_ops;
90 	err = self->aq_fw_ops->init(self);
91 
92 	return err;
93 }
94 
/* Soft reset path for chips booted via FLB (flashless boot).
 * Restarts the MAC, resets the SPI interface and the MCP firmware,
 * then waits for the FW to come back up.  The raw register offsets
 * below are undocumented aQuantia boot/MIF registers carried over from
 * the vendor driver — NOTE(review): their individual meanings are not
 * derivable from this file.
 */
static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self)
{
	u32 gsr, val;
	int k = 0;

	aq_hw_write_reg(self, 0x404, 0x40e1);
	AQ_HW_SLEEP(50);

	/* Cleanup SPI */
	val = aq_hw_read_reg(self, 0x53C);
	aq_hw_write_reg(self, 0x53C, val | 0x10);

	/* Issue global soft reset while preserving reserved bits */
	gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);
	aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, (gsr & 0xBFFF) | 0x8000);

	/* Kickstart MAC */
	aq_hw_write_reg(self, 0x404, 0x80e0);
	aq_hw_write_reg(self, 0x32a8, 0x0);
	aq_hw_write_reg(self, 0x520, 0x1);

	/* Reset SPI again because of possible interrupted SPI burst */
	val = aq_hw_read_reg(self, 0x53C);
	aq_hw_write_reg(self, 0x53C, val | 0x10);
	AQ_HW_SLEEP(10);
	/* Clear SPI reset state */
	aq_hw_write_reg(self, 0x53C, val & ~0x10);

	aq_hw_write_reg(self, 0x404, 0x180e0);

	/* Poll up to 1000 x 10ms for the daisy-chain status bit */
	for (k = 0; k < 1000; k++) {
		u32 flb_status = aq_hw_read_reg(self,
						HW_ATL_MPI_DAISY_CHAIN_STATUS);

		flb_status = flb_status & 0x10;
		if (flb_status)
			break;
		AQ_HW_SLEEP(10);
	}
	if (k == 1000) {
		aq_pr_err("MAC kickstart failed\n");
		return -EIO;
	}

	/* FW reset */
	aq_hw_write_reg(self, 0x404, 0x80e0);
	AQ_HW_SLEEP(50);
	aq_hw_write_reg(self, 0x3a0, 0x1);

	/* Kickstart PHY - skipped */

	/* Global software reset*/
	hw_atl_rx_rx_reg_res_dis_set(self, 0U);
	hw_atl_tx_tx_reg_res_dis_set(self, 0U);
	aq_hw_write_reg_bit(self, HW_ATL_MAC_PHY_CONTROL,
			    BIT(HW_ATL_MAC_PHY_MPI_RESET_BIT),
			    HW_ATL_MAC_PHY_MPI_RESET_BIT, 0x0);
	gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);
	aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, (gsr & 0xBFFF) | 0x8000);

	/* A non-zero FW version register means the FW has booted */
	for (k = 0; k < 1000; k++) {
		u32 fw_state = aq_hw_read_reg(self, HW_ATL_MPI_FW_VERSION);

		if (fw_state)
			break;
		AQ_HW_SLEEP(10);
	}
	if (k == 1000) {
		aq_pr_err("FW kickstart failed\n");
		return -EIO;
	}
	/* Old FW requires fixed delay after init */
	AQ_HW_SLEEP(15);

	return 0;
}
170 
/* Soft reset path for chips booted via RBL (ROM boot loader).
 * Marks the RBL status register with a sentinel, performs a global
 * soft reset, then waits for RBL to report a fresh status and for the
 * FW to finish booting.  Raw register offsets are undocumented vendor
 * boot registers — NOTE(review): meanings are not derivable from this
 * file.
 */
static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self)
{
	u32 gsr, val, rbl_status;
	int k;

	aq_hw_write_reg(self, 0x404, 0x40e1);
	aq_hw_write_reg(self, 0x3a0, 0x1);
	aq_hw_write_reg(self, 0x32a8, 0x0);

	/* Alter RBL status: 0xDEAD is a sentinel we expect RBL to replace */
	aq_hw_write_reg(self, 0x388, 0xDEAD);

	/* Cleanup SPI */
	val = aq_hw_read_reg(self, 0x53C);
	aq_hw_write_reg(self, 0x53C, val | 0x10);

	/* Global software reset*/
	hw_atl_rx_rx_reg_res_dis_set(self, 0U);
	hw_atl_tx_tx_reg_res_dis_set(self, 0U);
	aq_hw_write_reg_bit(self, HW_ATL_MAC_PHY_CONTROL,
			    BIT(HW_ATL_MAC_PHY_MPI_RESET_BIT),
			    HW_ATL_MAC_PHY_MPI_RESET_BIT, 0x0);
	gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);
	aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR,
			(gsr & 0xFFFFBFFF) | 0x8000);

	if (FORCE_FLASHLESS)
		aq_hw_write_reg(self, 0x534, 0x0);

	aq_hw_write_reg(self, 0x404, 0x40e0);

	/* Wait for RBL boot: up to 1000 x 10ms until the sentinel changes */
	for (k = 0; k < 1000; k++) {
		rbl_status = aq_hw_read_reg(self, 0x388) & 0xFFFF;
		if (rbl_status && rbl_status != 0xDEAD)
			break;
		AQ_HW_SLEEP(10);
	}
	if (!rbl_status || rbl_status == 0xDEAD) {
		aq_pr_err("RBL Restart failed");
		return -EIO;
	}

	/* Restore NVR */
	if (FORCE_FLASHLESS)
		aq_hw_write_reg(self, 0x534, 0xA0);

	/* 0xF1A7 = RBL reports "flashless, host load required" */
	if (rbl_status == 0xF1A7) {
		aq_pr_err("No FW detected. Dynamic FW load not implemented\n");
		return -EOPNOTSUPP;
	}

	/* A non-zero FW version register means the FW has booted */
	for (k = 0; k < 1000; k++) {
		u32 fw_state = aq_hw_read_reg(self, HW_ATL_MPI_FW_VERSION);

		if (fw_state)
			break;
		AQ_HW_SLEEP(10);
	}
	if (k == 1000) {
		aq_pr_err("FW kickstart failed\n");
		return -EIO;
	}
	/* Old FW requires fixed delay after init */
	AQ_HW_SLEEP(15);

	return 0;
}
239 
/* Common soft-reset entry point: detect whether the chip booted via
 * RBL or FLB, perform per-FW-version pre-reset workarounds, then
 * dispatch to the matching reset routine.
 */
int hw_atl_utils_soft_reset(struct aq_hw_s *self)
{
	int ver = hw_atl_utils_get_fw_version(self);
	u32 boot_exit_code = 0;
	u32 val;
	int k;

	/* Busy-poll (no sleep) until either the daisy-chain status leaves
	 * its idle pattern or a boot exit code is posted.
	 */
	for (k = 0; k < 1000; ++k) {
		u32 flb_status = aq_hw_read_reg(self,
						HW_ATL_MPI_DAISY_CHAIN_STATUS);
		boot_exit_code = aq_hw_read_reg(self,
						HW_ATL_MPI_BOOT_EXIT_CODE);
		if (flb_status != 0x06000000 || boot_exit_code != 0)
			break;
	}

	if (k == 1000) {
		aq_pr_err("Neither RBL nor FLB firmware started\n");
		return -EOPNOTSUPP;
	}

	/* A non-zero boot exit code implies the ROM boot loader is active */
	self->rbl_enabled = (boot_exit_code != 0);

	if (hw_atl_utils_ver_match(HW_ATL_FW_VER_1X, ver)) {
		int err = 0;

		/* FW 1.x may bootup in an invalid POWER state (WOL feature).
		 * We should work around this by forcing its state back to DEINIT
		 */
		hw_atl_utils_mpi_set_state(self, MPI_DEINIT);
		err = readx_poll_timeout_atomic(hw_atl_utils_mpi_get_state,
						self, val,
						(val & HW_ATL_MPI_STATE_MSK) ==
						 MPI_DEINIT,
						10, 10000U);
		if (err)
			return err;
	} else if (hw_atl_utils_ver_match(HW_ATL_FW_VER_4X, ver)) {
		u64 sem_timeout = aq_hw_read_reg(self, HW_ATL_MIF_RESET_TIMEOUT_ADR);

		/* Acquire 2 semaphores before issuing reset for FW 4.x */
		if (sem_timeout > 3000)
			sem_timeout = 3000;
		sem_timeout = sem_timeout * 1000;

		if (sem_timeout != 0) {
			int err;

			/* Semaphore timeouts are logged but non-fatal */
			err = readx_poll_timeout_atomic(hw_atl_sem_reset1_get, self, val,
							val == 1U, 1U, sem_timeout);
			if (err)
				aq_pr_err("reset sema1 timeout");

			err = readx_poll_timeout_atomic(hw_atl_sem_reset2_get, self, val,
							val == 1U, 1U, sem_timeout);
			if (err)
				aq_pr_err("reset sema2 timeout");
		}
	}

	if (self->rbl_enabled)
		return hw_atl_utils_soft_reset_rbl(self);
	else
		return hw_atl_utils_soft_reset_flb(self);
}
305 
/* Read @cnt dwords from MCP memory at address @a into @p via the MIF
 * mailbox, guarded by the shared RAM semaphore.
 * Returns 0 on success or a negative errno.
 */
int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
				  u32 *p, u32 cnt)
{
	int err = 0;
	u32 val;

	/* Acquire the FW/host shared RAM semaphore (poll up to 10ms) */
	err = readx_poll_timeout_atomic(hw_atl_sem_ram_get,
					self, val, val == 1U,
					1U, 10000U);

	if (err < 0) {
		bool is_locked;

		/* Timed out: force-release and try to take it once more */
		hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
		is_locked = hw_atl_sem_ram_get(self);
		if (!is_locked) {
			err = -ETIME;
			goto err_exit;
		}
	}

	aq_hw_write_reg(self, HW_ATL_MIF_ADDR, a);

	/* Iterate exactly cnt times; stop early on a poll error */
	for (++cnt; --cnt && !err;) {
		/* Trigger a MIF read transaction */
		aq_hw_write_reg(self, HW_ATL_MIF_CMD, 0x00008000U);

		/* B1 silicon auto-increments MIF_ADDR; wait for the address
		 * to move on.  Older revisions: wait for the busy bit (0x100)
		 * in MIF_CMD to clear.
		 */
		if (ATL_HW_IS_CHIP_FEATURE(self, REVISION_B1))
			err = readx_poll_timeout_atomic(hw_atl_utils_mif_addr_get,
							self, val, val != a,
							1U, 1000U);
		else
			err = readx_poll_timeout_atomic(hw_atl_utils_mif_cmd_get,
							self, val,
							!(val & 0x100),
							1U, 1000U);

		*(p++) = aq_hw_read_reg(self, HW_ATL_MIF_VAL);
		a += 4;
	}

	/* Release the RAM semaphore */
	hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);

err_exit:
	return err;
}
351 
/* Upload @cnt dwords from @p to MCP memory at @addr on B1 silicon.
 * @addr is absolute; it is converted to an offset relative to the
 * base of the selected @area (RPC config or settings region).
 */
static int hw_atl_utils_write_b1_mbox(struct aq_hw_s *self, u32 addr,
				      u32 *p, u32 cnt, enum mcp_area area)
{
	u32 data_offset = 0;
	u32 offset = addr;
	int err = 0;
	u32 val;

	switch (area) {
	case MCP_AREA_CONFIG:
		offset -= self->rpc_addr;
		break;

	case MCP_AREA_SETTINGS:
		offset -= self->settings_addr;
		break;
	}

	/* Convert byte offset to dword index */
	offset = offset / sizeof(u32);

	for (; data_offset < cnt; ++data_offset, ++offset) {
		/* Write data word, then the area tag + byte offset, then
		 * interrupt the MCP so it consumes the word.
		 */
		aq_hw_write_reg(self, 0x328, p[data_offset]);
		aq_hw_write_reg(self, 0x32C,
				(area | (0xFFFF & (offset * 4))));
		hw_atl_mcp_up_force_intr_set(self, 1);
		/* 1000 times by 10us = 10ms; FW clears the area tag in
		 * scratchpad 12 when the word has been accepted.
		 */
		err = readx_poll_timeout_atomic(hw_atl_scrpad12_get,
						self, val,
						(val & 0xF0000000) !=
						area,
						10U, 10000U);

		if (err < 0)
			break;
	}

	return err;
}
390 
/* Upload @cnt dwords from @p to MCP memory at @addr on pre-B1 silicon,
 * using the MIF write mailbox (address auto-increments per write).
 */
static int hw_atl_utils_write_b0_mbox(struct aq_hw_s *self, u32 addr,
				      u32 *p, u32 cnt)
{
	u32 offset = 0;
	int err = 0;
	u32 val;

	/* Set target address in the MIF address register */
	aq_hw_write_reg(self, 0x208, addr);

	for (; offset < cnt; ++offset) {
		/* Data word, then a write command (0xC000) */
		aq_hw_write_reg(self, 0x20C, p[offset]);
		aq_hw_write_reg(self, 0x200, 0xC000);

		/* Wait for the MIF busy bit (0x100) to clear */
		err = readx_poll_timeout_atomic(hw_atl_utils_mif_cmd_get,
						self, val,
						(val & 0x100) == 0U,
						10U, 10000U);

		if (err < 0)
			break;
	}

	return err;
}
415 
/* Upload @cnt dwords to MCP memory, taking the shared RAM semaphore
 * and dispatching to the revision-specific mailbox routine.
 * Returns 0 on success or a negative errno.
 */
static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 addr, u32 *p,
					 u32 cnt, enum mcp_area area)
{
	int err = 0;
	u32 val;

	/* Acquire the FW/host shared RAM semaphore (poll up to 100ms) */
	err = readx_poll_timeout_atomic(hw_atl_sem_ram_get, self,
					val, val == 1U,
					10U, 100000U);
	if (err < 0)
		goto err_exit;

	if (ATL_HW_IS_CHIP_FEATURE(self, REVISION_B1))
		err = hw_atl_utils_write_b1_mbox(self, addr, p, cnt, area);
	else
		err = hw_atl_utils_write_b0_mbox(self, addr, p, cnt);

	/* Always release the semaphore, even on write failure */
	hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);

	if (err < 0)
		goto err_exit;

	err = aq_hw_err_from_flags(self);

err_exit:
	return err;
}
443 
/* Write @cnt dwords from @p into the FW RPC (config) area. */
int hw_atl_write_fwcfg_dwords(struct aq_hw_s *self, u32 *p, u32 cnt)
{
	return hw_atl_utils_fw_upload_dwords(self, self->rpc_addr, p,
					     cnt, MCP_AREA_CONFIG);
}
449 
/* Write @cnt dwords from @p into the FW settings area at byte @offset. */
int hw_atl_write_fwsettings_dwords(struct aq_hw_s *self, u32 offset, u32 *p,
				   u32 cnt)
{
	return hw_atl_utils_fw_upload_dwords(self, self->settings_addr + offset,
					     p, cnt, MCP_AREA_SETTINGS);
}
456 
hw_atl_utils_ver_match(u32 ver_expected,u32 ver_actual)457 bool hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual)
458 {
459 	const u32 dw_major_mask = 0xff000000U;
460 	const u32 dw_minor_mask = 0x00ffffffU;
461 	bool ver_match;
462 
463 	ver_match = (dw_major_mask & (ver_expected ^ ver_actual)) ? false : true;
464 	if (!ver_match)
465 		goto err_exit;
466 	ver_match = ((dw_minor_mask & ver_expected) > (dw_minor_mask & ver_actual)) ?
467 		false : true;
468 
469 err_exit:
470 	return ver_match;
471 }
472 
hw_atl_utils_init_ucp(struct aq_hw_s * self,const struct aq_hw_caps_s * aq_hw_caps)473 static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
474 				 const struct aq_hw_caps_s *aq_hw_caps)
475 {
476 	int err = 0;
477 
478 	if (!aq_hw_read_reg(self, 0x370U)) {
479 		unsigned int rnd = 0U;
480 		unsigned int ucp_0x370 = 0U;
481 
482 		get_random_bytes(&rnd, sizeof(unsigned int));
483 
484 		ucp_0x370 = 0x02020202U | (0xFEFEFEFEU & rnd);
485 		aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370);
486 	}
487 
488 	hw_atl_reg_glb_cpu_scratch_scp_set(self, 0x00000000U, 25U);
489 
490 	/* check 10 times by 1ms */
491 	err = readx_poll_timeout_atomic(hw_atl_scrpad25_get,
492 					self, self->mbox_addr,
493 					self->mbox_addr != 0U,
494 					1000U, 10000U);
495 	err = readx_poll_timeout_atomic(aq_fw1x_rpc_get, self,
496 					self->rpc_addr,
497 					self->rpc_addr != 0U,
498 					1000U, 100000U);
499 
500 	return err;
501 }
502 
/* RPC control word: transaction id in the low half, payload length in
 * the high half, accessible either as one u32 register image or as the
 * two u16 fields.
 */
struct aq_hw_atl_utils_fw_rpc_tid_s {
	union {
		u32 val;	/* raw register value */
		struct {
			u16 tid;	/* transaction id */
			u16 len;	/* RPC message length, bytes */
		};
	};
};

/* RPC init is just a wait with no result buffer requested */
#define hw_atl_utils_fw_rpc_init(_H_) hw_atl_utils_fw_rpc_wait(_H_, NULL)
514 
/* Issue the RPC request currently staged in self->rpc: upload it to the
 * FW config area and ring the doorbell with a fresh transaction id.
 * @rpc_size: payload size in bytes (rounded up to whole dwords).
 */
int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size)
{
	struct aq_hw_atl_utils_fw_rpc_tid_s sw;
	int err = 0;

	/* RPC requires the MIPS MCP core to be present */
	if (!ATL_HW_IS_CHIP_FEATURE(self, MIPS)) {
		err = -1;
		goto err_exit;
	}
	/* (rpc_size + 3) / 4: round byte count up to dwords */
	err = hw_atl_write_fwcfg_dwords(self, (u32 *)(void *)&self->rpc,
					(rpc_size + sizeof(u32) -
					 sizeof(u8)) / sizeof(u32));
	if (err < 0)
		goto err_exit;

	/* New transaction id + length, then doorbell */
	sw.tid = 0xFFFFU & (++self->rpc_tid);
	sw.len = (u16)rpc_size;
	aq_hw_write_reg(self, HW_ATL_RPC_CONTROL_ADR, sw.val);

err_exit:
	return err;
}
537 
/* Wait for the FW to acknowledge the outstanding RPC transaction and,
 * when @rpc is non-NULL, download the response into self->rpc and
 * return a pointer to it.  A FW-reported length of 0xFFFF means
 * "retransmit": the request is re-issued and the wait restarts.
 */
int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
			     struct hw_atl_utils_fw_rpc **rpc)
{
	struct aq_hw_atl_utils_fw_rpc_tid_s sw;
	struct aq_hw_atl_utils_fw_rpc_tid_s fw;
	int err = 0;

	do {
		/* Host-side view of the current transaction */
		sw.val = aq_hw_read_reg(self, HW_ATL_RPC_CONTROL_ADR);

		self->rpc_tid = sw.tid;

		/* Poll until the FW echoes the same transaction id */
		err = readx_poll_timeout_atomic(hw_atl_utils_rpc_state_get,
						self, fw.val,
						sw.tid == fw.tid,
						1000U, 100000U);
		if (err < 0)
			goto err_exit;

		err = aq_hw_err_from_flags(self);
		if (err < 0)
			goto err_exit;

		/* 0xFFFF length = FW asks for a retransmit of the request */
		if (fw.len == 0xFFFFU) {
			err = hw_atl_utils_fw_rpc_call(self, sw.len);
			if (err < 0)
				goto err_exit;
		}
	} while (sw.tid != fw.tid || 0xFFFFU == fw.len);

	if (rpc) {
		if (fw.len) {
			/* Download the response, rounding bytes up to dwords */
			err =
			hw_atl_utils_fw_downld_dwords(self,
						      self->rpc_addr,
						      (u32 *)(void *)
						      &self->rpc,
						      (fw.len + sizeof(u32) -
						       sizeof(u8)) /
						      sizeof(u32));
			if (err < 0)
				goto err_exit;
		}

		*rpc = &self->rpc;
	}

err_exit:
	return err;
}
588 
/* FW 1.x .init hook: bring up the uC interface, then prime the RPC
 * channel.  Returns 0 on success or a negative errno.
 */
static int hw_atl_utils_mpi_create(struct aq_hw_s *self)
{
	int err = 0;

	err = hw_atl_utils_init_ucp(self, self->aq_nic_cfg->aq_hw_caps);
	if (err < 0)
		goto err_exit;

	err = hw_atl_utils_fw_rpc_init(self);
	if (err < 0)
		goto err_exit;

err_exit:
	return err;
}
604 
/* Download just the mailbox header (transaction id etc.) from the FW
 * mailbox area into @pmbox.
 */
int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self,
			       struct hw_atl_utils_mbox_header *pmbox)
{
	return hw_atl_utils_fw_downld_dwords(self,
					     self->mbox_addr,
					     (u32 *)(void *)pmbox,
					     sizeof(*pmbox) / sizeof(u32));
}
613 
/* Download the full FW mailbox (including the statistics block) into
 * @pmbox and apply per-revision fixups.  Download failures are silently
 * ignored — @pmbox is left untouched in that case.
 */
void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
				 struct hw_atl_utils_mbox *pmbox)
{
	int err = 0;

	err = hw_atl_utils_fw_downld_dwords(self,
					    self->mbox_addr,
					    (u32 *)(void *)pmbox,
					    sizeof(*pmbox) / sizeof(u32));
	if (err < 0)
		goto err_exit;

	if (ATL_HW_IS_CHIP_FEATURE(self, REVISION_A0)) {
		/* A0 FW does not report byte counters: approximate them as
		 * packet count * MTU (1514 default when cfg is absent).
		 */
		unsigned int mtu = self->aq_nic_cfg ?
					self->aq_nic_cfg->mtu : 1514U;
		pmbox->stats.ubrc = pmbox->stats.uprc * mtu;
		pmbox->stats.ubtc = pmbox->stats.uptc * mtu;
		pmbox->stats.dpc = atomic_read(&self->dpc);
	} else {
		/* Later revisions expose the drop counter in hardware */
		pmbox->stats.dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self);
	}

err_exit:;
}
638 
/* Program the requested link-speed mask into the MPI control register
 * via read-modify-write of the speed bit-field.  Always returns 0.
 */
static int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed)
{
	u32 ctrl;

	ctrl = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
	ctrl &= ~HW_ATL_MPI_SPEED_MSK;
	ctrl |= speed << HW_ATL_MPI_SPEED_SHIFT;
	aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, ctrl);

	return 0;
}
649 
/* Request a new FW MPI state.  For MPI_RESET we first wait for the FW
 * to advance the mailbox transaction id, proving it is alive.  The
 * dirty-wake bit is raised for DEINIT/POWER and cleared otherwise.
 */
static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
				      enum hal_atl_utils_fw_state_e state)
{
	u32 val = aq_hw_read_reg(self, HW_ATL_MPI_CONTROL_ADR);
	struct hw_atl_utils_mbox_header mbox;
	u32 transaction_id = 0;
	int err = 0;

	if (state == MPI_RESET) {
		hw_atl_utils_mpi_read_mbox(self, &mbox);

		transaction_id = mbox.transaction_id;

		/* Wait until the FW bumps the transaction id (alive check) */
		err = readx_poll_timeout_atomic(hw_atl_utils_get_mpi_mbox_tid,
						self, mbox.transaction_id,
						transaction_id !=
						mbox.transaction_id,
						1000U, 100000U);
		if (err < 0)
			goto err_exit;
	}
	/* On interface DEINIT we disable DW (raise bit)
	 * Otherwise enable DW (clear bit)
	 */
	if (state == MPI_DEINIT || state == MPI_POWER)
		val |= HW_ATL_MPI_DIRTY_WAKE_MSK;
	else
		val &= ~HW_ATL_MPI_DIRTY_WAKE_MSK;

	/* Set new state bits */
	val = val & ~HW_ATL_MPI_STATE_MSK;
	val |= state & HW_ATL_MPI_STATE_MSK;

	aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, val);

err_exit:
	return err;
}
688 
hw_atl_utils_mpi_get_link_status(struct aq_hw_s * self)689 int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
690 {
691 	struct aq_hw_link_status_s *link_status = &self->aq_link_status;
692 	u32 mpi_state;
693 	u32 speed;
694 
695 	mpi_state = hw_atl_utils_mpi_get_state(self);
696 	speed = mpi_state >> HW_ATL_MPI_SPEED_SHIFT;
697 
698 	if (!speed) {
699 		link_status->mbps = 0U;
700 	} else {
701 		switch (speed) {
702 		case HAL_ATLANTIC_RATE_10G:
703 			link_status->mbps = 10000U;
704 			break;
705 
706 		case HAL_ATLANTIC_RATE_5G:
707 		case HAL_ATLANTIC_RATE_5GSR:
708 			link_status->mbps = 5000U;
709 			break;
710 
711 		case HAL_ATLANTIC_RATE_2G5:
712 			link_status->mbps = 2500U;
713 			break;
714 
715 		case HAL_ATLANTIC_RATE_1G:
716 			link_status->mbps = 1000U;
717 			break;
718 
719 		case HAL_ATLANTIC_RATE_100M:
720 			link_status->mbps = 100U;
721 			break;
722 
723 		default:
724 			return -EBUSY;
725 		}
726 	}
727 	link_status->full_duplex = true;
728 
729 	return 0;
730 }
731 
/* Read the permanent MAC address from the chip's efuse area into @mac.
 * If the efuse read fails or yields an invalid address (multicast bit
 * set, or leading OUI bytes all zero), a fallback address is fabricated
 * from the aQuantia OUI and chip-revision bits.
 */
int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
				   u8 *mac)
{
	u32 mac_addr[2];
	u32 efuse_addr;
	int err = 0;
	u32 h = 0U;
	u32 l = 0U;

	/* Seed the 0x370 scratch register if the FW has not done so */
	if (!aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) {
		unsigned int ucp_0x370 = 0;
		unsigned int rnd = 0;

		get_random_bytes(&rnd, sizeof(unsigned int));

		ucp_0x370 = 0x02020202 | (0xFEFEFEFE & rnd);
		aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370);
	}

	/* 0x374 holds the efuse shadow base; MAC lives at dword 40 */
	efuse_addr = aq_hw_read_reg(self, 0x00000374U);

	err = hw_atl_utils_fw_downld_dwords(self, efuse_addr + (40U * 4U),
					    mac_addr, ARRAY_SIZE(mac_addr));
	if (err < 0) {
		/* Read failure is non-fatal: fall through to the
		 * fabricated-address path below with a zeroed MAC.
		 */
		mac_addr[0] = 0U;
		mac_addr[1] = 0U;
		err = 0;
	} else {
		/* Efuse stores the address byte-swapped */
		mac_addr[0] = __swab32(mac_addr[0]);
		mac_addr[1] = __swab32(mac_addr[1]);
	}

	ether_addr_copy(mac, (u8 *)mac_addr);

	/* Invalid if multicast bit set or OUI bytes are all zero */
	if ((mac[0] & 0x01U) || ((mac[0] | mac[1] | mac[2]) == 0x00U)) {
		/* chip revision */
		l = 0xE3000000U |
		    (0xFFFFU & aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) |
		    (0x00 << 16);
		h = 0x8001300EU;

		/* Unpack h:l (48 bits) into the 6 MAC bytes, MSB first */
		mac[5] = (u8)(0xFFU & l);
		l >>= 8;
		mac[4] = (u8)(0xFFU & l);
		l >>= 8;
		mac[3] = (u8)(0xFFU & l);
		l >>= 8;
		mac[2] = (u8)(0xFFU & l);
		mac[1] = (u8)(0xFFU & h);
		h >>= 8;
		mac[0] = (u8)(0xFFU & h);
	}

	return err;
}
787 
/* Map a link speed in Mbps to the FW speed index.
 * Unknown speeds map to 0 (same index as 10G), matching the original
 * default behaviour.
 */
unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps)
{
	static const unsigned int speed_mbps[] = {
		10000U, 5000U, 2500U, 1000U, 100U,
	};
	static const unsigned int speed_index[] = {
		0U, 1U, 3U, 4U, 5U,
	};
	unsigned int i;

	for (i = 0U; i < sizeof(speed_mbps) / sizeof(speed_mbps[0]); i++) {
		if (speed_mbps[i] == mbps)
			return speed_index[i];
	}

	return 0U;
}
819 
/* Derive the chip feature mask from the low nibble of the MIF revision
 * id and store it in *@p.  Unknown revisions report only the base
 * ATL_HW_CHIP_ATLANTIC flag.
 */
void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
{
	u32 mif_rev = hw_atl_reg_glb_mif_id_get(self) & 0xFFU;
	u32 features = ATL_HW_CHIP_ATLANTIC;

	switch (mif_rev & 0xFU) {
	case 1U:
		features |= ATL_HW_CHIP_REVISION_A0 |
			    ATL_HW_CHIP_MPI_AQ |
			    ATL_HW_CHIP_MIPS;
		break;
	case 2U:
		features |= ATL_HW_CHIP_REVISION_B0 |
			    ATL_HW_CHIP_MPI_AQ |
			    ATL_HW_CHIP_MIPS |
			    ATL_HW_CHIP_TPO2 |
			    ATL_HW_CHIP_RPF2;
		break;
	case 0xAU:
		features |= ATL_HW_CHIP_REVISION_B1 |
			    ATL_HW_CHIP_MPI_AQ |
			    ATL_HW_CHIP_MIPS |
			    ATL_HW_CHIP_TPO2 |
			    ATL_HW_CHIP_RPF2;
		break;
	default:
		break;
	}

	*p = features;
}
848 
/* FW 1.x .deinit hook: drop link speed to 0 and put the FW into the
 * DEINIT state.  Always returns 0.
 */
static int hw_atl_fw1x_deinit(struct aq_hw_s *self)
{
	hw_atl_utils_mpi_set_speed(self, 0);
	hw_atl_utils_mpi_set_state(self, MPI_DEINIT);

	return 0;
}
856 
/* Accumulate FW statistics deltas into self->curr_stats and refresh the
 * DMA good-packet/octet counters from hardware.
 * NOTE(review): hw_atl_utils_mpi_read_stats() ignores download errors,
 * so on failure mbox holds stack garbage and the deltas below would be
 * bogus — presumably tolerated because counters self-correct on the
 * next good read; confirm against newer upstream which hardened this.
 */
int hw_atl_utils_update_stats(struct aq_hw_s *self)
{
	struct aq_stats_s *cs = &self->curr_stats;
	struct hw_atl_utils_mbox mbox;

	hw_atl_utils_mpi_read_stats(self, &mbox);

	/* Add the delta since the last snapshot to the running counter */
#define AQ_SDELTA(_N_) (self->curr_stats._N_ += \
			mbox.stats._N_ - self->last_stats._N_)

	/* Only accumulate while the link is up; FW counters are frozen
	 * (or meaningless) otherwise.
	 */
	if (self->aq_link_status.mbps) {
		AQ_SDELTA(uprc);
		AQ_SDELTA(mprc);
		AQ_SDELTA(bprc);
		AQ_SDELTA(erpt);

		AQ_SDELTA(uptc);
		AQ_SDELTA(mptc);
		AQ_SDELTA(bptc);
		AQ_SDELTA(erpr);

		AQ_SDELTA(ubrc);
		AQ_SDELTA(ubtc);
		AQ_SDELTA(mbrc);
		AQ_SDELTA(mbtc);
		AQ_SDELTA(bbrc);
		AQ_SDELTA(bbtc);
		AQ_SDELTA(dpc);
	}
#undef AQ_SDELTA

	/* DMA counters come straight from hardware, not the FW mailbox */
	cs->dma_pkt_rc = hw_atl_stats_rx_dma_good_pkt_counter_get(self);
	cs->dma_pkt_tc = hw_atl_stats_tx_dma_good_pkt_counter_get(self);
	cs->dma_oct_rc = hw_atl_stats_rx_dma_good_octet_counter_get(self);
	cs->dma_oct_tc = hw_atl_stats_tx_dma_good_octet_counter_get(self);

	/* Snapshot for the next delta computation */
	memcpy(&self->last_stats, &mbox.stats, sizeof(mbox.stats));

	return 0;
}
897 
hw_atl_utils_get_hw_stats(struct aq_hw_s * self)898 struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self)
899 {
900 	return &self->curr_stats;
901 }
902 
/* Register offsets dumped by hw_atl_utils_hw_get_regs() for ethtool
 * get-regs; the count used by callers comes from aq_hw_caps->mac_regs_count.
 */
static const u32 hw_atl_utils_hw_mac_regs[] = {
	0x00005580U, 0x00005590U, 0x000055B0U, 0x000055B4U,
	0x000055C0U, 0x00005B00U, 0x00005B04U, 0x00005B08U,
	0x00005B0CU, 0x00005B10U, 0x00005B14U, 0x00005B18U,
	0x00005B1CU, 0x00005B20U, 0x00005B24U, 0x00005B28U,
	0x00005B2CU, 0x00005B30U, 0x00005B34U, 0x00005B38U,
	0x00005B3CU, 0x00005B40U, 0x00005B44U, 0x00005B48U,
	0x00005B4CU, 0x00005B50U, 0x00005B54U, 0x00005B58U,
	0x00005B5CU, 0x00005B60U, 0x00005B64U, 0x00005B68U,
	0x00005B6CU, 0x00005B70U, 0x00005B74U, 0x00005B78U,
	0x00005B7CU, 0x00007C00U, 0x00007C04U, 0x00007C08U,
	0x00007C0CU, 0x00007C10U, 0x00007C14U, 0x00007C18U,
	0x00007C1CU, 0x00007C20U, 0x00007C40U, 0x00007C44U,
	0x00007C48U, 0x00007C4CU, 0x00007C50U, 0x00007C54U,
	0x00007C58U, 0x00007C5CU, 0x00007C60U, 0x00007C80U,
	0x00007C84U, 0x00007C88U, 0x00007C8CU, 0x00007C90U,
	0x00007C94U, 0x00007C98U, 0x00007C9CU, 0x00007CA0U,
	0x00007CC0U, 0x00007CC4U, 0x00007CC8U, 0x00007CCCU,
	0x00007CD0U, 0x00007CD4U, 0x00007CD8U, 0x00007CDCU,
	0x00007CE0U, 0x00000300U, 0x00000304U, 0x00000308U,
	0x0000030cU, 0x00000310U, 0x00000314U, 0x00000318U,
	0x0000031cU, 0x00000360U, 0x00000364U, 0x00000368U,
	0x0000036cU, 0x00000370U, 0x00000374U, 0x00006900U,
};
927 
/* Copy mac_regs_count register values from hw_atl_utils_hw_mac_regs[]
 * into @regs_buff for the ethtool registers dump.  Always returns 0.
 * The caller's mac_regs_count must not exceed the table size.
 */
int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
			     const struct aq_hw_caps_s *aq_hw_caps,
			     u32 *regs_buff)
{
	unsigned int i = 0U;

	for (i = 0; i < aq_hw_caps->mac_regs_count; i++)
		regs_buff[i] = aq_hw_read_reg(self,
					      hw_atl_utils_hw_mac_regs[i]);

	return 0;
}
940 
hw_atl_utils_get_fw_version(struct aq_hw_s * self)941 u32 hw_atl_utils_get_fw_version(struct aq_hw_s *self)
942 {
943 	return aq_hw_read_reg(self, HW_ATL_MPI_FW_VERSION);
944 }
945 
/* Install or remove the magic-packet WoL pattern for @mac via an RPC
 * to FW 1.x.  @wol_enabled selects add vs. delete.
 */
static int aq_fw1x_set_wake_magic(struct aq_hw_s *self, bool wol_enabled,
				  u8 *mac)
{
	struct hw_atl_utils_fw_rpc *prpc = NULL;
	unsigned int rpc_size = 0U;
	int err = 0;

	/* Get exclusive access to the shared RPC buffer */
	err = hw_atl_utils_fw_rpc_wait(self, &prpc);
	if (err < 0)
		goto err_exit;

	memset(prpc, 0, sizeof(*prpc));

	if (wol_enabled) {
		/* Payload = header up to msg_wol_add plus that message */
		rpc_size = offsetof(struct hw_atl_utils_fw_rpc, msg_wol_add) +
			   sizeof(prpc->msg_wol_add);


		prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_WOL_ADD;
		prpc->msg_wol_add.priority =
				HAL_ATLANTIC_UTILS_FW_MSG_WOL_PRIOR;
		prpc->msg_wol_add.pattern_id =
				HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN;
		prpc->msg_wol_add.packet_type =
				HAL_ATLANTIC_UTILS_FW_MSG_WOL_MAG_PKT;

		ether_addr_copy((u8 *)&prpc->msg_wol_add.magic_packet_pattern,
				mac);
	} else {
		rpc_size = sizeof(prpc->msg_wol_remove) +
			   offsetof(struct hw_atl_utils_fw_rpc, msg_wol_remove);

		prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_WOL_DEL;
		/* NOTE(review): writes msg_wol_add.pattern_id on the delete
		 * path — presumably add/remove messages overlay in a union
		 * so the field aliases; confirm in hw_atl_utils.h.
		 */
		prpc->msg_wol_add.pattern_id =
				HAL_ATLANTIC_UTILS_FW_MSG_WOL_PATTERN;
	}

	err = hw_atl_utils_fw_rpc_call(self, rpc_size);

err_exit:
	return err;
}
988 
/* FW 1.x .set_power hook: arm magic-packet WoL if configured, then
 * transition the FW to the POWER state with link speed 0.
 * @power_state is currently unused by this FW generation.
 */
static int aq_fw1x_set_power(struct aq_hw_s *self, unsigned int power_state,
			     u8 *mac)
{
	struct hw_atl_utils_fw_rpc *prpc = NULL;
	unsigned int rpc_size = 0U;
	int err = 0;

	if (self->aq_nic_cfg->wol & WAKE_MAGIC) {
		/* Install the magic-packet pattern first */
		err = aq_fw1x_set_wake_magic(self, 1, mac);

		if (err < 0)
			goto err_exit;

		rpc_size = sizeof(prpc->msg_id) +
			   sizeof(prpc->msg_enable_wakeup);

		err = hw_atl_utils_fw_rpc_wait(self, &prpc);

		if (err < 0)
			goto err_exit;

		memset(prpc, 0, rpc_size);

		/* Enable wakeup on pattern slot 1 (mask bit 1) */
		prpc->msg_id = HAL_ATLANTIC_UTILS_FW_MSG_ENABLE_WAKEUP;
		prpc->msg_enable_wakeup.pattern_mask = 0x00000002;

		err = hw_atl_utils_fw_rpc_call(self, rpc_size);
		if (err < 0)
			goto err_exit;
	}
	hw_atl_utils_mpi_set_speed(self, 0);
	hw_atl_utils_mpi_set_state(self, MPI_POWER);

err_exit:
	return err;
}
1025 
/* Poll helper: fetch the current FW mailbox transaction id.
 * Mailbox read errors are ignored; used via readx_poll_timeout_atomic.
 */
static u32 hw_atl_utils_get_mpi_mbox_tid(struct aq_hw_s *self)
{
	struct hw_atl_utils_mbox_header mbox;

	hw_atl_utils_mpi_read_mbox(self, &mbox);

	return mbox.transaction_id;
}
1034 
/* Poll helper: raw MPI state register (state + speed fields). */
static u32 hw_atl_utils_mpi_get_state(struct aq_hw_s *self)
{
	return aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR);
}
1039 
/* Poll helper: MIF command register (bit 0x100 = busy). */
static u32 hw_atl_utils_mif_cmd_get(struct aq_hw_s *self)
{
	return aq_hw_read_reg(self, HW_ATL_MIF_CMD);
}
1044 
/* Poll helper: MIF address register (auto-increments on B1). */
static u32 hw_atl_utils_mif_addr_get(struct aq_hw_s *self)
{
	return aq_hw_read_reg(self, HW_ATL_MIF_ADDR);
}
1049 
/* Poll helper: FW-side RPC state word (tid + length). */
static u32 hw_atl_utils_rpc_state_get(struct aq_hw_s *self)
{
	return aq_hw_read_reg(self, HW_ATL_RPC_STATE_ADR);
}
1054 
/* Poll helper: address of the FW 1.x RPC area (0 until FW publishes it). */
static u32 aq_fw1x_rpc_get(struct aq_hw_s *self)
{
	return aq_hw_read_reg(self, HW_ATL_MPI_RPC_ADDR);
}
1059 
/* Firmware ops table for FW 1.x.  NULL entries are features this FW
 * generation does not support (EEE, flow control, PTP, LED, temps).
 */
const struct aq_fw_ops aq_fw_1x_ops = {
	.init = hw_atl_utils_mpi_create,
	.deinit = hw_atl_fw1x_deinit,
	.reset = NULL,
	.get_mac_permanent = hw_atl_utils_get_mac_permanent,
	.set_link_speed = hw_atl_utils_mpi_set_speed,
	.set_state = hw_atl_utils_mpi_set_state,
	.update_link_status = hw_atl_utils_mpi_get_link_status,
	.update_stats = hw_atl_utils_update_stats,
	.get_mac_temp = NULL,
	.get_phy_temp = NULL,
	.set_power = aq_fw1x_set_power,
	.set_eee_rate = NULL,
	.get_eee_rate = NULL,
	.set_flow_control = NULL,
	.send_fw_request = NULL,
	.enable_ptp = NULL,
	.led_control = NULL,
};
1079