1 // SPDX-License-Identifier:    GPL-2.0
2 /*
3  * Copyright (C) 2018 Marvell International Ltd.
4  */
5 
6 #include <config.h>
7 #include <dm.h>
8 #include <errno.h>
9 #include <fdt_support.h>
10 #include <malloc.h>
11 #include <miiphy.h>
12 #include <misc.h>
13 #include <net.h>
14 #include <netdev.h>
15 #include <pci.h>
16 #include <pci_ids.h>
17 #include <asm/io.h>
18 #include <asm/arch/board.h>
19 #include <linux/delay.h>
20 #include <linux/libfdt.h>
21 
22 #include "nic_reg.h"
23 #include "nic.h"
24 #include "bgx.h"
25 
/* Map QLM serdes modes to the generic phylib interface types */
static const phy_interface_t if_mode[] = {
	[QLM_MODE_SGMII]  = PHY_INTERFACE_MODE_SGMII,
	[QLM_MODE_RGMII]  = PHY_INTERFACE_MODE_RGMII,
	[QLM_MODE_QSGMII] = PHY_INTERFACE_MODE_QSGMII,
	[QLM_MODE_XAUI]   = PHY_INTERFACE_MODE_XAUI,
	[QLM_MODE_RXAUI]  = PHY_INTERFACE_MODE_RXAUI,
};
33 
/* Per-LMAC (logical MAC) state within one BGX block */
struct lmac {
	struct bgx		*bgx;		/* parent BGX instance */
	int			dmac;		/* count of DMAC CAM entries in use */
	u8			mac[6];		/* station MAC address */
	bool			link_up;
	int			lmacid; /* ID within BGX */
	int			phy_addr; /* ID on board */
	struct udevice		*dev;
	struct mii_dev		*mii_bus;	/* MDIO bus the PHY sits on */
	struct phy_device	*phydev;	/* attached PHY, NULL if none */
	unsigned int		last_duplex;	/* last seen duplex (1 = full) */
	unsigned int		last_link;	/* last seen link state */
	unsigned int		last_speed;	/* last seen speed in Mbps */
	int			lane_to_sds;	/* lane-to-serdes mapping value */
	int			use_training;	/* non-zero for KR link training */
	int			lmac_type;	/* BGX_CMRX_CONFIG lmac_type code */
	u8			qlm_mode;	/* QLM_MODE_* this LMAC runs in */
	int			qlm;		/* QLM/DLM number, -1 if unused */
	bool			is_1gx;		/* 1000Base-X instead of SGMII */
};
54 
/* One BGX Ethernet interface block and its LMACs */
struct bgx {
	u8			bgx_id;		/* BGX index on the node */
	int			node;		/* NUMA node this BGX belongs to */
	struct	lmac		lmac[MAX_LMAC_PER_BGX];
	int			lmac_count;	/* LMACs actually configured */
	u8			max_lmac;	/* hardware maximum LMACs */
	void __iomem		*reg_base;	/* mapped CSR base address */
	struct pci_dev		*pdev;		/* NOTE(review): Linux-style type; verify it is used under U-Boot */
	bool			is_rgx;		/* true when this is the RGX block */
};
65 
/* Per-BGX board wiring info (PHY addresses, MDIO buses), filled by board code */
struct bgx_board_info bgx_board_info[MAX_BGX_PER_NODE];

/*
 * Probed BGX instances, indexed as node * MAX_BGX_PER_NODE + bgx_id.
 * NOTE(review): array is sized MAX_BGX_PER_NODE but indexed with a node
 * multiplier — fine for single-node, overflows for node > 0; confirm.
 */
struct bgx *bgx_vnic[MAX_BGX_PER_NODE];
69 
70 /* APIs to read/write BGXX CSRs */
/* Read a 64-bit BGX CSR; per-LMAC register banks are spaced 1 MB apart */
static u64 bgx_reg_read(struct bgx *bgx, uint8_t lmac, u64 offset)
{
	uintptr_t reg = (uintptr_t)bgx->reg_base +
			((uint32_t)lmac << 20) + offset;

	return readq((void *)reg);
}
78 
/* Write a 64-bit BGX CSR of the given LMAC */
static void bgx_reg_write(struct bgx *bgx, uint8_t lmac,
			  u64 offset, u64 val)
{
	uintptr_t reg = (uintptr_t)bgx->reg_base +
			((uint32_t)lmac << 20) + offset;

	writeq(val, (void *)reg);
}
87 
/* Read-modify-write: OR @val into an LMAC CSR */
static void bgx_reg_modify(struct bgx *bgx, uint8_t lmac,
			   u64 offset, u64 val)
{
	bgx_reg_write(bgx, lmac, offset,
		      bgx_reg_read(bgx, lmac, offset) | val);
}
96 
/*
 * Poll an LMAC CSR until the masked bits are all clear (@zero true) or
 * any are set (@zero false).  Returns 0 on success, 1 on ~200ms timeout.
 */
static int bgx_poll_reg(struct bgx *bgx, uint8_t lmac,
			u64 reg, u64 mask, bool zero)
{
	int remaining;

	for (remaining = 200; remaining > 0; remaining--) {
		bool bits_clear = !(bgx_reg_read(bgx, lmac, reg) & mask);

		if (bits_clear == zero)
			return 0;
		mdelay(1);
	}
	return 1;
}
114 
/*
 * Poll a GSER CSR until (value >> bit) & mask equals @expected_val.
 * Returns 0 on match, 1 once @timeout milliseconds elapse.
 */
static int gser_poll_reg(u64 reg, int bit, u64 mask, u64 expected_val,
			 int timeout)
{
	debug("%s reg = %#llx, mask = %#llx,", __func__, reg, mask);
	debug(" expected_val = %#llx, bit = %d\n", expected_val, bit);

	while (timeout-- > 0) {
		u64 field = (readq(reg) >> bit) & mask;

		if (field == expected_val)
			return 0;
		mdelay(1);
	}
	return 1;
}
131 
is_bgx_port_valid(int bgx,int lmac)132 static bool is_bgx_port_valid(int bgx, int lmac)
133 {
134 	debug("%s bgx %d lmac %d valid %d\n", __func__, bgx, lmac,
135 	      bgx_board_info[bgx].lmac_reg[lmac]);
136 
137 	if (bgx_board_info[bgx].lmac_reg[lmac])
138 		return 1;
139 	else
140 		return 0;
141 }
142 
bgx_get_lmac(int node,int bgx_idx,int lmacid)143 struct lmac *bgx_get_lmac(int node, int bgx_idx, int lmacid)
144 {
145 	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
146 
147 	if (bgx)
148 		return &bgx->lmac[lmacid];
149 
150 	return NULL;
151 }
152 
bgx_get_lmac_mac(int node,int bgx_idx,int lmacid)153 const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
154 {
155 	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
156 
157 	if (bgx)
158 		return bgx->lmac[lmacid].mac;
159 
160 	return NULL;
161 }
162 
bgx_set_lmac_mac(int node,int bgx_idx,int lmacid,const u8 * mac)163 void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
164 {
165 	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
166 
167 	if (!bgx)
168 		return;
169 
170 	memcpy(bgx->lmac[lmacid].mac, mac, 6);
171 }
172 
/*
 * Report which BGX instances are probed on @node.  Note: despite the
 * parameter name, *bgx_count is a BITMASK — bit i is set when BGX i
 * exists — not a count.
 */
void bgx_get_count(int node, int *bgx_count)
{
	int i;
	struct bgx *bgx;

	*bgx_count = 0;
	for (i = 0; i < MAX_BGX_PER_NODE; i++) {
		bgx = bgx_vnic[node * MAX_BGX_PER_NODE + i];
		debug("bgx_vnic[%u]: %p\n", node * MAX_BGX_PER_NODE + i,
		      bgx);
		if (bgx)
			*bgx_count |= (1 << i);
	}
}
188 
189 /* Return number of LMAC configured for this BGX */
bgx_get_lmac_count(int node,int bgx_idx)190 int bgx_get_lmac_count(int node, int bgx_idx)
191 {
192 	struct bgx *bgx;
193 
194 	bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
195 	if (bgx)
196 		return bgx->lmac_count;
197 
198 	return 0;
199 }
200 
/* Enable or disable packet RX and TX on one LMAC via CMRX_CFG */
void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
{
	const u64 mask = CMR_PKT_RX_EN | CMR_PKT_TX_EN;
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
	u64 cfg;

	if (!bgx)
		return;

	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg = enable ? (cfg | mask) : (cfg & ~mask);
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
}
216 
/* Clear all DMAC CAM filter entries this LMAC has programmed */
static void bgx_flush_dmac_addrs(struct bgx *bgx, u64 lmac)
{
	u64 base = BGX_CMR_RX_DMACX_CAM +
		   lmac * MAX_DMAC_PER_LMAC * sizeof(u64);

	while (bgx->lmac[lmac].dmac > 0) {
		bgx->lmac[lmac].dmac--;
		writeq(0ULL, (void *)((uintptr_t)bgx->reg_base + base +
				      bgx->lmac[lmac].dmac * sizeof(u64)));
	}
}
231 
232 /* Configure BGX LMAC in internal loopback mode */
bgx_lmac_internal_loopback(int node,int bgx_idx,int lmac_idx,bool enable)233 void bgx_lmac_internal_loopback(int node, int bgx_idx,
234 				int lmac_idx, bool enable)
235 {
236 	struct bgx *bgx;
237 	struct lmac *lmac;
238 	u64    cfg;
239 
240 	bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
241 	if (!bgx)
242 		return;
243 
244 	lmac = &bgx->lmac[lmac_idx];
245 	if (lmac->qlm_mode == QLM_MODE_SGMII) {
246 		cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL);
247 		if (enable)
248 			cfg |= PCS_MRX_CTL_LOOPBACK1;
249 		else
250 			cfg &= ~PCS_MRX_CTL_LOOPBACK1;
251 		bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg);
252 	} else {
253 		cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1);
254 		if (enable)
255 			cfg |= SPU_CTL_LOOPBACK;
256 		else
257 			cfg &= ~SPU_CTL_LOOPBACK;
258 		bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg);
259 	}
260 }
261 
/*
 * Return the QLM/DLM serdes group wired to the given BGX lane, or -1 if
 * that serdes is not configured for BGX use or is powered down.
 */
static int get_qlm_for_bgx(int node, int bgx_id, int index)
{
	int qlm = 0;
	u64 cfg;

	/*
	 * Fixed SoC wiring: CN81XX uses 2-lane DLMs (two per BGX, picked by
	 * lane index); CN83XX maps each BGX to a fixed QLM, with BGX2
	 * spanning DLM5/DLM6.
	 */
	if (otx_is_soc(CN81XX)) {
		qlm = (bgx_id) ? 2 : 0;
		qlm += (index >= 2) ? 1 : 0;
	} else if (otx_is_soc(CN83XX)) {
		switch (bgx_id) {
		case 0:
			qlm = 2;
			break;
		case 1:
			qlm = 3;
			break;
		case 2:
			if (index >= 2)
				qlm = 6;
			else
				qlm = 5;
			break;
		case 3:
			qlm = 4;
			break;
		}
	}

	cfg = readq(GSERX_CFG(qlm)) & GSERX_CFG_BGX;
	debug("%s:qlm%d: cfg = %lld\n", __func__, qlm, cfg);

	/* Check if DLM is configured as BGX# */
	if (cfg) {
		/* Non-zero PHY_CTL presumably means lanes held in reset/power-down — confirm */
		if (readq(GSERX_PHY_CTL(qlm)))
			return -1;
		return qlm;
	}
	return -1;
}
302 
/*
 * Bring up one LMAC in SGMII/RGMII/QSGMII mode: program GMI TX threshold
 * and jabber limit, reset the PCS and (unless disabled by board data)
 * start autonegotiation.  Returns 0 on success, -1 if the PCS reset or
 * AN completion polls time out.
 */
static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
{
	u64 cfg;
	struct lmac *lmac;

	lmac = &bgx->lmac[lmacid];

	debug("%s:bgx_id = %d, lmacid = %d\n", __func__, bgx->bgx_id, lmacid);

	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);

	/* Disable frame alignment if using preamble */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
	if (cfg & 1)
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* PCS reset */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
			 PCS_MRX_CTL_RESET, true)) {
		printf("BGX PCS reset not completed\n");
		return -1;
	}

	/* power down, reset autoneg, autoneg enable */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
	cfg &= ~PCS_MRX_CTL_PWR_DN;

	/* Board data may disable autoneg (e.g. fixed-link setups) */
	if (bgx_board_info[bgx->bgx_id].phy_info[lmacid].autoneg_dis)
		cfg |= (PCS_MRX_CTL_RST_AN);
	else
		cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
	bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);

	/* Disable disparity for QSGMII mode, to prevent propogation across
	 * ports.
	 */

	if (lmac->qlm_mode == QLM_MODE_QSGMII) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL);
		cfg &= ~PCS_MISCX_CTL_DISP_EN;
		bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL, cfg);
		return 0; /* Skip checking AN_CPT */
	}

	/* 1000Base-X uses a different PCS mode than SGMII */
	if (lmac->is_1gx) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL);
		cfg |= PCS_MISC_CTL_MODE;
		bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL, cfg);
	}

	/* Wait for autoneg completion; only meaningful for plain SGMII */
	if (lmac->qlm_mode == QLM_MODE_SGMII) {
		if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
				 PCS_MRX_STATUS_AN_CPT, false)) {
			printf("BGX AN_CPT not completed\n");
			return -1;
		}
	}

	return 0;
}
369 
/*
 * Program GMX port config and PCS sampling for the speed/duplex recorded
 * in lmac->last_speed / last_duplex / link_up.  The LMAC is disabled
 * around the update and re-enabled afterwards.  Always returns 0.
 */
static int bgx_lmac_sgmii_set_link_speed(struct lmac *lmac)
{
	u64 prtx_cfg;
	u64 pcs_miscx_ctl;
	u64 cfg;
	struct bgx *bgx = lmac->bgx;
	unsigned int lmacid = lmac->lmacid;

	debug("%s: lmacid %d\n", __func__, lmac->lmacid);

	/* Disable LMAC before setting up speed */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	/* Read GMX CFG */
	prtx_cfg = bgx_reg_read(bgx, lmacid,
				BGX_GMP_GMI_PRTX_CFG);
	/* Read PCS MISCS CTL */
	pcs_miscx_ctl = bgx_reg_read(bgx, lmacid,
				     BGX_GMP_PCS_MISCX_CTL);

	/* Use GMXENO to force the link down*/
	if (lmac->link_up) {
		pcs_miscx_ctl &= ~PCS_MISC_CTL_GMX_ENO;
		/* change the duplex setting if the link is up */
		prtx_cfg |= GMI_PORT_CFG_DUPLEX;
	} else {
		pcs_miscx_ctl |= PCS_MISC_CTL_GMX_ENO;
	}

	/* speed based setting for GMX */
	switch (lmac->last_speed) {
	case 10:
		prtx_cfg &= ~GMI_PORT_CFG_SPEED;
		prtx_cfg |= GMI_PORT_CFG_SPEED_MSB;
		prtx_cfg &= ~GMI_PORT_CFG_SLOT_TIME;
		pcs_miscx_ctl |= 50; /* sampling point */
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SLOT, 0x40);
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 100:
		prtx_cfg &= ~GMI_PORT_CFG_SPEED;
		prtx_cfg &= ~GMI_PORT_CFG_SPEED_MSB;
		prtx_cfg &= ~GMI_PORT_CFG_SLOT_TIME;
		pcs_miscx_ctl |= 0x5; /* sampling point */
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SLOT, 0x40);
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 1000:
		prtx_cfg |= GMI_PORT_CFG_SPEED;
		prtx_cfg &= ~GMI_PORT_CFG_SPEED_MSB;
		prtx_cfg |= GMI_PORT_CFG_SLOT_TIME;
		pcs_miscx_ctl |= 0x1; /* sampling point */
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SLOT, 0x200);
		/* half duplex at 1G needs a non-zero TX burst limit */
		if (lmac->last_duplex)
			bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		else /* half duplex */
			bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_BURST,
				      0x2000);
		break;
	default:
		/* unknown speed: leave GMX speed fields untouched */
		break;
	}

	/* write back the new PCS misc and GMX settings */
	bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL, pcs_miscx_ctl);
	bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_PRTX_CFG, prtx_cfg);

	/* read back GMX CFG again to check config completion */
	bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_PRTX_CFG);

	/* enable BGX back */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg |= CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	return 0;
}
449 
/*
 * Bring up one LMAC in a 10G+ serdes mode (XAUI/RXAUI/XFI/XLAUI/KR):
 * reset the SPU, clear pending interrupts, optionally enable KR link
 * training, disable FEC, program autoneg advertisement and re-enable
 * the MAC.  Returns 0 on success, -1 if the SPU reset times out.
 */
static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
{
	u64 cfg;
	struct lmac *lmac;

	lmac = &bgx->lmac[lmacid];

	/* Reset SPU */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
		printf("BGX SPU reset not completed\n");
		return -1;
	}

	/* Disable LMAC */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	/* Keep SPU in low power while configuring; cleared further below */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
	/* Set interleaved running disparity for RXAUI */
	if (lmac->qlm_mode != QLM_MODE_RXAUI)
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
	else
		bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
			       SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP);

	/* clear all interrupts (write-1-to-clear registers) */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);

	/* KR modes: reset coefficient-update state then enable training */
	if (lmac->use_training) {
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
		/* training enable */
		bgx_reg_modify(bgx, lmacid,
			       BGX_SPUX_BR_PMD_CRTL, SPU_PMD_CRTL_TRAIN_EN);
	}

	/* Append FCS to each packet */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);

	/* Disable forward error correction */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
	cfg &= ~SPU_FEC_CTL_FEC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);

	/* Disable autoneg (enabled only for KR, which needs it for training) */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
	cfg = cfg & ~(SPU_AN_CTL_XNP_EN);
	if (lmac->use_training)
		cfg = cfg | (SPU_AN_CTL_AN_EN);
	else
		cfg = cfg & ~(SPU_AN_CTL_AN_EN);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
	/* Clear all KR bits, configure according to the mode */
	cfg &= ~((0xfULL << 22) | (1ULL << 12));
	if (lmac->qlm_mode == QLM_MODE_10G_KR)
		cfg |= (1 << 23);
	else if (lmac->qlm_mode == QLM_MODE_40G_KR4)
		cfg |= (1 << 24);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);

	/* AN arbitration link check is a per-BGX (lmac 0) setting */
	cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
	if (lmac->use_training)
		cfg |= SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
	else
		cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
	bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* Leave low-power state now that configuration is done */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
	cfg &= ~SPU_CTL_LOW_POWER;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
	cfg &= ~SMU_TX_CTL_UNI_EN;
	cfg |= SMU_TX_CTL_DIC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);

	/* take lmac_count into account */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);

	debug("xaui_init: lmacid = %d, qlm = %d, qlm_mode = %d\n",
	      lmacid, lmac->qlm, lmac->qlm_mode);
	/* RXAUI with Marvell PHY requires some tweaking */
	if (lmac->qlm_mode == QLM_MODE_RXAUI) {
		char mii_name[20];
		struct phy_info *phy;

		phy = &bgx_board_info[bgx->bgx_id].phy_info[lmacid];
		snprintf(mii_name, sizeof(mii_name), "smi%d", phy->mdio_bus);

		debug("mii_name: %s\n", mii_name);
		lmac->mii_bus = miiphy_get_dev_by_name(mii_name);
		lmac->phy_addr = phy->phy_addr;
		rxaui_phy_xs_init(lmac->mii_bus, lmac->phy_addr);
	}

	return 0;
}
563 
564 /* Get max number of lanes present in a given QLM/DLM */
get_qlm_lanes(int qlm)565 static int get_qlm_lanes(int qlm)
566 {
567 	if (otx_is_soc(CN81XX))
568 		return 2;
569 	else if (otx_is_soc(CN83XX))
570 		return (qlm >= 5) ? 2 : 4;
571 	else
572 		return -1;
573 }
574 
/*
 * Run receiver equalization on one lane of a QLM/DLM (or on all lanes
 * when @lane == -1): wait for CDR lock, trigger a software-managed RX
 * equalization request per lane, wait for completion and return control
 * to hardware.  Returns 0 on success, -1 on CDR-lock or EQ timeout.
 */
int __rx_equalization(int qlm, int lane)
{
	int max_lanes = get_qlm_lanes(qlm);
	int l;
	int fail = 0;

	/* Before completing Rx equalization wait for
	 * GSERx_RX_EIE_DETSTS[CDRLOCK] to be set
	 * This ensures the rx data is valid
	 */
	if (lane == -1) {
		/* All lanes of the group must report CDR lock */
		if (gser_poll_reg(GSER_RX_EIE_DETSTS(qlm), GSER_CDRLOCK, 0xf,
				  (1 << max_lanes) - 1, 100)) {
			debug("ERROR: CDR Lock not detected");
			debug(" on DLM%d for 2 lanes\n", qlm);
			return -1;
		}
	} else {
		/* Only the requested lane must report CDR lock */
		if (gser_poll_reg(GSER_RX_EIE_DETSTS(qlm), GSER_CDRLOCK,
				  (0xf & (1 << lane)), (1 << lane), 100)) {
			debug("ERROR: DLM%d: CDR Lock not detected", qlm);
			debug(" on %d lane\n", lane);
			return -1;
		}
	}

	/* Kick off an EQ request on each selected lane */
	for (l = 0; l < max_lanes; l++) {
		u64 rctl, reer;

		if (lane != -1 && lane != l)
			continue;

		/* Enable software control */
		rctl = readq(GSER_BR_RXX_CTL(qlm, l));
		rctl |= GSER_BR_RXX_CTL_RXT_SWM;
		writeq(rctl, GSER_BR_RXX_CTL(qlm, l));

		/* Clear the completion flag and initiate a new request */
		reer = readq(GSER_BR_RXX_EER(qlm, l));
		reer &= ~GSER_BR_RXX_EER_RXT_ESV;
		reer |= GSER_BR_RXX_EER_RXT_EER;
		writeq(reer, GSER_BR_RXX_EER(qlm, l));
	}

	/* Wait for RX equalization to complete */
	for (l = 0; l < max_lanes; l++) {
		u64 rctl, reer;

		if (lane != -1 && lane != l)
			continue;

		gser_poll_reg(GSER_BR_RXX_EER(qlm, l), EER_RXT_ESV, 1, 1, 200);
		reer = readq(GSER_BR_RXX_EER(qlm, l));

		/* Switch back to hardware control */
		rctl = readq(GSER_BR_RXX_CTL(qlm, l));
		rctl &= ~GSER_BR_RXX_CTL_RXT_SWM;
		writeq(rctl, GSER_BR_RXX_CTL(qlm, l));

		/* ESV set means the EQ status is valid, i.e. it finished */
		if (reer & GSER_BR_RXX_EER_RXT_ESV) {
			debug("Rx equalization completed on DLM%d", qlm);
			debug(" QLM%d rxt_esm = 0x%llx\n", l, (reer & 0x3fff));
		} else {
			debug("Rx equalization timedout on DLM%d", qlm);
			debug(" lane %d\n", l);
			fail = 1;
		}
	}

	return (fail) ? -1 : 0;
}
646 
/*
 * Try to bring a 10G+ (XAUI/RXAUI/XFI/XLAUI/KR) link up: complete
 * autoneg / link training if enabled, run RX equalization on non-KR
 * serdes, then poll SPU/SMU status until the receive path is clean.
 * Returns 0 when the link is good, -1 when any step fails (caller is
 * expected to retry).
 */
static int bgx_xaui_check_link(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	int lmacid = lmac->lmacid;
	int lmac_type = lmac->lmac_type;
	u64 cfg;

	/* Keep the SPU receiver disabled while checking */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);

	/* check if auto negotiation is complete */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
	if (cfg & SPU_AN_CTL_AN_EN) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_STATUS);
		if (!(cfg & SPU_AN_STS_AN_COMPLETE)) {
			/* Restart autonegotiation */
			debug("restarting auto-neg\n");
			bgx_reg_modify(bgx, lmacid, BGX_SPUX_AN_CONTROL,
				       SPU_AN_CTL_AN_RESTART);
			return -1;
		}
	}

	debug("%s link use_training %d\n", __func__, lmac->use_training);
	if (lmac->use_training) {
		/* Bit 13 of SPUX_INT presumably signals training done — confirm */
		cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
		if (!(cfg & (1ull << 13))) {
			debug("waiting for link training\n");
			/* Clear the training interrupts (W1C) */
			cfg = (1ull << 13) | (1ull << 14);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);

			udelay(2000);
			/* Restart training */
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
			cfg |= (1ull << 0);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
			return -1;
		}
	}

	/* Perform RX Equalization. Applies to non-KR interfaces for speeds
	 * >= 6.25Gbps.
	 */
	if (!lmac->use_training) {
		int qlm;
		bool use_dlm = 0;

		/* CN81XX and CN83XX BGX2 use 2-lane DLMs rather than QLMs */
		if (otx_is_soc(CN81XX) || (otx_is_soc(CN83XX) &&
					   bgx->bgx_id == 2))
			use_dlm = 1;
		switch (lmac->lmac_type) {
		default:
		case BGX_MODE_SGMII:
		case BGX_MODE_RGMII:
		case BGX_MODE_XAUI:
			/* Nothing to do */
			break;
		case BGX_MODE_XLAUI:
			/* 4-lane link: on DLM parts it spans two DLMs */
			if (use_dlm) {
				if (__rx_equalization(lmac->qlm, -1) ||
				    __rx_equalization(lmac->qlm + 1, -1)) {
					printf("BGX%d:%d", bgx->bgx_id, lmacid);
					printf(" Waiting for RX Equalization");
					printf(" on DLM%d/DLM%d\n",
					       lmac->qlm, lmac->qlm + 1);
					return -1;
				}
			} else {
				if (__rx_equalization(lmac->qlm, -1)) {
					printf("BGX%d:%d", bgx->bgx_id, lmacid);
					printf(" Waiting for RX Equalization");
					printf(" on QLM%d\n", lmac->qlm);
					return -1;
				}
			}
			break;
		case BGX_MODE_RXAUI:
			/* RXAUI0 uses LMAC0:QLM0/QLM2 and RXAUI1 uses
			 * LMAC1:QLM1/QLM3 RXAUI requires 2 lanes
			 * for each interface
			 */
			qlm = lmac->qlm;
			if (__rx_equalization(qlm, 0)) {
				printf("BGX%d:%d", bgx->bgx_id, lmacid);
				printf(" Waiting for RX Equalization");
				printf(" on QLM%d, Lane0\n", qlm);
				return -1;
			}
			if (__rx_equalization(qlm, 1)) {
				printf("BGX%d:%d", bgx->bgx_id, lmacid);
				printf(" Waiting for RX Equalization");
				printf(" on QLM%d, Lane1\n", qlm);
				return -1;
			}
			break;
		case BGX_MODE_XFI:
			{
				int lid;
				bool altpkg = otx_is_altpkg();

				/* Map the LMAC index to its serdes lane */
				if (bgx->bgx_id == 0 && altpkg && lmacid)
					lid = 0;
				else if ((lmacid >= 2) && use_dlm)
					lid = lmacid - 2;
				else
					lid = lmacid;

				/* NOTE(review): failure here only logs, does
				 * not return -1 — intentional? confirm
				 */
				if (__rx_equalization(lmac->qlm, lid)) {
					printf("BGX%d:%d", bgx->bgx_id, lid);
					printf(" Waiting for RX Equalization");
					printf(" on QLM%d\n", lmac->qlm);
				}
			}
			break;
		}
	}

	/* wait for PCS to come out of reset */
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
		printf("BGX SPU reset not completed\n");
		return -1;
	}

	/* lmac_type 3/4 (XFI/XLAUI-style) use 64b/66b block lock;
	 * others use lane alignment status
	 */
	if (lmac_type == 3 || lmac_type == 4) {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
				 SPU_BR_STATUS_BLK_LOCK, false)) {
			printf("SPU_BR_STATUS_BLK_LOCK not completed\n");
			return -1;
		}
	} else {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
				 SPU_BX_STATUS_RX_ALIGN, false)) {
			printf("SPU_BX_STATUS_RX_ALIGN not completed\n");
			return -1;
		}
	}

	/* Clear rcvflt bit (latching high) and read it back */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
		printf("Receive fault, retry training\n");
		if (lmac->use_training) {
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
			if (!(cfg & (1ull << 13))) {
				cfg = (1ull << 13) | (1ull << 14);
				bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
				cfg = bgx_reg_read(bgx, lmacid,
						   BGX_SPUX_BR_PMD_CRTL);
				cfg |= (1ull << 0);
				bgx_reg_write(bgx, lmacid,
					      BGX_SPUX_BR_PMD_CRTL, cfg);
				return -1;
			}
		}
		return -1;
	}

	/* Wait for MAC RX to be ready */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
			 SMU_RX_CTL_STATUS, true)) {
		printf("SMU RX link not okay\n");
		return -1;
	}

	/* Wait for BGX RX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
		printf("SMU RX not idle\n");
		return -1;
	}

	/* Wait for BGX TX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_TX_IDLE, false)) {
		printf("SMU TX not idle\n");
		return -1;
	}

	/* Re-check for a receive fault that may have latched during the polls */
	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
		printf("Receive fault\n");
		return -1;
	}

	/* Receive link is latching low. Force it high and verify it */
	if (!(bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS1) &
	    SPU_STATUS1_RCV_LNK))
		bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1,
			       SPU_STATUS1_RCV_LNK);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
			 SPU_STATUS1_RCV_LNK, false)) {
		printf("SPU receive link down\n");
		return -1;
	}

	/* All good: re-enable the SPU receiver */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
	cfg &= ~SPU_MISC_CTL_RX_DIS;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
	return 0;
}
844 
/*
 * Initialize and enable one LMAC: dispatch to the SGMII-family or
 * XAUI-family bring-up, enable FCS/PAD insertion and minimum packet
 * size, then turn on the MAC with RX/TX enabled.  Returns 0 on
 * success, -1 on init failure.
 */
static int bgx_lmac_enable(struct bgx *bgx, int8_t lmacid)
{
	struct lmac *lmac;
	u64 cfg;

	lmac = &bgx->lmac[lmacid];
	lmac->bgx = bgx;

	debug("%s: lmac: %p, lmacid = %d\n", __func__, lmac, lmacid);

	if (lmac->qlm_mode == QLM_MODE_SGMII ||
	    lmac->qlm_mode == QLM_MODE_RGMII ||
	    lmac->qlm_mode == QLM_MODE_QSGMII) {
		if (bgx_lmac_sgmii_init(bgx, lmacid)) {
			debug("bgx_lmac_sgmii_init failed\n");
			return -1;
		}
		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
		/* pad to minimum Ethernet frame size (60 bytes before FCS) */
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
	} else {
		if (bgx_lmac_xaui_init(bgx, lmacid, lmac->lmac_type))
			return -1;
		cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
		cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
		/* SMU min size includes the 4-byte FCS */
		bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
	}

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG,
		       CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN);

	return 0;
}
881 
/*
 * Check/establish link on one LMAC.  For SGMII-family modes this
 * connects and starts the PHY via phylib (or assumes a fixed 1G
 * full-duplex link when no PHY address is configured) and programs GMX
 * to the negotiated speed.  For 10G+ modes it samples SPU/SMU status
 * and falls back to bgx_xaui_check_link() when the link is not up.
 * Returns the link state (0/1) or a negative error code.
 */
int bgx_poll_for_link(int node, int bgx_idx, int lmacid)
{
	int ret;
	struct lmac *lmac = bgx_get_lmac(node, bgx_idx, lmacid);
	/* NOTE(review): only 10 bytes vs 20 elsewhere — enough for "smi%d"
	 * with small bus numbers, but confirm mdio_bus range
	 */
	char mii_name[10];
	struct phy_info *phy;

	if (!lmac) {
		printf("LMAC %d/%d/%d is disabled or doesn't exist\n",
		       node, bgx_idx, lmacid);
		return 0;
	}

	debug("%s: %d, lmac: %d/%d/%d %p\n",
	      __FILE__, __LINE__,
	      node, bgx_idx, lmacid, lmac);
	if (lmac->qlm_mode == QLM_MODE_SGMII ||
	    lmac->qlm_mode == QLM_MODE_RGMII ||
	    lmac->qlm_mode == QLM_MODE_QSGMII) {
		/* phy_addr == -1 means no PHY: treat as fixed 1G link */
		if (bgx_board_info[bgx_idx].phy_info[lmacid].phy_addr == -1) {
			lmac->link_up = 1;
			lmac->last_speed = 1000;
			lmac->last_duplex = 1;
			printf("BGX%d:LMAC %u link up\n", bgx_idx, lmacid);
			return lmac->link_up;
		}
		snprintf(mii_name, sizeof(mii_name), "smi%d",
			 bgx_board_info[bgx_idx].phy_info[lmacid].mdio_bus);

		debug("mii_name: %s\n", mii_name);

		lmac->mii_bus = miiphy_get_dev_by_name(mii_name);
		phy = &bgx_board_info[bgx_idx].phy_info[lmacid];
		lmac->phy_addr = phy->phy_addr;

		debug("lmac->mii_bus: %p\n", lmac->mii_bus);
		if (!lmac->mii_bus) {
			printf("MDIO device %s not found\n", mii_name);
			ret = -ENODEV;
			return ret;
		}

		lmac->phydev = phy_connect(lmac->mii_bus, lmac->phy_addr,
					   lmac->dev,
					   if_mode[lmac->qlm_mode]);

		if (!lmac->phydev) {
			printf("%s: No PHY device\n", __func__);
			return -1;
		}

		ret = phy_config(lmac->phydev);
		if (ret) {
			printf("%s: Could not initialize PHY %s\n",
			       __func__, lmac->phydev->dev->name);
			return ret;
		}

		/* startup failure is logged but not fatal: link state below
		 * still reflects whatever the PHY reports
		 */
		ret = phy_startup(lmac->phydev);
		debug("%s: %d\n", __FILE__, __LINE__);
		if (ret) {
			printf("%s: Could not initialize PHY %s\n",
			       __func__, lmac->phydev->dev->name);
		}

#ifdef OCTEONTX_XCV
		if (lmac->qlm_mode == QLM_MODE_RGMII)
			xcv_setup_link(lmac->phydev->link, lmac->phydev->speed);
#endif

		/* Mirror the PHY's negotiated state into the LMAC */
		lmac->link_up = lmac->phydev->link;
		lmac->last_speed = lmac->phydev->speed;
		lmac->last_duplex = lmac->phydev->duplex;

		debug("%s qlm_mode %d phy link status 0x%x,last speed 0x%x,",
		      __func__, lmac->qlm_mode, lmac->link_up,
		      lmac->last_speed);
		debug(" duplex 0x%x\n", lmac->last_duplex);

		if (lmac->qlm_mode != QLM_MODE_RGMII)
			bgx_lmac_sgmii_set_link_speed(lmac);

	} else {
		u64 status1;
		u64 tx_ctl;
		u64 rx_ctl;

		status1 = bgx_reg_read(lmac->bgx, lmac->lmacid,
				       BGX_SPUX_STATUS1);
		tx_ctl = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_TX_CTL);
		rx_ctl = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);

		debug("BGX%d LMAC%d BGX_SPUX_STATUS2: %lx\n", bgx_idx, lmacid,
		      (unsigned long)bgx_reg_read(lmac->bgx, lmac->lmacid,
						  BGX_SPUX_STATUS2));
		debug("BGX%d LMAC%d BGX_SPUX_STATUS1: %lx\n", bgx_idx, lmacid,
		      (unsigned long)bgx_reg_read(lmac->bgx, lmac->lmacid,
						  BGX_SPUX_STATUS1));
		debug("BGX%d LMAC%d BGX_SMUX_RX_CTL: %lx\n", bgx_idx, lmacid,
		      (unsigned long)bgx_reg_read(lmac->bgx, lmac->lmacid,
						  BGX_SMUX_RX_CTL));
		debug("BGX%d LMAC%d BGX_SMUX_TX_CTL: %lx\n", bgx_idx, lmacid,
		      (unsigned long)bgx_reg_read(lmac->bgx, lmac->lmacid,
						  BGX_SMUX_TX_CTL));

		/* Link is up when SPU reports receive link and both SMU
		 * TX/RX status fields are clean
		 */
		if ((status1 & SPU_STATUS1_RCV_LNK) &&
		    ((tx_ctl & SMU_TX_CTL_LNK_STATUS) == 0) &&
		    ((rx_ctl & SMU_RX_CTL_STATUS) == 0)) {
			lmac->link_up = 1;
			if (lmac->lmac_type == 4)
				lmac->last_speed = 40000;
			else
				lmac->last_speed = 10000;
			lmac->last_duplex = 1;
		} else {
			lmac->link_up = 0;
			lmac->last_speed = 0;
			lmac->last_duplex = 0;
			return bgx_xaui_check_link(lmac);
		}

		lmac->last_link = lmac->link_up;
	}

	printf("BGX%d:LMAC %u link %s\n", bgx_idx, lmacid,
	       (lmac->link_up) ? "up" : "down");

	return lmac->link_up;
}
1011 
/*
 * Quiesce one LMAC: clear the enable bit in CMRX_CFG, flush its DMAC
 * CAM filter entries and shut down the attached PHY, if any.
 */
void bgx_lmac_disable(struct bgx *bgx, uint8_t lmacid)
{
	struct lmac *lmac;
	u64 cmrx_cfg;

	lmac = &bgx->lmac[lmacid];

	cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	/* NOTE(review): bit 15 looks like CMR_EN — confirm and use the macro */
	cmrx_cfg &= ~(1 << 15);
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
	bgx_flush_dmac_addrs(bgx, lmacid);

	if (lmac->phydev)
		phy_shutdown(lmac->phydev);

	lmac->phydev = NULL;
}
1029 
/* Program BGXX_CMRX_CONFIG.{lmac_type,lane_to_sds} for each interface.
 * And the number of LMACs used by this interface. Each lmac can be in
 * programmed in a different mode, so parse each lmac one at a time.
 */
static void bgx_init_hw(struct bgx *bgx)
{
	struct lmac *lmac;
	int i, lmacid, count = 0, inc = 0;
	char buf[40];
	/* static: QSGMII programs all four LMACs in one pass, so remember
	 * across iterations (and calls) that it was already configured.
	 */
	static int qsgmii_configured;

	for (lmacid = 0; lmacid < MAX_LMAC_PER_BGX; lmacid++) {
		struct lmac *tlmac;

		lmac = &bgx->lmac[lmacid];
		debug("%s: lmacid = %d, qlm = %d, mode = %d\n",
		      __func__, lmacid, lmac->qlm, lmac->qlm_mode);
		/* If QLM is not programmed, skip */
		if (lmac->qlm == -1)
			continue;

		/* Derive lmac_type/lane_to_sds from the QLM mode; 'buf'
		 * holds the banner printed only if the port is brought up
		 * in the compaction step below.
		 */
		switch (lmac->qlm_mode) {
		case QLM_MODE_SGMII:
		{
			/* EBB8000 (alternative pkg) has only lane0 present on
			 * DLM0 and DLM1, skip configuring other lanes
			 */
			if (bgx->bgx_id == 0 && otx_is_altpkg()) {
				if (lmacid % 2)
					continue;
			}
			lmac->lane_to_sds = lmacid;
			lmac->lmac_type = 0;
			snprintf(buf, sizeof(buf),
				 "BGX%d QLM%d LMAC%d mode: %s\n",
				 bgx->bgx_id, lmac->qlm, lmacid,
				 lmac->is_1gx ? "1000Base-X" : "SGMII");
			break;
		}
		case QLM_MODE_XAUI:
			/* XAUI uses all four lanes; only LMAC0 is valid */
			if (lmacid != 0)
				continue;
			lmac->lmac_type = 1;
			lmac->lane_to_sds = 0xE4;
			snprintf(buf, sizeof(buf),
				 "BGX%d QLM%d LMAC%d mode: XAUI\n",
				 bgx->bgx_id, lmac->qlm, lmacid);
			break;
		case QLM_MODE_RXAUI:
			/* RXAUI takes two lanes per port: LMAC0 on lanes 0/1;
			 * LMAC1 on lanes 2/3 only if the second DLM is RXAUI.
			 */
			if (lmacid == 0) {
				lmac->lmac_type = 2;
				lmac->lane_to_sds = 0x4;
			} else if (lmacid == 1) {
				struct lmac *tlmac;

				tlmac = &bgx->lmac[2];
				if (tlmac->qlm_mode == QLM_MODE_RXAUI) {
					lmac->lmac_type = 2;
					lmac->lane_to_sds = 0xe;
					lmac->qlm = tlmac->qlm;
				}
			} else {
				continue;
			}
			snprintf(buf, sizeof(buf),
				 "BGX%d QLM%d LMAC%d mode: RXAUI\n",
				 bgx->bgx_id, lmac->qlm, lmacid);
			break;
		case QLM_MODE_XFI:
			/* EBB8000 (alternative pkg) has only lane0 present on
			 * DLM0 and DLM1, skip configuring other lanes
			 */
			if (bgx->bgx_id == 0 && otx_is_altpkg()) {
				if (lmacid % 2)
					continue;
			}
			lmac->lane_to_sds = lmacid;
			lmac->lmac_type = 3;
			snprintf(buf, sizeof(buf),
				 "BGX%d QLM%d LMAC%d mode: XFI\n",
				 bgx->bgx_id, lmac->qlm, lmacid);
			break;
		case QLM_MODE_XLAUI:
			if (lmacid != 0)
				continue;
			lmac->lmac_type = 4;
			lmac->lane_to_sds = 0xE4;
			snprintf(buf, sizeof(buf),
				 "BGX%d QLM%d LMAC%d mode: XLAUI\n",
				 bgx->bgx_id, lmac->qlm, lmacid);
			break;
		case QLM_MODE_10G_KR:
			/* EBB8000 (alternative pkg) has only lane0 present on
			 * DLM0 and DLM1, skip configuring other lanes
			 */
			if (bgx->bgx_id == 0 && otx_is_altpkg()) {
				if (lmacid % 2)
					continue;
			}
			lmac->lane_to_sds = lmacid;
			lmac->lmac_type = 3;
			/* 10G-KR is XFI plus link training */
			lmac->use_training = 1;
			snprintf(buf, sizeof(buf),
				 "BGX%d QLM%d LMAC%d mode: 10G-KR\n",
				 bgx->bgx_id, lmac->qlm, lmacid);
			break;
		case QLM_MODE_40G_KR4:
			if (lmacid != 0)
				continue;
			lmac->lmac_type = 4;
			lmac->lane_to_sds = 0xE4;
			/* 40G-KR4 is XLAUI plus link training */
			lmac->use_training = 1;
			snprintf(buf, sizeof(buf),
				 "BGX%d QLM%d LMAC%d mode: 40G-KR4\n",
				 bgx->bgx_id, lmac->qlm, lmacid);
			break;
		case QLM_MODE_RGMII:
			if (lmacid != 0)
				continue;
			lmac->lmac_type = 5;
			lmac->lane_to_sds = 0xE4;
			snprintf(buf, sizeof(buf),
				 "BGX%d LMAC%d mode: RGMII\n",
				 bgx->bgx_id, lmacid);
			break;
		case QLM_MODE_QSGMII:
			if (qsgmii_configured)
				continue;
			if (lmacid == 0 || lmacid == 2) {
				/* QSGMII muxes 4 LMACs onto one lane; program
				 * all four CMRX_CFG registers here and skip
				 * the generic compaction path below.
				 */
				count = 4;
				printf("BGX%d QLM%d LMAC%d mode: QSGMII\n",
				       bgx->bgx_id, lmac->qlm, lmacid);
				for (i = 0; i < count; i++) {
					struct lmac *l;
					int type;

					l = &bgx->lmac[i];
					l->lmac_type = 6;
					type = l->lmac_type;
					l->qlm_mode = QLM_MODE_QSGMII;
					l->lane_to_sds = lmacid + i;
					if (is_bgx_port_valid(bgx->bgx_id, i))
						bgx_reg_write(bgx, i,
							      BGX_CMRX_CFG,
							      (type << 8) |
							      l->lane_to_sds);
				}
				qsgmii_configured = 1;
			}
			continue;
		default:
			continue;
		}

		/* Reset lmac to the unused slot */
		if (is_bgx_port_valid(bgx->bgx_id, count) &&
		    lmac->qlm_mode != QLM_MODE_QSGMII) {
			int lmac_en = 0;
			int tmp, idx;

			/* Compact enabled LMACs into consecutive slots:
			 * slot 'count' inherits this LMAC's settings.
			 */
			tlmac = &bgx->lmac[count];
			tlmac->lmac_type = lmac->lmac_type;
			idx = bgx->bgx_id;
			tmp = count + inc;
			/* Adjust lane_to_sds based on BGX-ENABLE */
			for (; tmp < MAX_LMAC_PER_BGX; inc++) {
				lmac_en = bgx_board_info[idx].lmac_enable[tmp];
				if (lmac_en)
					break;
				tmp = count + inc;
			}

			if (inc != 0 && inc < MAX_LMAC_PER_BGX &&
			    lmac_en && inc != count)
				tlmac->lane_to_sds =
					lmac->lane_to_sds + abs(inc - count);
			else
				tlmac->lane_to_sds = lmac->lane_to_sds;
			tlmac->qlm = lmac->qlm;
			tlmac->qlm_mode = lmac->qlm_mode;
			/* NOTE(review): is_1gx is not copied into tlmac, so
			 * when slots are compacted (count != lmacid) the
			 * 1000Base-X check below reads slot 'count''s stale
			 * value — confirm this is intended.
			 */

			printf("%s", buf);
			/* Initialize lmac_type and lane_to_sds */
			bgx_reg_write(bgx, count, BGX_CMRX_CFG,
				      (tlmac->lmac_type << 8) |
				      tlmac->lane_to_sds);

			if (tlmac->lmac_type == BGX_MODE_SGMII) {
				if (tlmac->is_1gx) {
					/* This is actually 1000BASE-X, so
					 * mark the LMAC as such.
					 */
					bgx_reg_modify(bgx, count,
						       BGX_GMP_PCS_MISCX_CTL,
						       PCS_MISC_CTL_MODE);
				}

				if (!bgx_board_info[bgx->bgx_id].phy_info[lmacid].autoneg_dis) {
					/* The Linux DTS does not disable
					 * autoneg for this LMAC (in SGMII or
					 * 1000BASE-X mode), so that means
					 * enable autoneg.
					 */
					bgx_reg_modify(bgx, count,
						       BGX_GMP_PCS_MRX_CTL,
						       PCS_MRX_CTL_AN_EN);
				}
			}

			count += 1;
		}
	}

	/* Done probing all 4 lmacs, now clear qsgmii_configured */
	qsgmii_configured = 0;

	printf("BGX%d LMACs: %d\n", bgx->bgx_id, count);
	bgx->lmac_count = count;
	/* Tell CMR how many LMACs carry RX and TX traffic */
	bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, count);
	bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, count);

	bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
	if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
		printf("BGX%d BIST failed\n", bgx->bgx_id);

	/* Set the backpressure AND mask */
	for (i = 0; i < bgx->lmac_count; i++)
		bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
			       ((1ULL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
				(i * MAX_BGX_CHANS_PER_LMAC));

	/* Disable all MAC filtering */
	for (i = 0; i < RX_DMAC_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);

	/* Disable MAC steering (NCSI traffic) */
	for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
}
1269 
/* Decode the QLM mode for every LMAC of this BGX from what low-level
 * firmware already programmed into the hardware (CMRX_CFG lmac_type
 * plus the GSER scratch training bits).
 */
static void bgx_get_qlm_mode(struct bgx *bgx)
{
	struct lmac *lmac;
	int lmacid;

	/* Read LMACx type to figure out QLM mode
	 * This is configured by low level firmware
	 */
	for (lmacid = 0; lmacid < MAX_LMAC_PER_BGX; lmacid++) {
		int lmac_type;
		int train_en;
		int index = 0;

		/* On CN81XX (and CN83XX BGX2) a BGX spans two 2-lane DLMs;
		 * read the mode from the first LMAC of the owning DLM.
		 */
		if (otx_is_soc(CN81XX) || (otx_is_soc(CN83XX) &&
					   bgx->bgx_id == 2))
			index = (lmacid < 2) ? 0 : 2;

		lmac = &bgx->lmac[lmacid];

		/* check if QLM is programmed, if not, skip */
		if (lmac->qlm == -1)
			continue;

		/* CMRX_CFG bits [10:8] hold the firmware-selected lmac_type */
		lmac_type = bgx_reg_read(bgx, index, BGX_CMRX_CFG);
		lmac->lmac_type = (lmac_type >> 8) & 0x07;
		debug("%s:%d:%d: lmac_type = %d, altpkg = %d\n", __func__,
		      bgx->bgx_id, lmacid, lmac->lmac_type, otx_is_altpkg());

		/* Low 4 bits of the GSER scratch register carry a per-lane
		 * link-training bitmap; PCS_MISC_CTL_MODE distinguishes
		 * 1000Base-X from SGMII.
		 */
		train_en = (readq(GSERX_SCRATCH(lmac->qlm))) & 0xf;
		lmac->is_1gx = bgx_reg_read(bgx, index, BGX_GMP_PCS_MISCX_CTL)
				& (PCS_MISC_CTL_MODE) ? true : false;

		switch (lmac->lmac_type) {
		case BGX_MODE_SGMII:
			if (bgx->is_rgx) {
				/* The fake RGX "BGX" has one RGMII port on
				 * LMAC0; other LMACs stay unconfigured.
				 */
				if (lmacid == 0) {
					lmac->qlm_mode = QLM_MODE_RGMII;
					debug("BGX%d LMAC%d mode: RGMII\n",
					      bgx->bgx_id, lmacid);
				}
				continue;
			} else {
				/* Alt package BGX0 exposes only lane0/DLM */
				if (bgx->bgx_id == 0 && otx_is_altpkg()) {
					if (lmacid % 2)
						continue;
				}
				lmac->qlm_mode = QLM_MODE_SGMII;
				debug("BGX%d QLM%d LMAC%d mode: %s\n",
				      bgx->bgx_id, lmac->qlm, lmacid,
				      lmac->is_1gx ? "1000Base-X" : "SGMII");
			}
			break;
		case BGX_MODE_XAUI:
			if (bgx->bgx_id == 0 && otx_is_altpkg())
				continue;
			lmac->qlm_mode = QLM_MODE_XAUI;
			if (lmacid != 0)
				continue;
			debug("BGX%d QLM%d LMAC%d mode: XAUI\n",
			      bgx->bgx_id, lmac->qlm, lmacid);
			break;
		case BGX_MODE_RXAUI:
			if (bgx->bgx_id == 0 && otx_is_altpkg())
				continue;
			lmac->qlm_mode = QLM_MODE_RXAUI;
			if (index == lmacid) {
				debug("BGX%d QLM%d LMAC%d mode: RXAUI\n",
				      bgx->bgx_id, lmac->qlm, (index ? 1 : 0));
			}
			break;
		case BGX_MODE_XFI:
			if (bgx->bgx_id == 0 && otx_is_altpkg()) {
				if (lmacid % 2)
					continue;
			}
			/* Training enabled for this lane means 10G-KR,
			 * otherwise plain XFI.
			 */
			if ((lmacid < 2 && (train_en & (1 << lmacid))) ||
			    (train_en & (1 << (lmacid - 2)))) {
				lmac->qlm_mode = QLM_MODE_10G_KR;
				debug("BGX%d QLM%d LMAC%d mode: 10G_KR\n",
				      bgx->bgx_id, lmac->qlm, lmacid);
			} else {
				lmac->qlm_mode = QLM_MODE_XFI;
				debug("BGX%d QLM%d LMAC%d mode: XFI\n",
				      bgx->bgx_id, lmac->qlm, lmacid);
			}
			break;
		case BGX_MODE_XLAUI:
			if (bgx->bgx_id == 0 && otx_is_altpkg())
				continue;
			/* Any training bit set selects 40G-KR4 over XLAUI */
			if (train_en) {
				lmac->qlm_mode = QLM_MODE_40G_KR4;
				if (lmacid != 0)
					break;
				debug("BGX%d QLM%d LMAC%d mode: 40G_KR4\n",
				      bgx->bgx_id, lmac->qlm, lmacid);
			} else {
				lmac->qlm_mode = QLM_MODE_XLAUI;
				if (lmacid != 0)
					break;
				debug("BGX%d QLM%d LMAC%d mode: XLAUI\n",
				      bgx->bgx_id, lmac->qlm, lmacid);
			}
		break;
		case BGX_MODE_QSGMII:
			/* If QLM is configured as QSGMII, use lmac0 */
			if (otx_is_soc(CN83XX) && lmacid == 2 &&
			    bgx->bgx_id != 2) {
				//lmac->qlm_mode = QLM_MODE_DISABLED;
				continue;
			}

			if (lmacid == 0 || lmacid == 2) {
				lmac->qlm_mode = QLM_MODE_QSGMII;
				debug("BGX%d QLM%d LMAC%d mode: QSGMII\n",
				      bgx->bgx_id, lmac->qlm, lmacid);
			}
			break;
		default:
			break;
		}
	}
}
1392 
/* Record board wiring for one BGX: per-LMAC PHY address, MDIO bus,
 * autoneg-disable flag, plus the board's lmac_reg/lmac_enable flags.
 * Each input array must hold MAX_LMAC_PER_BGX entries; the values are
 * consumed later during LMAC bring-up.
 */
void bgx_set_board_info(int bgx_id, int *mdio_bus,
			int *phy_addr, bool *autoneg_dis, bool *lmac_reg,
			bool *lmac_enable)
{
	unsigned int lmac;

	for (lmac = 0; lmac < MAX_LMAC_PER_BGX; lmac++) {
		bgx_board_info[bgx_id].phy_info[lmac].mdio_bus =
							mdio_bus[lmac];
		bgx_board_info[bgx_id].phy_info[lmac].phy_addr =
							phy_addr[lmac];
		bgx_board_info[bgx_id].phy_info[lmac].autoneg_dis =
							autoneg_dis[lmac];
		bgx_board_info[bgx_id].lmac_reg[lmac] = lmac_reg[lmac];
		bgx_board_info[bgx_id].lmac_enable[lmac] = lmac_enable[lmac];

		debug("%s bgx_id %d lmac %d\n", __func__, bgx_id, lmac);
		debug("phy addr %x mdio bus %d autoneg_dis %d lmac_reg %d\n",
		      bgx_board_info[bgx_id].phy_info[lmac].phy_addr,
		      bgx_board_info[bgx_id].phy_info[lmac].mdio_bus,
		      bgx_board_info[bgx_id].phy_info[lmac].autoneg_dis,
		      bgx_board_info[bgx_id].lmac_reg[lmac]);
		debug("lmac_enable = %x\n",
		      bgx_board_info[bgx_id].lmac_enable[lmac]);
	}
}
1415 
octeontx_bgx_remove(struct udevice * dev)1416 int octeontx_bgx_remove(struct udevice *dev)
1417 {
1418 	int lmacid;
1419 	u64 cfg;
1420 	int count = MAX_LMAC_PER_BGX;
1421 	struct bgx *bgx = dev_get_priv(dev);
1422 
1423 	if (!bgx->reg_base)
1424 		return 0;
1425 
1426 	if (bgx->is_rgx)
1427 		count = 1;
1428 
1429 	for (lmacid = 0; lmacid < count; lmacid++) {
1430 		struct lmac *lmac;
1431 
1432 		lmac = &bgx->lmac[lmacid];
1433 		cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
1434 		cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
1435 		bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
1436 
1437 		/* Disable PCS for 1G interface */
1438 		if (lmac->lmac_type == BGX_MODE_SGMII ||
1439 		    lmac->lmac_type == BGX_MODE_QSGMII) {
1440 			cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
1441 			cfg |= PCS_MRX_CTL_PWR_DN;
1442 			bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);
1443 		}
1444 
1445 		debug("%s disabling bgx%d lmacid%d\n", __func__, bgx->bgx_id,
1446 		      lmacid);
1447 		bgx_lmac_disable(bgx, lmacid);
1448 	}
1449 	return 0;
1450 }
1451 
/* DM probe hook: map the BGX CSR BAR, discover which QLM/DLM feeds each
 * LMAC, decode the firmware-programmed modes and bring every LMAC up.
 * Returns 0 on success (also when no PCI region is present), -ENODEV on
 * CN81XX when neither DLM is configured for BGX.
 */
int octeontx_bgx_probe(struct udevice *dev)
{
	int err;
	struct bgx *bgx = dev_get_priv(dev);
	u8 lmac = 0;
	int qlm[4] = {-1, -1, -1, -1};
	int bgx_idx, node;
	int inc = 1;

	/* BAR0 maps this BGX's CSR block */
	bgx->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0,
				       PCI_REGION_MEM);
	if (!bgx->reg_base) {
		debug("No PCI region found\n");
		return 0;
	}

#ifdef OCTEONTX_XCV
	/* Use FAKE BGX2 for RGX interface */
	if ((((uintptr_t)bgx->reg_base >> 24) & 0xf) == 0x8) {
		bgx->bgx_id = 2;
		bgx->is_rgx = true;
		/* RGX exposes a single port on LMAC0; mark the rest unused */
		for (lmac = 0; lmac < MAX_LMAC_PER_BGX; lmac++) {
			if (lmac == 0) {
				bgx->lmac[lmac].lmacid = 0;
				bgx->lmac[lmac].qlm = 0;
			} else {
				bgx->lmac[lmac].qlm = -1;
			}
		}
		xcv_init_hw();
		goto skip_qlm_config;
	}
#endif

	/* Node and BGX index are encoded in the CSR base address */
	node = node_id(bgx->reg_base);
	bgx_idx = ((uintptr_t)bgx->reg_base >> 24) & 3;
	bgx->bgx_id = (node * MAX_BGX_PER_NODE) + bgx_idx;
	/* On CN81XX (all BGX) and CN83XX BGX2 the BGX is fed by 2-lane
	 * DLMs, so lanes are walked in pairs below.
	 */
	if (otx_is_soc(CN81XX))
		inc = 2;
	else if (otx_is_soc(CN83XX) && (bgx_idx == 2))
		inc = 2;

	for (lmac = 0; lmac < MAX_LMAC_PER_BGX; lmac += inc) {
		/* BGX3 (DLM4), has only 2 lanes */
		if (otx_is_soc(CN83XX) && bgx_idx == 3 && lmac >= 2)
			continue;
		qlm[lmac + 0] = get_qlm_for_bgx(node, bgx_idx, lmac);
		/* Each DLM has 2 lanes, configure both lanes with
		 * same qlm configuration
		 */
		if (inc == 2)
			qlm[lmac + 1] = qlm[lmac];
		debug("qlm[%d] = %d\n", lmac, qlm[lmac]);
	}

	/* A BGX can take 1 or 2 DLMs, if both the DLMs are not configured
	 * as BGX, then return, nothing to initialize
	 */
	if (otx_is_soc(CN81XX))
		if ((qlm[0] == -1) && (qlm[2] == -1))
			return -ENODEV;

	/* MAP configuration registers */
	for (lmac = 0; lmac < MAX_LMAC_PER_BGX; lmac++) {
		bgx->lmac[lmac].qlm = qlm[lmac];
		bgx->lmac[lmac].lmacid = lmac;
	}

#ifdef OCTEONTX_XCV
skip_qlm_config:
#endif
	bgx_vnic[bgx->bgx_id] = bgx;
	bgx_get_qlm_mode(bgx);
	debug("bgx_vnic[%u]: %p\n", bgx->bgx_id, bgx);

	bgx_init_hw(bgx);

	/* Init LMACs */
	for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
		struct lmac *tlmac = &bgx->lmac[lmac];

		tlmac->dev = dev;
		err = bgx_lmac_enable(bgx, lmac);
		if (err) {
			/* Report but continue so other LMACs still come up */
			printf("BGX%d failed to enable lmac%d\n",
			       bgx->bgx_id, lmac);
		}
	}

	return 0;
}
1543 
/* Driver-model binding: one MISC device per BGX/RGX PCI function.
 * DM_FLAG_OS_PREPARE requests the remove hook when U-Boot prepares to
 * hand off to the OS, so packet engines are quiesced first.
 */
U_BOOT_DRIVER(octeontx_bgx) = {
	.name	= "octeontx_bgx",
	.id	= UCLASS_MISC,
	.probe	= octeontx_bgx_probe,
	.remove	= octeontx_bgx_remove,
	.priv_auto	= sizeof(struct bgx),
	.flags  = DM_FLAG_OS_PREPARE,
};

/* PCI IDs this driver binds to: the BGX and RGX functions */
static struct pci_device_id octeontx_bgx_supported[] = {
	{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_BGX) },
	{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_RGX) },
	{}
};

U_BOOT_PCI_DEVICE(octeontx_bgx, octeontx_bgx_supported);
1560