// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016, NVIDIA CORPORATION.
 *
 * Portions based on U-Boot's rtl8169.c.
 */

/*
 * This driver supports the Synopsys Designware Ethernet QOS (Quality Of
 * Service) IP block. The IP supports multiple options for bus type, clocking/
 * reset structure, and feature list.
 *
 * The driver is written such that generic core logic is kept separate from
 * configuration-specific logic. Code that interacts with configuration-
 * specific resources is split out into separate functions to avoid polluting
 * common code. If/when this driver is enhanced to support multiple
 * configurations, the core code should be adapted to call all configuration-
 * specific functions through function pointers, with the definition of those
 * function pointers being supplied by struct udevice_id eqos_ids[]'s .data
 * field.
 *
 * The following configurations are currently supported:
 * tegra186:
 *    NVIDIA's Tegra186 chip. This configuration uses an AXI master/DMA bus,
 *    an AHB slave/register bus, contains the DMA, MTL, and MAC sub-blocks,
 *    and supports a single RGMII PHY. This configuration also has SW control
 *    over all clock and reset signals to the HW block.
 */
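
/*
 * Illustrative sketch of the .data dispatch described above (an assumption
 * for illustration, not a verbatim copy of this driver's tables): each
 * struct udevice_id entry carries a pointer to a struct eqos_config whose
 * ->ops table supplies the configuration-specific callbacks, e.g.:
 *
 *	static const struct udevice_id eqos_ids[] = {
 *		{
 *			.compatible = "nvidia,tegra186-eqos",
 *			.data = (ulong)&eqos_tegra186_config,
 *		},
 *		{ }
 *	};
 *
 * The core code then recovers the config with
 * eqos->config = (void *)dev_get_driver_data(dev) and calls, for example,
 * eqos->config->ops->eqos_start_clks(dev).
 */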

#include <common.h>
#include <clk.h>
#include <cpu_func.h>
#include <dm.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <memalign.h>
#include <miiphy.h>
#include <net.h>
#include <netdev.h>
#include <phy.h>
#include <reset.h>
#include <wait_bit.h>
#include <asm/cache.h>
#include <asm/gpio.h>
#include <asm/io.h>
#include <eth_phy.h>
#ifdef CONFIG_ARCH_IMX8M
#include <asm/arch/clock.h>
#include <asm/mach-imx/sys_proto.h>
#endif
#include <linux/bitops.h>
#include <linux/delay.h>

/* Core registers */

#define EQOS_MAC_REGS_BASE 0x000
struct eqos_mac_regs {
	uint32_t configuration;				/* 0x000 */
	uint32_t unused_004[(0x070 - 0x004) / 4];	/* 0x004 */
	uint32_t q0_tx_flow_ctrl;			/* 0x070 */
	uint32_t unused_070[(0x090 - 0x074) / 4];	/* 0x074 */
	uint32_t rx_flow_ctrl;				/* 0x090 */
	uint32_t unused_094;				/* 0x094 */
	uint32_t txq_prty_map0;				/* 0x098 */
	uint32_t unused_09c;				/* 0x09c */
	uint32_t rxq_ctrl0;				/* 0x0a0 */
	uint32_t unused_0a4;				/* 0x0a4 */
	uint32_t rxq_ctrl2;				/* 0x0a8 */
	uint32_t unused_0ac[(0x0dc - 0x0ac) / 4];	/* 0x0ac */
	uint32_t us_tic_counter;			/* 0x0dc */
	uint32_t unused_0e0[(0x11c - 0x0e0) / 4];	/* 0x0e0 */
	uint32_t hw_feature0;				/* 0x11c */
	uint32_t hw_feature1;				/* 0x120 */
	uint32_t hw_feature2;				/* 0x124 */
	uint32_t unused_128[(0x200 - 0x128) / 4];	/* 0x128 */
	uint32_t mdio_address;				/* 0x200 */
	uint32_t mdio_data;				/* 0x204 */
	uint32_t unused_208[(0x300 - 0x208) / 4];	/* 0x208 */
	uint32_t address0_high;				/* 0x300 */
	uint32_t address0_low;				/* 0x304 */
};

#define EQOS_MAC_CONFIGURATION_GPSLCE BIT(23)
#define EQOS_MAC_CONFIGURATION_CST BIT(21)
#define EQOS_MAC_CONFIGURATION_ACS BIT(20)
#define EQOS_MAC_CONFIGURATION_WD BIT(19)
#define EQOS_MAC_CONFIGURATION_JD BIT(17)
#define EQOS_MAC_CONFIGURATION_JE BIT(16)
#define EQOS_MAC_CONFIGURATION_PS BIT(15)
#define EQOS_MAC_CONFIGURATION_FES BIT(14)
#define EQOS_MAC_CONFIGURATION_DM BIT(13)
#define EQOS_MAC_CONFIGURATION_LM BIT(12)
#define EQOS_MAC_CONFIGURATION_TE BIT(1)
#define EQOS_MAC_CONFIGURATION_RE BIT(0)

#define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT 16
#define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_MASK 0xffff
#define EQOS_MAC_Q0_TX_FLOW_CTRL_TFE BIT(1)

#define EQOS_MAC_RX_FLOW_CTRL_RFE BIT(0)

#define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT 0
#define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK 0xff

#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT 0
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK 3
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_NOT_ENABLED 0
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB 2
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV 1

#define EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT 0
#define EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK 0xff

#define EQOS_MAC_HW_FEATURE0_MMCSEL_SHIFT 8
#define EQOS_MAC_HW_FEATURE0_HDSEL_SHIFT 2
#define EQOS_MAC_HW_FEATURE0_GMIISEL_SHIFT 1
#define EQOS_MAC_HW_FEATURE0_MIISEL_SHIFT 0

#define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT 6
#define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK 0x1f
#define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT 0
#define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK 0x1f

#define EQOS_MAC_HW_FEATURE3_ASP_SHIFT 28
#define EQOS_MAC_HW_FEATURE3_ASP_MASK 0x3

#define EQOS_MAC_MDIO_ADDRESS_PA_SHIFT 21
#define EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT 16
#define EQOS_MAC_MDIO_ADDRESS_CR_SHIFT 8
#define EQOS_MAC_MDIO_ADDRESS_CR_20_35 2
#define EQOS_MAC_MDIO_ADDRESS_CR_250_300 5
#define EQOS_MAC_MDIO_ADDRESS_SKAP BIT(4)
#define EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT 2
#define EQOS_MAC_MDIO_ADDRESS_GOC_READ 3
#define EQOS_MAC_MDIO_ADDRESS_GOC_WRITE 1
#define EQOS_MAC_MDIO_ADDRESS_C45E BIT(1)
#define EQOS_MAC_MDIO_ADDRESS_GB BIT(0)

#define EQOS_MAC_MDIO_DATA_GD_MASK 0xffff

#define EQOS_MTL_REGS_BASE 0xd00
struct eqos_mtl_regs {
	uint32_t txq0_operation_mode;			/* 0xd00 */
	uint32_t unused_d04;				/* 0xd04 */
	uint32_t txq0_debug;				/* 0xd08 */
	uint32_t unused_d0c[(0xd18 - 0xd0c) / 4];	/* 0xd0c */
	uint32_t txq0_quantum_weight;			/* 0xd18 */
	uint32_t unused_d1c[(0xd30 - 0xd1c) / 4];	/* 0xd1c */
	uint32_t rxq0_operation_mode;			/* 0xd30 */
	uint32_t unused_d34;				/* 0xd34 */
	uint32_t rxq0_debug;				/* 0xd38 */
};

#define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT 16
#define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK 0x1ff
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT 2
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_MASK 3
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED 2
#define EQOS_MTL_TXQ0_OPERATION_MODE_TSF BIT(1)
#define EQOS_MTL_TXQ0_OPERATION_MODE_FTQ BIT(0)

#define EQOS_MTL_TXQ0_DEBUG_TXQSTS BIT(4)
#define EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT 1
#define EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK 3

#define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT 20
#define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK 0x3ff
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT 14
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK 0x3f
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT 8
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK 0x3f
#define EQOS_MTL_RXQ0_OPERATION_MODE_EHFC BIT(7)
#define EQOS_MTL_RXQ0_OPERATION_MODE_RSF BIT(5)

#define EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT 16
#define EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK 0x7fff
#define EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT 4
#define EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK 3

#define EQOS_DMA_REGS_BASE 0x1000
struct eqos_dma_regs {
	uint32_t mode;					/* 0x1000 */
	uint32_t sysbus_mode;				/* 0x1004 */
	uint32_t unused_1008[(0x1100 - 0x1008) / 4];	/* 0x1008 */
	uint32_t ch0_control;				/* 0x1100 */
	uint32_t ch0_tx_control;			/* 0x1104 */
	uint32_t ch0_rx_control;			/* 0x1108 */
	uint32_t unused_110c;				/* 0x110c */
	uint32_t ch0_txdesc_list_haddress;		/* 0x1110 */
	uint32_t ch0_txdesc_list_address;		/* 0x1114 */
	uint32_t ch0_rxdesc_list_haddress;		/* 0x1118 */
	uint32_t ch0_rxdesc_list_address;		/* 0x111c */
	uint32_t ch0_txdesc_tail_pointer;		/* 0x1120 */
	uint32_t unused_1124;				/* 0x1124 */
	uint32_t ch0_rxdesc_tail_pointer;		/* 0x1128 */
	uint32_t ch0_txdesc_ring_length;		/* 0x112c */
	uint32_t ch0_rxdesc_ring_length;		/* 0x1130 */
};

#define EQOS_DMA_MODE_SWR BIT(0)

#define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT 16
#define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK 0xf
#define EQOS_DMA_SYSBUS_MODE_EAME BIT(11)
#define EQOS_DMA_SYSBUS_MODE_BLEN16 BIT(3)
#define EQOS_DMA_SYSBUS_MODE_BLEN8 BIT(2)
#define EQOS_DMA_SYSBUS_MODE_BLEN4 BIT(1)

#define EQOS_DMA_CH0_CONTROL_DSL_SHIFT 18
#define EQOS_DMA_CH0_CONTROL_PBLX8 BIT(16)

#define EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT 16
#define EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK 0x3f
#define EQOS_DMA_CH0_TX_CONTROL_OSP BIT(4)
#define EQOS_DMA_CH0_TX_CONTROL_ST BIT(0)

#define EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT 16
#define EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK 0x3f
#define EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT 1
#define EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK 0x3fff
#define EQOS_DMA_CH0_RX_CONTROL_SR BIT(0)

/* These registers are Tegra186-specific */
#define EQOS_TEGRA186_REGS_BASE 0x8800
struct eqos_tegra186_regs {
	uint32_t sdmemcomppadctrl;			/* 0x8800 */
	uint32_t auto_cal_config;			/* 0x8804 */
	uint32_t unused_8808;				/* 0x8808 */
	uint32_t auto_cal_status;			/* 0x880c */
};

#define EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD BIT(31)

#define EQOS_AUTO_CAL_CONFIG_START BIT(31)
#define EQOS_AUTO_CAL_CONFIG_ENABLE BIT(29)

#define EQOS_AUTO_CAL_STATUS_ACTIVE BIT(31)

/* Descriptors */
/* We assume ARCH_DMA_MINALIGN >= 16; 16 is the EQOS HW minimum */
#define EQOS_DESCRIPTOR_ALIGN ARCH_DMA_MINALIGN
#define EQOS_DESCRIPTORS_TX 4
#define EQOS_DESCRIPTORS_RX 4
#define EQOS_DESCRIPTORS_NUM (EQOS_DESCRIPTORS_TX + EQOS_DESCRIPTORS_RX)
#define EQOS_BUFFER_ALIGN ARCH_DMA_MINALIGN
#define EQOS_MAX_PACKET_SIZE ALIGN(1568, ARCH_DMA_MINALIGN)
#define EQOS_RX_BUFFER_SIZE (EQOS_DESCRIPTORS_RX * EQOS_MAX_PACKET_SIZE)

struct eqos_desc {
	u32 des0;
	u32 des1;
	u32 des2;
	u32 des3;
};

#define EQOS_DESC3_OWN BIT(31)
#define EQOS_DESC3_FD BIT(29)
#define EQOS_DESC3_LD BIT(28)
#define EQOS_DESC3_BUF1V BIT(24)

#define EQOS_AXI_WIDTH_32 4
#define EQOS_AXI_WIDTH_64 8
#define EQOS_AXI_WIDTH_128 16

struct eqos_config {
	bool reg_access_always_ok;
	int mdio_wait;
	int swr_wait;
	int config_mac;
	int config_mac_mdio;
	unsigned int axi_bus_width;
	phy_interface_t (*interface)(struct udevice *dev);
	struct eqos_ops *ops;
};

struct eqos_ops {
	void (*eqos_inval_desc)(void *desc);
	void (*eqos_flush_desc)(void *desc);
	void (*eqos_inval_buffer)(void *buf, size_t size);
	void (*eqos_flush_buffer)(void *buf, size_t size);
	int (*eqos_probe_resources)(struct udevice *dev);
	int (*eqos_remove_resources)(struct udevice *dev);
	int (*eqos_stop_resets)(struct udevice *dev);
	int (*eqos_start_resets)(struct udevice *dev);
	void (*eqos_stop_clks)(struct udevice *dev);
	int (*eqos_start_clks)(struct udevice *dev);
	int (*eqos_calibrate_pads)(struct udevice *dev);
	int (*eqos_disable_calibration)(struct udevice *dev);
	int (*eqos_set_tx_clk_speed)(struct udevice *dev);
	ulong (*eqos_get_tick_clk_rate)(struct udevice *dev);
};

struct eqos_priv {
	struct udevice *dev;
	const struct eqos_config *config;
	fdt_addr_t regs;
	struct eqos_mac_regs *mac_regs;
	struct eqos_mtl_regs *mtl_regs;
	struct eqos_dma_regs *dma_regs;
	struct eqos_tegra186_regs *tegra186_regs;
	struct reset_ctl reset_ctl;
	struct gpio_desc phy_reset_gpio;
	struct clk clk_master_bus;
	struct clk clk_rx;
	struct clk clk_ptp_ref;
	struct clk clk_tx;
	struct clk clk_ck;
	struct clk clk_slave_bus;
	struct mii_dev *mii;
	struct phy_device *phy;
	int phyaddr;
	u32 max_speed;
	void *descs;
	int tx_desc_idx, rx_desc_idx;
	unsigned int desc_size;
	void *tx_dma_buf;
	void *rx_dma_buf;
	void *rx_pkt;
	bool started;
	bool reg_access_ok;
	bool clk_ck_enabled;
};

/*
 * TX and RX descriptors are 16 bytes. This causes problems with the cache
 * maintenance on CPUs where the cache-line size exceeds the size of these
 * descriptors. What will happen is that when the driver receives a packet
 * it will be immediately requeued for the hardware to reuse. The CPU will
 * therefore need to flush the cache-line containing the descriptor, which
 * will cause all other descriptors in the same cache-line to be flushed
 * along with it. If one of those descriptors had been written to by the
 * device those changes (and the associated packet) will be lost.
 *
 * To work around this, we make use of non-cached memory if available. If
 * descriptors are mapped uncached there's no need to manually flush them
 * or invalidate them.
 *
 * Note that this only applies to descriptors. The packet data buffers do
 * not have the same constraints since they are 1536 bytes large, so they
 * are unlikely to share cache-lines.
 */
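
/*
 * Worked example, assuming a 64-byte cache line: four 16-byte descriptors
 * would share one line, so eqos_alloc_descs() below pads desc_size up to
 * 64 bytes and each descriptor then owns a whole cache line. The padding
 * words are skipped by the DMA engine via the DSL (descriptor skip length)
 * field programmed in eqos_start().
 */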
static void *eqos_alloc_descs(struct eqos_priv *eqos, unsigned int num)
{
	eqos->desc_size = ALIGN(sizeof(struct eqos_desc),
				(unsigned int)ARCH_DMA_MINALIGN);

	return memalign(eqos->desc_size, num * eqos->desc_size);
}

static void eqos_free_descs(void *descs)
{
	free(descs);
}

static struct eqos_desc *eqos_get_desc(struct eqos_priv *eqos,
				       unsigned int num, bool rx)
{
	return eqos->descs +
		((rx ? EQOS_DESCRIPTORS_TX : 0) + num) * eqos->desc_size;
}

static void eqos_inval_desc_generic(void *desc)
{
	unsigned long start = (unsigned long)desc;
	unsigned long end = ALIGN(start + sizeof(struct eqos_desc),
				  ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

static void eqos_flush_desc_generic(void *desc)
{
	unsigned long start = (unsigned long)desc;
	unsigned long end = ALIGN(start + sizeof(struct eqos_desc),
				  ARCH_DMA_MINALIGN);

	flush_dcache_range(start, end);
}

static void eqos_inval_buffer_tegra186(void *buf, size_t size)
{
	unsigned long start = (unsigned long)buf & ~(ARCH_DMA_MINALIGN - 1);
	unsigned long end = ALIGN(start + size, ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

static void eqos_inval_buffer_generic(void *buf, size_t size)
{
	unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)buf + size,
				    ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

static void eqos_flush_buffer_tegra186(void *buf, size_t size)
{
	flush_cache((unsigned long)buf, size);
}

static void eqos_flush_buffer_generic(void *buf, size_t size)
{
	unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)buf + size,
				    ARCH_DMA_MINALIGN);

	flush_dcache_range(start, end);
}

static int eqos_mdio_wait_idle(struct eqos_priv *eqos)
{
	return wait_for_bit_le32(&eqos->mac_regs->mdio_address,
				 EQOS_MAC_MDIO_ADDRESS_GB, false,
				 1000000, true);
}

static int eqos_mdio_read(struct mii_dev *bus, int mdio_addr, int mdio_devad,
			  int mdio_reg)
{
	struct eqos_priv *eqos = bus->priv;
	u32 val;
	int ret;

	debug("%s(dev=%p, addr=%x, reg=%d):\n", __func__, eqos->dev, mdio_addr,
	      mdio_reg);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO not idle at entry");
		return ret;
	}

	val = readl(&eqos->mac_regs->mdio_address);
	val &= EQOS_MAC_MDIO_ADDRESS_SKAP |
	       EQOS_MAC_MDIO_ADDRESS_C45E;
	val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) |
	       (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) |
	       (eqos->config->config_mac_mdio <<
		EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) |
	       (EQOS_MAC_MDIO_ADDRESS_GOC_READ <<
		EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) |
	       EQOS_MAC_MDIO_ADDRESS_GB;
	writel(val, &eqos->mac_regs->mdio_address);

	udelay(eqos->config->mdio_wait);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO read didn't complete");
		return ret;
	}

	val = readl(&eqos->mac_regs->mdio_data);
	val &= EQOS_MAC_MDIO_DATA_GD_MASK;

	debug("%s: val=%x\n", __func__, val);

	return val;
}

static int eqos_mdio_write(struct mii_dev *bus, int mdio_addr, int mdio_devad,
			   int mdio_reg, u16 mdio_val)
{
	struct eqos_priv *eqos = bus->priv;
	u32 val;
	int ret;

	debug("%s(dev=%p, addr=%x, reg=%d, val=%x):\n", __func__, eqos->dev,
	      mdio_addr, mdio_reg, mdio_val);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO not idle at entry");
		return ret;
	}

	writel(mdio_val, &eqos->mac_regs->mdio_data);

	val = readl(&eqos->mac_regs->mdio_address);
	val &= EQOS_MAC_MDIO_ADDRESS_SKAP |
	       EQOS_MAC_MDIO_ADDRESS_C45E;
	val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) |
	       (mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) |
	       (eqos->config->config_mac_mdio <<
		EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) |
	       (EQOS_MAC_MDIO_ADDRESS_GOC_WRITE <<
		EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) |
	       EQOS_MAC_MDIO_ADDRESS_GB;
	writel(val, &eqos->mac_regs->mdio_address);

	udelay(eqos->config->mdio_wait);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO write didn't complete");
		return ret;
	}

	return 0;
}

static int eqos_start_clks_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = clk_enable(&eqos->clk_slave_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_slave_bus) failed: %d", ret);
		goto err;
	}

	ret = clk_enable(&eqos->clk_master_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_master_bus) failed: %d", ret);
		goto err_disable_clk_slave_bus;
	}

	ret = clk_enable(&eqos->clk_rx);
	if (ret < 0) {
		pr_err("clk_enable(clk_rx) failed: %d", ret);
		goto err_disable_clk_master_bus;
	}

	ret = clk_enable(&eqos->clk_ptp_ref);
	if (ret < 0) {
		pr_err("clk_enable(clk_ptp_ref) failed: %d", ret);
		goto err_disable_clk_rx;
	}

	ret = clk_set_rate(&eqos->clk_ptp_ref, 125 * 1000 * 1000);
	if (ret < 0) {
		pr_err("clk_set_rate(clk_ptp_ref) failed: %d", ret);
		goto err_disable_clk_ptp_ref;
	}

	ret = clk_enable(&eqos->clk_tx);
	if (ret < 0) {
		pr_err("clk_enable(clk_tx) failed: %d", ret);
		goto err_disable_clk_ptp_ref;
	}
#endif

	debug("%s: OK\n", __func__);
	return 0;

#ifdef CONFIG_CLK
err_disable_clk_ptp_ref:
	clk_disable(&eqos->clk_ptp_ref);
err_disable_clk_rx:
	clk_disable(&eqos->clk_rx);
err_disable_clk_master_bus:
	clk_disable(&eqos->clk_master_bus);
err_disable_clk_slave_bus:
	clk_disable(&eqos->clk_slave_bus);
err:
	debug("%s: FAILED: %d\n", __func__, ret);
	return ret;
#endif
}

static int eqos_start_clks_stm32(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = clk_enable(&eqos->clk_master_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_master_bus) failed: %d", ret);
		goto err;
	}

	ret = clk_enable(&eqos->clk_rx);
	if (ret < 0) {
		pr_err("clk_enable(clk_rx) failed: %d", ret);
		goto err_disable_clk_master_bus;
	}

	ret = clk_enable(&eqos->clk_tx);
	if (ret < 0) {
		pr_err("clk_enable(clk_tx) failed: %d", ret);
		goto err_disable_clk_rx;
	}

	if (clk_valid(&eqos->clk_ck) && !eqos->clk_ck_enabled) {
		ret = clk_enable(&eqos->clk_ck);
		if (ret < 0) {
			pr_err("clk_enable(clk_ck) failed: %d", ret);
			goto err_disable_clk_tx;
		}
		eqos->clk_ck_enabled = true;
	}
#endif

	debug("%s: OK\n", __func__);
	return 0;

#ifdef CONFIG_CLK
err_disable_clk_tx:
	clk_disable(&eqos->clk_tx);
err_disable_clk_rx:
	clk_disable(&eqos->clk_rx);
err_disable_clk_master_bus:
	clk_disable(&eqos->clk_master_bus);
err:
	debug("%s: FAILED: %d\n", __func__, ret);
	return ret;
#endif
}

static int eqos_start_clks_imx(struct udevice *dev)
{
	return 0;
}

static void eqos_stop_clks_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clk_disable(&eqos->clk_tx);
	clk_disable(&eqos->clk_ptp_ref);
	clk_disable(&eqos->clk_rx);
	clk_disable(&eqos->clk_master_bus);
	clk_disable(&eqos->clk_slave_bus);
#endif

	debug("%s: OK\n", __func__);
}

static void eqos_stop_clks_stm32(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clk_disable(&eqos->clk_tx);
	clk_disable(&eqos->clk_rx);
	clk_disable(&eqos->clk_master_bus);
#endif

	debug("%s: OK\n", __func__);
}

static void eqos_stop_clks_imx(struct udevice *dev)
{
	/* empty */
}

static int eqos_start_resets_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1);
	if (ret < 0) {
		pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", ret);
		return ret;
	}

	udelay(2);

	ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0);
	if (ret < 0) {
		pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", ret);
		return ret;
	}

	ret = reset_assert(&eqos->reset_ctl);
	if (ret < 0) {
		pr_err("reset_assert() failed: %d", ret);
		return ret;
	}

	udelay(2);

	ret = reset_deassert(&eqos->reset_ctl);
	if (ret < 0) {
		pr_err("reset_deassert() failed: %d", ret);
		return ret;
	}

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_start_resets_stm32(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) {
		ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1);
		if (ret < 0) {
			pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d",
			       ret);
			return ret;
		}

		udelay(2);

		ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0);
		if (ret < 0) {
			pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d",
			       ret);
			return ret;
		}
	}

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_start_resets_imx(struct udevice *dev)
{
	return 0;
}

static int eqos_stop_resets_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	reset_assert(&eqos->reset_ctl);
	dm_gpio_set_value(&eqos->phy_reset_gpio, 1);

	return 0;
}

static int eqos_stop_resets_stm32(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	if (dm_gpio_is_valid(&eqos->phy_reset_gpio)) {
		ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1);
		if (ret < 0) {
			pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d",
			       ret);
			return ret;
		}
	}

	return 0;
}

static int eqos_stop_resets_imx(struct udevice *dev)
{
	return 0;
}

static int eqos_calibrate_pads_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl,
		     EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD);

	udelay(1);

	setbits_le32(&eqos->tegra186_regs->auto_cal_config,
		     EQOS_AUTO_CAL_CONFIG_START | EQOS_AUTO_CAL_CONFIG_ENABLE);

	ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status,
				EQOS_AUTO_CAL_STATUS_ACTIVE, true, 10, false);
	if (ret) {
		pr_err("calibrate didn't start");
		goto failed;
	}

	ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status,
				EQOS_AUTO_CAL_STATUS_ACTIVE, false, 10, false);
	if (ret) {
		pr_err("calibrate didn't finish");
		goto failed;
	}

	ret = 0;

failed:
	clrbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl,
		     EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD);

	debug("%s: returns %d\n", __func__, ret);

	return ret;
}

static int eqos_disable_calibration_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->tegra186_regs->auto_cal_config,
		     EQOS_AUTO_CAL_CONFIG_ENABLE);

	return 0;
}

static ulong eqos_get_tick_clk_rate_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	return clk_get_rate(&eqos->clk_slave_bus);
#else
	return 0;
#endif
}

static ulong eqos_get_tick_clk_rate_stm32(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	return clk_get_rate(&eqos->clk_master_bus);
#else
	return 0;
#endif
}

__weak u32 imx_get_eqos_csr_clk(void)
{
	return 100 * 1000000;
}

__weak int imx_eqos_txclk_set_rate(unsigned long rate)
{
	return 0;
}

static ulong eqos_get_tick_clk_rate_imx(struct udevice *dev)
{
	return imx_get_eqos_csr_clk();
}

static int eqos_calibrate_pads_stm32(struct udevice *dev)
{
	return 0;
}

static int eqos_calibrate_pads_imx(struct udevice *dev)
{
	return 0;
}

static int eqos_disable_calibration_stm32(struct udevice *dev)
{
	return 0;
}

static int eqos_disable_calibration_imx(struct udevice *dev)
{
	return 0;
}

static int eqos_set_full_duplex(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM);

	return 0;
}

static int eqos_set_half_duplex(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM);

	/* WAR: Flush TX queue when switching to half-duplex */
	setbits_le32(&eqos->mtl_regs->txq0_operation_mode,
		     EQOS_MTL_TXQ0_OPERATION_MODE_FTQ);

	return 0;
}

static int eqos_set_gmii_speed(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES);

	return 0;
}

static int eqos_set_mii_speed_100(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES);

	return 0;
}

static int eqos_set_mii_speed_10(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrsetbits_le32(&eqos->mac_regs->configuration,
			EQOS_MAC_CONFIGURATION_FES, EQOS_MAC_CONFIGURATION_PS);

	return 0;
}

static int eqos_set_tx_clk_speed_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);
	ulong rate;
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	switch (eqos->phy->speed) {
	case SPEED_1000:
		rate = 125 * 1000 * 1000;
		break;
	case SPEED_100:
		rate = 25 * 1000 * 1000;
		break;
	case SPEED_10:
		rate = 2.5 * 1000 * 1000;
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}

	ret = clk_set_rate(&eqos->clk_tx, rate);
	if (ret < 0) {
		pr_err("clk_set_rate(tx_clk, %lu) failed: %d", rate, ret);
		return ret;
	}
#endif

	return 0;
}

static int eqos_set_tx_clk_speed_stm32(struct udevice *dev)
{
	return 0;
}

static int eqos_set_tx_clk_speed_imx(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	ulong rate;
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	switch (eqos->phy->speed) {
	case SPEED_1000:
		rate = 125 * 1000 * 1000;
		break;
	case SPEED_100:
		rate = 25 * 1000 * 1000;
		break;
	case SPEED_10:
		rate = 2.5 * 1000 * 1000;
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}

	ret = imx_eqos_txclk_set_rate(rate);
	if (ret < 0) {
		pr_err("imx (tx_clk, %lu) failed: %d", rate, ret);
		return ret;
	}

	return 0;
}

static int eqos_adjust_link(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;
	bool en_calibration;

	debug("%s(dev=%p):\n", __func__, dev);

	if (eqos->phy->duplex)
		ret = eqos_set_full_duplex(dev);
	else
		ret = eqos_set_half_duplex(dev);
	if (ret < 0) {
		pr_err("eqos_set_*_duplex() failed: %d", ret);
		return ret;
	}

	switch (eqos->phy->speed) {
	case SPEED_1000:
		en_calibration = true;
		ret = eqos_set_gmii_speed(dev);
		break;
	case SPEED_100:
		en_calibration = true;
		ret = eqos_set_mii_speed_100(dev);
		break;
	case SPEED_10:
		en_calibration = false;
		ret = eqos_set_mii_speed_10(dev);
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}
	if (ret < 0) {
		pr_err("eqos_set_*mii_speed*() failed: %d", ret);
		return ret;
	}

	if (en_calibration) {
		ret = eqos->config->ops->eqos_calibrate_pads(dev);
		if (ret < 0) {
			pr_err("eqos_calibrate_pads() failed: %d", ret);
			return ret;
		}
	} else {
		ret = eqos->config->ops->eqos_disable_calibration(dev);
		if (ret < 0) {
			pr_err("eqos_disable_calibration() failed: %d", ret);
			return ret;
		}
	}

	ret = eqos->config->ops->eqos_set_tx_clk_speed(dev);
	if (ret < 0) {
		pr_err("eqos_set_tx_clk_speed() failed: %d", ret);
		return ret;
	}

	return 0;
}

static int eqos_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *plat = dev_get_plat(dev);
	struct eqos_priv *eqos = dev_get_priv(dev);
	uint32_t val;

	/*
	 * This function may be called before start() or after stop(). At that
	 * time, on at least some configurations of the EQoS HW, all clocks to
	 * the EQoS HW block will be stopped, and a reset signal applied. If
	 * any register access is attempted in this state, bus timeouts or CPU
	 * hangs may occur. This check prevents that.
	 *
	 * A simple solution to this problem would be to not implement
	 * write_hwaddr(), since start() always writes the MAC address into HW
	 * anyway. However, it is desirable to implement write_hwaddr() to
	 * support the case of SW that runs subsequent to U-Boot which expects
	 * the MAC address to already be programmed into the EQoS registers,
	 * which must happen irrespective of whether the U-Boot user (or
	 * scripts) actually made use of the EQoS device, and hence
	 * irrespective of whether start() was ever called.
	 *
	 * Note that this requirement by subsequent SW is not valid for
	 * Tegra186, and is likely not valid for any non-PCI instantiation of
	 * the EQoS HW block. This function is implemented solely as
	 * future-proofing with the expectation the driver will eventually be
	 * ported to some system where the expectation above is true.
	 */
	if (!eqos->config->reg_access_always_ok && !eqos->reg_access_ok)
		return 0;

	/* Update the MAC address */
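	/*
	 * Worked example (illustration only): for MAC address
	 * 00:11:22:33:44:55, enetaddr[] is {0x00, 0x11, 0x22, 0x33, 0x44,
	 * 0x55}, so address0_high is written with 0x5544 and address0_low
	 * with 0x33221100.
	 */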
	val = (plat->enetaddr[5] << 8) |
	      (plat->enetaddr[4]);
	writel(val, &eqos->mac_regs->address0_high);
	val = (plat->enetaddr[3] << 24) |
	      (plat->enetaddr[2] << 16) |
	      (plat->enetaddr[1] << 8) |
	      (plat->enetaddr[0]);
	writel(val, &eqos->mac_regs->address0_low);

	return 0;
}

static int eqos_read_rom_hwaddr(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);

#ifdef CONFIG_ARCH_IMX8M
	imx_get_mac_from_fuse(dev_seq(dev), pdata->enetaddr);
#endif
	return !is_valid_ethaddr(pdata->enetaddr);
}

static int eqos_start(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret, i;
	ulong rate;
	u32 val, tx_fifo_sz, rx_fifo_sz, tqs, rqs, pbl;
	ulong last_rx_desc;
	ulong desc_pad;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->tx_desc_idx = 0;
	eqos->rx_desc_idx = 0;

	ret = eqos->config->ops->eqos_start_clks(dev);
	if (ret < 0) {
		pr_err("eqos_start_clks() failed: %d", ret);
		goto err;
	}

	ret = eqos->config->ops->eqos_start_resets(dev);
	if (ret < 0) {
		pr_err("eqos_start_resets() failed: %d", ret);
		goto err_stop_clks;
	}

	udelay(10);

	eqos->reg_access_ok = true;

	ret = wait_for_bit_le32(&eqos->dma_regs->mode,
				EQOS_DMA_MODE_SWR, false,
				eqos->config->swr_wait, false);
	if (ret) {
		pr_err("EQOS_DMA_MODE_SWR stuck");
		goto err_stop_resets;
	}

	ret = eqos->config->ops->eqos_calibrate_pads(dev);
	if (ret < 0) {
		pr_err("eqos_calibrate_pads() failed: %d", ret);
		goto err_stop_resets;
	}

	rate = eqos->config->ops->eqos_get_tick_clk_rate(dev);

	val = (rate / 1000000) - 1;
	writel(val, &eqos->mac_regs->us_tic_counter);

	/*
	 * If the PHY was already connected and configured, there is no need
	 * to reconnect/reconfigure it again.
	 */
	if (!eqos->phy) {
		int addr = -1;
#ifdef CONFIG_DM_ETH_PHY
		addr = eth_phy_get_addr(dev);
#endif
#ifdef DWC_NET_PHYADDR
		addr = DWC_NET_PHYADDR;
#endif
		eqos->phy = phy_connect(eqos->mii, addr, dev,
					eqos->config->interface(dev));
		if (!eqos->phy) {
			pr_err("phy_connect() failed");
			goto err_stop_resets;
		}

		if (eqos->max_speed) {
			ret = phy_set_supported(eqos->phy, eqos->max_speed);
			if (ret) {
				pr_err("phy_set_supported() failed: %d", ret);
				goto err_shutdown_phy;
			}
		}

		ret = phy_config(eqos->phy);
		if (ret < 0) {
			pr_err("phy_config() failed: %d", ret);
			goto err_shutdown_phy;
		}
	}

	ret = phy_startup(eqos->phy);
	if (ret < 0) {
		pr_err("phy_startup() failed: %d", ret);
		goto err_shutdown_phy;
	}

	if (!eqos->phy->link) {
		pr_err("No link");
		goto err_shutdown_phy;
	}

	ret = eqos_adjust_link(dev);
	if (ret < 0) {
		pr_err("eqos_adjust_link() failed: %d", ret);
		goto err_shutdown_phy;
	}

	/* Configure MTL */

	/* Enable Store and Forward mode for TX */
	/* Program Tx operating mode */
	setbits_le32(&eqos->mtl_regs->txq0_operation_mode,
		     EQOS_MTL_TXQ0_OPERATION_MODE_TSF |
		     (EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED <<
		      EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT));

	/* Transmit Queue weight */
	writel(0x10, &eqos->mtl_regs->txq0_quantum_weight);

	/* Enable Store and Forward mode for RX, since no jumbo frames */
	setbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
		     EQOS_MTL_RXQ0_OPERATION_MODE_RSF);

	/* Transmit/Receive queue FIFO size; use all RAM for 1 queue */
	val = readl(&eqos->mac_regs->hw_feature1);
	tx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT) &
		     EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK;
	rx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT) &
		     EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK;

	/*
	 * r/tx_fifo_sz is encoded as log2(n / 128). Undo that by shifting.
	 * r/tqs is encoded as (n / 256) - 1.
	 */
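	/*
	 * Example: tx_fifo_sz == 7 encodes 128 << 7 == 16384 bytes of TX
	 * FIFO, which yields tqs == 16384 / 256 - 1 == 63.
	 */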
	tqs = (128 << tx_fifo_sz) / 256 - 1;
	rqs = (128 << rx_fifo_sz) / 256 - 1;

	clrsetbits_le32(&eqos->mtl_regs->txq0_operation_mode,
			EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK <<
			EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT,
			tqs << EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT);
	clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
			EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK <<
			EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT,
			rqs << EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT);

	/* Flow control is used only if each channel gets 4 KiB or more FIFO */
	if (rqs >= ((4096 / 256) - 1)) {
		u32 rfd, rfa;

		setbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
			     EQOS_MTL_RXQ0_OPERATION_MODE_EHFC);

		/*
		 * Set the threshold for activating flow control to space for
		 * a minimum of 2 frames, i.e. (1500 * 1) = 1500 bytes.
		 *
		 * Set the threshold for deactivating flow control to space
		 * for a minimum of 1 frame (frame size 1500 bytes) in the
		 * receive FIFO.
		 */
		if (rqs == ((4096 / 256) - 1)) {
			/*
			 * This violates the above formula because of the FIFO
			 * size limit; overflow may therefore occur in spite
			 * of this.
			 */
			rfd = 0x3;	/* Full-3K */
			rfa = 0x1;	/* Full-1.5K */
		} else if (rqs == ((8192 / 256) - 1)) {
			rfd = 0x6;	/* Full-4K */
			rfa = 0xa;	/* Full-6K */
		} else if (rqs == ((16384 / 256) - 1)) {
			rfd = 0x6;	/* Full-4K */
			rfa = 0x12;	/* Full-10K */
		} else {
			rfd = 0x6;	/* Full-4K */
			rfa = 0x1E;	/* Full-16K */
		}

		clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
				(EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) |
				(EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT),
				(rfd <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) |
				(rfa <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT));
	}

	/* Configure MAC */

	clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0,
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT,
			eqos->config->config_mac <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT);

	/* Multicast and broadcast queue enable */
	setbits_le32(&eqos->mac_regs->unused_0a4,
		     0x00100000);
	/* Enable promiscuous mode */
	setbits_le32(&eqos->mac_regs->unused_004[1],
		     0x1);

	/* Set TX flow control parameters */
	/* Set Pause Time */
	setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl,
		     0xffff << EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT);
	/* Assign priority for TX flow control */
	clrbits_le32(&eqos->mac_regs->txq_prty_map0,
		     EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK <<
		     EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT);
	/* Assign priority for RX flow control */
	clrbits_le32(&eqos->mac_regs->rxq_ctrl2,
		     EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK <<
		     EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT);
	/* Enable flow control */
	setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl,
		     EQOS_MAC_Q0_TX_FLOW_CTRL_TFE);
	setbits_le32(&eqos->mac_regs->rx_flow_ctrl,
		     EQOS_MAC_RX_FLOW_CTRL_RFE);

	clrsetbits_le32(&eqos->mac_regs->configuration,
			EQOS_MAC_CONFIGURATION_GPSLCE |
			EQOS_MAC_CONFIGURATION_WD |
			EQOS_MAC_CONFIGURATION_JD |
			EQOS_MAC_CONFIGURATION_JE,
			EQOS_MAC_CONFIGURATION_CST |
			EQOS_MAC_CONFIGURATION_ACS);

	eqos_write_hwaddr(dev);

	/* Configure DMA */

	/* Enable OSP mode */
	setbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_OSP);

	/* RX buffer size. Must be a multiple of bus width */
	clrsetbits_le32(&eqos->dma_regs->ch0_rx_control,
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK <<
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT,
			EQOS_MAX_PACKET_SIZE <<
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT);

	desc_pad = (eqos->desc_size - sizeof(struct eqos_desc)) /
		   eqos->config->axi_bus_width;

	setbits_le32(&eqos->dma_regs->ch0_control,
		     EQOS_DMA_CH0_CONTROL_PBLX8 |
		     (desc_pad << EQOS_DMA_CH0_CONTROL_DSL_SHIFT));

	/*
	 * Burst length must be < 1/2 FIFO size.
	 * FIFO size in tqs is encoded as (n / 256) - 1.
	 * Each burst is n * 8 (PBLX8) * 16 (AXI width) == 128 bytes.
	 * Half of n * 256 is n * 128, so pbl == tqs, modulo the -1.
	 */
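	/*
	 * Example: a 16 KiB FIFO gives tqs == 63, so pbl is capped at 32
	 * below; the resulting burst of 32 * 128 == 4096 bytes stays under
	 * half the FIFO size as required.
	 */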
	pbl = tqs + 1;
	if (pbl > 32)
		pbl = 32;
	clrsetbits_le32(&eqos->dma_regs->ch0_tx_control,
			EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK <<
			EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT,
			pbl << EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT);

	clrsetbits_le32(&eqos->dma_regs->ch0_rx_control,
			EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK <<
			EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT,
			8 << EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT);

	/* DMA performance configuration */
	val = (2 << EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT) |
	      EQOS_DMA_SYSBUS_MODE_EAME | EQOS_DMA_SYSBUS_MODE_BLEN16 |
	      EQOS_DMA_SYSBUS_MODE_BLEN8 | EQOS_DMA_SYSBUS_MODE_BLEN4;
	writel(val, &eqos->dma_regs->sysbus_mode);

	/* Set up descriptors */

	memset(eqos->descs, 0, eqos->desc_size * EQOS_DESCRIPTORS_NUM);

	for (i = 0; i < EQOS_DESCRIPTORS_TX; i++) {
		struct eqos_desc *tx_desc = eqos_get_desc(eqos, i, false);

		eqos->config->ops->eqos_flush_desc(tx_desc);
	}

	for (i = 0; i < EQOS_DESCRIPTORS_RX; i++) {
		struct eqos_desc *rx_desc = eqos_get_desc(eqos, i, true);

		rx_desc->des0 = (u32)(ulong)(eqos->rx_dma_buf +
					     (i * EQOS_MAX_PACKET_SIZE));
		rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
		mb();
		eqos->config->ops->eqos_flush_desc(rx_desc);
		eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf +
						     (i * EQOS_MAX_PACKET_SIZE),
						     EQOS_MAX_PACKET_SIZE);
	}

	writel(0, &eqos->dma_regs->ch0_txdesc_list_haddress);
	writel((ulong)eqos_get_desc(eqos, 0, false),
	       &eqos->dma_regs->ch0_txdesc_list_address);
	writel(EQOS_DESCRIPTORS_TX - 1,
	       &eqos->dma_regs->ch0_txdesc_ring_length);

	writel(0, &eqos->dma_regs->ch0_rxdesc_list_haddress);
	writel((ulong)eqos_get_desc(eqos, 0, true),
	       &eqos->dma_regs->ch0_rxdesc_list_address);
	writel(EQOS_DESCRIPTORS_RX - 1,
	       &eqos->dma_regs->ch0_rxdesc_ring_length);

	/* Enable everything */
	setbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_ST);
	setbits_le32(&eqos->dma_regs->ch0_rx_control,
		     EQOS_DMA_CH0_RX_CONTROL_SR);
	setbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE);

	/* TX tail pointer not written until we need to TX a packet */
	/*
	 * Point RX tail pointer at last descriptor. Ideally, we'd point at the
	 * first descriptor, implying all descriptors were available. However,
	 * that's not distinguishable from none of the descriptors being
	 * available.
	 */
	last_rx_desc = (ulong)eqos_get_desc(eqos, EQOS_DESCRIPTORS_RX - 1, true);
	writel(last_rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);

	eqos->started = true;

	debug("%s: OK\n", __func__);
	return 0;

err_shutdown_phy:
	phy_shutdown(eqos->phy);
err_stop_resets:
	eqos->config->ops->eqos_stop_resets(dev);
err_stop_clks:
	eqos->config->ops->eqos_stop_clks(dev);
err:
	pr_err("FAILED: %d", ret);
	return ret;
}

static void eqos_stop(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int i;

	debug("%s(dev=%p):\n", __func__, dev);

	if (!eqos->started)
		return;
	eqos->started = false;
	eqos->reg_access_ok = false;

	/* Disable TX DMA */
	clrbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_ST);

	/* Wait for all TX packets to drain out of MTL */
	for (i = 0; i < 1000000; i++) {
		u32 val = readl(&eqos->mtl_regs->txq0_debug);
		u32 trcsts = (val >> EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT) &
			     EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK;
		u32 txqsts = val & EQOS_MTL_TXQ0_DEBUG_TXQSTS;

		if ((trcsts != 1) && (!txqsts))
			break;
	}

	/* Turn off MAC TX and RX */
	clrbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE);

	/* Wait for all RX packets to drain out of MTL */
	for (i = 0; i < 1000000; i++) {
		u32 val = readl(&eqos->mtl_regs->rxq0_debug);
		u32 prxq = (val >> EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT) &
			   EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK;
		u32 rxqsts = (val >> EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT) &
			     EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK;

		if ((!prxq) && (!rxqsts))
			break;
	}

	/* Turn off RX DMA */
	clrbits_le32(&eqos->dma_regs->ch0_rx_control,
		     EQOS_DMA_CH0_RX_CONTROL_SR);

	if (eqos->phy)
		phy_shutdown(eqos->phy);
	eqos->config->ops->eqos_stop_resets(dev);
	eqos->config->ops->eqos_stop_clks(dev);

	debug("%s: OK\n", __func__);
}

static int eqos_send(struct udevice *dev, void *packet, int length)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	struct eqos_desc *tx_desc;
	int i;

	debug("%s(dev=%p, packet=%p, length=%d):\n", __func__, dev, packet,
	      length);

	memcpy(eqos->tx_dma_buf, packet, length);
	eqos->config->ops->eqos_flush_buffer(eqos->tx_dma_buf, length);

	tx_desc = eqos_get_desc(eqos, eqos->tx_desc_idx, false);
	eqos->tx_desc_idx++;
	eqos->tx_desc_idx %= EQOS_DESCRIPTORS_TX;

	tx_desc->des0 = (ulong)eqos->tx_dma_buf;
	tx_desc->des1 = 0;
	tx_desc->des2 = length;
	/*
	 * Make sure that if HW sees the _OWN write below, it will see all the
	 * writes to the rest of the descriptor too.
	 */
	mb();
	tx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_FD | EQOS_DESC3_LD | length;
	eqos->config->ops->eqos_flush_desc(tx_desc);

	writel((ulong)eqos_get_desc(eqos, eqos->tx_desc_idx, false),
	       &eqos->dma_regs->ch0_txdesc_tail_pointer);

	for (i = 0; i < 1000000; i++) {
		eqos->config->ops->eqos_inval_desc(tx_desc);
		if (!(readl(&tx_desc->des3) & EQOS_DESC3_OWN))
			return 0;
		udelay(1);
	}

	debug("%s: TX timeout\n", __func__);

	return -ETIMEDOUT;
}

static int eqos_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	struct eqos_desc *rx_desc;
	int length;

	debug("%s(dev=%p, flags=%x):\n", __func__, dev, flags);

	rx_desc = eqos_get_desc(eqos, eqos->rx_desc_idx, true);
	eqos->config->ops->eqos_inval_desc(rx_desc);
	if (rx_desc->des3 & EQOS_DESC3_OWN) {
		debug("%s: RX packet not available\n", __func__);
		return -EAGAIN;
	}

	*packetp = eqos->rx_dma_buf +
		   (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE);
	length = rx_desc->des3 & 0x7fff;
	debug("%s: *packetp=%p, length=%d\n", __func__, *packetp, length);

	eqos->config->ops->eqos_inval_buffer(*packetp, length);

	return length;
}

static int eqos_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	uchar *packet_expected;
	struct eqos_desc *rx_desc;

	debug("%s(packet=%p, length=%d)\n", __func__, packet, length);

	packet_expected = eqos->rx_dma_buf +
			  (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE);
	if (packet != packet_expected) {
		debug("%s: Unexpected packet (expected %p)\n", __func__,
		      packet_expected);
		return -EINVAL;
	}

	eqos->config->ops->eqos_inval_buffer(packet, length);

	rx_desc = eqos_get_desc(eqos, eqos->rx_desc_idx, true);

	rx_desc->des0 = 0;
	mb();
	eqos->config->ops->eqos_flush_desc(rx_desc);
	eqos->config->ops->eqos_inval_buffer(packet, length);
	rx_desc->des0 = (u32)(ulong)packet;
	rx_desc->des1 = 0;
	rx_desc->des2 = 0;
	/*
	 * Make sure that if HW sees the _OWN write below, it will see all the
	 * writes to the rest of the descriptor too.
	 */
	mb();
	rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
	eqos->config->ops->eqos_flush_desc(rx_desc);

	writel((ulong)rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);

	eqos->rx_desc_idx++;
	eqos->rx_desc_idx %= EQOS_DESCRIPTORS_RX;

	return 0;
}

static int eqos_probe_resources_core(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->descs = eqos_alloc_descs(eqos, EQOS_DESCRIPTORS_NUM);
	if (!eqos->descs) {
		debug("%s: eqos_alloc_descs() failed\n", __func__);
		ret = -ENOMEM;
		goto err;
	}

	eqos->tx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_MAX_PACKET_SIZE);
	if (!eqos->tx_dma_buf) {
		debug("%s: memalign(tx_dma_buf) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_descs;
	}
	debug("%s: tx_dma_buf=%p\n", __func__, eqos->tx_dma_buf);

	eqos->rx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_RX_BUFFER_SIZE);
	if (!eqos->rx_dma_buf) {
		debug("%s: memalign(rx_dma_buf) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_tx_dma_buf;
	}
	debug("%s: rx_dma_buf=%p\n", __func__, eqos->rx_dma_buf);

	eqos->rx_pkt = malloc(EQOS_MAX_PACKET_SIZE);
	if (!eqos->rx_pkt) {
		debug("%s: malloc(rx_pkt) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_rx_dma_buf;
	}
	debug("%s: rx_pkt=%p\n", __func__, eqos->rx_pkt);

	eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf,
			EQOS_MAX_PACKET_SIZE * EQOS_DESCRIPTORS_RX);

	debug("%s: OK\n", __func__);
	return 0;

err_free_rx_dma_buf:
	free(eqos->rx_dma_buf);
err_free_tx_dma_buf:
	free(eqos->tx_dma_buf);
err_free_descs:
	eqos_free_descs(eqos->descs);
err:
	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

static int eqos_remove_resources_core(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	free(eqos->rx_pkt);
	free(eqos->rx_dma_buf);
	free(eqos->tx_dma_buf);
	eqos_free_descs(eqos->descs);

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_probe_resources_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = reset_get_by_name(dev, "eqos", &eqos->reset_ctl);
	if (ret) {
		pr_err("reset_get_by_name(rst) failed: %d", ret);
		return ret;
	}

	ret = gpio_request_by_name(dev, "phy-reset-gpios", 0,
				   &eqos->phy_reset_gpio,
				   GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE);
	if (ret) {
		pr_err("gpio_request_by_name(phy reset) failed: %d", ret);
		goto err_free_reset_eqos;
	}

	ret = clk_get_by_name(dev, "slave_bus", &eqos->clk_slave_bus);
	if (ret) {
		pr_err("clk_get_by_name(slave_bus) failed: %d", ret);
		goto err_free_gpio_phy_reset;
	}

	ret = clk_get_by_name(dev, "master_bus", &eqos->clk_master_bus);
	if (ret) {
		pr_err("clk_get_by_name(master_bus) failed: %d", ret);
		goto err_free_clk_slave_bus;
	}

	ret = clk_get_by_name(dev, "rx", &eqos->clk_rx);
	if (ret) {
		pr_err("clk_get_by_name(rx) failed: %d", ret);
		goto err_free_clk_master_bus;
	}

	ret = clk_get_by_name(dev, "ptp_ref", &eqos->clk_ptp_ref);
	if (ret) {
		pr_err("clk_get_by_name(ptp_ref) failed: %d", ret);
		goto err_free_clk_rx;
	}

	ret = clk_get_by_name(dev, "tx", &eqos->clk_tx);
	if (ret) {
		pr_err("clk_get_by_name(tx) failed: %d", ret);
		goto err_free_clk_ptp_ref;
	}

	debug("%s: OK\n", __func__);
	return 0;

err_free_clk_ptp_ref:
	clk_free(&eqos->clk_ptp_ref);
err_free_clk_rx:
	clk_free(&eqos->clk_rx);
err_free_clk_master_bus:
	clk_free(&eqos->clk_master_bus);
err_free_clk_slave_bus:
	clk_free(&eqos->clk_slave_bus);
err_free_gpio_phy_reset:
	dm_gpio_free(dev, &eqos->phy_reset_gpio);
err_free_reset_eqos:
	reset_free(&eqos->reset_ctl);

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

/* Board-specific Ethernet interface initialization */
__weak int board_interface_eth_init(struct udevice *dev,
				    phy_interface_t interface_type)
{
	return 0;
}

static int eqos_probe_resources_stm32(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;
	phy_interface_t interface;
	struct ofnode_phandle_args phandle_args;

	debug("%s(dev=%p):\n", __func__, dev);

	interface = eqos->config->interface(dev);

	if (interface == PHY_INTERFACE_MODE_NONE) {
		pr_err("Invalid PHY interface\n");
		return -EINVAL;
	}

	ret = board_interface_eth_init(dev, interface);
	if (ret)
		return -EINVAL;

	eqos->max_speed = dev_read_u32_default(dev, "max-speed", 0);

	ret = clk_get_by_name(dev, "stmmaceth", &eqos->clk_master_bus);
	if (ret) {
		pr_err("clk_get_by_name(master_bus) failed: %d", ret);
		goto err_probe;
	}

	ret = clk_get_by_name(dev, "mac-clk-rx", &eqos->clk_rx);
	if (ret) {
		pr_err("clk_get_by_name(rx) failed: %d", ret);
		goto err_free_clk_master_bus;
	}

	ret = clk_get_by_name(dev, "mac-clk-tx", &eqos->clk_tx);
	if (ret) {
		pr_err("clk_get_by_name(tx) failed: %d", ret);
		goto err_free_clk_rx;
	}

	/* Get the ETH_CLK clock (optional) */
	ret = clk_get_by_name(dev, "eth-ck", &eqos->clk_ck);
	if (ret)
		pr_warn("No PHY clock provided: %d", ret);

	eqos->phyaddr = -1;
	ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0,
					 &phandle_args);
	if (!ret) {
		/* Search for "reset-gpios" in the PHY node */
		ret = gpio_request_by_name_nodev(phandle_args.node,
						 "reset-gpios", 0,
						 &eqos->phy_reset_gpio,
						 GPIOD_IS_OUT |
						 GPIOD_IS_OUT_ACTIVE);
		if (ret)
			pr_warn("gpio_request_by_name(phy reset) not provided: %d",
				ret);

		eqos->phyaddr = ofnode_read_u32_default(phandle_args.node,
							"reg", -1);
	}

	debug("%s: OK\n", __func__);
	return 0;

err_free_clk_rx:
	clk_free(&eqos->clk_rx);
err_free_clk_master_bus:
	clk_free(&eqos->clk_master_bus);
err_probe:
	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

static phy_interface_t eqos_get_interface_stm32(struct udevice *dev)
{
	const char *phy_mode;
	phy_interface_t interface = PHY_INTERFACE_MODE_NONE;

	debug("%s(dev=%p):\n", __func__, dev);

	phy_mode = dev_read_prop(dev, "phy-mode", NULL);
	if (phy_mode)
		interface = phy_get_interface_by_name(phy_mode);

	return interface;
}

static phy_interface_t eqos_get_interface_tegra186(struct udevice *dev)
{
	return PHY_INTERFACE_MODE_MII;
}

static int eqos_probe_resources_imx(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	phy_interface_t interface;

	debug("%s(dev=%p):\n", __func__, dev);

	interface = eqos->config->interface(dev);

	if (interface == PHY_INTERFACE_MODE_NONE) {
		pr_err("Invalid PHY interface\n");
		return -EINVAL;
	}

	debug("%s: OK\n", __func__);
	return 0;
}

static phy_interface_t eqos_get_interface_imx(struct udevice *dev)
{
	const char *phy_mode;
	phy_interface_t interface = PHY_INTERFACE_MODE_NONE;

	debug("%s(dev=%p):\n", __func__, dev);

	phy_mode = dev_read_prop(dev, "phy-mode", NULL);
	if (phy_mode)
		interface = phy_get_interface_by_name(phy_mode);

	return interface;
}

static int eqos_remove_resources_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

#ifdef CONFIG_CLK
	clk_free(&eqos->clk_tx);
	clk_free(&eqos->clk_ptp_ref);
	clk_free(&eqos->clk_rx);
	clk_free(&eqos->clk_slave_bus);
	clk_free(&eqos->clk_master_bus);
#endif
	dm_gpio_free(dev, &eqos->phy_reset_gpio);
	reset_free(&eqos->reset_ctl);

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_remove_resources_stm32(struct udevice *dev)
{
	/* Needed outside the #ifdef too, for the PHY reset GPIO below */
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

#ifdef CONFIG_CLK
	clk_free(&eqos->clk_tx);
	clk_free(&eqos->clk_rx);
	clk_free(&eqos->clk_master_bus);
	if (clk_valid(&eqos->clk_ck))
		clk_free(&eqos->clk_ck);
#endif

	if (dm_gpio_is_valid(&eqos->phy_reset_gpio))
		dm_gpio_free(dev, &eqos->phy_reset_gpio);

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_remove_resources_imx(struct udevice *dev)
{
	return 0;
}

static int eqos_probe(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->dev = dev;
	eqos->config = (void *)dev_get_driver_data(dev);

	eqos->regs = dev_read_addr(dev);
	if (eqos->regs == FDT_ADDR_T_NONE) {
		pr_err("dev_read_addr() failed");
		return -ENODEV;
	}
	eqos->mac_regs = (void *)(eqos->regs + EQOS_MAC_REGS_BASE);
	eqos->mtl_regs = (void *)(eqos->regs + EQOS_MTL_REGS_BASE);
	eqos->dma_regs = (void *)(eqos->regs + EQOS_DMA_REGS_BASE);
	eqos->tegra186_regs = (void *)(eqos->regs + EQOS_TEGRA186_REGS_BASE);

	ret = eqos_probe_resources_core(dev);
	if (ret < 0) {
		pr_err("eqos_probe_resources_core() failed: %d", ret);
		return ret;
	}

	ret = eqos->config->ops->eqos_probe_resources(dev);
	if (ret < 0) {
		pr_err("eqos_probe_resources() failed: %d", ret);
		goto err_remove_resources_core;
	}

#ifdef CONFIG_DM_ETH_PHY
	eqos->mii = eth_phy_get_mdio_bus(dev);
#endif
	if (!eqos->mii) {
		eqos->mii = mdio_alloc();
		if (!eqos->mii) {
			pr_err("mdio_alloc() failed");
			ret = -ENOMEM;
			goto err_remove_resources;
		}
		eqos->mii->read = eqos_mdio_read;
		eqos->mii->write = eqos_mdio_write;
		eqos->mii->priv = eqos;
		strcpy(eqos->mii->name, dev->name);

		ret = mdio_register(eqos->mii);
		if (ret < 0) {
			pr_err("mdio_register() failed: %d", ret);
			goto err_free_mdio;
		}
	}
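
	/*
	 * Once registered, the bus is visible to U-Boot's generic MDIO
	 * layer (e.g. via the "mdio list" shell command, when CMD_MDIO is
	 * enabled); the bus name is the device name copied above.
	 */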

#ifdef CONFIG_DM_ETH_PHY
	eth_phy_set_mdio_bus(dev, eqos->mii);
#endif

	debug("%s: OK\n", __func__);
	return 0;

err_free_mdio:
	mdio_free(eqos->mii);
err_remove_resources:
	eqos->config->ops->eqos_remove_resources(dev);
err_remove_resources_core:
	eqos_remove_resources_core(dev);

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

static int eqos_remove(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	mdio_unregister(eqos->mii);
	mdio_free(eqos->mii);
	eqos->config->ops->eqos_remove_resources(dev);

	eqos_remove_resources_core(dev);

	debug("%s: OK\n", __func__);
	return 0;
}

static const struct eth_ops eqos_ops = {
	.start = eqos_start,
	.stop = eqos_stop,
	.send = eqos_send,
	.recv = eqos_recv,
	.free_pkt = eqos_free_pkt,
	.write_hwaddr = eqos_write_hwaddr,
	.read_rom_hwaddr = eqos_read_rom_hwaddr,
};

static struct eqos_ops eqos_tegra186_ops = {
	.eqos_inval_desc = eqos_inval_desc_generic,
	.eqos_flush_desc = eqos_flush_desc_generic,
	.eqos_inval_buffer = eqos_inval_buffer_tegra186,
	.eqos_flush_buffer = eqos_flush_buffer_tegra186,
	.eqos_probe_resources = eqos_probe_resources_tegra186,
	.eqos_remove_resources = eqos_remove_resources_tegra186,
	.eqos_stop_resets = eqos_stop_resets_tegra186,
	.eqos_start_resets = eqos_start_resets_tegra186,
	.eqos_stop_clks = eqos_stop_clks_tegra186,
	.eqos_start_clks = eqos_start_clks_tegra186,
	.eqos_calibrate_pads = eqos_calibrate_pads_tegra186,
	.eqos_disable_calibration = eqos_disable_calibration_tegra186,
	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_tegra186,
	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_tegra186
};

static const struct eqos_config __maybe_unused eqos_tegra186_config = {
	.reg_access_always_ok = false,
	.mdio_wait = 10,
	.swr_wait = 10,
	.config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB,
	.config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_20_35,
	.axi_bus_width = EQOS_AXI_WIDTH_128,
	.interface = eqos_get_interface_tegra186,
	.ops = &eqos_tegra186_ops
};

static struct eqos_ops eqos_stm32_ops = {
	.eqos_inval_desc = eqos_inval_desc_generic,
	.eqos_flush_desc = eqos_flush_desc_generic,
	.eqos_inval_buffer = eqos_inval_buffer_generic,
	.eqos_flush_buffer = eqos_flush_buffer_generic,
	.eqos_probe_resources = eqos_probe_resources_stm32,
	.eqos_remove_resources = eqos_remove_resources_stm32,
	.eqos_stop_resets = eqos_stop_resets_stm32,
	.eqos_start_resets = eqos_start_resets_stm32,
	.eqos_stop_clks = eqos_stop_clks_stm32,
	.eqos_start_clks = eqos_start_clks_stm32,
	.eqos_calibrate_pads = eqos_calibrate_pads_stm32,
	.eqos_disable_calibration = eqos_disable_calibration_stm32,
	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_stm32,
	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_stm32
};

static const struct eqos_config __maybe_unused eqos_stm32_config = {
	.reg_access_always_ok = false,
	.mdio_wait = 10000,
	.swr_wait = 50,
	.config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV,
	.config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300,
	.axi_bus_width = EQOS_AXI_WIDTH_64,
	.interface = eqos_get_interface_stm32,
	.ops = &eqos_stm32_ops
};

static struct eqos_ops eqos_imx_ops = {
	.eqos_inval_desc = eqos_inval_desc_generic,
	.eqos_flush_desc = eqos_flush_desc_generic,
	.eqos_inval_buffer = eqos_inval_buffer_generic,
	.eqos_flush_buffer = eqos_flush_buffer_generic,
	.eqos_probe_resources = eqos_probe_resources_imx,
	.eqos_remove_resources = eqos_remove_resources_imx,
	.eqos_stop_resets = eqos_stop_resets_imx,
	.eqos_start_resets = eqos_start_resets_imx,
	.eqos_stop_clks = eqos_stop_clks_imx,
	.eqos_start_clks = eqos_start_clks_imx,
	.eqos_calibrate_pads = eqos_calibrate_pads_imx,
	.eqos_disable_calibration = eqos_disable_calibration_imx,
	.eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_imx,
	.eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_imx
};

struct eqos_config __maybe_unused eqos_imx_config = {
	.reg_access_always_ok = false,
	.mdio_wait = 10,
	.swr_wait = 50,
	.config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB,
	.config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300,
	.axi_bus_width = EQOS_AXI_WIDTH_64,
	.interface = eqos_get_interface_imx,
	.ops = &eqos_imx_ops
};

static const struct udevice_id eqos_ids[] = {
#if IS_ENABLED(CONFIG_DWC_ETH_QOS_TEGRA186)
	{
		.compatible = "nvidia,tegra186-eqos",
		.data = (ulong)&eqos_tegra186_config
	},
#endif
#if IS_ENABLED(CONFIG_DWC_ETH_QOS_STM32)
	{
		.compatible = "st,stm32mp1-dwmac",
		.data = (ulong)&eqos_stm32_config
	},
#endif
#if IS_ENABLED(CONFIG_DWC_ETH_QOS_IMX)
	{
		.compatible = "fsl,imx-eqos",
		.data = (ulong)&eqos_imx_config
	},
#endif

	{ }
};

U_BOOT_DRIVER(eth_eqos) = {
	.name = "eth_eqos",
	.id = UCLASS_ETH,
	.of_match = of_match_ptr(eqos_ids),
	.probe = eqos_probe,
	.remove = eqos_remove,
	.ops = &eqos_ops,
	.priv_auto = sizeof(struct eqos_priv),
	.plat_auto = sizeof(struct eth_pdata),
};
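
/*
 * For reference, a minimal device-tree node binding this driver on STM32MP1
 * could look like the sketch below. The register address, clock phandles,
 * and PHY node are illustrative placeholders; the compatible string must
 * match eqos_ids[] and the clock-names must match what
 * eqos_probe_resources_stm32() requests:
 *
 *	ethernet@5800a000 {
 *		compatible = "st,stm32mp1-dwmac";
 *		reg = <0x5800a000 0x2000>;
 *		clock-names = "stmmaceth", "mac-clk-rx", "mac-clk-tx";
 *		clocks = <&rcc ETHMAC>, <&rcc ETHRX>, <&rcc ETHTX>;
 *		phy-mode = "rgmii";
 *		phy-handle = <&ethphy0>;
 *	};
 */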