/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * U-Boot version:
 * Copyright (C) 2016-2017 Stefan Roese <sr@denx.de>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <common.h>
#include <dm.h>
#include <dm/device-internal.h>
#include <dm/lists.h>
#include <net.h>
#include <netdev.h>
#include <config.h>
#include <malloc.h>
#include <asm/io.h>
#include <linux/errno.h>
#include <phy.h>
#include <miiphy.h>
#include <watchdog.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include <linux/compat.h>
#include <linux/mbus.h>
#include <asm-generic/gpio.h>
#include <fdt_support.h>

DECLARE_GLOBAL_DATA_PTR;

#define __verify_pcpu_ptr(ptr)						\
do {									\
	const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL;	\
	(void)__vpp_verify;						\
} while (0)

#define VERIFY_PERCPU_PTR(__p)						\
({									\
	__verify_pcpu_ptr(__p);						\
	(typeof(*(__p)) __kernel __force *)(__p);			\
})

#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
#define smp_processor_id()	0
#define num_present_cpus()	1
#define for_each_present_cpu(cpu)			\
	for ((cpu) = 0; (cpu) < 1; (cpu)++)

#define NET_SKB_PAD	max(32, MVPP2_CPU_D_CACHE_LINE_SIZE)

#define CONFIG_NR_CPUS		1

/* 2(HW hdr) 14(MAC hdr) 4(CRC) 32(extra for cache prefetch) */
#define WRAP			(2 + ETH_HLEN + 4 + 32)
#define MTU			1500
#define RX_BUFFER_SIZE		(ALIGN(MTU + WRAP, ARCH_DMA_MINALIGN))
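
/*
 * Worked example (illustrative only, assuming ARCH_DMA_MINALIGN == 64 as
 * on ARMv8): WRAP = 2 + 14 + 4 + 32 = 52 bytes, so RX_BUFFER_SIZE =
 * ALIGN(1500 + 52, 64) = ALIGN(1552, 64) = 1600 bytes per RX buffer.
 */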

#define MVPP2_SMI_TIMEOUT			10000

/* RX Fifo Registers */
#define MVPP2_RX_DATA_FIFO_SIZE_REG(port)	(0x00 + 4 * (port))
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port)	(0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG		0x60
#define MVPP2_RX_FIFO_INIT_REG			0x64

/* RX DMA Top Registers */
#define MVPP2_RX_CTRL_REG(port)			(0x140 + 4 * (port))
#define     MVPP2_RX_LOW_LATENCY_PKT_SIZE(s)	(((s) & 0xfff) << 16)
#define     MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK	BIT(31)
#define MVPP2_POOL_BUF_SIZE_REG(pool)		(0x180 + 4 * (pool))
#define     MVPP2_POOL_BUF_SIZE_OFFSET		5
#define MVPP2_RXQ_CONFIG_REG(rxq)		(0x800 + 4 * (rxq))
#define     MVPP2_SNOOP_PKT_SIZE_MASK		0x1ff
#define     MVPP2_SNOOP_BUF_HDR_MASK		BIT(9)
#define     MVPP2_RXQ_POOL_SHORT_OFFS		20
#define     MVPP21_RXQ_POOL_SHORT_MASK		0x700000
#define     MVPP22_RXQ_POOL_SHORT_MASK		0xf00000
#define     MVPP2_RXQ_POOL_LONG_OFFS		24
#define     MVPP21_RXQ_POOL_LONG_MASK		0x7000000
#define     MVPP22_RXQ_POOL_LONG_MASK		0xf000000
#define     MVPP2_RXQ_PACKET_OFFSET_OFFS	28
#define     MVPP2_RXQ_PACKET_OFFSET_MASK	0x70000000
#define     MVPP2_RXQ_DISABLE_MASK		BIT(31)

/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG		0x1000
#define     MVPP2_PRS_PORT_LU_MAX		0xf
#define     MVPP2_PRS_PORT_LU_MASK(port)	(0xff << ((port) * 4))
#define     MVPP2_PRS_PORT_LU_VAL(port, val)	((val) << ((port) * 4))
#define MVPP2_PRS_INIT_OFFS_REG(port)		(0x1004 + ((port) & 4))
#define     MVPP2_PRS_INIT_OFF_MASK(port)	(0x3f << (((port) % 4) * 8))
#define     MVPP2_PRS_INIT_OFF_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_MAX_LOOP_REG(port)		(0x100c + ((port) & 4))
#define     MVPP2_PRS_MAX_LOOP_MASK(port)	(0xff << (((port) % 4) * 8))
#define     MVPP2_PRS_MAX_LOOP_VAL(port, val)	((val) << (((port) % 4) * 8))
#define MVPP2_PRS_TCAM_IDX_REG			0x1100
#define MVPP2_PRS_TCAM_DATA_REG(idx)		(0x1104 + (idx) * 4)
#define     MVPP2_PRS_TCAM_INV_MASK		BIT(31)
#define MVPP2_PRS_SRAM_IDX_REG			0x1200
#define MVPP2_PRS_SRAM_DATA_REG(idx)		(0x1204 + (idx) * 4)
#define MVPP2_PRS_TCAM_CTRL_REG			0x1230
#define     MVPP2_PRS_TCAM_EN_MASK		BIT(0)

/* Classifier Registers */
#define MVPP2_CLS_MODE_REG			0x1800
#define     MVPP2_CLS_MODE_ACTIVE_MASK		BIT(0)
#define MVPP2_CLS_PORT_WAY_REG			0x1810
#define     MVPP2_CLS_PORT_WAY_MASK(port)	(1 << (port))
#define MVPP2_CLS_LKP_INDEX_REG			0x1814
#define     MVPP2_CLS_LKP_INDEX_WAY_OFFS	6
#define MVPP2_CLS_LKP_TBL_REG			0x1818
#define     MVPP2_CLS_LKP_TBL_RXQ_MASK		0xff
#define     MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK	BIT(25)
#define MVPP2_CLS_FLOW_INDEX_REG		0x1820
#define MVPP2_CLS_FLOW_TBL0_REG			0x1824
#define MVPP2_CLS_FLOW_TBL1_REG			0x1828
#define MVPP2_CLS_FLOW_TBL2_REG			0x182c
#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port)	(0x1980 + ((port) * 4))
#define     MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS	3
#define     MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK	0x7
#define MVPP2_CLS_SWFWD_P2HQ_REG(port)		(0x19b0 + ((port) * 4))
#define MVPP2_CLS_SWFWD_PCTRL_REG		0x19d0
#define     MVPP2_CLS_SWFWD_PCTRL_MASK(port)	(1 << (port))

/* Descriptor Manager Top Registers */
#define MVPP2_RXQ_NUM_REG			0x2040
#define MVPP2_RXQ_DESC_ADDR_REG			0x2044
#define     MVPP22_DESC_ADDR_OFFS		8
#define MVPP2_RXQ_DESC_SIZE_REG			0x2048
#define     MVPP2_RXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)	(0x3000 + 4 * (rxq))
#define     MVPP2_RXQ_NUM_PROCESSED_OFFSET	0
#define     MVPP2_RXQ_NUM_NEW_OFFSET		16
#define MVPP2_RXQ_STATUS_REG(rxq)		(0x3400 + 4 * (rxq))
#define     MVPP2_RXQ_OCCUPIED_MASK		0x3fff
#define     MVPP2_RXQ_NON_OCCUPIED_OFFSET	16
#define     MVPP2_RXQ_NON_OCCUPIED_MASK		0x3fff0000
#define MVPP2_RXQ_THRESH_REG			0x204c
#define     MVPP2_OCCUPIED_THRESH_OFFSET	0
#define     MVPP2_OCCUPIED_THRESH_MASK		0x3fff
#define MVPP2_RXQ_INDEX_REG			0x2050
#define MVPP2_TXQ_NUM_REG			0x2080
#define MVPP2_TXQ_DESC_ADDR_REG			0x2084
#define MVPP2_TXQ_DESC_SIZE_REG			0x2088
#define     MVPP2_TXQ_DESC_SIZE_MASK		0x3ff0
#define MVPP2_AGGR_TXQ_UPDATE_REG		0x2090
#define MVPP2_TXQ_THRESH_REG			0x2094
#define     MVPP2_TRANSMITTED_THRESH_OFFSET	16
#define     MVPP2_TRANSMITTED_THRESH_MASK	0x3fff0000
#define MVPP2_TXQ_INDEX_REG			0x2098
#define MVPP2_TXQ_PREF_BUF_REG			0x209c
#define     MVPP2_PREF_BUF_PTR(desc)		((desc) & 0xfff)
#define     MVPP2_PREF_BUF_SIZE_4		(BIT(12) | BIT(13))
#define     MVPP2_PREF_BUF_SIZE_16		(BIT(12) | BIT(14))
#define     MVPP2_PREF_BUF_THRESH(val)		((val) << 17)
#define     MVPP2_TXQ_DRAIN_EN_MASK		BIT(31)
#define MVPP2_TXQ_PENDING_REG			0x20a0
#define     MVPP2_TXQ_PENDING_MASK		0x3fff
#define MVPP2_TXQ_INT_STATUS_REG		0x20a4
#define MVPP2_TXQ_SENT_REG(txq)			(0x3c00 + 4 * (txq))
#define     MVPP2_TRANSMITTED_COUNT_OFFSET	16
#define     MVPP2_TRANSMITTED_COUNT_MASK	0x3fff0000
#define MVPP2_TXQ_RSVD_REQ_REG			0x20b0
#define     MVPP2_TXQ_RSVD_REQ_Q_OFFSET		16
#define MVPP2_TXQ_RSVD_RSLT_REG			0x20b4
#define     MVPP2_TXQ_RSVD_RSLT_MASK		0x3fff
#define MVPP2_TXQ_RSVD_CLR_REG			0x20b8
#define     MVPP2_TXQ_RSVD_CLR_OFFSET		16
#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)	(0x2100 + 4 * (cpu))
#define     MVPP22_AGGR_TXQ_DESC_ADDR_OFFS	8
#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)	(0x2140 + 4 * (cpu))
#define     MVPP2_AGGR_TXQ_DESC_SIZE_MASK	0x3ff0
#define MVPP2_AGGR_TXQ_STATUS_REG(cpu)		(0x2180 + 4 * (cpu))
#define     MVPP2_AGGR_TXQ_PENDING_MASK		0x3fff
#define MVPP2_AGGR_TXQ_INDEX_REG(cpu)		(0x21c0 + 4 * (cpu))

/* MBUS bridge registers */
#define MVPP2_WIN_BASE(w)			(0x4000 + ((w) << 2))
#define MVPP2_WIN_SIZE(w)			(0x4020 + ((w) << 2))
#define MVPP2_WIN_REMAP(w)			(0x4040 + ((w) << 2))
#define MVPP2_BASE_ADDR_ENABLE			0x4060

/* AXI Bridge Registers */
#define MVPP22_AXI_BM_WR_ATTR_REG		0x4100
#define MVPP22_AXI_BM_RD_ATTR_REG		0x4104
#define MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG	0x4110
#define MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG	0x4114
#define MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG	0x4118
#define MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG	0x411c
#define MVPP22_AXI_RX_DATA_WR_ATTR_REG		0x4120
#define MVPP22_AXI_TX_DATA_RD_ATTR_REG		0x4130
#define MVPP22_AXI_RD_NORMAL_CODE_REG		0x4150
#define MVPP22_AXI_RD_SNOOP_CODE_REG		0x4154
#define MVPP22_AXI_WR_NORMAL_CODE_REG		0x4160
#define MVPP22_AXI_WR_SNOOP_CODE_REG		0x4164

/* Values for AXI Bridge registers */
#define MVPP22_AXI_ATTR_CACHE_OFFS		0
#define MVPP22_AXI_ATTR_DOMAIN_OFFS		12

#define MVPP22_AXI_CODE_CACHE_OFFS		0
#define MVPP22_AXI_CODE_DOMAIN_OFFS		4

#define MVPP22_AXI_CODE_CACHE_NON_CACHE		0x3
#define MVPP22_AXI_CODE_CACHE_WR_CACHE		0x7
#define MVPP22_AXI_CODE_CACHE_RD_CACHE		0xb

#define MVPP22_AXI_CODE_DOMAIN_OUTER_DOM	2
#define MVPP22_AXI_CODE_DOMAIN_SYSTEM		3

/* Interrupt Cause and Mask registers */
#define MVPP2_ISR_RX_THRESHOLD_REG(rxq)		(0x5200 + 4 * (rxq))
#define MVPP21_ISR_RXQ_GROUP_REG(rxq)		(0x5400 + 4 * (rxq))

#define MVPP22_ISR_RXQ_GROUP_INDEX_REG          0x5400
#define MVPP22_ISR_RXQ_GROUP_INDEX_SUBGROUP_MASK 0xf
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_MASK   0x380
#define MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET 7
#define MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG     0x5404
#define MVPP22_ISR_RXQ_SUB_GROUP_STARTQ_MASK    0x1f
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_MASK      0xf00
#define MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET    8

#define MVPP2_ISR_ENABLE_REG(port)		(0x5420 + 4 * (port))
#define     MVPP2_ISR_ENABLE_INTERRUPT(mask)	((mask) & 0xffff)
#define     MVPP2_ISR_DISABLE_INTERRUPT(mask)	(((mask) << 16) & 0xffff0000)
#define MVPP2_ISR_RX_TX_CAUSE_REG(port)		(0x5480 + 4 * (port))
#define     MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define     MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK	0xff0000
#define     MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK	BIT(24)
#define     MVPP2_CAUSE_FCS_ERR_MASK		BIT(25)
#define     MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK	BIT(26)
#define     MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK	BIT(29)
#define     MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK	BIT(30)
#define     MVPP2_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_RX_TX_MASK_REG(port)		(0x54a0 + 4 * (port))
#define MVPP2_ISR_PON_RX_TX_MASK_REG		0x54bc
#define     MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
#define     MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK	0x3fc00000
#define     MVPP2_PON_CAUSE_MISC_SUM_MASK		BIT(31)
#define MVPP2_ISR_MISC_CAUSE_REG		0x55b0

/* Buffer Manager registers */
#define MVPP2_BM_POOL_BASE_REG(pool)		(0x6000 + ((pool) * 4))
#define     MVPP2_BM_POOL_BASE_ADDR_MASK	0xfffff80
#define MVPP2_BM_POOL_SIZE_REG(pool)		(0x6040 + ((pool) * 4))
#define     MVPP2_BM_POOL_SIZE_MASK		0xfff0
#define MVPP2_BM_POOL_READ_PTR_REG(pool)	(0x6080 + ((pool) * 4))
#define     MVPP2_BM_POOL_GET_READ_PTR_MASK	0xfff0
#define MVPP2_BM_POOL_PTRS_NUM_REG(pool)	(0x60c0 + ((pool) * 4))
#define     MVPP2_BM_POOL_PTRS_NUM_MASK		0xfff0
#define MVPP2_BM_BPPI_READ_PTR_REG(pool)	(0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool)	(0x6140 + ((pool) * 4))
#define     MVPP2_BM_BPPI_PTR_NUM_MASK		0x7ff
#define     MVPP2_BM_BPPI_PREFETCH_FULL_MASK	BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool)		(0x6200 + ((pool) * 4))
#define     MVPP2_BM_START_MASK			BIT(0)
#define     MVPP2_BM_STOP_MASK			BIT(1)
#define     MVPP2_BM_STATE_MASK			BIT(4)
#define     MVPP2_BM_LOW_THRESH_OFFS		8
#define     MVPP2_BM_LOW_THRESH_MASK		0x7f00
#define     MVPP2_BM_LOW_THRESH_VALUE(val)	((val) << \
						MVPP2_BM_LOW_THRESH_OFFS)
#define     MVPP2_BM_HIGH_THRESH_OFFS		16
#define     MVPP2_BM_HIGH_THRESH_MASK		0x7f0000
#define     MVPP2_BM_HIGH_THRESH_VALUE(val)	((val) << \
						MVPP2_BM_HIGH_THRESH_OFFS)
#define MVPP2_BM_INTR_CAUSE_REG(pool)		(0x6240 + ((pool) * 4))
#define     MVPP2_BM_RELEASED_DELAY_MASK	BIT(0)
#define     MVPP2_BM_ALLOC_FAILED_MASK		BIT(1)
#define     MVPP2_BM_BPPE_EMPTY_MASK		BIT(2)
#define     MVPP2_BM_BPPE_FULL_MASK		BIT(3)
#define     MVPP2_BM_AVAILABLE_BP_LOW_MASK	BIT(4)
#define MVPP2_BM_INTR_MASK_REG(pool)		(0x6280 + ((pool) * 4))
#define MVPP2_BM_PHY_ALLOC_REG(pool)		(0x6400 + ((pool) * 4))
#define     MVPP2_BM_PHY_ALLOC_GRNTD_MASK	BIT(0)
#define MVPP2_BM_VIRT_ALLOC_REG			0x6440
#define MVPP2_BM_ADDR_HIGH_ALLOC		0x6444
#define     MVPP2_BM_ADDR_HIGH_PHYS_MASK	0xff
#define     MVPP2_BM_ADDR_HIGH_VIRT_MASK	0xff00
#define     MVPP2_BM_ADDR_HIGH_VIRT_SHIFT	8
#define MVPP2_BM_PHY_RLS_REG(pool)		(0x6480 + ((pool) * 4))
#define     MVPP2_BM_PHY_RLS_MC_BUFF_MASK	BIT(0)
#define     MVPP2_BM_PHY_RLS_PRIO_EN_MASK	BIT(1)
#define     MVPP2_BM_PHY_RLS_GRNTD_MASK		BIT(2)
#define MVPP2_BM_VIRT_RLS_REG			0x64c0
#define MVPP21_BM_MC_RLS_REG			0x64c4
#define     MVPP2_BM_MC_ID_MASK			0xfff
#define     MVPP2_BM_FORCE_RELEASE_MASK		BIT(12)
#define MVPP22_BM_ADDR_HIGH_RLS_REG		0x64c4
#define     MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK	0xff
#define     MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK	0xff00
#define     MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT	8
#define MVPP22_BM_MC_RLS_REG			0x64d4
#define MVPP22_BM_POOL_BASE_HIGH_REG		0x6310
#define MVPP22_BM_POOL_BASE_HIGH_MASK		0xff

/* TX Scheduler registers */
#define MVPP2_TXP_SCHED_PORT_INDEX_REG		0x8000
#define MVPP2_TXP_SCHED_Q_CMD_REG		0x8004
#define     MVPP2_TXP_SCHED_ENQ_MASK		0xff
#define     MVPP2_TXP_SCHED_DISQ_OFFSET		8
#define MVPP2_TXP_SCHED_CMD_1_REG		0x8010
#define MVPP2_TXP_SCHED_PERIOD_REG		0x8018
#define MVPP2_TXP_SCHED_MTU_REG			0x801c
#define     MVPP2_TXP_MTU_MAX			0x7FFFF
#define MVPP2_TXP_SCHED_REFILL_REG		0x8020
#define     MVPP2_TXP_REFILL_TOKENS_ALL_MASK	0x7ffff
#define     MVPP2_TXP_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define     MVPP2_TXP_REFILL_PERIOD_MASK(v)	((v) << 20)
#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG		0x8024
#define     MVPP2_TXP_TOKEN_SIZE_MAX		0xffffffff
#define MVPP2_TXQ_SCHED_REFILL_REG(q)		(0x8040 + ((q) << 2))
#define     MVPP2_TXQ_REFILL_TOKENS_ALL_MASK	0x7ffff
#define     MVPP2_TXQ_REFILL_PERIOD_ALL_MASK	0x3ff00000
#define     MVPP2_TXQ_REFILL_PERIOD_MASK(v)	((v) << 20)
#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q)	(0x8060 + ((q) << 2))
#define     MVPP2_TXQ_TOKEN_SIZE_MAX		0x7fffffff
#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q)	(0x8080 + ((q) << 2))
#define     MVPP2_TXQ_TOKEN_CNTR_MAX		0xffffffff

/* TX general registers */
#define MVPP2_TX_SNOOP_REG			0x8800
#define MVPP2_TX_PORT_FLUSH_REG			0x8810
#define     MVPP2_TX_PORT_FLUSH_MASK(port)	(1 << (port))

/* LMS registers */
#define MVPP2_SRC_ADDR_MIDDLE			0x24
#define MVPP2_SRC_ADDR_HIGH			0x28
#define MVPP2_PHY_AN_CFG0_REG			0x34
#define     MVPP2_PHY_AN_STOP_SMI0_MASK		BIT(7)
#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG	0x305c
#define     MVPP2_EXT_GLOBAL_CTRL_DEFAULT	0x27

/* Per-port registers */
#define MVPP2_GMAC_CTRL_0_REG			0x0
#define      MVPP2_GMAC_PORT_EN_MASK		BIT(0)
#define      MVPP2_GMAC_PORT_TYPE_MASK		BIT(1)
#define      MVPP2_GMAC_MAX_RX_SIZE_OFFS	2
#define      MVPP2_GMAC_MAX_RX_SIZE_MASK	0x7ffc
#define      MVPP2_GMAC_MIB_CNTR_EN_MASK	BIT(15)
#define MVPP2_GMAC_CTRL_1_REG			0x4
#define      MVPP2_GMAC_PERIODIC_XON_EN_MASK	BIT(1)
#define      MVPP2_GMAC_GMII_LB_EN_MASK		BIT(5)
#define      MVPP2_GMAC_PCS_LB_EN_BIT		6
#define      MVPP2_GMAC_PCS_LB_EN_MASK		BIT(6)
#define      MVPP2_GMAC_SA_LOW_OFFS		7
#define MVPP2_GMAC_CTRL_2_REG			0x8
#define      MVPP2_GMAC_INBAND_AN_MASK		BIT(0)
#define      MVPP2_GMAC_SGMII_MODE_MASK		BIT(0)
#define      MVPP2_GMAC_PCS_ENABLE_MASK		BIT(3)
#define      MVPP2_GMAC_PORT_RGMII_MASK		BIT(4)
#define      MVPP2_GMAC_PORT_DIS_PADING_MASK	BIT(5)
#define      MVPP2_GMAC_PORT_RESET_MASK		BIT(6)
#define      MVPP2_GMAC_CLK_125_BYPS_EN_MASK	BIT(9)
#define MVPP2_GMAC_AUTONEG_CONFIG		0xc
#define      MVPP2_GMAC_FORCE_LINK_DOWN		BIT(0)
#define      MVPP2_GMAC_FORCE_LINK_PASS		BIT(1)
#define      MVPP2_GMAC_EN_PCS_AN		BIT(2)
#define      MVPP2_GMAC_AN_BYPASS_EN		BIT(3)
#define      MVPP2_GMAC_CONFIG_MII_SPEED	BIT(5)
#define      MVPP2_GMAC_CONFIG_GMII_SPEED	BIT(6)
#define      MVPP2_GMAC_AN_SPEED_EN		BIT(7)
#define      MVPP2_GMAC_FC_ADV_EN		BIT(9)
#define      MVPP2_GMAC_EN_FC_AN		BIT(11)
#define      MVPP2_GMAC_CONFIG_FULL_DUPLEX	BIT(12)
#define      MVPP2_GMAC_AN_DUPLEX_EN		BIT(13)
#define      MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG	BIT(15)
#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG		0x1c
#define      MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS	6
#define      MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK	0x1fc0
#define      MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v)	(((v) << 6) & \
					MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
#define MVPP2_GMAC_CTRL_4_REG			0x90
#define      MVPP2_GMAC_CTRL4_EXT_PIN_GMII_SEL_MASK	BIT(0)
#define      MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK	BIT(5)
#define      MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK	BIT(6)
#define      MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK	BIT(7)

/*
 * Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
 * relative to port->base.
 */

/* Port Mac Control0 */
#define MVPP22_XLG_CTRL0_REG			0x100
#define      MVPP22_XLG_PORT_EN			BIT(0)
#define      MVPP22_XLG_MAC_RESETN		BIT(1)
#define      MVPP22_XLG_RX_FC_EN		BIT(7)
#define      MVPP22_XLG_MIBCNT_DIS		BIT(13)
/* Port Mac Control1 */
#define MVPP22_XLG_CTRL1_REG			0x104
#define      MVPP22_XLG_MAX_RX_SIZE_OFFS	0
#define      MVPP22_XLG_MAX_RX_SIZE_MASK	0x1fff
/* Port Interrupt Mask */
#define MVPP22_XLG_INTERRUPT_MASK_REG		0x118
#define      MVPP22_XLG_INTERRUPT_LINK_CHANGE	BIT(1)
/* Port Mac Control3 */
#define MVPP22_XLG_CTRL3_REG			0x11c
#define      MVPP22_XLG_CTRL3_MACMODESELECT_MASK	(7 << 13)
#define      MVPP22_XLG_CTRL3_MACMODESELECT_GMAC	(0 << 13)
#define      MVPP22_XLG_CTRL3_MACMODESELECT_10GMAC	(1 << 13)
/* Port Mac Control4 */
#define MVPP22_XLG_CTRL4_REG			0x184
#define      MVPP22_XLG_FORWARD_802_3X_FC_EN	BIT(5)
#define      MVPP22_XLG_FORWARD_PFC_EN		BIT(6)
#define      MVPP22_XLG_MODE_DMA_1G		BIT(12)
#define      MVPP22_XLG_EN_IDLE_CHECK_FOR_LINK	BIT(14)

/* XPCS registers */

/* Global Configuration 0 */
#define MVPP22_XPCS_GLOBAL_CFG_0_REG		0x0
#define      MVPP22_XPCS_PCSRESET		BIT(0)
#define      MVPP22_XPCS_PCSMODE_OFFS		3
#define      MVPP22_XPCS_PCSMODE_MASK		(0x3 << \
						 MVPP22_XPCS_PCSMODE_OFFS)
#define      MVPP22_XPCS_LANEACTIVE_OFFS	5
#define      MVPP22_XPCS_LANEACTIVE_MASK	(0x3 << \
						 MVPP22_XPCS_LANEACTIVE_OFFS)

/* MPCS registers */

#define PCS40G_COMMON_CONTROL			0x14
#define      FORWARD_ERROR_CORRECTION_MASK	BIT(10)

#define PCS_CLOCK_RESET				0x14c
#define      TX_SD_CLK_RESET_MASK		BIT(0)
#define      RX_SD_CLK_RESET_MASK		BIT(1)
#define      MAC_CLK_RESET_MASK			BIT(2)
#define      CLK_DIVISION_RATIO_OFFS		4
#define      CLK_DIVISION_RATIO_MASK		(0x7 << CLK_DIVISION_RATIO_OFFS)
#define      CLK_DIV_PHASE_SET_MASK		BIT(11)

/* System Soft Reset 1 */
#define GOP_SOFT_RESET_1_REG			0x108
#define     NETC_GOP_SOFT_RESET_OFFS		6
#define     NETC_GOP_SOFT_RESET_MASK		(0x1 << \
						 NETC_GOP_SOFT_RESET_OFFS)

/* Ports Control 0 */
#define NETCOMP_PORTS_CONTROL_0_REG		0x110
#define     NETC_BUS_WIDTH_SELECT_OFFS		1
#define     NETC_BUS_WIDTH_SELECT_MASK		(0x1 << \
						 NETC_BUS_WIDTH_SELECT_OFFS)
#define     NETC_GIG_RX_DATA_SAMPLE_OFFS	29
#define     NETC_GIG_RX_DATA_SAMPLE_MASK	(0x1 << \
						 NETC_GIG_RX_DATA_SAMPLE_OFFS)
#define     NETC_CLK_DIV_PHASE_OFFS		31
#define     NETC_CLK_DIV_PHASE_MASK		(0x1 << NETC_CLK_DIV_PHASE_OFFS)
/* Ports Control 1 */
#define NETCOMP_PORTS_CONTROL_1_REG		0x114
#define     NETC_PORTS_ACTIVE_OFFSET(p)		(0 + p)
#define     NETC_PORTS_ACTIVE_MASK(p)		(0x1 << \
						 NETC_PORTS_ACTIVE_OFFSET(p))
#define     NETC_PORT_GIG_RF_RESET_OFFS(p)	(28 + p)
#define     NETC_PORT_GIG_RF_RESET_MASK(p)	(0x1 << \
						 NETC_PORT_GIG_RF_RESET_OFFS(p))
#define NETCOMP_CONTROL_0_REG			0x120
#define     NETC_GBE_PORT0_SGMII_MODE_OFFS	0
#define     NETC_GBE_PORT0_SGMII_MODE_MASK	(0x1 << \
						 NETC_GBE_PORT0_SGMII_MODE_OFFS)
#define     NETC_GBE_PORT1_SGMII_MODE_OFFS	1
#define     NETC_GBE_PORT1_SGMII_MODE_MASK	(0x1 << \
						 NETC_GBE_PORT1_SGMII_MODE_OFFS)
#define     NETC_GBE_PORT1_MII_MODE_OFFS	2
#define     NETC_GBE_PORT1_MII_MODE_MASK	(0x1 << \
						 NETC_GBE_PORT1_MII_MODE_OFFS)

#define MVPP22_SMI_MISC_CFG_REG			(MVPP22_SMI + 0x04)
#define      MVPP22_SMI_POLLING_EN		BIT(10)

#define MVPP22_SMI_PHY_ADDR_REG(port)		(MVPP22_SMI + 0x04 + \
						 (0x4 * (port)))

#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK	0xff

/* Descriptor ring Macros */
#define MVPP2_QUEUE_NEXT_DESC(q, index) \
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
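
/*
 * Example (illustrative): for a 16-descriptor ring, last_desc is 15, so
 * MVPP2_QUEUE_NEXT_DESC(q, 14) yields 15 and MVPP2_QUEUE_NEXT_DESC(q, 15)
 * wraps back to index 0 at the start of the ring.
 */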

/* SMI: 0xc0054 -> offset 0x54 to lms_base */
#define MVPP21_SMI				0x0054
/* PP2.2: SMI: 0x12a200 -> offset 0x1200 to iface_base */
#define MVPP22_SMI				0x1200
/* SMI register fields */
#define     MVPP2_SMI_DATA_OFFS			0	/* Data */
#define     MVPP2_SMI_DATA_MASK			(0xffff << MVPP2_SMI_DATA_OFFS)
#define     MVPP2_SMI_DEV_ADDR_OFFS		16	/* PHY device address */
#define     MVPP2_SMI_REG_ADDR_OFFS		21	/* PHY device reg addr*/
#define     MVPP2_SMI_OPCODE_OFFS		26	/* Write/Read opcode */
#define     MVPP2_SMI_OPCODE_READ		(1 << MVPP2_SMI_OPCODE_OFFS)
#define     MVPP2_SMI_READ_VALID		(1 << 27)	/* Read Valid */
#define     MVPP2_SMI_BUSY			(1 << 28)	/* Busy */

#define     MVPP2_PHY_ADDR_MASK			0x1f
#define     MVPP2_PHY_REG_MASK			0x1f

/* Additional PPv2.2 offsets */
#define MVPP22_MPCS				0x007000
#define MVPP22_XPCS				0x007400
#define MVPP22_PORT_BASE			0x007e00
#define MVPP22_PORT_OFFSET			0x001000
#define MVPP22_RFU1				0x318000

/* Maximum number of ports */
#define MVPP22_GOP_MAC_NUM			4

/* TX FIFO minimum threshold values, per port interface mode */
#define MVPP2_RGMII_TX_FIFO_MIN_TH		0x41
#define MVPP2_SGMII_TX_FIFO_MIN_TH		0x5
#define MVPP2_SGMII2_5_TX_FIFO_MIN_TH		0xb

/* Net Complex */
enum mv_netc_topology {
	MV_NETC_GE_MAC2_SGMII		=	BIT(0),
	MV_NETC_GE_MAC3_SGMII		=	BIT(1),
	MV_NETC_GE_MAC3_RGMII		=	BIT(2),
};

enum mv_netc_phase {
	MV_NETC_FIRST_PHASE,
	MV_NETC_SECOND_PHASE,
};

enum mv_netc_sgmii_xmi_mode {
	MV_NETC_GBE_SGMII,
	MV_NETC_GBE_XMII,
};

enum mv_netc_mii_mode {
	MV_NETC_GBE_RGMII,
	MV_NETC_GBE_MII,
};

enum mv_netc_lanes {
	MV_NETC_LANE_23,
	MV_NETC_LANE_45,
};

/* Various constants */

/* Coalescing */
#define MVPP2_TXDONE_COAL_PKTS_THRESH	15
#define MVPP2_TXDONE_HRTIMER_PERIOD_NS	1000000UL
#define MVPP2_RX_COAL_PKTS		32
#define MVPP2_RX_COAL_USEC		100

/* The two-byte Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically with zeroes on
 * the RX side. Since those two bytes sit at the front of the Ethernet
 * header, they automatically align the IP header on a 4-byte boundary:
 * the hardware skips those two bytes on its own.
 */
#define MVPP2_MH_SIZE			2
#define MVPP2_ETH_TYPE_LEN		2
#define MVPP2_PPPOE_HDR_SIZE		8
#define MVPP2_VLAN_TAG_LEN		4

/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE		0xfffa

#define MVPP2_CPU_D_CACHE_LINE_SIZE	32
#define MVPP2_TX_CSUM_MAX_SIZE		9800

/* Timeout constants */
#define MVPP2_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVPP2_TX_PENDING_TIMEOUT_MSEC	1000

#define MVPP2_TX_MTU_MAX		0x7ffff

/* Maximum number of T-CONTs of PON port */
#define MVPP2_MAX_TCONT			16

/* Maximum number of supported ports */
#define MVPP2_MAX_PORTS			4

/* Maximum number of TXQs used by single port */
#define MVPP2_MAX_TXQ			8

/* Default number of TXQs in use */
#define MVPP2_DEFAULT_TXQ		1

/* Default number of RXQs in use */
#define MVPP2_DEFAULT_RXQ		1
#define CONFIG_MV_ETH_RXQ		8	/* increment by 8 */

/* Max number of Rx descriptors */
#define MVPP2_MAX_RXD			16

/* Max number of Tx descriptors */
#define MVPP2_MAX_TXD			16

/* Amount of Tx descriptors that can be reserved at once by CPU */
#define MVPP2_CPU_DESC_CHUNK		16

/* Max number of Tx descriptors in each aggregated queue */
#define MVPP2_AGGR_TXQ_SIZE		16

/* Descriptor aligned size */
#define MVPP2_DESC_ALIGNED_SIZE		32

/* Descriptor alignment mask */
#define MVPP2_TX_DESC_ALIGN		(MVPP2_DESC_ALIGNED_SIZE - 1)
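
/*
 * A minimal sketch of how this power-of-two mask is typically used to
 * round a descriptor base address up to the required 32-byte alignment
 * (illustrative only, not a helper this driver defines):
 *
 *	aligned = (addr + MVPP2_TX_DESC_ALIGN) & ~MVPP2_TX_DESC_ALIGN;
 */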

/* RX FIFO constants */
#define MVPP21_RX_FIFO_PORT_DATA_SIZE		0x2000
#define MVPP21_RX_FIFO_PORT_ATTR_SIZE		0x80
#define MVPP22_RX_FIFO_10GB_PORT_DATA_SIZE	0x8000
#define MVPP22_RX_FIFO_2_5GB_PORT_DATA_SIZE	0x2000
#define MVPP22_RX_FIFO_1GB_PORT_DATA_SIZE	0x1000
#define MVPP22_RX_FIFO_10GB_PORT_ATTR_SIZE	0x200
#define MVPP22_RX_FIFO_2_5GB_PORT_ATTR_SIZE	0x80
#define MVPP22_RX_FIFO_1GB_PORT_ATTR_SIZE	0x40
#define MVPP2_RX_FIFO_PORT_MIN_PKT		0x80

/* TX general registers */
#define MVPP22_TX_FIFO_SIZE_REG(eth_tx_port)	(0x8860 + ((eth_tx_port) << 2))
#define MVPP22_TX_FIFO_SIZE_MASK		0xf

/* TX FIFO constants */
#define MVPP2_TX_FIFO_DATA_SIZE_10KB		0xa
#define MVPP2_TX_FIFO_DATA_SIZE_3KB		0x3

/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
	0

#define MVPP2_RX_PKT_SIZE(mtu) \
	ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
	      ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE)

#define MVPP2_RX_BUF_SIZE(pkt_size)	((pkt_size) + NET_SKB_PAD)
#define MVPP2_RX_TOTAL_SIZE(buf_size)	((buf_size) + MVPP2_SKB_SHINFO_SIZE)
#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
	((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
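
/*
 * Worked example (illustrative): MVPP2_RX_PKT_SIZE(1500) =
 * ALIGN(1500 + 2 + 4 + 14 + 4, 32) = 1536 bytes, and with NET_SKB_PAD = 32
 * and a shared-info size of 0 in this U-Boot port,
 * MVPP2_RX_BUF_SIZE(1536) = 1568 bytes per buffer.
 */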

#define MVPP2_BIT_TO_BYTE(bit)		((bit) / 8)

/* IPv6 max L3 address size */
#define MVPP2_MAX_L3_ADDR_SIZE		16

/* Port flags */
#define MVPP2_F_LOOPBACK		BIT(0)

/* Marvell tag types */
enum mvpp2_tag_type {
	MVPP2_TAG_TYPE_NONE = 0,
	MVPP2_TAG_TYPE_MH   = 1,
	MVPP2_TAG_TYPE_DSA  = 2,
	MVPP2_TAG_TYPE_EDSA = 3,
	MVPP2_TAG_TYPE_VLAN = 4,
	MVPP2_TAG_TYPE_LAST = 5
};

/* Parser constants */
#define MVPP2_PRS_TCAM_SRAM_SIZE	256
#define MVPP2_PRS_TCAM_WORDS		6
#define MVPP2_PRS_SRAM_WORDS		4
#define MVPP2_PRS_FLOW_ID_SIZE		64
#define MVPP2_PRS_FLOW_ID_MASK		0x3f
#define MVPP2_PRS_TCAM_ENTRY_INVALID	1
#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT	BIT(5)
#define MVPP2_PRS_IPV4_HEAD		0x40
#define MVPP2_PRS_IPV4_HEAD_MASK	0xf0
#define MVPP2_PRS_IPV4_MC		0xe0
#define MVPP2_PRS_IPV4_MC_MASK		0xf0
#define MVPP2_PRS_IPV4_BC_MASK		0xff
#define MVPP2_PRS_IPV4_IHL		0x5
#define MVPP2_PRS_IPV4_IHL_MASK		0xf
#define MVPP2_PRS_IPV6_MC		0xff
#define MVPP2_PRS_IPV6_MC_MASK		0xff
#define MVPP2_PRS_IPV6_HOP_MASK		0xff
#define MVPP2_PRS_TCAM_PROTO_MASK	0xff
#define MVPP2_PRS_TCAM_PROTO_MASK_L	0x3f
#define MVPP2_PRS_DBL_VLANS_MAX		100

/* Tcam structure:
 * - lookup ID - 4 bits
 * - port ID - 1 byte
 * - additional information - 1 byte
 * - header data - 8 bytes
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
 */
#define MVPP2_PRS_AI_BITS			8
#define MVPP2_PRS_PORT_MASK			0xff
#define MVPP2_PRS_LU_MASK			0xf
#define MVPP2_PRS_TCAM_DATA_BYTE(offs)		\
				    (((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)	\
					      (((offs) * 2) - ((offs) % 2) + 2)
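
/*
 * Byte-mapping example (illustrative): each 32-bit TCAM word holds two
 * data bytes in its low half-word and their enable bytes in the high one,
 * so MVPP2_PRS_TCAM_DATA_BYTE() maps offsets 0, 1, 2, 3 to bytes
 * 0, 1, 4, 5 while MVPP2_PRS_TCAM_DATA_BYTE_EN() maps them to 2, 3, 6, 7.
 */
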
#define MVPP2_PRS_TCAM_AI_BYTE			16
#define MVPP2_PRS_TCAM_PORT_BYTE		17
#define MVPP2_PRS_TCAM_LU_BYTE			20
#define MVPP2_PRS_TCAM_EN_OFFS(offs)		((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD			5
/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL		0
#define MVPP2_PE_FIRST_FREE_TID		1
#define MVPP2_PE_LAST_FREE_TID		(MVPP2_PRS_TCAM_SRAM_SIZE - 31)
#define MVPP2_PE_IP6_EXT_PROTO_UN	(MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_MAC_MC_IP6		(MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP6_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_IP4_ADDR_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_LAST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 26)
#define MVPP2_PE_FIRST_DEFAULT_FLOW	(MVPP2_PRS_TCAM_SRAM_SIZE - 19)
#define MVPP2_PE_EDSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 18)
#define MVPP2_PE_EDSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 17)
#define MVPP2_PE_DSA_TAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 16)
#define MVPP2_PE_DSA_UNTAGGED		(MVPP2_PRS_TCAM_SRAM_SIZE - 15)
#define MVPP2_PE_ETYPE_EDSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 14)
#define MVPP2_PE_ETYPE_EDSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 13)
#define MVPP2_PE_ETYPE_DSA_TAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 12)
#define MVPP2_PE_ETYPE_DSA_UNTAGGED	(MVPP2_PRS_TCAM_SRAM_SIZE - 11)
#define MVPP2_PE_MH_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 10)
#define MVPP2_PE_DSA_DEFAULT		(MVPP2_PRS_TCAM_SRAM_SIZE - 9)
#define MVPP2_PE_IP6_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 8)
#define MVPP2_PE_IP4_PROTO_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_ETH_TYPE_UN		(MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_DBL		(MVPP2_PRS_TCAM_SRAM_SIZE - 5)
#define MVPP2_PE_VLAN_NONE		(MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_ALL		(MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS	(MVPP2_PRS_TCAM_SRAM_SIZE - 1)

/* Sram structure
 * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0).
 */
#define MVPP2_PRS_SRAM_RI_OFFS			0
#define MVPP2_PRS_SRAM_RI_WORD			0
#define MVPP2_PRS_SRAM_RI_CTRL_OFFS		32
#define MVPP2_PRS_SRAM_RI_CTRL_WORD		1
#define MVPP2_PRS_SRAM_RI_CTRL_BITS		32
#define MVPP2_PRS_SRAM_SHIFT_OFFS		64
#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT		72
#define MVPP2_PRS_SRAM_UDF_OFFS			73
#define MVPP2_PRS_SRAM_UDF_BITS			8
#define MVPP2_PRS_SRAM_UDF_MASK			0xff
#define MVPP2_PRS_SRAM_UDF_SIGN_BIT		81
#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS		82
#define MVPP2_PRS_SRAM_UDF_TYPE_MASK		0x7
#define MVPP2_PRS_SRAM_UDF_TYPE_L3		1
#define MVPP2_PRS_SRAM_UDF_TYPE_L4		4
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS	85
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK	0x3
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD		1
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS		87
#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS		2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK		0x3
#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD		0
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD	2
#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD	3
#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS		89
#define MVPP2_PRS_SRAM_AI_OFFS			90
#define MVPP2_PRS_SRAM_AI_CTRL_OFFS		98
#define MVPP2_PRS_SRAM_AI_CTRL_BITS		8
#define MVPP2_PRS_SRAM_AI_MASK			0xff
#define MVPP2_PRS_SRAM_NEXT_LU_OFFS		106
#define MVPP2_PRS_SRAM_NEXT_LU_MASK		0xf
#define MVPP2_PRS_SRAM_LU_DONE_BIT		110
#define MVPP2_PRS_SRAM_LU_GEN_BIT		111

/* Sram result info bits assignment */
#define MVPP2_PRS_RI_MAC_ME_MASK		0x1
#define MVPP2_PRS_RI_DSA_MASK			0x2
#define MVPP2_PRS_RI_VLAN_MASK			(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_VLAN_NONE			0x0
#define MVPP2_PRS_RI_VLAN_SINGLE		BIT(2)
#define MVPP2_PRS_RI_VLAN_DOUBLE		BIT(3)
#define MVPP2_PRS_RI_VLAN_TRIPLE		(BIT(2) | BIT(3))
#define MVPP2_PRS_RI_CPU_CODE_MASK		0x70
#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC		BIT(4)
#define MVPP2_PRS_RI_L2_CAST_MASK		(BIT(9) | BIT(10))
#define MVPP2_PRS_RI_L2_UCAST			0x0
#define MVPP2_PRS_RI_L2_MCAST			BIT(9)
#define MVPP2_PRS_RI_L2_BCAST			BIT(10)
#define MVPP2_PRS_RI_PPPOE_MASK			0x800
#define MVPP2_PRS_RI_L3_PROTO_MASK		(BIT(12) | BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_UN			0x0
#define MVPP2_PRS_RI_L3_IP4			BIT(12)
#define MVPP2_PRS_RI_L3_IP4_OPT			BIT(13)
#define MVPP2_PRS_RI_L3_IP4_OTHER		(BIT(12) | BIT(13))
#define MVPP2_PRS_RI_L3_IP6			BIT(14)
#define MVPP2_PRS_RI_L3_IP6_EXT			(BIT(12) | BIT(14))
#define MVPP2_PRS_RI_L3_ARP			(BIT(13) | BIT(14))
#define MVPP2_PRS_RI_L3_ADDR_MASK		(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_L3_UCAST			0x0
#define MVPP2_PRS_RI_L3_MCAST			BIT(15)
#define MVPP2_PRS_RI_L3_BCAST			(BIT(15) | BIT(16))
#define MVPP2_PRS_RI_IP_FRAG_MASK		0x20000
#define MVPP2_PRS_RI_UDF3_MASK			0x300000
#define MVPP2_PRS_RI_UDF3_RX_SPECIAL		BIT(21)
#define MVPP2_PRS_RI_L4_PROTO_MASK		0x1c00000
#define MVPP2_PRS_RI_L4_TCP			BIT(22)
#define MVPP2_PRS_RI_L4_UDP			BIT(23)
#define MVPP2_PRS_RI_L4_OTHER			(BIT(22) | BIT(23))
#define MVPP2_PRS_RI_UDF7_MASK			0x60000000
#define MVPP2_PRS_RI_UDF7_IP6_LITE		BIT(29)
#define MVPP2_PRS_RI_DROP_MASK			0x80000000

/* Sram additional info bits assignment */
#define MVPP2_PRS_IPV4_DIP_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT		BIT(0)
#define MVPP2_PRS_IPV6_EXT_AI_BIT		BIT(1)
#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT		BIT(2)
#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT	BIT(3)
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT		BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI		0
#define MVPP2_PRS_DBL_VLAN_AI_BIT		BIT(7)

/* DSA/EDSA type */
#define MVPP2_PRS_TAGGED		true
#define MVPP2_PRS_UNTAGGED		false
#define MVPP2_PRS_EDSA			true
#define MVPP2_PRS_DSA			false

/* MAC entries, shadow udf */
enum mvpp2_prs_udf {
	MVPP2_PRS_UDF_MAC_DEF,
	MVPP2_PRS_UDF_MAC_RANGE,
	MVPP2_PRS_UDF_L2_DEF,
	MVPP2_PRS_UDF_L2_DEF_COPY,
	MVPP2_PRS_UDF_L2_USER,
};

/* Lookup ID */
enum mvpp2_prs_lookup {
	MVPP2_PRS_LU_MH,
	MVPP2_PRS_LU_MAC,
	MVPP2_PRS_LU_DSA,
	MVPP2_PRS_LU_VLAN,
	MVPP2_PRS_LU_L2,
	MVPP2_PRS_LU_PPPOE,
	MVPP2_PRS_LU_IP4,
	MVPP2_PRS_LU_IP6,
	MVPP2_PRS_LU_FLOWS,
	MVPP2_PRS_LU_LAST,
};

/* L3 cast enum */
enum mvpp2_prs_l3_cast {
	MVPP2_PRS_L3_UNI_CAST,
	MVPP2_PRS_L3_MULTI_CAST,
	MVPP2_PRS_L3_BROAD_CAST
};

/* Classifier constants */
#define MVPP2_CLS_FLOWS_TBL_SIZE	512
#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS	3
#define MVPP2_CLS_LKP_TBL_SIZE		64

/* BM constants */
#define MVPP2_BM_POOLS_NUM		1
#define MVPP2_BM_LONG_BUF_NUM		16
#define MVPP2_BM_SHORT_BUF_NUM		16
#define MVPP2_BM_POOL_SIZE_MAX		(16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN		128
#define MVPP2_BM_SWF_LONG_POOL(port)	0

/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS	8
#define MVPP2_BM_COOKIE_CPU_OFFS	24
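
/*
 * A minimal sketch of how a cookie is composed from these offsets
 * (illustrative only, not a helper this driver defines; the pool ID
 * lands in bits 8-15 and the CPU ID in bits 24-31):
 *
 *	cookie = ((cpu) << MVPP2_BM_COOKIE_CPU_OFFS) |
 *		 ((pool) << MVPP2_BM_COOKIE_POOL_OFFS);
 */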

/* BM short pool packet size
 * These values assure that for SWF the total number
 * of bytes allocated for each buffer will be 512
 */
#define MVPP2_BM_SHORT_PKT_SIZE		MVPP2_RX_MAX_PKT_SIZE(512)
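
/*
 * Worked example (illustrative): MVPP2_RX_MAX_PKT_SIZE(512) =
 * 512 - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE = 512 - 32 - 0 = 480 bytes,
 * which keeps the total allocation per short buffer at 512 bytes.
 */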

enum mvpp2_bm_type {
	MVPP2_BM_FREE,
	MVPP2_BM_SWF_LONG,
	MVPP2_BM_SWF_SHORT
};

/* Definitions */

/* Shared Packet Processor resources */
struct mvpp2 {
	/* Shared registers' base addresses */
	void __iomem *base;
	void __iomem *lms_base;
	void __iomem *iface_base;
	void __iomem *mdio_base;

	void __iomem *mpcs_base;
	void __iomem *xpcs_base;
	void __iomem *rfu1_base;

	u32 netc_config;

	/* List of pointers to port structures */
	struct mvpp2_port **port_list;

	/* Aggregated TXQs */
	struct mvpp2_tx_queue *aggr_txqs;

	/* BM pools */
	struct mvpp2_bm_pool *bm_pools;

	/* PRS shadow table */
	struct mvpp2_prs_shadow *prs_shadow;
	/* PRS auxiliary table for double vlan entries control */
	bool *prs_double_vlans;

	/* Tclk value */
	u32 tclk;

	/* HW version */
	enum { MVPP21, MVPP22 } hw_version;

	/* Maximum number of RXQs per port */
	unsigned int max_port_rxqs;

	struct mii_dev *bus;

	int probe_done;
	u8 num_ports;
};

struct mvpp2_pcpu_stats {
	u64	rx_packets;
	u64	rx_bytes;
	u64	tx_packets;
	u64	tx_bytes;
};

struct mvpp2_port {
	u8 id;

	/* Index of the port from the "group of ports" complex point
	 * of view
	 */
	int gop_id;

	int irq;

	struct mvpp2 *priv;

	/* Per-port registers' base address */
	void __iomem *base;

	struct mvpp2_rx_queue **rxqs;
	struct mvpp2_tx_queue **txqs;

	int pkt_size;

	u32 pending_cause_rx;

	/* Per-CPU port control */
	struct mvpp2_port_pcpu __percpu *pcpu;

	/* Flags */
	unsigned long flags;

	u16 tx_ring_size;
	u16 rx_ring_size;
	struct mvpp2_pcpu_stats __percpu *stats;

	struct phy_device *phy_dev;
	phy_interface_t phy_interface;
	int phy_node;
	int phyaddr;
#ifdef CONFIG_DM_GPIO
	struct gpio_desc phy_reset_gpio;
	struct gpio_desc phy_tx_disable_gpio;
#endif
	int init;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;

	unsigned int phy_speed;		/* SGMII 1Gbps vs 2.5Gbps */

	struct mvpp2_bm_pool *pool_long;
	struct mvpp2_bm_pool *pool_short;

	/* Index of first port's physical RXQ */
	u8 first_rxq;

	u8 dev_addr[ETH_ALEN];
};

/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVPP2_TXD_L3_OFF_SHIFT		0
#define MVPP2_TXD_IP_HLEN_SHIFT		8
#define MVPP2_TXD_L4_CSUM_FRAG		BIT(13)
#define MVPP2_TXD_L4_CSUM_NOT		BIT(14)
#define MVPP2_TXD_IP_CSUM_DISABLE	BIT(15)
#define MVPP2_TXD_PADDING_DISABLE	BIT(23)
#define MVPP2_TXD_L4_UDP		BIT(24)
#define MVPP2_TXD_L3_IP6		BIT(26)
#define MVPP2_TXD_L_DESC		BIT(28)
#define MVPP2_TXD_F_DESC		BIT(29)

#define MVPP2_RXD_ERR_SUMMARY		BIT(15)
#define MVPP2_RXD_ERR_CODE_MASK		(BIT(13) | BIT(14))
#define MVPP2_RXD_ERR_CRC		0x0
#define MVPP2_RXD_ERR_OVERRUN		BIT(13)
#define MVPP2_RXD_ERR_RESOURCE		(BIT(13) | BIT(14))
#define MVPP2_RXD_BM_POOL_ID_OFFS	16
#define MVPP2_RXD_BM_POOL_ID_MASK	(BIT(16) | BIT(17) | BIT(18))
#define MVPP2_RXD_HWF_SYNC		BIT(21)
#define MVPP2_RXD_L4_CSUM_OK		BIT(22)
#define MVPP2_RXD_IP4_HEADER_ERR	BIT(24)
#define MVPP2_RXD_L4_TCP		BIT(25)
#define MVPP2_RXD_L4_UDP		BIT(26)
#define MVPP2_RXD_L3_IP4		BIT(28)
#define MVPP2_RXD_L3_IP6		BIT(30)
#define MVPP2_RXD_BUF_HDR		BIT(31)

/* HW TX descriptor for PPv2.1 */
struct mvpp21_tx_desc {
	u32 command;		/* Options used by HW for packet transmission.*/
	u8  packet_offset;	/* the offset from the buffer beginning	*/
	u8  phys_txq;		/* destination queue ID			*/
	u16 data_size;		/* data size of transmitted packet in bytes */
	u32 buf_dma_addr;	/* physical addr of transmitted buffer	*/
	u32 buf_cookie;		/* cookie for access to TX buffer in tx path */
	u32 reserved1[3];	/* hw_cmd (for future use, BM, PON, PNC) */
	u32 reserved2;		/* reserved (for future use)		*/
};

/* HW RX descriptor for PPv2.1 */
struct mvpp21_rx_desc {
	u32 status;		/* info about received packet		*/
	u16 reserved1;		/* parser_info (for future use, PnC)	*/
	u16 data_size;		/* size of received packet in bytes	*/
	u32 buf_dma_addr;	/* physical address of the buffer	*/
	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved2;		/* gem_port_id (for future use, PON)	*/
	u16 reserved3;		/* csum_l4 (for future use, PnC)	*/
	u8  reserved4;		/* bm_qset (for future use, BM)		*/
	u8  reserved5;
	u16 reserved6;		/* classify_info (for future use, PnC)	*/
	u32 reserved7;		/* flow_id (for future use, PnC) */
	u32 reserved8;
};

/* HW TX descriptor for PPv2.2 */
struct mvpp22_tx_desc {
	u32 command;
	u8  packet_offset;
	u8  phys_txq;
	u16 data_size;
	u64 reserved1;
	u64 buf_dma_addr_ptp;
	u64 buf_cookie_misc;
};

/* HW RX descriptor for PPv2.2 */
struct mvpp22_rx_desc {
	u32 status;
	u16 reserved1;
	u16 data_size;
	u32 reserved2;
	u32 reserved3;
	u64 buf_dma_addr_key_hash;
	u64 buf_cookie_misc;
};

/* Opaque type used by the driver to manipulate the HW TX and RX
 * descriptors
 */
struct mvpp2_tx_desc {
	union {
		struct mvpp21_tx_desc pp21;
		struct mvpp22_tx_desc pp22;
	};
};

struct mvpp2_rx_desc {
	union {
		struct mvpp21_rx_desc pp21;
		struct mvpp22_rx_desc pp22;
	};
};

/* Per-CPU Tx queue control */
struct mvpp2_txq_pcpu {
	int cpu;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the
	 * descriptor ring
	 */
	int count;

	/* Number of Tx DMA descriptors reserved for each CPU */
	int reserved_num;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;
};

struct mvpp2_tx_queue {
	/* Physical number of this Tx queue */
	u8 id;

	/* Logical number of this Tx queue */
	u8 log_id;

	/* Number of Tx DMA descriptors in the descriptor ring */
	int size;

	/* Number of currently used Tx DMA descriptor in the descriptor ring */
	int count;

	/* Per-CPU control of physical Tx queues */
	struct mvpp2_txq_pcpu __percpu *pcpu;

	u32 done_pkts_coal;

	/* Virtual address of the Tx DMA descriptors array */
	struct mvpp2_tx_desc *descs;

	/* DMA address of the Tx DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last Tx DMA descriptor */
	int last_desc;

	/* Index of the next Tx DMA descriptor to process */
	int next_desc_to_proc;
};

struct mvpp2_rx_queue {
	/* RX queue number, in the range 0-31 for physical RXQs */
	u8 id;

	/* Num of rx descriptors in the rx descriptor ring */
	int size;

	u32 pkts_coal;
	u32 time_coal;

	/* Virtual address of the RX DMA descriptors array */
	struct mvpp2_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_dma;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;

	/* ID of port to which physical RXQ is mapped */
	int port;

	/* Port's logic RXQ number to which physical RXQ is mapped */
	int logic_rxq;
};

union mvpp2_prs_tcam_entry {
	u32 word[MVPP2_PRS_TCAM_WORDS];
	u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
};

union mvpp2_prs_sram_entry {
	u32 word[MVPP2_PRS_SRAM_WORDS];
	u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
};

struct mvpp2_prs_entry {
	u32 index;
	union mvpp2_prs_tcam_entry tcam;
	union mvpp2_prs_sram_entry sram;
};

struct mvpp2_prs_shadow {
	bool valid;
	bool finish;

	/* Lookup ID */
	int lu;

	/* User defined offset */
	int udf;

	/* Result info */
	u32 ri;
	u32 ri_mask;
};

struct mvpp2_cls_flow_entry {
	u32 index;
	u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
};

struct mvpp2_cls_lookup_entry {
	u32 lkpid;
	u32 way;
	u32 data;
};

struct mvpp2_bm_pool {
	/* Pool number in the range 0-7 */
	int id;
	enum mvpp2_bm_type type;

	/* Buffer Pointers Pool External (BPPE) size */
	int size;
	/* Number of buffers for this pool */
	int buf_num;
	/* Pool buffer size */
	int buf_size;
	/* Packet size */
	int pkt_size;

	/* BPPE virtual base address */
	unsigned long *virt_addr;
	/* BPPE DMA base address */
	dma_addr_t dma_addr;

	/* Ports using BM pool */
	u32 port_map;
};

/* Static declarations */

/* Number of RXQs used by single port */
static int rxq_number = MVPP2_DEFAULT_RXQ;
/* Number of TXQs used by single port */
static int txq_number = MVPP2_DEFAULT_TXQ;

static int base_id;

#define MVPP2_DRIVER_NAME "mvpp2"
#define MVPP2_DRIVER_VERSION "1.0"

/*
 * U-Boot internal data, mostly uncached buffers for descriptors and data
 */
struct buffer_location {
	struct mvpp2_tx_desc *aggr_tx_descs;
	struct mvpp2_tx_desc *tx_descs;
	struct mvpp2_rx_desc *rx_descs;
	unsigned long *bm_pool[MVPP2_BM_POOLS_NUM];
	unsigned long *rx_buffer[MVPP2_BM_LONG_BUF_NUM];
	int first_rxq;
};

/*
 * All 4 interfaces use the same global buffer, since only one interface
 * can be enabled at once
 */
static struct buffer_location buffer_loc;

/*
 * Page table entries are set to 1MB, or multiples of 1MB
 * (not < 1MB). The driver uses fewer buffer descriptors, so
 * 1MB of BD space is sufficient.
 */
#define BD_SPACE	(1 << 20)

/* Utility/helper methods */

static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->base + offset);
}

static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->base + offset);
}

static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = dma_addr;
	} else {
		u64 val = (u64)dma_addr;

		tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
	}
}

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = size;
	else
		tx_desc->pp22.data_size = size;
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = command;
	else
		tx_desc->pp22.command = command;
}

static void mvpp2_txdesc_offset_set(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc,
				    unsigned int offset)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.packet_offset = offset;
	else
		tx_desc->pp22.packet_offset = offset;
}

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_dma_addr;
	else
		return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0);
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.buf_cookie;
	else
		return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0);
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.data_size;
	else
		return rx_desc->pp22.data_size;
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return rx_desc->pp21.status;
	else
		return rx_desc->pp22.status;
}

static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

/* Get number of physical egress port */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get number of physical TXQ */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}
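
/*
 * Example (illustrative): with MVPP2_MAX_TCONT = 16 and MVPP2_MAX_TXQ = 8,
 * port 0 / txq 0 maps to physical TXQ (16 + 0) * 8 + 0 = 128, i.e. the
 * Ethernet ports' TXQs start right after the queues reserved for T-CONTs.
 */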

/* Parser configuration routines */

/* Update parser tcam and sram hw entries */
static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
{
	int i;

	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
		return -EINVAL;

	/* Clear entry invalidation bit */
	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;

	/* Write tcam index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);

	/* Write sram index - indirect access */
	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);

	return 0;
}
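
/*
 * A minimal usage sketch (illustrative only, not a verbatim call
 * sequence from this driver): build a software entry, pick a TCAM
 * index and flush it to hardware.
 *
 *	struct mvpp2_prs_entry pe;
 *
 *	memset(&pe, 0, sizeof(pe));
 *	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
 *	pe.index = MVPP2_PE_MH_DEFAULT;
 *	mvpp2_prs_hw_write(priv, &pe);
 */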
1428 
1429 /* Read tcam entry from hw */
mvpp2_prs_hw_read(struct mvpp2 * priv,struct mvpp2_prs_entry * pe)1430 static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
1431 {
1432 	int i;
1433 
1434 	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
1435 		return -EINVAL;
1436 
1437 	/* Write tcam index - indirect access */
1438 	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
1439 
1440 	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
1441 			      MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
1442 	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
1443 		return MVPP2_PRS_TCAM_ENTRY_INVALID;
1444 
1445 	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
1446 		pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
1447 
1448 	/* Write sram index - indirect access */
1449 	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
1450 	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
1451 		pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
1452 
1453 	return 0;
1454 }
1455 
1456 /* Invalidate tcam hw entry */
mvpp2_prs_hw_inv(struct mvpp2 * priv,int index)1457 static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
1458 {
1459 	/* Write index - indirect access */
1460 	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
1461 	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
1462 		    MVPP2_PRS_TCAM_INV_MASK);
1463 }
1464 
1465 /* Enable shadow table entry and set its lookup ID */
mvpp2_prs_shadow_set(struct mvpp2 * priv,int index,int lu)1466 static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
1467 {
1468 	priv->prs_shadow[index].valid = true;
1469 	priv->prs_shadow[index].lu = lu;
1470 }
1471 
1472 /* Update ri fields in shadow table entry */
mvpp2_prs_shadow_ri_set(struct mvpp2 * priv,int index,unsigned int ri,unsigned int ri_mask)1473 static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
1474 				    unsigned int ri, unsigned int ri_mask)
1475 {
1476 	priv->prs_shadow[index].ri_mask = ri_mask;
1477 	priv->prs_shadow[index].ri = ri;
1478 }
1479 
1480 /* Update lookup field in tcam sw entry */
mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry * pe,unsigned int lu)1481 static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
1482 {
1483 	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
1484 
1485 	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
1486 	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
1487 }
1488 
1489 /* Update mask for single port in tcam sw entry */
mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry * pe,unsigned int port,bool add)1490 static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
1491 				    unsigned int port, bool add)
1492 {
1493 	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1494 
1495 	if (add)
1496 		pe->tcam.byte[enable_off] &= ~(1 << port);
1497 	else
1498 		pe->tcam.byte[enable_off] |= 1 << port;
1499 }
1500 
1501 /* Update port map in tcam sw entry */
mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry * pe,unsigned int ports)1502 static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
1503 					unsigned int ports)
1504 {
1505 	unsigned char port_mask = MVPP2_PRS_PORT_MASK;
1506 	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1507 
1508 	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
1509 	pe->tcam.byte[enable_off] &= ~port_mask;
1510 	pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
1511 }
1512 
1513 /* Obtain port map from tcam sw entry */
mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry * pe)1514 static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
1515 {
1516 	int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
1517 
1518 	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
1519 }
1520 
1521 /* Set byte of data and its enable bits in tcam sw entry */
mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry * pe,unsigned int offs,unsigned char byte,unsigned char enable)1522 static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
1523 					 unsigned int offs, unsigned char byte,
1524 					 unsigned char enable)
1525 {
1526 	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
1527 	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
1528 }
1529 
1530 /* Get byte of data and its enable bits from tcam sw entry */
1531 static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
1532 					 unsigned int offs, unsigned char *byte,
1533 					 unsigned char *enable)
1534 {
1535 	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
1536 	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
1537 }
1538 
1539 /* Set ethertype in tcam sw entry */
1540 static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
1541 				  unsigned short ethertype)
1542 {
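	/* Ethertype is matched big-endian: high byte first, as on the wire */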
1543 	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
1544 	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
1545 }
1546 
1547 /* Set bits in sram sw entry */
1548 static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
1549 				    int val)
1550 {
1551 	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
1552 }
1553 
1554 /* Clear bits in sram sw entry */
1555 static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
1556 				      int val)
1557 {
1558 	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
1559 }
1560 
1561 /* Update ri bits in sram sw entry */
1562 static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
1563 				     unsigned int bits, unsigned int mask)
1564 {
1565 	unsigned int i;
1566 
1567 	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
1568 		int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
1569 
1570 		if (!(mask & BIT(i)))
1571 			continue;
1572 
1573 		if (bits & BIT(i))
1574 			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
1575 		else
1576 			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
1577 
1578 		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
1579 	}
1580 }
1581 
1582 /* Update ai bits in sram sw entry */
1583 static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
1584 				     unsigned int bits, unsigned int mask)
1585 {
1586 	unsigned int i;
1587 	int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
1588 
1589 	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
1590 
1591 		if (!(mask & BIT(i)))
1592 			continue;
1593 
1594 		if (bits & BIT(i))
1595 			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
1596 		else
1597 			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
1598 
1599 		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
1600 	}
1601 }
1602 
1603 /* Read ai bits from sram sw entry */
1604 static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
1605 {
1606 	u8 bits;
1607 	int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
1608 	int ai_en_off = ai_off + 1;
1609 	int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
1610 
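	/* The AI field may straddle a byte boundary, so reassemble it from
	 * the high bits of one byte and the low bits of the next.
	 */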
1611 	bits = (pe->sram.byte[ai_off] >> ai_shift) |
1612 	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));
1613 
1614 	return bits;
1615 }
1616 
1617 /* In sram sw entry set lookup ID field of the tcam key to be used in the next
1618  * lookup iteration
1619  */
1620 static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
1621 				       unsigned int lu)
1622 {
1623 	int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
1624 
1625 	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
1626 				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
1627 	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
1628 }
1629 
1630 /* In the sram sw entry set sign and value of the next lookup offset
1631  * and the offset value generated to the classifier
1632  */
1633 static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
1634 				     unsigned int op)
1635 {
1636 	/* Set sign */
1637 	if (shift < 0) {
1638 		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1639 		shift = 0 - shift;
1640 	} else {
1641 		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
1642 	}
1643 
1644 	/* Set value */
1645 	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
1646 							   (unsigned char)shift;
1647 
1648 	/* Reset and set operation */
1649 	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
1650 				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
1651 	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
1652 
1653 	/* Set base offset as current */
1654 	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1655 }
1656 
1657 /* In the sram sw entry set sign and value of the user defined offset
1658  * generated to the classifier
1659  */
1660 static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
1661 				      unsigned int type, int offset,
1662 				      unsigned int op)
1663 {
1664 	/* Set sign */
1665 	if (offset < 0) {
1666 		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1667 		offset = 0 - offset;
1668 	} else {
1669 		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
1670 	}
1671 
1672 	/* Set value */
1673 	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
1674 				  MVPP2_PRS_SRAM_UDF_MASK);
1675 	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
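	/* The UDF offset field straddles a byte boundary and the bit helpers
	 * above only touch the first byte, so fix up the spill-over bits in
	 * the following byte by hand.
	 */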
1676 	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1677 					MVPP2_PRS_SRAM_UDF_BITS)] &=
1678 	      ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1679 	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
1680 					MVPP2_PRS_SRAM_UDF_BITS)] |=
1681 				(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
1682 
1683 	/* Set offset type */
1684 	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
1685 				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
1686 	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
1687 
1688 	/* Set offset operation */
1689 	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
1690 				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
1691 	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
1692 
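	/* The op-select field likewise spills into the next byte; fix up the
	 * overflow bits by hand.
	 */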
1693 	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1694 					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
1695 					     ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
1696 				    (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1697 
1698 	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
1699 					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
1700 			     (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
1701 
1702 	/* Set base offset as current */
1703 	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
1704 }
1705 
1706 /* Find parser flow entry */
1707 static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
1708 {
1709 	struct mvpp2_prs_entry *pe;
1710 	int tid;
1711 
1712 	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
1713 	if (!pe)
1714 		return NULL;
1715 	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
1716 
1717 	/* Go through all entries with MVPP2_PRS_LU_FLOWS */
1718 	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
1719 		u8 bits;
1720 
1721 		if (!priv->prs_shadow[tid].valid ||
1722 		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
1723 			continue;
1724 
1725 		pe->index = tid;
1726 		mvpp2_prs_hw_read(priv, pe);
1727 		bits = mvpp2_prs_sram_ai_get(pe);
1728 
1729 		/* Sram stores the classification lookup ID in AI bits [5:0] */
1730 		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
1731 			return pe;
1732 	}
1733 	kfree(pe);
1734 
1735 	return NULL;
1736 }
1737 
1738 /* Return first free tcam index, seeking from start to end */
1739 static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
1740 				     unsigned char end)
1741 {
1742 	int tid;
1743 
1744 	if (start > end)
1745 		swap(start, end);
1746 
1747 	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
1748 		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
1749 
1750 	for (tid = start; tid <= end; tid++) {
1751 		if (!priv->prs_shadow[tid].valid)
1752 			return tid;
1753 	}
1754 
1755 	return -EINVAL;
1756 }
1757 
1758 /* Enable/disable dropping all mac da's */
1759 static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
1760 {
1761 	struct mvpp2_prs_entry pe;
1762 
1763 	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
1764 		/* Entry exists - update port only */
1765 		pe.index = MVPP2_PE_DROP_ALL;
1766 		mvpp2_prs_hw_read(priv, &pe);
1767 	} else {
1768 		/* Entry doesn't exist - create new */
1769 		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1770 		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1771 		pe.index = MVPP2_PE_DROP_ALL;
1772 
1773 		/* Non-promiscuous mode for all ports - DROP unknown packets */
1774 		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1775 					 MVPP2_PRS_RI_DROP_MASK);
1776 
1777 		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1778 		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1779 
1780 		/* Update shadow table */
1781 		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1782 
1783 		/* Mask all ports */
1784 		mvpp2_prs_tcam_port_map_set(&pe, 0);
1785 	}
1786 
1787 	/* Update port mask */
1788 	mvpp2_prs_tcam_port_set(&pe, port, add);
1789 
1790 	mvpp2_prs_hw_write(priv, &pe);
1791 }
1792 
1793 /* Set port to promiscuous mode */
1794 static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
1795 {
1796 	struct mvpp2_prs_entry pe;
1797 
1798 	/* Promiscuous mode - Accept unknown packets */
1799 
1800 	if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
1801 		/* Entry exists - update port only */
1802 		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1803 		mvpp2_prs_hw_read(priv, &pe);
1804 	} else {
1805 		/* Entry doesn't exist - create new */
1806 		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1807 		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1808 		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
1809 
1810 		/* Continue - set next lookup */
1811 		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1812 
1813 		/* Set result info bits */
1814 		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
1815 					 MVPP2_PRS_RI_L2_CAST_MASK);
1816 
1817 		/* Shift to ethertype */
1818 		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1819 					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1820 
1821 		/* Mask all ports */
1822 		mvpp2_prs_tcam_port_map_set(&pe, 0);
1823 
1824 		/* Update shadow table */
1825 		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1826 	}
1827 
1828 	/* Update port mask */
1829 	mvpp2_prs_tcam_port_set(&pe, port, add);
1830 
1831 	mvpp2_prs_hw_write(priv, &pe);
1832 }
1833 
1834 /* Accept multicast */
1835 static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
1836 				    bool add)
1837 {
1838 	struct mvpp2_prs_entry pe;
1839 	unsigned char da_mc;
1840 
1841 	/* Ethernet multicast address first byte is
1842 	 * 0x01 for IPv4 and 0x33 for IPv6
1843 	 */
1844 	da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;
1845 
1846 	if (priv->prs_shadow[index].valid) {
1847 		/* Entry exists - update port only */
1848 		pe.index = index;
1849 		mvpp2_prs_hw_read(priv, &pe);
1850 	} else {
1851 		/* Entry doesn't exist - create new */
1852 		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1853 		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1854 		pe.index = index;
1855 
1856 		/* Continue - set next lookup */
1857 		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
1858 
1859 		/* Set result info bits */
1860 		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
1861 					 MVPP2_PRS_RI_L2_CAST_MASK);
1862 
1863 		/* Update tcam entry data first byte */
1864 		mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);
1865 
1866 		/* Shift to ethertype */
1867 		mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
1868 					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1869 
1870 		/* Mask all ports */
1871 		mvpp2_prs_tcam_port_map_set(&pe, 0);
1872 
1873 		/* Update shadow table */
1874 		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1875 	}
1876 
1877 	/* Update port mask */
1878 	mvpp2_prs_tcam_port_set(&pe, port, add);
1879 
1880 	mvpp2_prs_hw_write(priv, &pe);
1881 }
1882 
1883 /* Parser per-port initialization */
1884 static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
1885 				   int lu_max, int offset)
1886 {
1887 	u32 val;
1888 
1889 	/* Set lookup ID */
1890 	val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
1891 	val &= ~MVPP2_PRS_PORT_LU_MASK(port);
1892 	val |=  MVPP2_PRS_PORT_LU_VAL(port, lu_first);
1893 	mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
1894 
1895 	/* Set maximum number of loops for packet received from port */
1896 	val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
1897 	val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
1898 	val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
1899 	mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
1900 
1901 	/* Set initial offset for packet header extraction for the first
1902 	 * searching loop
1903 	 */
1904 	val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
1905 	val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
1906 	val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
1907 	mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
1908 }
1909 
1910 /* Default flow entries initialization for all ports */
1911 static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
1912 {
1913 	struct mvpp2_prs_entry pe;
1914 	int port;
1915 
1916 	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
1917 		memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1918 		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1919 		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
1920 
1921 		/* Mask all ports */
1922 		mvpp2_prs_tcam_port_map_set(&pe, 0);
1923 
1924 		/* Set flow ID */
1925 		mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
1926 		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
1927 
1928 		/* Update shadow table and hw entry */
1929 		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
1930 		mvpp2_prs_hw_write(priv, &pe);
1931 	}
1932 }
1933 
1934 /* Set default entry for Marvell Header field */
1935 static void mvpp2_prs_mh_init(struct mvpp2 *priv)
1936 {
1937 	struct mvpp2_prs_entry pe;
1938 
1939 	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1940 
1941 	pe.index = MVPP2_PE_MH_DEFAULT;
1942 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
1943 	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
1944 				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
1945 	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
1946 
1947 	/* Unmask all ports */
1948 	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1949 
1950 	/* Update shadow table and hw entry */
1951 	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
1952 	mvpp2_prs_hw_write(priv, &pe);
1953 }
1954 
1955 /* Set default entries (placeholders) for promiscuous, non-promiscuous and
1956  * multicast MAC addresses
1957  */
1958 static void mvpp2_prs_mac_init(struct mvpp2 *priv)
1959 {
1960 	struct mvpp2_prs_entry pe;
1961 
1962 	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
1963 
1964 	/* Non-promiscuous mode for all ports - DROP unknown packets */
1965 	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
1966 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
1967 
1968 	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
1969 				 MVPP2_PRS_RI_DROP_MASK);
1970 	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
1971 	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
1972 
1973 	/* Unmask all ports */
1974 	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
1975 
1976 	/* Update shadow table and hw entry */
1977 	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
1978 	mvpp2_prs_hw_write(priv, &pe);
1979 
1980 	/* placeholders only - no ports */
1981 	mvpp2_prs_mac_drop_all_set(priv, 0, false);
1982 	mvpp2_prs_mac_promisc_set(priv, 0, false);
1983 	mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
1984 	mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
1985 }
1986 
1987 /* Match basic ethertypes */
1988 static int mvpp2_prs_etype_init(struct mvpp2 *priv)
1989 {
1990 	struct mvpp2_prs_entry pe;
1991 	int tid;
1992 
1993 	/* Ethertype: PPPoE */
1994 	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
1995 					MVPP2_PE_LAST_FREE_TID);
1996 	if (tid < 0)
1997 		return tid;
1998 
1999 	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2000 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2001 	pe.index = tid;
2002 
2003 	mvpp2_prs_match_etype(&pe, 0, PROT_PPP_SES);
2004 
2005 	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
2006 				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2007 	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
2008 	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
2009 				 MVPP2_PRS_RI_PPPOE_MASK);
2010 
2011 	/* Update shadow table and hw entry */
2012 	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2013 	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2014 	priv->prs_shadow[pe.index].finish = false;
2015 	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
2016 				MVPP2_PRS_RI_PPPOE_MASK);
2017 	mvpp2_prs_hw_write(priv, &pe);
2018 
2019 	/* Ethertype: ARP */
2020 	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2021 					MVPP2_PE_LAST_FREE_TID);
2022 	if (tid < 0)
2023 		return tid;
2024 
2025 	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2026 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2027 	pe.index = tid;
2028 
2029 	mvpp2_prs_match_etype(&pe, 0, PROT_ARP);
2030 
2031 	/* Generate flow in the next iteration */
2032 	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2033 	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2034 	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
2035 				 MVPP2_PRS_RI_L3_PROTO_MASK);
2036 	/* Set L3 offset */
2037 	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2038 				  MVPP2_ETH_TYPE_LEN,
2039 				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2040 
2041 	/* Update shadow table and hw entry */
2042 	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2043 	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2044 	priv->prs_shadow[pe.index].finish = true;
2045 	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
2046 				MVPP2_PRS_RI_L3_PROTO_MASK);
2047 	mvpp2_prs_hw_write(priv, &pe);
2048 
2049 	/* Ethertype: LBTD */
2050 	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2051 					MVPP2_PE_LAST_FREE_TID);
2052 	if (tid < 0)
2053 		return tid;
2054 
2055 	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2056 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2057 	pe.index = tid;
2058 
2059 	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
2060 
2061 	/* Generate flow in the next iteration */
2062 	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2063 	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2064 	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2065 				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2066 				 MVPP2_PRS_RI_CPU_CODE_MASK |
2067 				 MVPP2_PRS_RI_UDF3_MASK);
2068 	/* Set L3 offset */
2069 	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2070 				  MVPP2_ETH_TYPE_LEN,
2071 				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2072 
2073 	/* Update shadow table and hw entry */
2074 	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2075 	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2076 	priv->prs_shadow[pe.index].finish = true;
2077 	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
2078 				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
2079 				MVPP2_PRS_RI_CPU_CODE_MASK |
2080 				MVPP2_PRS_RI_UDF3_MASK);
2081 	mvpp2_prs_hw_write(priv, &pe);
2082 
2083 	/* Ethertype: IPv4 without options */
2084 	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2085 					MVPP2_PE_LAST_FREE_TID);
2086 	if (tid < 0)
2087 		return tid;
2088 
2089 	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2090 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2091 	pe.index = tid;
2092 
2093 	mvpp2_prs_match_etype(&pe, 0, PROT_IP);
2094 	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2095 				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
2096 				     MVPP2_PRS_IPV4_HEAD_MASK |
2097 				     MVPP2_PRS_IPV4_IHL_MASK);
2098 
2099 	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
2100 	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
2101 				 MVPP2_PRS_RI_L3_PROTO_MASK);
2102 	/* Skip eth_type + 4 bytes of IP header */
2103 	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
2104 				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2105 	/* Set L3 offset */
2106 	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2107 				  MVPP2_ETH_TYPE_LEN,
2108 				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2109 
2110 	/* Update shadow table and hw entry */
2111 	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2112 	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2113 	priv->prs_shadow[pe.index].finish = false;
2114 	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
2115 				MVPP2_PRS_RI_L3_PROTO_MASK);
2116 	mvpp2_prs_hw_write(priv, &pe);
2117 
2118 	/* Ethertype: IPv4 with options */
2119 	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2120 					MVPP2_PE_LAST_FREE_TID);
2121 	if (tid < 0)
2122 		return tid;
2123 
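	/* Deliberately reuse pe from the previous "IPv4 without options"
	 * entry; only the IHL match and the result info bits change below.
	 */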
2124 	pe.index = tid;
2125 
2126 	/* Clear tcam data before updating */
2127 	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
2128 	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
2129 
2130 	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
2131 				     MVPP2_PRS_IPV4_HEAD,
2132 				     MVPP2_PRS_IPV4_HEAD_MASK);
2133 
2134 	/* Clear ri before updating */
2135 	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
2136 	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
2137 	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
2138 				 MVPP2_PRS_RI_L3_PROTO_MASK);
2139 
2140 	/* Update shadow table and hw entry */
2141 	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2142 	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2143 	priv->prs_shadow[pe.index].finish = false;
2144 	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
2145 				MVPP2_PRS_RI_L3_PROTO_MASK);
2146 	mvpp2_prs_hw_write(priv, &pe);
2147 
2148 	/* Ethertype: IPv6 without options */
2149 	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2150 					MVPP2_PE_LAST_FREE_TID);
2151 	if (tid < 0)
2152 		return tid;
2153 
2154 	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2155 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2156 	pe.index = tid;
2157 
2158 	mvpp2_prs_match_etype(&pe, 0, PROT_IPV6);
2159 
2160 	/* Skip DIP of IPV6 header */
2161 	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
2162 				 MVPP2_MAX_L3_ADDR_SIZE,
2163 				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2164 	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
2165 	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
2166 				 MVPP2_PRS_RI_L3_PROTO_MASK);
2167 	/* Set L3 offset */
2168 	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2169 				  MVPP2_ETH_TYPE_LEN,
2170 				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2171 
2172 	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2173 	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2174 	priv->prs_shadow[pe.index].finish = false;
2175 	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
2176 				MVPP2_PRS_RI_L3_PROTO_MASK);
2177 	mvpp2_prs_hw_write(priv, &pe);
2178 
2179 	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
2180 	memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
2181 	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
2182 	pe.index = MVPP2_PE_ETH_TYPE_UN;
2183 
2184 	/* Unmask all ports */
2185 	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
2186 
2187 	/* Generate flow in the next iteration */
2188 	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
2189 	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
2190 	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
2191 				 MVPP2_PRS_RI_L3_PROTO_MASK);
2192 	/* Set L3 offset even if the L3 protocol is unknown */
2193 	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
2194 				  MVPP2_ETH_TYPE_LEN,
2195 				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
2196 
2197 	/* Update shadow table and hw entry */
2198 	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
2199 	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
2200 	priv->prs_shadow[pe.index].finish = true;
2201 	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
2202 				MVPP2_PRS_RI_L3_PROTO_MASK);
2203 	mvpp2_prs_hw_write(priv, &pe);
2204 
2205 	return 0;
2206 }
2207 
2208 /* Parser default initialization */
2209 static int mvpp2_prs_default_init(struct udevice *dev,
2210 				  struct mvpp2 *priv)
2211 {
2212 	int err, index, i;
2213 
2214 	/* Enable tcam table */
2215 	mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
2216 
2217 	/* Clear all tcam and sram entries */
2218 	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
2219 		mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
2220 		for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
2221 			mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
2222 
2223 		mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
2224 		for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
2225 			mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
2226 	}
2227 
2228 	/* Invalidate all tcam entries */
2229 	for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
2230 		mvpp2_prs_hw_inv(priv, index);
2231 
2232 	priv->prs_shadow = devm_kcalloc(dev, MVPP2_PRS_TCAM_SRAM_SIZE,
2233 					sizeof(struct mvpp2_prs_shadow),
2234 					GFP_KERNEL);
2235 	if (!priv->prs_shadow)
2236 		return -ENOMEM;
2237 
2238 	/* Always start from lookup = 0 */
2239 	for (index = 0; index < MVPP2_MAX_PORTS; index++)
2240 		mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
2241 				       MVPP2_PRS_PORT_LU_MAX, 0);
2242 
2243 	mvpp2_prs_def_flow_init(priv);
2244 
2245 	mvpp2_prs_mh_init(priv);
2246 
2247 	mvpp2_prs_mac_init(priv);
2248 
2249 	err = mvpp2_prs_etype_init(priv);
2250 	if (err)
2251 		return err;
2252 
2253 	return 0;
2254 }
2255 
2256 /* Compare MAC DA with tcam entry data */
2257 static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
2258 				       const u8 *da, unsigned char *mask)
2259 {
2260 	unsigned char tcam_byte, tcam_mask;
2261 	int index;
2262 
2263 	for (index = 0; index < ETH_ALEN; index++) {
2264 		mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
2265 		if (tcam_mask != mask[index])
2266 			return false;
2267 
2268 		if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
2269 			return false;
2270 	}
2271 
2272 	return true;
2273 }
2274 
2275 /* Find tcam entry with matched pair <MAC DA, port> */
2276 static struct mvpp2_prs_entry *
2277 mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
2278 			    unsigned char *mask, int udf_type)
2279 {
2280 	struct mvpp2_prs_entry *pe;
2281 	int tid;
2282 
2283 	pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2284 	if (!pe)
2285 		return NULL;
2286 	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
2287 
2288 	/* Go through all entries with MVPP2_PRS_LU_MAC */
2289 	for (tid = MVPP2_PE_FIRST_FREE_TID;
2290 	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
2291 		unsigned int entry_pmap;
2292 
2293 		if (!priv->prs_shadow[tid].valid ||
2294 		    (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
2295 		    (priv->prs_shadow[tid].udf != udf_type))
2296 			continue;
2297 
2298 		pe->index = tid;
2299 		mvpp2_prs_hw_read(priv, pe);
2300 		entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
2301 
2302 		if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
2303 		    entry_pmap == pmap)
2304 			return pe;
2305 	}
2306 	kfree(pe);
2307 
2308 	return NULL;
2309 }
2310 
2311 /* Update parser's mac da entry */
2312 static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
2313 				   const u8 *da, bool add)
2314 {
2315 	struct mvpp2_prs_entry *pe;
2316 	unsigned int pmap, len, ri;
2317 	unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
2318 	int tid;
2319 
2320 	/* Scan TCAM and see if entry with this <MAC DA, port> already exists */
2321 	pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
2322 					 MVPP2_PRS_UDF_MAC_DEF);
2323 
2324 	/* No such entry */
2325 	if (!pe) {
2326 		if (!add)
2327 			return 0;
2328 
2329 		/* Create new TCAM entry */
2330 		/* Find the first range MAC entry */
2331 		for (tid = MVPP2_PE_FIRST_FREE_TID;
2332 		     tid <= MVPP2_PE_LAST_FREE_TID; tid++)
2333 			if (priv->prs_shadow[tid].valid &&
2334 			    (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
2335 			    (priv->prs_shadow[tid].udf ==
2336 						       MVPP2_PRS_UDF_MAC_RANGE))
2337 				break;
2338 
2339 		/* Find the first free TID below the first range entry */
2340 		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
2341 						tid - 1);
2342 		if (tid < 0)
2343 			return tid;
2344 
2345 		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2346 		if (!pe)
2347 			return -ENOMEM;
2348 		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
2349 		pe->index = tid;
2350 
2351 		/* Mask all ports */
2352 		mvpp2_prs_tcam_port_map_set(pe, 0);
2353 	}
2354 
2355 	/* Update port mask */
2356 	mvpp2_prs_tcam_port_set(pe, port, add);
2357 
2358 	/* Invalidate the entry if no ports are left enabled */
2359 	pmap = mvpp2_prs_tcam_port_map_get(pe);
2360 	if (pmap == 0) {
2361 		if (add) {
2362 			kfree(pe);
2363 			return -EINVAL;
2364 		}
2365 		mvpp2_prs_hw_inv(priv, pe->index);
2366 		priv->prs_shadow[pe->index].valid = false;
2367 		kfree(pe);
2368 		return 0;
2369 	}
2370 
2371 	/* Continue - set next lookup */
2372 	mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
2373 
2374 	/* Set match on DA */
2375 	len = ETH_ALEN;
2376 	while (len--)
2377 		mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
2378 
2379 	/* Set result info bits */
2380 	ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
2381 
2382 	mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
2383 				 MVPP2_PRS_RI_MAC_ME_MASK);
2384 	mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
2385 				MVPP2_PRS_RI_MAC_ME_MASK);
2386 
2387 	/* Shift to ethertype */
2388 	mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
2389 				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
2390 
2391 	/* Update shadow table and hw entry */
2392 	priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
2393 	mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
2394 	mvpp2_prs_hw_write(priv, pe);
2395 
2396 	kfree(pe);
2397 
2398 	return 0;
2399 }
2400 
2401 static int mvpp2_prs_update_mac_da(struct mvpp2_port *port, const u8 *da)
2402 {
2403 	int err;
2404 
2405 	/* Remove old parser entry */
2406 	err = mvpp2_prs_mac_da_accept(port->priv, port->id, port->dev_addr,
2407 				      false);
2408 	if (err)
2409 		return err;
2410 
2411 	/* Add new parser entry */
2412 	err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
2413 	if (err)
2414 		return err;
2415 
2416 	/* Set addr in the device */
2417 	memcpy(port->dev_addr, da, ETH_ALEN);
2418 
2419 	return 0;
2420 }
2421 
2422 /* Set prs flow for the port */
2423 static int mvpp2_prs_def_flow(struct mvpp2_port *port)
2424 {
2425 	struct mvpp2_prs_entry *pe;
2426 	int tid;
2427 
2428 	pe = mvpp2_prs_flow_find(port->priv, port->id);
2429 
2430 	/* No such entry exists */
2431 	if (!pe) {
2432 		/* Go through all entries from last to first */
2433 		tid = mvpp2_prs_tcam_first_free(port->priv,
2434 						MVPP2_PE_LAST_FREE_TID,
2435 						MVPP2_PE_FIRST_FREE_TID);
2436 		if (tid < 0)
2437 			return tid;
2438 
2439 		pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2440 		if (!pe)
2441 			return -ENOMEM;
2442 
2443 		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
2444 		pe->index = tid;
2445 
2446 		/* Set flow ID */
2447 		mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
2448 		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
2449 
2450 		/* Update shadow table */
2451 		mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
2452 	}
2453 
2454 	mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
2455 	mvpp2_prs_hw_write(port->priv, pe);
2456 	kfree(pe);
2457 
2458 	return 0;
2459 }
2460 
2461 /* Classifier configuration routines */
2462 
2463 /* Update classification flow table registers */
2464 static void mvpp2_cls_flow_write(struct mvpp2 *priv,
2465 				 struct mvpp2_cls_flow_entry *fe)
2466 {
2467 	mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
2468 	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG,  fe->data[0]);
2469 	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG,  fe->data[1]);
2470 	mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG,  fe->data[2]);
2471 }
2472 
2473 /* Update classification lookup table register */
2474 static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
2475 				   struct mvpp2_cls_lookup_entry *le)
2476 {
2477 	u32 val;
2478 
2479 	val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
2480 	mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
2481 	mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
2482 }
2483 
2484 /* Classifier default initialization */
2485 static void mvpp2_cls_init(struct mvpp2 *priv)
2486 {
2487 	struct mvpp2_cls_lookup_entry le;
2488 	struct mvpp2_cls_flow_entry fe;
2489 	int index;
2490 
2491 	/* Enable classifier */
2492 	mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
2493 
2494 	/* Clear classifier flow table */
2495 	memset(&fe.data, 0, sizeof(fe.data));
2496 	for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
2497 		fe.index = index;
2498 		mvpp2_cls_flow_write(priv, &fe);
2499 	}
2500 
2501 	/* Clear classifier lookup table */
2502 	le.data = 0;
2503 	for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
2504 		le.lkpid = index;
2505 		le.way = 0;
2506 		mvpp2_cls_lookup_write(priv, &le);
2507 
2508 		le.way = 1;
2509 		mvpp2_cls_lookup_write(priv, &le);
2510 	}
2511 }
2512 
2513 static void mvpp2_cls_port_config(struct mvpp2_port *port)
2514 {
2515 	struct mvpp2_cls_lookup_entry le;
2516 	u32 val;
2517 
2518 	/* Set way for the port */
2519 	val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
2520 	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
2521 	mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
2522 
2523 	/* Pick the entry to be accessed in lookup ID decoding table
2524 	 * according to the way and lkpid.
2525 	 */
2526 	le.lkpid = port->id;
2527 	le.way = 0;
2528 	le.data = 0;
2529 
2530 	/* Set initial CPU queue for receiving packets */
2531 	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
2532 	le.data |= port->first_rxq;
2533 
2534 	/* Disable classification engines */
2535 	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
2536 
2537 	/* Update lookup ID table entry */
2538 	mvpp2_cls_lookup_write(port->priv, &le);
2539 }
2540 
2541 /* Set CPU queue number for oversize packets */
2542 static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
2543 {
2544 	u32 val;
2545 
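	/* The oversize RX queue number is split across two registers: the
	 * low bits here, the remaining high bits in the SWFWD P2HQ register.
	 */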
2546 	mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
2547 		    port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
2548 
2549 	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
2550 		    (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
2551 
2552 	val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
2553 	val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
2554 	mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
2555 }
2556 
2557 /* Buffer Manager configuration routines */
2558 
2559 /* Create pool */
2560 static int mvpp2_bm_pool_create(struct udevice *dev,
2561 				struct mvpp2 *priv,
2562 				struct mvpp2_bm_pool *bm_pool, int size)
2563 {
2564 	u32 val;
2565 
2566 	/* Number of buffer pointers must be a multiple of 16, as per
2567 	 * hardware constraints
2568 	 */
2569 	if (!IS_ALIGNED(size, 16))
2570 		return -EINVAL;
2571 
2572 	bm_pool->virt_addr = buffer_loc.bm_pool[bm_pool->id];
2573 	bm_pool->dma_addr = (dma_addr_t)buffer_loc.bm_pool[bm_pool->id];
2574 	if (!bm_pool->virt_addr)
2575 		return -ENOMEM;
2576 
2577 	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
2578 			MVPP2_BM_POOL_PTR_ALIGN)) {
2579 		dev_err(dev, "BM pool %d is not %d bytes aligned\n",
2580 			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
2581 		return -ENOMEM;
2582 	}
2583 
2584 	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
2585 		    lower_32_bits(bm_pool->dma_addr));
2586 	if (priv->hw_version == MVPP22)
2587 		mvpp2_write(priv, MVPP22_BM_POOL_BASE_HIGH_REG,
2588 			    (upper_32_bits(bm_pool->dma_addr) &
2589 			    MVPP22_BM_POOL_BASE_HIGH_MASK));
2590 	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
2591 
2592 	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
2593 	val |= MVPP2_BM_START_MASK;
2594 	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
2595 
2596 	bm_pool->type = MVPP2_BM_FREE;
2597 	bm_pool->size = size;
2598 	bm_pool->pkt_size = 0;
2599 	bm_pool->buf_num = 0;
2600 
2601 	return 0;
2602 }
2603 
2604 /* Set pool buffer size */
2605 static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
2606 				      struct mvpp2_bm_pool *bm_pool,
2607 				      int buf_size)
2608 {
2609 	u32 val;
2610 
2611 	bm_pool->buf_size = buf_size;
2612 
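	/* The size field starts at bit MVPP2_POOL_BUF_SIZE_OFFSET, i.e. the
	 * pool buffer size is programmed with 32-byte granularity.
	 */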
2613 	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
2614 	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
2615 }
2616 
2617 /* Free all buffers from the pool */
2618 static void mvpp2_bm_bufs_free(struct udevice *dev, struct mvpp2 *priv,
2619 			       struct mvpp2_bm_pool *bm_pool)
2620 {
2621 	int i;
2622 
2623 	for (i = 0; i < bm_pool->buf_num; i++) {
2624 		/* Reading the allocate register pops the buffer back from the BM */
2625 		mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
2626 	}
2627 
2628 	bm_pool->buf_num = 0;
2629 }
2630 
2631 /* Cleanup pool */
2632 static int mvpp2_bm_pool_destroy(struct udevice *dev,
2633 				 struct mvpp2 *priv,
2634 				 struct mvpp2_bm_pool *bm_pool)
2635 {
2636 	u32 val;
2637 
2638 	mvpp2_bm_bufs_free(dev, priv, bm_pool);
2639 	if (bm_pool->buf_num) {
2640 		dev_err(dev, "cannot free all buffers in pool %d\n", bm_pool->id);
2641 		return 0;
2642 	}
2643 
2644 	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
2645 	val |= MVPP2_BM_STOP_MASK;
2646 	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
2647 
2648 	return 0;
2649 }
2650 
2651 static int mvpp2_bm_pools_init(struct udevice *dev,
2652 			       struct mvpp2 *priv)
2653 {
2654 	int i, err, size;
2655 	struct mvpp2_bm_pool *bm_pool;
2656 
2657 	/* Create all pools with maximum size */
2658 	size = MVPP2_BM_POOL_SIZE_MAX;
2659 	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
2660 		bm_pool = &priv->bm_pools[i];
2661 		bm_pool->id = i;
2662 		err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
2663 		if (err)
2664 			goto err_unroll_pools;
2665 		mvpp2_bm_pool_bufsize_set(priv, bm_pool, RX_BUFFER_SIZE);
2666 	}
2667 	return 0;
2668 
2669 err_unroll_pools:
2670 	dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
2671 	for (i = i - 1; i >= 0; i--)
2672 		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
2673 	return err;
2674 }
2675 
2676 static int mvpp2_bm_init(struct udevice *dev, struct mvpp2 *priv)
2677 {
2678 	int i, err;
2679 
2680 	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
2681 		/* Mask BM all interrupts */
2682 		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
2683 		/* Clear BM cause register */
2684 		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
2685 	}
2686 
2687 	/* Allocate and initialize BM pools */
2688 	priv->bm_pools = devm_kcalloc(dev, MVPP2_BM_POOLS_NUM,
2689 				     sizeof(struct mvpp2_bm_pool), GFP_KERNEL);
2690 	if (!priv->bm_pools)
2691 		return -ENOMEM;
2692 
2693 	err = mvpp2_bm_pools_init(dev, priv);
2694 	if (err < 0)
2695 		return err;
2696 	return 0;
2697 }
2698 
2699 /* Attach long pool to rxq */
2700 static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
2701 				    int lrxq, int long_pool)
2702 {
2703 	u32 val, mask;
2704 	int prxq;
2705 
2706 	/* Get queue physical ID */
2707 	prxq = port->rxqs[lrxq]->id;
2708 
2709 	if (port->priv->hw_version == MVPP21)
2710 		mask = MVPP21_RXQ_POOL_LONG_MASK;
2711 	else
2712 		mask = MVPP22_RXQ_POOL_LONG_MASK;
2713 
2714 	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
2715 	val &= ~mask;
2716 	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
2717 	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
2718 }
2719 
2720 /* Set pool number in a BM cookie */
2721 static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
2722 {
2723 	u32 bm;
2724 
2725 	bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
2726 	bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);
2727 
2728 	return bm;
2729 }
2730 
2731 /* Get pool number from a BM cookie */
2732 static inline int mvpp2_bm_cookie_pool_get(unsigned long cookie)
2733 {
2734 	return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
2735 }
2736 
2737 /* Release buffer to BM */
2738 static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
2739 				     dma_addr_t buf_dma_addr,
2740 				     unsigned long buf_phys_addr)
2741 {
2742 	if (port->priv->hw_version == MVPP22) {
2743 		u32 val = 0;
2744 
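		/* PPv2.2 uses 40-bit addresses: the high 32 bits of the DMA
		 * and cookie addresses must be latched in this register
		 * before the low parts are written below.
		 */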
2745 		if (sizeof(dma_addr_t) == 8)
2746 			val |= upper_32_bits(buf_dma_addr) &
2747 				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
2748 
2749 		if (sizeof(phys_addr_t) == 8)
2750 			val |= (upper_32_bits(buf_phys_addr)
2751 				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
2752 				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
2753 
2754 		mvpp2_write(port->priv, MVPP22_BM_ADDR_HIGH_RLS_REG, val);
2755 	}
2756 
2757 	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
2758 	 * returned in the "cookie" field of the RX
2759 	 * descriptor. Instead of storing the virtual address, we
2760 	 * store the physical address
2761 	 */
2762 	mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
2763 	mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
2764 }
2765 
2766 /* Refill BM pool */
2767 static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
2768 			      dma_addr_t dma_addr,
2769 			      phys_addr_t phys_addr)
2770 {
2771 	int pool = mvpp2_bm_cookie_pool_get(bm);
2772 
2773 	mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
2774 }
2775 
2776 /* Allocate buffers for the pool */
2777 static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
2778 			     struct mvpp2_bm_pool *bm_pool, int buf_num)
2779 {
2780 	int i;
2781 
2782 	if (buf_num < 0 ||
2783 	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
2784 		netdev_err(port->dev,
2785 			   "cannot allocate %d buffers for pool %d\n",
2786 			   buf_num, bm_pool->id);
2787 		return 0;
2788 	}
2789 
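	/* Each pre-allocated RX buffer's address doubles as both the DMA
	 * address and the cookie (see the comment in mvpp2_bm_pool_put()).
	 */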
2790 	for (i = 0; i < buf_num; i++) {
2791 		mvpp2_bm_pool_put(port, bm_pool->id,
2792 				  (dma_addr_t)buffer_loc.rx_buffer[i],
2793 				  (unsigned long)buffer_loc.rx_buffer[i]);
2794 
2795 	}
2796 
2797 	/* Update BM driver with number of buffers added to pool */
2798 	bm_pool->buf_num += i;
2799 
2800 	return i;
2801 }
2802 
2803 /* Notify the driver that BM pool is being used as a specific type and return the
2804  * pool pointer on success
2805  */
2806 static struct mvpp2_bm_pool *
2807 mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
2808 		  int pkt_size)
2809 {
2810 	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
2811 	int num;
2812 
2813 	if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
2814 		netdev_err(port->dev, "mixing pool types is forbidden\n");
2815 		return NULL;
2816 	}
2817 
2818 	if (new_pool->type == MVPP2_BM_FREE)
2819 		new_pool->type = type;
2820 
2821 	/* Allocate buffers in case BM pool is used as long pool, but packet
2822 	 * size doesn't match MTU or BM pool hasn't been used yet
2823 	 */
2824 	if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
2825 	    (new_pool->pkt_size == 0)) {
2826 		int pkts_num;
2827 
2828 		/* Set default buffer number or free all the buffers in case
2829 		 * the pool is not empty
2830 		 */
2831 		pkts_num = new_pool->buf_num;
2832 		if (pkts_num == 0)
2833 			pkts_num = type == MVPP2_BM_SWF_LONG ?
2834 				   MVPP2_BM_LONG_BUF_NUM :
2835 				   MVPP2_BM_SHORT_BUF_NUM;
2836 		else
2837 			mvpp2_bm_bufs_free(NULL,
2838 					   port->priv, new_pool);
2839 
2840 		new_pool->pkt_size = pkt_size;
2841 
2842 		/* Allocate buffers for this pool */
2843 		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
2844 		if (num != pkts_num) {
2845 			netdev_err(port->dev, "pool %d: %d of %d allocated\n",
2846 				new_pool->id, num, pkts_num);
2847 			return NULL;
2848 		}
2849 	}
2850 
2851 	return new_pool;
2852 }
2853 
2854 /* Initialize pools for swf */
2855 static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
2856 {
2857 	int rxq;
2858 
2859 	if (!port->pool_long) {
2860 		port->pool_long =
2861 		       mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
2862 					 MVPP2_BM_SWF_LONG,
2863 					 port->pkt_size);
2864 		if (!port->pool_long)
2865 			return -ENOMEM;
2866 
2867 		port->pool_long->port_map |= (1 << port->id);
2868 
2869 		for (rxq = 0; rxq < rxq_number; rxq++)
2870 			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
2871 	}
2872 
2873 	return 0;
2874 }
2875 
2876 /* Port configuration routines */
2877 
2878 static void mvpp2_port_mii_set(struct mvpp2_port *port)
2879 {
2880 	u32 val;
2881 
2882 	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
2883 
2884 	switch (port->phy_interface) {
2885 	case PHY_INTERFACE_MODE_SGMII:
2886 		val |= MVPP2_GMAC_INBAND_AN_MASK;
2887 		break;
2888 	case PHY_INTERFACE_MODE_RGMII:
2889 	case PHY_INTERFACE_MODE_RGMII_ID:
2890 		val |= MVPP2_GMAC_PORT_RGMII_MASK;
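		/* Fall through - RGMII also needs the PCS disabled */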
2891 	default:
2892 		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
2893 	}
2894 
2895 	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2896 }
2897 
2898 static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
2899 {
2900 	u32 val;
2901 
2902 	val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
2903 	val |= MVPP2_GMAC_FC_ADV_EN;
2904 	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
2905 }
2906 
2907 static void mvpp2_port_enable(struct mvpp2_port *port)
2908 {
2909 	u32 val;
2910 
2911 	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2912 	val |= MVPP2_GMAC_PORT_EN_MASK;
2913 	val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
2914 	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2915 }
2916 
2917 static void mvpp2_port_disable(struct mvpp2_port *port)
2918 {
2919 	u32 val;
2920 
2921 	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2922 	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
2923 	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2924 }
2925 
2926 /* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
2927 static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
2928 {
2929 	u32 val;
2930 
2931 	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
2932 		    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
2933 	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
2934 }
2935 
2936 /* Configure loopback port */
2937 static void mvpp2_port_loopback_set(struct mvpp2_port *port)
2938 {
2939 	u32 val;
2940 
2941 	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
2942 
2943 	if (port->speed == 1000)
2944 		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
2945 	else
2946 		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
2947 
2948 	if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
2949 		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
2950 	else
2951 		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
2952 
2953 	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
2954 }
2955 
2956 static void mvpp2_port_reset(struct mvpp2_port *port)
2957 {
2958 	u32 val;
2959 
2960 	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
2961 		    ~MVPP2_GMAC_PORT_RESET_MASK;
2962 	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2963 
2964 	while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
2965 	       MVPP2_GMAC_PORT_RESET_MASK)
2966 		continue;
2967 }
2968 
2969 /* Change maximum receive size of the port */
2970 static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
2971 {
2972 	u32 val;
2973 
2974 	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2975 	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
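	/* The MAX_RX_SIZE field is programmed in units of 2 bytes, hence the
	 * division by 2.
	 */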
2976 	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
2977 		    MVPP2_GMAC_MAX_RX_SIZE_OFFS);
2978 	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2979 }
2980 
2981 /* PPv2.2 GoP/GMAC config */
2982 
2983 /* Set the MAC to reset or exit from reset */
2984 static int gop_gmac_reset(struct mvpp2_port *port, int reset)
2985 {
2986 	u32 val;
2987 
2988 	/* read - modify - write */
2989 	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
2990 	if (reset)
2991 		val |= MVPP2_GMAC_PORT_RESET_MASK;
2992 	else
2993 		val &= ~MVPP2_GMAC_PORT_RESET_MASK;
2994 	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2995 
2996 	return 0;
2997 }
2998 
2999 /*
3000  * gop_gpcs_mode_cfg
3001  *
3002  * Configure the port to work with the Gig PCS or not.
3003  */
3004 static int gop_gpcs_mode_cfg(struct mvpp2_port *port, int en)
3005 {
3006 	u32 val;
3007 
3008 	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
3009 	if (en)
3010 		val |= MVPP2_GMAC_PCS_ENABLE_MASK;
3011 	else
3012 		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
3013 	/* enable / disable PCS on this port */
3014 	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
3015 
3016 	return 0;
3017 }
3018 
3019 static int gop_bypass_clk_cfg(struct mvpp2_port *port, int en)
3020 {
3021 	u32 val;
3022 
3023 	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
3024 	if (en)
3025 		val |= MVPP2_GMAC_CLK_125_BYPS_EN_MASK;
3026 	else
3027 		val &= ~MVPP2_GMAC_CLK_125_BYPS_EN_MASK;
3028 	/* enable / disable the 125MHz clock bypass on this port */
3029 	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
3030 
3031 	return 0;
3032 }
3033 
3034 static void gop_gmac_sgmii2_5_cfg(struct mvpp2_port *port)
3035 {
3036 	u32 val, thresh;
3037 
3038 	/*
3039 	 * Configure minimal level of the Tx FIFO before the lower part
3040 	 * starts to read a packet
3041 	 */
3042 	thresh = MVPP2_SGMII2_5_TX_FIFO_MIN_TH;
3043 	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3044 	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
3045 	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
3046 	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3047 
3048 	/* Disable bypass of sync module */
3049 	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
3050 	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
3051 	/* configure DP clock select according to mode */
3052 	val |= MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
3053 	/* configure QSGMII bypass according to mode */
3054 	val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
3055 	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);
3056 
3057 	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3058 	/*
3059 	 * Configure GIG MAC to 1000Base-X mode connected to a fiber
3060 	 * transceiver
3061 	 */
3062 	val |= MVPP2_GMAC_PORT_TYPE_MASK;
3063 	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3064 
3065 	/* configure AN 0x9268 */
3066 	val = MVPP2_GMAC_EN_PCS_AN |
3067 		MVPP2_GMAC_AN_BYPASS_EN |
3068 		MVPP2_GMAC_CONFIG_MII_SPEED  |
3069 		MVPP2_GMAC_CONFIG_GMII_SPEED     |
3070 		MVPP2_GMAC_FC_ADV_EN    |
3071 		MVPP2_GMAC_CONFIG_FULL_DUPLEX |
3072 		MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
3073 	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3074 }
3075 
3076 static void gop_gmac_sgmii_cfg(struct mvpp2_port *port)
3077 {
3078 	u32 val, thresh;
3079 
3080 	/*
3081 	 * Configure minimal level of the Tx FIFO before the lower part
3082 	 * starts to read a packet
3083 	 */
3084 	thresh = MVPP2_SGMII_TX_FIFO_MIN_TH;
3085 	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3086 	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
3087 	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
3088 	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3089 
3090 	/* Disable bypass of sync module */
3091 	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
3092 	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
3093 	/* configure DP clock select according to mode */
3094 	val &= ~MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
3095 	/* configure QSGMII bypass according to mode */
3096 	val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
3097 	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);
3098 
3099 	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3100 	/* configure GIG MAC to SGMII mode */
3101 	val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
3102 	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3103 
3104 	/* configure AN */
3105 	val = MVPP2_GMAC_EN_PCS_AN |
3106 		MVPP2_GMAC_AN_BYPASS_EN |
3107 		MVPP2_GMAC_AN_SPEED_EN  |
3108 		MVPP2_GMAC_EN_FC_AN     |
3109 		MVPP2_GMAC_AN_DUPLEX_EN |
3110 		MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
3111 	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3112 }
3113 
3114 static void gop_gmac_rgmii_cfg(struct mvpp2_port *port)
3115 {
3116 	u32 val, thresh;
3117 
3118 	/*
3119 	 * Configure minimal level of the Tx FIFO before the lower part
3120 	 * starts to read a packet
3121 	 */
3122 	thresh = MVPP2_RGMII_TX_FIFO_MIN_TH;
3123 	val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3124 	val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
3125 	val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(thresh);
3126 	writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
3127 
3128 	/* Disable bypass of sync module */
3129 	val = readl(port->base + MVPP2_GMAC_CTRL_4_REG);
3130 	val |= MVPP2_GMAC_CTRL4_SYNC_BYPASS_MASK;
3131 	/* configure DP clock select according to mode */
3132 	val &= ~MVPP2_GMAC_CTRL4_DP_CLK_SEL_MASK;
3133 	val |= MVPP2_GMAC_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
3134 	val |= MVPP2_GMAC_CTRL4_EXT_PIN_GMII_SEL_MASK;
3135 	writel(val, port->base + MVPP2_GMAC_CTRL_4_REG);
3136 
3137 	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3138 	/* configure GIG MAC to SGMII mode */
3139 	val &= ~MVPP2_GMAC_PORT_TYPE_MASK;
3140 	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
3141 
3142 	/* configure AN 0xb8e8 */
3143 	val = MVPP2_GMAC_AN_BYPASS_EN |
3144 		MVPP2_GMAC_AN_SPEED_EN   |
3145 		MVPP2_GMAC_EN_FC_AN      |
3146 		MVPP2_GMAC_AN_DUPLEX_EN  |
3147 		MVPP2_GMAC_CHOOSE_SAMPLE_TX_CONFIG;
3148 	writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3149 }

/* Set the internal muxes to the required MAC in the GOP */
static int gop_gmac_mode_cfg(struct mvpp2_port *port)
{
	u32 val;

	/* Set TX FIFO thresholds */
	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_SGMII:
		if (port->phy_speed == 2500)
			gop_gmac_sgmii2_5_cfg(port);
		else
			gop_gmac_sgmii_cfg(port);
		break;

	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
		gop_gmac_rgmii_cfg(port);
		break;

	default:
		return -1;
	}

	/* Jumbo frame support: 0x1400 * 2 = 0x2800 bytes */
	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
	val |= 0x1400 << MVPP2_GMAC_MAX_RX_SIZE_OFFS;
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);

	/* PeriodicXonEn disable */
	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
	val &= ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);

	return 0;
}
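
/*
 * Worked example for the jumbo-frame setup in gop_gmac_mode_cfg() above
 * (illustrative; the 2-byte unit size is inferred from the in-line
 * comment and is not otherwise confirmed here): the MAX_RX_SIZE field
 * counts 2-byte units, so programming 0x1400 units admits frames of up
 * to 0x1400 * 2 = 0x2800 (10240) bytes.
 */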

static void gop_xlg_2_gig_mac_cfg(struct mvpp2_port *port)
{
	u32 val;

	/* relevant only for MAC0 (XLG0 and GMAC0) */
	if (port->gop_id > 0)
		return;

	/* configure 1Gig MAC mode */
	val = readl(port->base + MVPP22_XLG_CTRL3_REG);
	val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
	val |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
	writel(val, port->base + MVPP22_XLG_CTRL3_REG);
}

static int gop_gpcs_reset(struct mvpp2_port *port, int reset)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
	if (reset)
		val &= ~MVPP2_GMAC_SGMII_MODE_MASK;
	else
		val |= MVPP2_GMAC_SGMII_MODE_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

	return 0;
}

/* Set the internal muxes to the required PCS in the PI */
static int gop_xpcs_mode(struct mvpp2_port *port, int num_of_lanes)
{
	u32 val;
	int lane;

	switch (num_of_lanes) {
	case 1:
		lane = 0;
		break;
	case 2:
		lane = 1;
		break;
	case 4:
		lane = 2;
		break;
	default:
		return -1;
	}

	/* configure XG MAC mode */
	val = readl(port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG);
	val &= ~MVPP22_XPCS_PCSMODE_MASK;
	val &= ~MVPP22_XPCS_LANEACTIVE_MASK;
	val |= (2 * lane) << MVPP22_XPCS_LANEACTIVE_OFFS;
	writel(val, port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG);

	return 0;
}
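
/*
 * Lane mapping of gop_xpcs_mode() above, shown as a worked example
 * (derived from this function only): num_of_lanes -> lane -> value
 * written to the LANEACTIVE field (2 * lane):
 *
 *	gop_xpcs_mode(port, 1);	// lane = 0, field = 0
 *	gop_xpcs_mode(port, 2);	// lane = 1, field = 2
 *	gop_xpcs_mode(port, 4);	// lane = 2, field = 4
 */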

static int gop_mpcs_mode(struct mvpp2_port *port)
{
	u32 val;

	/* configure PCS40G COMMON CONTROL */
	val = readl(port->priv->mpcs_base + PCS40G_COMMON_CONTROL);
	val &= ~FORWARD_ERROR_CORRECTION_MASK;
	writel(val, port->priv->mpcs_base + PCS40G_COMMON_CONTROL);

	/* configure PCS CLOCK RESET */
	val = readl(port->priv->mpcs_base + PCS_CLOCK_RESET);
	val &= ~CLK_DIVISION_RATIO_MASK;
	val |= 1 << CLK_DIVISION_RATIO_OFFS;
	writel(val, port->priv->mpcs_base + PCS_CLOCK_RESET);

	val &= ~CLK_DIV_PHASE_SET_MASK;
	val |= MAC_CLK_RESET_MASK;
	val |= RX_SD_CLK_RESET_MASK;
	val |= TX_SD_CLK_RESET_MASK;
	writel(val, port->priv->mpcs_base + PCS_CLOCK_RESET);

	return 0;
}

/* Set the internal muxes to the required MAC in the GOP */
static int gop_xlg_mac_mode_cfg(struct mvpp2_port *port, int num_of_act_lanes)
{
	u32 val;

	/* configure 10G MAC mode */
	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	val |= MVPP22_XLG_RX_FC_EN;
	writel(val, port->base + MVPP22_XLG_CTRL0_REG);

	val = readl(port->base + MVPP22_XLG_CTRL3_REG);
	val &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
	val |= MVPP22_XLG_CTRL3_MACMODESELECT_10GMAC;
	writel(val, port->base + MVPP22_XLG_CTRL3_REG);

	/* read - modify - write */
	val = readl(port->base + MVPP22_XLG_CTRL4_REG);
	val &= ~MVPP22_XLG_MODE_DMA_1G;
	val |= MVPP22_XLG_FORWARD_PFC_EN;
	val |= MVPP22_XLG_FORWARD_802_3X_FC_EN;
	val &= ~MVPP22_XLG_EN_IDLE_CHECK_FOR_LINK;
	writel(val, port->base + MVPP22_XLG_CTRL4_REG);

	/* Jumbo frame support: 0x1400 * 2 = 0x2800 bytes */
	val = readl(port->base + MVPP22_XLG_CTRL1_REG);
	val &= ~MVPP22_XLG_MAX_RX_SIZE_MASK;
	val |= 0x1400 << MVPP22_XLG_MAX_RX_SIZE_OFFS;
	writel(val, port->base + MVPP22_XLG_CTRL1_REG);

	/* unmask link change interrupt */
	val = readl(port->base + MVPP22_XLG_INTERRUPT_MASK_REG);
	val |= MVPP22_XLG_INTERRUPT_LINK_CHANGE;
	val |= 1; /* unmask summary bit */
	writel(val, port->base + MVPP22_XLG_INTERRUPT_MASK_REG);

	return 0;
}

/* Set PCS to reset or exit from reset */
static int gop_xpcs_reset(struct mvpp2_port *port, int reset)
{
	u32 val;

	/* read - modify - write */
	val = readl(port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG);
	if (reset)
		val &= ~MVPP22_XPCS_PCSRESET;
	else
		val |= MVPP22_XPCS_PCSRESET;
	writel(val, port->priv->xpcs_base + MVPP22_XPCS_GLOBAL_CFG_0_REG);

	return 0;
}

/* Set the MAC to reset or exit from reset */
static int gop_xlg_mac_reset(struct mvpp2_port *port, int reset)
{
	u32 val;

	/* read - modify - write */
	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	if (reset)
		val &= ~MVPP22_XLG_MAC_RESETN;
	else
		val |= MVPP22_XLG_MAC_RESETN;
	writel(val, port->base + MVPP22_XLG_CTRL0_REG);

	return 0;
}

/*
 * gop_port_init
 *
 * Init physical port. Configures the port mode and all its elements
 * accordingly.
 * Does not verify that the selected mode/port number is valid at the
 * core level.
 */
static int gop_port_init(struct mvpp2_port *port)
{
	int mac_num = port->gop_id;
	int num_of_act_lanes;

	if (mac_num >= MVPP22_GOP_MAC_NUM) {
		netdev_err(NULL, "%s: illegal port number %d", __func__,
			   mac_num);
		return -1;
	}

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
		gop_gmac_reset(port, 1);

		/* configure PCS */
		gop_gpcs_mode_cfg(port, 0);
		gop_bypass_clk_cfg(port, 1);

		/* configure MAC */
		gop_gmac_mode_cfg(port);
		/* pcs unreset */
		gop_gpcs_reset(port, 0);

		/* mac unreset */
		gop_gmac_reset(port, 0);
		break;

	case PHY_INTERFACE_MODE_SGMII:
		/* configure PCS */
		gop_gpcs_mode_cfg(port, 1);

		/* configure MAC */
		gop_gmac_mode_cfg(port);
		/* select proper MAC mode */
		gop_xlg_2_gig_mac_cfg(port);

		/* pcs unreset */
		gop_gpcs_reset(port, 0);
		/* mac unreset */
		gop_gmac_reset(port, 0);
		break;

	case PHY_INTERFACE_MODE_SFI:
		num_of_act_lanes = 2;
		mac_num = 0;
		/* configure PCS */
		gop_xpcs_mode(port, num_of_act_lanes);
		gop_mpcs_mode(port);
		/* configure MAC */
		gop_xlg_mac_mode_cfg(port, num_of_act_lanes);

		/* pcs unreset */
		gop_xpcs_reset(port, 0);

		/* mac unreset */
		gop_xlg_mac_reset(port, 0);
		break;

	default:
		netdev_err(NULL, "%s: Requested port mode (%d) not supported\n",
			   __func__, port->phy_interface);
		return -1;
	}

	return 0;
}

static void gop_xlg_mac_port_enable(struct mvpp2_port *port, int enable)
{
	u32 val;

	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
	if (enable) {
		/* Enable port and MIB counters update */
		val |= MVPP22_XLG_PORT_EN;
		val &= ~MVPP22_XLG_MIBCNT_DIS;
	} else {
		/* Disable port */
		val &= ~MVPP22_XLG_PORT_EN;
	}
	writel(val, port->base + MVPP22_XLG_CTRL0_REG);
}

static void gop_port_enable(struct mvpp2_port *port, int enable)
{
	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_SGMII:
		if (enable)
			mvpp2_port_enable(port);
		else
			mvpp2_port_disable(port);
		break;

	case PHY_INTERFACE_MODE_SFI:
		gop_xlg_mac_port_enable(port, enable);
		break;

	default:
		netdev_err(NULL, "%s: Wrong port mode (%d)\n", __func__,
			   port->phy_interface);
		return;
	}
}

/* RFU1 functions */
static inline u32 gop_rfu1_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->rfu1_base + offset);
}

static inline void gop_rfu1_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->rfu1_base + offset);
}

static u32 mvpp2_netc_cfg_create(int gop_id, phy_interface_t phy_type)
{
	u32 val = 0;

	if (gop_id == 2) {
		if (phy_type == PHY_INTERFACE_MODE_SGMII)
			val |= MV_NETC_GE_MAC2_SGMII;
	}

	if (gop_id == 3) {
		if (phy_type == PHY_INTERFACE_MODE_SGMII)
			val |= MV_NETC_GE_MAC3_SGMII;
		else if (phy_type == PHY_INTERFACE_MODE_RGMII ||
			 phy_type == PHY_INTERFACE_MODE_RGMII_ID)
			val |= MV_NETC_GE_MAC3_RGMII;
	}

	return val;
}
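
/*
 * Usage sketch for mvpp2_netc_cfg_create() (hypothetical setup with
 * SGMII on GoP 2 and RGMII on GoP 3): the per-port results are OR-ed
 * into priv->netc_config, which gop_netc_init() below consumes:
 *
 *	priv->netc_config |= mvpp2_netc_cfg_create(2, PHY_INTERFACE_MODE_SGMII);
 *	priv->netc_config |= mvpp2_netc_cfg_create(3, PHY_INTERFACE_MODE_RGMII);
 *	// netc_config == MV_NETC_GE_MAC2_SGMII | MV_NETC_GE_MAC3_RGMII
 */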

static void gop_netc_active_port(struct mvpp2 *priv, int gop_id, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_1_REG);
	reg &= ~(NETC_PORTS_ACTIVE_MASK(gop_id));

	val <<= NETC_PORTS_ACTIVE_OFFSET(gop_id);
	val &= NETC_PORTS_ACTIVE_MASK(gop_id);

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_1_REG, reg);
}

static void gop_netc_mii_mode(struct mvpp2 *priv, int gop_id, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_CONTROL_0_REG);
	reg &= ~NETC_GBE_PORT1_MII_MODE_MASK;

	val <<= NETC_GBE_PORT1_MII_MODE_OFFS;
	val &= NETC_GBE_PORT1_MII_MODE_MASK;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_CONTROL_0_REG, reg);
}

static void gop_netc_gop_reset(struct mvpp2 *priv, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, GOP_SOFT_RESET_1_REG);
	reg &= ~NETC_GOP_SOFT_RESET_MASK;

	val <<= NETC_GOP_SOFT_RESET_OFFS;
	val &= NETC_GOP_SOFT_RESET_MASK;

	reg |= val;

	gop_rfu1_write(priv, GOP_SOFT_RESET_1_REG, reg);
}

static void gop_netc_gop_clock_logic_set(struct mvpp2 *priv, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG);
	reg &= ~NETC_CLK_DIV_PHASE_MASK;

	val <<= NETC_CLK_DIV_PHASE_OFFS;
	val &= NETC_CLK_DIV_PHASE_MASK;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg);
}

static void gop_netc_port_rf_reset(struct mvpp2 *priv, int gop_id, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_1_REG);
	reg &= ~(NETC_PORT_GIG_RF_RESET_MASK(gop_id));

	val <<= NETC_PORT_GIG_RF_RESET_OFFS(gop_id);
	val &= NETC_PORT_GIG_RF_RESET_MASK(gop_id);

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_1_REG, reg);
}

static void gop_netc_gbe_sgmii_mode_select(struct mvpp2 *priv, int gop_id,
					   u32 val)
{
	u32 reg, mask, offset;

	if (gop_id == 2) {
		mask = NETC_GBE_PORT0_SGMII_MODE_MASK;
		offset = NETC_GBE_PORT0_SGMII_MODE_OFFS;
	} else {
		mask = NETC_GBE_PORT1_SGMII_MODE_MASK;
		offset = NETC_GBE_PORT1_SGMII_MODE_OFFS;
	}
	reg = gop_rfu1_read(priv, NETCOMP_CONTROL_0_REG);
	reg &= ~mask;

	val <<= offset;
	val &= mask;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_CONTROL_0_REG, reg);
}

static void gop_netc_bus_width_select(struct mvpp2 *priv, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG);
	reg &= ~NETC_BUS_WIDTH_SELECT_MASK;

	val <<= NETC_BUS_WIDTH_SELECT_OFFS;
	val &= NETC_BUS_WIDTH_SELECT_MASK;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg);
}

static void gop_netc_sample_stages_timing(struct mvpp2 *priv, u32 val)
{
	u32 reg;

	reg = gop_rfu1_read(priv, NETCOMP_PORTS_CONTROL_0_REG);
	reg &= ~NETC_GIG_RX_DATA_SAMPLE_MASK;

	val <<= NETC_GIG_RX_DATA_SAMPLE_OFFS;
	val &= NETC_GIG_RX_DATA_SAMPLE_MASK;

	reg |= val;

	gop_rfu1_write(priv, NETCOMP_PORTS_CONTROL_0_REG, reg);
}

static void gop_netc_mac_to_xgmii(struct mvpp2 *priv, int gop_id,
				  enum mv_netc_phase phase)
{
	switch (phase) {
	case MV_NETC_FIRST_PHASE:
		/* Set Bus Width to HB mode = 1 */
		gop_netc_bus_width_select(priv, 1);
		/* Select RGMII mode */
		gop_netc_gbe_sgmii_mode_select(priv, gop_id, MV_NETC_GBE_XMII);
		break;

	case MV_NETC_SECOND_PHASE:
		/* De-assert the relevant port HB reset */
		gop_netc_port_rf_reset(priv, gop_id, 1);
		break;
	}
}

static void gop_netc_mac_to_sgmii(struct mvpp2 *priv, int gop_id,
				  enum mv_netc_phase phase)
{
	switch (phase) {
	case MV_NETC_FIRST_PHASE:
		/* Set Bus Width to HB mode = 1 */
		gop_netc_bus_width_select(priv, 1);
		/* Select SGMII mode */
		if (gop_id >= 1) {
			gop_netc_gbe_sgmii_mode_select(priv, gop_id,
						       MV_NETC_GBE_SGMII);
		}

		/* Configure the sample stages */
		gop_netc_sample_stages_timing(priv, 0);
		/* Configure the ComPhy Selector */
		/* gop_netc_com_phy_selector_config(netComplex); */
		break;

	case MV_NETC_SECOND_PHASE:
		/* De-assert the relevant port HB reset */
		gop_netc_port_rf_reset(priv, gop_id, 1);
		break;
	}
}

static int gop_netc_init(struct mvpp2 *priv, enum mv_netc_phase phase)
{
	u32 c = priv->netc_config;

	if (c & MV_NETC_GE_MAC2_SGMII)
		gop_netc_mac_to_sgmii(priv, 2, phase);
	else
		gop_netc_mac_to_xgmii(priv, 2, phase);

	if (c & MV_NETC_GE_MAC3_SGMII) {
		gop_netc_mac_to_sgmii(priv, 3, phase);
	} else {
		gop_netc_mac_to_xgmii(priv, 3, phase);
		if (c & MV_NETC_GE_MAC3_RGMII)
			gop_netc_mii_mode(priv, 3, MV_NETC_GBE_RGMII);
		else
			gop_netc_mii_mode(priv, 3, MV_NETC_GBE_MII);
	}

	/* Activate gop ports 0, 2, 3 */
	gop_netc_active_port(priv, 0, 1);
	gop_netc_active_port(priv, 2, 1);
	gop_netc_active_port(priv, 3, 1);

	if (phase == MV_NETC_SECOND_PHASE) {
		/* Enable the GOP internal clock logic */
		gop_netc_gop_clock_logic_set(priv, 1);
		/* De-assert GOP unit reset */
		gop_netc_gop_reset(priv, 1);
	}

	return 0;
}
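
/*
 * Call-sequence sketch (assumed from the phase switches above): the net
 * complex is configured in two passes, and only the second pass enables
 * the clock logic and releases the GOP reset:
 *
 *	gop_netc_init(priv, MV_NETC_FIRST_PHASE);	// modes, bus width
 *	gop_netc_init(priv, MV_NETC_SECOND_PHASE);	// de-assert resets
 */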

/* Set defaults to the MVPP2 port */
static void mvpp2_defaults_set(struct mvpp2_port *port)
{
	int tx_port_num, val, queue, ptxq, lrxq;

	if (port->priv->hw_version == MVPP21) {
		/* Configure port to loopback if needed */
		if (port->flags & MVPP2_F_LOOPBACK)
			mvpp2_port_loopback_set(port);

		/* Update TX FIFO MIN Threshold */
		val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
		val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
		/* Min. TX threshold must be less than minimal packet length */
		val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
		writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
	}

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
		    tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);

	/* Close bandwidth for all queues */
	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
		ptxq = mvpp2_txq_phys(port->id, queue);
		mvpp2_write(port->priv,
			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
	}

	/* Set refill period to 1 usec, refill tokens
	 * and bucket size to maximum
	 */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, 0xc8);
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
	val = MVPP2_TXP_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);

	/* Set MaximumLowLatencyPacketSize value to 256 */
	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));

	/* Enable Rx cache snoop */
	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
		       MVPP2_SNOOP_BUF_HDR_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

/* Enable/disable receiving packets */
static void mvpp2_ingress_enable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val &= ~MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

static void mvpp2_ingress_disable(struct mvpp2_port *port)
{
	u32 val;
	int lrxq, queue;

	for (lrxq = 0; lrxq < rxq_number; lrxq++) {
		queue = port->rxqs[lrxq]->id;
		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
		val |= MVPP2_RXQ_DISABLE_MASK;
		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
	}
}

/* Enable transmit via physical egress queue
 * - HW starts to take descriptors from DRAM
 */
static void mvpp2_egress_enable(struct mvpp2_port *port)
{
	u32 qmap;
	int queue;
	int tx_port_num = mvpp2_egress_port(port);

	/* Enable all initialized TXs. */
	qmap = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvpp2_tx_queue *txq = port->txqs[queue];

		if (txq->descs != NULL)
			qmap |= (1 << queue);
	}

	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
}
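
/*
 * Example of the qmap encoding above (illustrative): with three TXQs of
 * which only queues 0 and 2 have initialized descriptor rings, qmap
 * becomes 0b101 = 0x5, so only those queues are started via
 * MVPP2_TXP_SCHED_Q_CMD_REG.
 */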

/* Disable transmit via physical egress queue
 * - HW doesn't take descriptors from DRAM
 */
static void mvpp2_egress_disable(struct mvpp2_port *port)
{
	u32 reg_data;
	int delay;
	int tx_port_num = mvpp2_egress_port(port);

	/* Issue stop command for active channels only */
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
		    MVPP2_TXP_SCHED_ENQ_MASK;
	if (reg_data != 0)
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));

	/* Wait for all Tx activity to terminate. */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "Tx stop timed out, status=0x%08x\n",
				    reg_data);
			break;
		}
		mdelay(1);
		delay++;

		/* Check port TX Command register that all
		 * Tx queues are stopped
		 */
		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
}

/* Rx descriptors helper methods */

/* Get number of Rx descriptors occupied by received packets */
static inline int
mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
{
	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));

	return val & MVPP2_RXQ_OCCUPIED_MASK;
}

/* Update Rx queue status with the number of occupied and available
 * Rx descriptor slots.
 */
static inline void
mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
			int used_count, int free_count)
{
	/* Decrement the number of used descriptors and increment the
	 * number of free descriptors.
	 */
	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);

	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
}
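
/*
 * Usage sketch: after consuming and refilling N received descriptors,
 * both counters are updated in a single write, as mvpp2_rxq_drop_pkts()
 * below does:
 *
 *	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
 */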

/* Get pointer to next RX descriptor to be processed by SW */
static inline struct mvpp2_rx_desc *
mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
	prefetch(rxq->descs + rxq->next_desc_to_proc);
	return rxq->descs + rx_desc;
}

/* Set rx queue offset */
static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
				 int prxq, int offset)
{
	u32 val;

	/* Convert offset from bytes to units of 32 bytes */
	offset = offset >> 5;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;

	/* Offset is in units of 32 bytes */
	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
		    MVPP2_RXQ_PACKET_OFFSET_MASK);

	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}
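
/*
 * Worked example of the conversion above (illustrative): the hardware
 * field counts 32-byte units, so a byte offset of 64 is programmed as
 * 64 >> 5 = 2, and an offset of 32 as 1.
 */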

/* Obtain BM cookie information from descriptor */
static u32 mvpp2_bm_cookie_build(struct mvpp2_port *port,
				 struct mvpp2_rx_desc *rx_desc)
{
	int cpu = smp_processor_id();
	int pool;

	pool = (mvpp2_rxdesc_status_get(port, rx_desc) &
		MVPP2_RXD_BM_POOL_ID_MASK) >>
		MVPP2_RXD_BM_POOL_ID_OFFS;

	return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
	       ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
}
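
/*
 * The pool ID can be recovered from a cookie with the inverse of the
 * packing above; a minimal sketch (helper name is hypothetical, not
 * part of this driver):
 *
 *	static inline int mvpp2_bm_cookie_pool_get(u32 cookie)
 *	{
 *		return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
 *	}
 */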

/* Tx descriptors helper methods */

/* Get number of Tx descriptors waiting to be transmitted by HW */
static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
				       struct mvpp2_tx_queue *txq)
{
	u32 val;

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);

	return val & MVPP2_TXQ_PENDING_MASK;
}

/* Get pointer to next Tx descriptor to be processed (send) by HW */
static struct mvpp2_tx_desc *
mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Update HW with number of aggregated Tx descriptors to be sent */
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
	/* aggregated access - relevant TXQ number is written in TX desc */
	mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}

/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 * Per-CPU access
 */
static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
					   struct mvpp2_tx_queue *txq)
{
	u32 val;

	/* Reading status reg resets transmitted descriptor counter */
	val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));

	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
		MVPP2_TRANSMITTED_COUNT_OFFSET;
}

static void mvpp2_txq_sent_counter_clear(void *arg)
{
	struct mvpp2_port *port = arg;
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int id = port->txqs[queue]->id;

		mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
	}
}

/* Set max sizes for Tx queues */
static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
{
	u32 val, size, mtu;
	int txq, tx_port_num;

	mtu = port->pkt_size * 8;
	if (mtu > MVPP2_TXP_MTU_MAX)
		mtu = MVPP2_TXP_MTU_MAX;

	/* WA for wrong Token bucket update: Set MTU value = 3 * real MTU value */
	mtu = 3 * mtu;

	/* Indirect access to registers */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	/* Set MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
	val &= ~MVPP2_TXP_MTU_MAX;
	val |= mtu;
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);

	/* TXP token size and all TXQs token size must be larger than MTU */
	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
	if (size < mtu) {
		size = mtu;
		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
		val |= size;
		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
	}

	for (txq = 0; txq < txq_number; txq++) {
		val = mvpp2_read(port->priv,
				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;

		if (size < mtu) {
			size = mtu;
			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
			val |= size;
			mvpp2_write(port->priv,
				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
				    val);
		}
	}
}
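
/*
 * Worked example of the MTU computation above (illustrative numbers,
 * assuming the result stays below MVPP2_TXP_MTU_MAX): with
 * port->pkt_size == 1568, mtu = 1568 * 8 = 12544, and the token-bucket
 * workaround then programs 3 * 12544 = 37632; every TXP/TXQ token size
 * below that figure is raised to match.
 */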

/* Free Tx queue skbuffs */
static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
				struct mvpp2_tx_queue *txq,
				struct mvpp2_txq_pcpu *txq_pcpu, int num)
{
	int i;

	for (i = 0; i < num; i++)
		mvpp2_txq_inc_get(txq_pcpu);
}

static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->rxqs[queue];
}

static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
							u32 cause)
{
	int queue = fls(cause) - 1;

	return port->txqs[queue];
}
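
/*
 * Example of the cause decoding above (illustrative): fls() returns the
 * one-based index of the highest set bit, so a cause bitmap of 0x4
 * selects queue fls(0x4) - 1 = 2, i.e. port->txqs[2].
 */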

/* Rx/Tx queue initialization/cleanup methods */

/* Allocate and initialize descriptors for aggr TXQ */
static int mvpp2_aggr_txq_init(struct udevice *dev,
			       struct mvpp2_tx_queue *aggr_txq,
			       int desc_num, int cpu,
			       struct mvpp2 *priv)
{
	u32 txq_dma;

	/* Allocate memory for TX descriptors */
	aggr_txq->descs = buffer_loc.aggr_tx_descs;
	aggr_txq->descs_dma = (dma_addr_t)buffer_loc.aggr_tx_descs;
	if (!aggr_txq->descs)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(aggr_txq->descs !=
	       PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	aggr_txq->last_desc = aggr_txq->size - 1;

	/* Aggr TXQ no reset WA */
	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
						 MVPP2_AGGR_TXQ_INDEX_REG(cpu));

	/* Set Tx descriptors queue starting address indirect
	 * access
	 */
	if (priv->hw_version == MVPP21)
		txq_dma = aggr_txq->descs_dma;
	else
		txq_dma = aggr_txq->descs_dma >>
			MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;

	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), txq_dma);
	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);

	return 0;
}

/* Create a specified Rx queue */
static int mvpp2_rxq_init(struct mvpp2_port *port,
			  struct mvpp2_rx_queue *rxq)
{
	u32 rxq_dma;

	rxq->size = port->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = buffer_loc.rx_descs;
	rxq->descs_dma = (dma_addr_t)buffer_loc.rx_descs;
	if (!rxq->descs)
		return -ENOMEM;

	BUG_ON(rxq->descs !=
	       PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	rxq->last_desc = rxq->size - 1;

	/* Zero occupied and non-occupied counters - direct access */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);

	/* Set Rx descriptors queue starting address - indirect access */
	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	if (port->priv->hw_version == MVPP21)
		rxq_dma = rxq->descs_dma;
	else
		rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
	mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);

	/* Set Offset */
	mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);

	/* Add number of descriptors ready for receiving packets */
	mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);

	return 0;
}

/* Push packets received by the RXQ to BM pool */
static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
				struct mvpp2_rx_queue *rxq)
{
	int rx_received, i;

	rx_received = mvpp2_rxq_received(port, rxq->id);
	if (!rx_received)
		return;

	for (i = 0; i < rx_received; i++) {
		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
		u32 bm = mvpp2_bm_cookie_build(port, rx_desc);

		mvpp2_pool_refill(port, bm,
				  mvpp2_rxdesc_dma_addr_get(port, rx_desc),
				  mvpp2_rxdesc_cookie_get(port, rx_desc));
	}
	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
}

/* Cleanup Rx queue */
static void mvpp2_rxq_deinit(struct mvpp2_port *port,
			     struct mvpp2_rx_queue *rxq)
{
	mvpp2_rxq_drop_pkts(port, rxq);

	rxq->descs             = NULL;
	rxq->last_desc         = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_dma         = 0;

	/* Clear Rx descriptors queue starting address and size;
	 * free descriptor number
	 */
	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
	mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
}

/* Create and initialize a Tx queue */
static int mvpp2_txq_init(struct mvpp2_port *port,
			  struct mvpp2_tx_queue *txq)
{
	u32 val;
	int cpu, desc, desc_per_txq, tx_port_num;
	struct mvpp2_txq_pcpu *txq_pcpu;

	txq->size = port->tx_ring_size;

	/* Allocate memory for Tx descriptors */
	txq->descs = buffer_loc.tx_descs;
	txq->descs_dma = (dma_addr_t)buffer_loc.tx_descs;
	if (!txq->descs)
		return -ENOMEM;

	/* Make sure descriptor address is cache line size aligned */
	BUG_ON(txq->descs !=
	       PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));

	txq->last_desc = txq->size - 1;

	/* Set Tx descriptors queue starting address - indirect access */
	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_dma);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
					     MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
		    txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
	val &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);

	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
	 * for each existing TXQ.
	 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
	 * GBE ports are assumed to be continuous from 0 to MVPP2_MAX_PORTS
	 */
	desc_per_txq = 16;
	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
	       (txq->log_id * desc_per_txq);

	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
		    MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
		    MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));

	/* WRR / EJP configuration - indirect access */
	tx_port_num = mvpp2_egress_port(port);
	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);

	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);

	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
		    val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
		txq_pcpu->size = txq->size;
	}

	return 0;
}
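
/*
 * Worked example of the prefetch-buffer layout above (illustrative,
 * assuming MVPP2_MAX_TXQ == 8): with 16 descriptors reserved per TXQ,
 * port 1, logical queue 2 gets base desc = 1 * 8 * 16 + 2 * 16 = 160
 * and a threshold of 16 / 2 = 8.
 */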

/* Free allocated TXQ resources */
static void mvpp2_txq_deinit(struct mvpp2_port *port,
			     struct mvpp2_tx_queue *txq)
{
	txq->descs             = NULL;
	txq->last_desc         = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_dma         = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
	mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
}

/* Cleanup Tx ports */
static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_txq_pcpu *txq_pcpu;
	int delay, pending, cpu;
	u32 val;

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
	val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
	val |= MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);

	/* The napi queue has been stopped so wait for all packets
	 * to be transmitted.
	 */
	delay = 0;
	do {
		if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
			netdev_warn(port->dev,
				    "port %d: cleaning queue %d timed out\n",
				    port->id, txq->log_id);
			break;
		}
		mdelay(1);
		delay++;

		pending = mvpp2_txq_pend_desc_num_get(port, txq);
	} while (pending);

	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
	mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);

	for_each_present_cpu(cpu) {
		txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);

		/* Release all packets */
		mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);

		/* Reset queue */
		txq_pcpu->count = 0;
		txq_pcpu->txq_put_index = 0;
		txq_pcpu->txq_get_index = 0;
	}
}

/* Cleanup all Tx queues */
static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue;
	u32 val;

	val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);

	/* Reset Tx ports and delete Tx queues */
	val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);

	for (queue = 0; queue < txq_number; queue++) {
		txq = port->txqs[queue];
		mvpp2_txq_clean(port, txq);
		mvpp2_txq_deinit(port, txq);
	}

	mvpp2_txq_sent_counter_clear(port);

	val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
}

/* Cleanup all Rx queues */
static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvpp2_rxq_deinit(port, port->rxqs[queue]);
}

/* Init all Rx queues for port */
static int mvpp2_setup_rxqs(struct mvpp2_port *port)
{
	int queue, err;

	for (queue = 0; queue < rxq_number; queue++) {
		err = mvpp2_rxq_init(port, port->rxqs[queue]);
		if (err)
			goto err_cleanup;
	}
	return 0;

err_cleanup:
	mvpp2_cleanup_rxqs(port);
	return err;
}

/* Init all Tx queues for port */
static int mvpp2_setup_txqs(struct mvpp2_port *port)
{
	struct mvpp2_tx_queue *txq;
	int queue, err;

	for (queue = 0; queue < txq_number; queue++) {
		txq = port->txqs[queue];
		err = mvpp2_txq_init(port, txq);
		if (err)
			goto err_cleanup;
	}

	mvpp2_txq_sent_counter_clear(port);
	return 0;

err_cleanup:
	mvpp2_cleanup_txqs(port);
	return err;
}

/* Adjust link */
static void mvpp2_link_event(struct mvpp2_port *port)
{
	struct phy_device *phydev = port->phy_dev;
	int status_change = 0;
	u32 val;

	if (phydev->link) {
		if ((port->speed != phydev->speed) ||
		    (port->duplex != phydev->duplex)) {
			u32 val;

			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
				 MVPP2_GMAC_CONFIG_GMII_SPEED |
				 MVPP2_GMAC_CONFIG_FULL_DUPLEX |
				 MVPP2_GMAC_AN_SPEED_EN |
				 MVPP2_GMAC_AN_DUPLEX_EN);

			if (phydev->duplex)
				val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;

			if (phydev->speed == SPEED_1000)
				val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
			else if (phydev->speed == SPEED_100)
				val |= MVPP2_GMAC_CONFIG_MII_SPEED;

			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);

			port->duplex = phydev->duplex;
			port->speed  = phydev->speed;
		}
	}

	if (phydev->link != port->link) {
		if (!phydev->link) {
			port->duplex = -1;
			port->speed = 0;
		}

		port->link = phydev->link;
		status_change = 1;
	}

	if (status_change) {
		if (phydev->link) {
			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			val |= (MVPP2_GMAC_FORCE_LINK_PASS |
				MVPP2_GMAC_FORCE_LINK_DOWN);
			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
			mvpp2_egress_enable(port);
			mvpp2_ingress_enable(port);
		} else {
			mvpp2_ingress_disable(port);
			mvpp2_egress_disable(port);
		}
	}
}

/* Main RX/TX processing routines */

/* Display more error info */
static void mvpp2_rx_error(struct mvpp2_port *port,
			   struct mvpp2_rx_desc *rx_desc)
{
	u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
	size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);

	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
	case MVPP2_RXD_ERR_CRC:
		netdev_err(port->dev, "bad rx status %08x (crc error), size=%zu\n",
			   status, sz);
		break;
	case MVPP2_RXD_ERR_OVERRUN:
		netdev_err(port->dev, "bad rx status %08x (overrun error), size=%zu\n",
			   status, sz);
		break;
	case MVPP2_RXD_ERR_RESOURCE:
		netdev_err(port->dev, "bad rx status %08x (resource error), size=%zu\n",
			   status, sz);
		break;
	}
}

/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
static int mvpp2_rx_refill(struct mvpp2_port *port,
			   struct mvpp2_bm_pool *bm_pool,
			   u32 bm, dma_addr_t dma_addr)
{
	mvpp2_pool_refill(port, bm, dma_addr, (unsigned long)dma_addr);
	return 0;
}

/* Set hw internals when starting port */
static void mvpp2_start_dev(struct mvpp2_port *port)
{
	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_SGMII:
		mvpp2_gmac_max_rx_size_set(port);
	default:
		break;
	}

	mvpp2_txp_max_tx_size_set(port);

	if (port->priv->hw_version == MVPP21)
		mvpp2_port_enable(port);
	else
		gop_port_enable(port, 1);
}

/* Set hw internals when stopping port */
static void mvpp2_stop_dev(struct mvpp2_port *port)
{
	/* Stop new packets from arriving to RXQs */
	mvpp2_ingress_disable(port);

	mvpp2_egress_disable(port);

	if (port->priv->hw_version == MVPP21)
		mvpp2_port_disable(port);
	else
		gop_port_enable(port, 0);
}

static int mvpp2_phy_connect(struct udevice *dev, struct mvpp2_port *port)
{
	struct phy_device *phy_dev;

	if (!port->init || port->link == 0) {
		phy_dev = phy_connect(port->priv->bus, port->phyaddr, dev,
				      port->phy_interface);
		if (!phy_dev) {
			netdev_err(port->dev, "cannot connect to phy\n");
			return -ENODEV;
		}
		phy_dev->supported &= PHY_GBIT_FEATURES;
		phy_dev->advertising = phy_dev->supported;

		port->phy_dev = phy_dev;
		port->link    = 0;
		port->duplex  = 0;
		port->speed   = 0;

		phy_config(phy_dev);
		phy_startup(phy_dev);
		if (!phy_dev->link) {
			printf("%s: No link\n", phy_dev->dev->name);
			return -1;
		}

		port->init = 1;
	} else {
		mvpp2_egress_enable(port);
		mvpp2_ingress_enable(port);
	}

	return 0;
}

static int mvpp2_open(struct udevice *dev, struct mvpp2_port *port)
{
	unsigned char mac_bcast[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int err;

	err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
		return err;
	}
	err = mvpp2_prs_mac_da_accept(port->priv, port->id,
				      port->dev_addr, true);
	if (err) {
		netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n");
		return err;
	}
	err = mvpp2_prs_def_flow(port);
	if (err) {
		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
		return err;
	}

	/* Allocate the Rx/Tx queues */
	err = mvpp2_setup_rxqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Rx queues\n");
		return err;
	}

	err = mvpp2_setup_txqs(port);
	if (err) {
		netdev_err(port->dev, "cannot allocate Tx queues\n");
		return err;
	}

	if (port->phy_node) {
		err = mvpp2_phy_connect(dev, port);
		if (err < 0)
			return err;

		mvpp2_link_event(port);
	} else {
		mvpp2_egress_enable(port);
		mvpp2_ingress_enable(port);
	}

	mvpp2_start_dev(port);

	return 0;
}

/* No Device ops here in U-Boot */

/* Driver initialization */

static void mvpp2_port_power_up(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;

	/* On PPv2.2 the GoP / interface configuration has already been done */
	if (priv->hw_version == MVPP21)
		mvpp2_port_mii_set(port);
	mvpp2_port_periodic_xon_disable(port);
	if (priv->hw_version == MVPP21)
		mvpp2_port_fc_adv_enable(port);
	mvpp2_port_reset(port);
}

/* Initialize port HW */
static int mvpp2_port_init(struct udevice *dev, struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	struct mvpp2_txq_pcpu *txq_pcpu;
	int queue, cpu, err;

	if (port->first_rxq + rxq_number >
	    MVPP2_MAX_PORTS * priv->max_port_rxqs)
		return -EINVAL;

	/* Disable port */
	mvpp2_egress_disable(port);
	if (priv->hw_version == MVPP21)
		mvpp2_port_disable(port);
	else
		gop_port_enable(port, 0);

	port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
				  GFP_KERNEL);
	if (!port->txqs)
		return -ENOMEM;

	/* Associate physical Tx queues to this port and initialize.
	 * The mapping is predefined.
	 */
	for (queue = 0; queue < txq_number; queue++) {
		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
		struct mvpp2_tx_queue *txq;

		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
		if (!txq)
			return -ENOMEM;

		txq->pcpu = devm_kzalloc(dev, sizeof(struct mvpp2_txq_pcpu),
					 GFP_KERNEL);
		if (!txq->pcpu)
			return -ENOMEM;

		txq->id = queue_phy_id;
		txq->log_id = queue;
		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
		for_each_present_cpu(cpu) {
			txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
			txq_pcpu->cpu = cpu;
		}

		port->txqs[queue] = txq;
	}

	port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
				  GFP_KERNEL);
	if (!port->rxqs)
		return -ENOMEM;

	/* Allocate and initialize Rx queue for this port */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq;

		/* Map physical Rx queue to port's logical Rx queue */
		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
		if (!rxq)
			return -ENOMEM;
		/* Map this Rx queue to a physical queue */
		rxq->id = port->first_rxq + queue;
		rxq->port = port->id;
		rxq->logic_rxq = queue;

		port->rxqs[queue] = rxq;
	}

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvpp2_rx_queue *rxq = port->rxqs[queue];

		rxq->size = port->rx_ring_size;
		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
		rxq->time_coal = MVPP2_RX_COAL_USEC;
	}

	mvpp2_ingress_disable(port);

	/* Port default configuration */
	mvpp2_defaults_set(port);

	/* Port's classifier configuration */
	mvpp2_cls_oversize_rxq_set(port);
	mvpp2_cls_port_config(port);

	/* Provide an initial Rx packet size */
	port->pkt_size = MVPP2_RX_PKT_SIZE(PKTSIZE_ALIGN);

	/* Initialize pools for swf */
	err = mvpp2_swf_bm_pool_init(port);
	if (err)
		return err;

	return 0;
}

static int phy_info_parse(struct udevice *dev, struct mvpp2_port *port)
{
	int port_node = dev_of_offset(dev);
	const char *phy_mode_str;
	int phy_node, mdio_off, cp_node;
	u32 id;
	int phyaddr = 0;
	int phy_mode = -1;
	phys_addr_t mdio_addr;

	phy_node = fdtdec_lookup_phandle(gd->fdt_blob, port_node, "phy");

	if (phy_node > 0) {
		phyaddr = fdtdec_get_int(gd->fdt_blob, phy_node, "reg", 0);
		if (phyaddr < 0) {
			dev_err(&pdev->dev, "could not find phy address\n");
			return -1;
		}
		mdio_off = fdt_parent_offset(gd->fdt_blob, phy_node);

		/* TODO: This is a workaround for an mdio issue. U-Boot 2017
		 * has no mdio driver, and on the MACHIATOBin board the
		 * ports on CP1 are connected to the mdio on CP0.
		 * The workaround is to derive the mdio address from the
		 * base address of the phy handle's parent. It should be
		 * removed once an mdio driver is implemented.
		 */
		mdio_addr = fdtdec_get_uint(gd->fdt_blob,
					    mdio_off, "reg", 0);

		cp_node = fdt_parent_offset(gd->fdt_blob, mdio_off);
		mdio_addr |= fdt_get_base_address((void *)gd->fdt_blob,
						  cp_node);

		port->priv->mdio_base = (void *)mdio_addr;

		if (!port->priv->mdio_base) {
			dev_err(&pdev->dev, "could not find mdio base address\n");
			return -1;
		}
	} else {
		phy_node = 0;
	}

	phy_mode_str = fdt_getprop(gd->fdt_blob, port_node, "phy-mode", NULL);
	if (phy_mode_str)
		phy_mode = phy_get_interface_by_name(phy_mode_str);
	if (phy_mode == -1) {
		dev_err(&pdev->dev, "incorrect phy mode\n");
		return -EINVAL;
	}

	id = fdtdec_get_int(gd->fdt_blob, port_node, "port-id", -1);
	if (id == -1) {
		dev_err(&pdev->dev, "missing port-id value\n");
		return -EINVAL;
	}

#ifdef CONFIG_DM_GPIO
	gpio_request_by_name(dev, "phy-reset-gpios", 0,
			     &port->phy_reset_gpio, GPIOD_IS_OUT);
	gpio_request_by_name(dev, "marvell,sfp-tx-disable-gpio", 0,
			     &port->phy_tx_disable_gpio, GPIOD_IS_OUT);
#endif

	/*
	 * ToDo:
	 * Not sure if this DT property "phy-speed" will get accepted, so
	 * this might change later
	 */
	/* Get phy-speed for SGMII 2.5Gbps vs 1Gbps setup */
	port->phy_speed = fdtdec_get_int(gd->fdt_blob, port_node,
					 "phy-speed", 1000);

	port->id = id;
	if (port->priv->hw_version == MVPP21)
		port->first_rxq = port->id * rxq_number;
	else
		port->first_rxq = port->id * port->priv->max_port_rxqs;
	port->phy_node = phy_node;
	port->phy_interface = phy_mode;
	port->phyaddr = phyaddr;

	return 0;
}

#ifdef CONFIG_DM_GPIO
/* Port GPIO initialization */
static void mvpp2_gpio_init(struct mvpp2_port *port)
{
	if (dm_gpio_is_valid(&port->phy_reset_gpio)) {
		dm_gpio_set_value(&port->phy_reset_gpio, 1);
		mdelay(10);
		dm_gpio_set_value(&port->phy_reset_gpio, 0);
	}

	if (dm_gpio_is_valid(&port->phy_tx_disable_gpio))
		dm_gpio_set_value(&port->phy_tx_disable_gpio, 0);
}
#endif

/* Ports initialization */
static int mvpp2_port_probe(struct udevice *dev,
			    struct mvpp2_port *port,
			    int port_node,
			    struct mvpp2 *priv)
{
	int err;

	port->tx_ring_size = MVPP2_MAX_TXD;
	port->rx_ring_size = MVPP2_MAX_RXD;

	err = mvpp2_port_init(dev, port);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to init port %d\n", port->id);
		return err;
	}
	mvpp2_port_power_up(port);

#ifdef CONFIG_DM_GPIO
	mvpp2_gpio_init(port);
#endif

	priv->port_list[port->id] = port;
	priv->num_ports++;
	return 0;
}

/* Initialize decoding windows */
static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
				    struct mvpp2 *priv)
{
	u32 win_enable;
	int i;

	for (i = 0; i < 6; i++) {
		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);

		if (i < 4)
			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
	}

	win_enable = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvpp2_write(priv, MVPP2_WIN_BASE(i),
			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
			    dram->mbus_dram_target_id);

		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable |= (1 << i);
	}

	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
}
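
/*
 * Example of the window-size encoding above (illustrative): only the
 * upper 16 bits of (size - 1) are kept, so a 256 MiB chip select is
 * programmed as (0x10000000 - 1) & 0xffff0000 = 0x0fff0000, giving the
 * windows a 64 KiB granularity.
 */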
4864 
4865 /* Initialize Rx FIFO's */
mvpp2_rx_fifo_init(struct mvpp2 * priv)4866 static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
{
	int port;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		if (priv->hw_version == MVPP22) {
			if (port == 0) {
				mvpp2_write(priv,
					    MVPP2_RX_DATA_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_10GB_PORT_DATA_SIZE);
				mvpp2_write(priv,
					    MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_10GB_PORT_ATTR_SIZE);
			} else if (port == 1) {
				mvpp2_write(priv,
					    MVPP2_RX_DATA_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_2_5GB_PORT_DATA_SIZE);
				mvpp2_write(priv,
					    MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_2_5GB_PORT_ATTR_SIZE);
			} else {
				mvpp2_write(priv,
					    MVPP2_RX_DATA_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_1GB_PORT_DATA_SIZE);
				mvpp2_write(priv,
					    MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
					    MVPP22_RX_FIFO_1GB_PORT_ATTR_SIZE);
			}
		} else {
			mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
				    MVPP21_RX_FIFO_PORT_DATA_SIZE);
			mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
				    MVPP21_RX_FIFO_PORT_ATTR_SIZE);
		}
	}

	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
		    MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
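
/*
 * On PPv2.2 the RX FIFO is a shared resource carved up by port speed:
 * port 0 (the only potentially 10G-capable port) gets the largest
 * data/attribute slices, port 1 (up to 2.5G) a medium slice, and the
 * remaining 1G ports the smallest. PPv2.1 uses a single fixed size for
 * all ports.
 */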

/* Initialize Tx FIFOs */
static void mvpp2_tx_fifo_init(struct mvpp2 *priv)
{
	int port, val;

	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
		/* Port 0 supports 10KB TX FIFO */
		if (port == 0) {
			val = MVPP2_TX_FIFO_DATA_SIZE_10KB &
				MVPP22_TX_FIFO_SIZE_MASK;
		} else {
			val = MVPP2_TX_FIFO_DATA_SIZE_3KB &
				MVPP22_TX_FIFO_SIZE_MASK;
		}
		mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), val);
	}
}
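
/*
 * TX FIFO sizing mirrors the RX side: only port 0 can run at 10G, so
 * it is assigned the 10KB TX FIFO, while all other ports get 3KB.
 * This function is only called for PPv2.2 (see mvpp2_init() below),
 * matching the MVPP22-prefixed register names.
 */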

static void mvpp2_axi_init(struct mvpp2 *priv)
{
	u32 val, rdval, wrval;

	mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);

	/* AXI Bridge Configuration */

	rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_ATTR_CACHE_OFFS;
	wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;

	/* BM */
	mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);

	/* Descriptors */
	mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);

	/* Buffer Data */
	mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
	mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);

	val = MVPP22_AXI_CODE_CACHE_NON_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
	mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
	mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_RD_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);

	val = MVPP22_AXI_CODE_CACHE_WR_CACHE
		<< MVPP22_AXI_CODE_CACHE_OFFS;
	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
		<< MVPP22_AXI_CODE_DOMAIN_OFFS;

	mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
}
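
/*
 * Summary of the AXI setup above: BM, descriptor and packet-buffer
 * transactions are tagged cacheable in the outer-shareable domain so
 * they can be snooped, the "normal" (non-snooped) code path is tagged
 * non-cacheable in the system domain, and the snoop code paths reuse
 * the same cacheable/outer-domain encoding for reads and writes.
 */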

/* Initialize network controller common part HW */
static int mvpp2_init(struct udevice *dev, struct mvpp2 *priv)
{
	const struct mbus_dram_target_info *dram_target_info;
	int err, i;
	u32 val;

	/* Checks for hardware constraints (U-Boot uses only one rxq) */
	if ((rxq_number > priv->max_port_rxqs) ||
	    (txq_number > MVPP2_MAX_TXQ)) {
		dev_err(dev, "invalid queue size parameter\n");
		return -EINVAL;
	}

	if (priv->hw_version == MVPP22) {
		mvpp2_axi_init(priv);
	} else {
		/* MBUS windows configuration */
		dram_target_info = mvebu_mbus_dram_info();
		if (dram_target_info)
			mvpp2_conf_mbus_windows(dram_target_info, priv);
	}

	if (priv->hw_version == MVPP21) {
		/* Disable HW PHY polling */
		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
	} else {
		/* Enable HW PHY polling */
		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
		val |= MVPP22_SMI_POLLING_EN;
		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
	}

	/* Allocate and initialize aggregated TXQs */
	priv->aggr_txqs = devm_kcalloc(dev, num_present_cpus(),
				       sizeof(struct mvpp2_tx_queue),
				       GFP_KERNEL);
	if (!priv->aggr_txqs)
		return -ENOMEM;

	for_each_present_cpu(i) {
		priv->aggr_txqs[i].id = i;
		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
		err = mvpp2_aggr_txq_init(dev, &priv->aggr_txqs[i],
					  MVPP2_AGGR_TXQ_SIZE, i, priv);
		if (err < 0)
			return err;
	}

	/* Rx Fifo Init */
	mvpp2_rx_fifo_init(priv);

	/* Tx Fifo Init */
	if (priv->hw_version == MVPP22)
		mvpp2_tx_fifo_init(priv);

	if (priv->hw_version == MVPP21)
		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);

	/* Allow cache snoop when transmitting packets */
	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);

	/* Buffer Manager initialization */
	err = mvpp2_bm_init(dev, priv);
	if (err < 0)
		return err;

	/* Parser default initialization */
	err = mvpp2_prs_default_init(dev, priv);
	if (err < 0)
		return err;

	/* Classifier default initialization */
	mvpp2_cls_init(priv);

	return 0;
}
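
/*
 * Note the ordering above: address decoding (MBUS windows on PPv2.1,
 * AXI attributes on PPv2.2) is configured before any DMA-capable block
 * is touched, the aggregated TXQs are allocated before the FIFOs are
 * sized, and the Buffer Manager is brought up before the parser and
 * classifier defaults are programmed.
 */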

/* SMI / MDIO functions */

static int smi_wait_ready(struct mvpp2 *priv)
{
	u32 timeout = MVPP2_SMI_TIMEOUT;
	u32 smi_reg;

	/* wait till the SMI is not busy */
	do {
		/* read smi register */
		smi_reg = readl(priv->mdio_base);
		if (timeout-- == 0) {
			printf("Error: SMI busy timeout\n");
			return -EFAULT;
		}
	} while (smi_reg & MVPP2_SMI_BUSY);

	return 0;
}

/*
 * mpp2_mdio_read - miiphy_read callback function.
 *
 * Returns the 16-bit PHY register value, or a negative error code
 * (-EFAULT) on failure
 */
static int mpp2_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
	struct mvpp2 *priv = bus->priv;
	u32 smi_reg;
	u32 timeout;

	/* check parameters */
	if (addr > MVPP2_PHY_ADDR_MASK) {
		printf("Error: Invalid PHY address %d\n", addr);
		return -EFAULT;
	}

	if (reg > MVPP2_PHY_REG_MASK) {
		printf("Error: Invalid register offset %d\n", reg);
		return -EFAULT;
	}

	/* wait till the SMI is not busy */
	if (smi_wait_ready(priv) < 0)
		return -EFAULT;

	/* fill the phy address and register offset and read opcode */
	smi_reg = (addr << MVPP2_SMI_DEV_ADDR_OFFS)
		| (reg << MVPP2_SMI_REG_ADDR_OFFS)
		| MVPP2_SMI_OPCODE_READ;

	/* write the smi register */
	writel(smi_reg, priv->mdio_base);

	/* wait till read value is ready */
	timeout = MVPP2_SMI_TIMEOUT;

	do {
		/* read smi register */
		smi_reg = readl(priv->mdio_base);
		if (timeout-- == 0) {
			printf("Error: SMI read ready timeout\n");
			return -EFAULT;
		}
	} while (!(smi_reg & MVPP2_SMI_READ_VALID));

	/* Crude delay loop: wait for the data to settle in the SMI register */
	for (timeout = 0; timeout < MVPP2_SMI_TIMEOUT; timeout++)
		;

	return readl(priv->mdio_base) & MVPP2_SMI_DATA_MASK;
}
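
/*
 * Shape of the SMI command word used above (field positions come from
 * the MVPP2_SMI_* defines): the PHY address and register offset are
 * shifted into their respective fields and the read opcode bit is set;
 * once MVPP2_SMI_READ_VALID is observed, the low 16 bits (masked with
 * MVPP2_SMI_DATA_MASK) hold the register value. A write instead places
 * the 16-bit value at MVPP2_SMI_DATA_OFFS and clears the read opcode.
 */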

/*
 * mpp2_mdio_write - miiphy_write callback function.
 *
 * Returns 0 if the write succeeded, or a negative error code
 * (-EFAULT) on bad parameters or SMI timeout
 */
static int mpp2_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
			   u16 value)
{
	struct mvpp2 *priv = bus->priv;
	u32 smi_reg;

	/* check parameters */
	if (addr > MVPP2_PHY_ADDR_MASK) {
		printf("Error: Invalid PHY address %d\n", addr);
		return -EFAULT;
	}

	if (reg > MVPP2_PHY_REG_MASK) {
		printf("Error: Invalid register offset %d\n", reg);
		return -EFAULT;
	}

	/* wait till the SMI is not busy */
	if (smi_wait_ready(priv) < 0)
		return -EFAULT;

	/* fill the phy addr and reg offset and write opcode and data */
	smi_reg = value << MVPP2_SMI_DATA_OFFS;
	smi_reg |= (addr << MVPP2_SMI_DEV_ADDR_OFFS)
		| (reg << MVPP2_SMI_REG_ADDR_OFFS);
	smi_reg &= ~MVPP2_SMI_OPCODE_READ;

	/* write the smi register */
	writel(smi_reg, priv->mdio_base);

	return 0;
}
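
/*
 * These callbacks are exercised through U-Boot's MII framework once
 * the bus is registered in mvpp2_base_probe() below, e.g. by the
 * "mii"/"mdio" shell commands or by the PHY library via phy_connect().
 * The devad argument is unused here: this is a plain clause-22 SMI
 * access.
 */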

static int mvpp2_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2_rx_desc *rx_desc;
	struct mvpp2_bm_pool *bm_pool;
	dma_addr_t dma_addr;
	u32 bm, rx_status;
	int pool, rx_bytes, err;
	int rx_received;
	struct mvpp2_rx_queue *rxq;
	u8 *data;

	/* Process RX packets */
	rxq = port->rxqs[0];

	/* Get number of received packets and clamp the to-do */
	rx_received = mvpp2_rxq_received(port, rxq->id);

	/* Return if no packets are received */
	if (!rx_received)
		return 0;

	rx_desc = mvpp2_rxq_next_desc_get(rxq);
	rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
	rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
	rx_bytes -= MVPP2_MH_SIZE;
	dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);

	bm = mvpp2_bm_cookie_build(port, rx_desc);
	pool = mvpp2_bm_cookie_pool_get(bm);
	bm_pool = &port->priv->bm_pools[pool];

	/* In case of an error, release the requested buffer pointer
	 * to the Buffer Manager. This request process is controlled
	 * by the hardware, and the information about the buffer is
	 * contained in the RX descriptor.
	 */
	if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
		mvpp2_rx_error(port, rx_desc);
		/* Return the buffer to the pool */
		mvpp2_pool_refill(port, bm, dma_addr, dma_addr);
		return 0;
	}

	err = mvpp2_rx_refill(port, bm_pool, bm, dma_addr);
	if (err) {
		netdev_err(port->dev, "failed to refill BM pools\n");
		return 0;
	}

	/* Update Rx queue management counters */
	mb();
	mvpp2_rxq_status_update(port, rxq->id, 1, 1);

	/* Give packet to stack - skip the Marvell header (2 bytes) and
	 * the extra headroom reserved for cache prefetch (32 bytes)
	 */
	data = (u8 *)dma_addr + 2 + 32;

	if (rx_bytes <= 0)
		return 0;

	/*
	 * No cache invalidation needed here, since the rx buffers are
	 * located in an uncached memory region
	 */
	*packetp = data;

	return rx_bytes;
}
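
/*
 * Receive flow in a nutshell: the occupied-descriptor counter tells us
 * whether anything arrived, exactly one descriptor is consumed per
 * call (the U-Boot net loop handles one packet at a time), the buffer
 * is handed straight back to its BM pool via mvpp2_rx_refill(), and
 * mvpp2_rxq_status_update() acknowledges one processed descriptor so
 * the hardware can reuse the slot.
 */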

static int mvpp2_send(struct udevice *dev, void *packet, int length)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2_tx_queue *txq, *aggr_txq;
	struct mvpp2_tx_desc *tx_desc;
	int tx_done;
	int timeout;

	txq = port->txqs[0];
	aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
	mvpp2_txdesc_size_set(port, tx_desc, length);
	mvpp2_txdesc_offset_set(port, tx_desc,
				(dma_addr_t)packet & MVPP2_TX_DESC_ALIGN);
	mvpp2_txdesc_dma_addr_set(port, tx_desc,
				  (dma_addr_t)packet & ~MVPP2_TX_DESC_ALIGN);
	/* First and Last descriptor */
	mvpp2_txdesc_cmd_set(port, tx_desc,
			     MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE
			     | MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC);

	/* Flush tx data */
	flush_dcache_range((unsigned long)packet,
			   (unsigned long)packet + ALIGN(length, PKTALIGN));

	/* Enable transmit */
	mb();
	mvpp2_aggr_txq_pend_desc_add(port, 1);

	mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);

	timeout = 0;
	do {
		if (timeout++ > 10000) {
			printf("timeout: packet not sent from aggregated to phys TXQ\n");
			return 0;
		}
		tx_done = mvpp2_txq_pend_desc_num_get(port, txq);
	} while (tx_done);

	timeout = 0;
	do {
		if (timeout++ > 10000) {
			printf("timeout: packet not sent\n");
			return 0;
		}
		tx_done = mvpp2_txq_sent_desc_proc(port, txq);
	} while (!tx_done);

	return 0;
}
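
/*
 * The transmit path deliberately polls twice: first until the physical
 * TXQ reports no pending descriptors (the frame has moved from the
 * aggregated TXQ through the per-port queue), then until the
 * sent-descriptor counter confirms the frame left the MAC. Only after
 * both is it safe for the caller to reuse the packet buffer.
 */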

static int mvpp2_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct mvpp2_port *port = dev_get_priv(dev);

	/* Load current MAC address */
	memcpy(port->dev_addr, pdata->enetaddr, ETH_ALEN);

	/* Reconfigure parser to accept the original MAC address */
	mvpp2_prs_update_mac_da(port, port->dev_addr);

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_SGMII:
		mvpp2_port_power_up(port);
		/* fall through */
	default:
		break;
	}

	mvpp2_open(dev, port);

	return 0;
}

static void mvpp2_stop(struct udevice *dev)
{
	struct mvpp2_port *port = dev_get_priv(dev);

	mvpp2_stop_dev(port);
	mvpp2_cleanup_rxqs(port);
	mvpp2_cleanup_txqs(port);
}

static int mvpp22_smi_phy_addr_cfg(struct mvpp2_port *port)
{
	writel(port->phyaddr, port->priv->iface_base +
	       MVPP22_SMI_PHY_ADDR_REG(port->gop_id));

	return 0;
}

static int mvpp2_base_probe(struct udevice *dev)
{
	struct mvpp2 *priv = dev_get_priv(dev);
	struct mii_dev *bus;
	void *bd_space;
	u32 size = 0;
	int i;

	/* Save hw-version */
	priv->hw_version = dev_get_driver_data(dev);

	/*
	 * U-Boot special buffer handling:
	 *
	 * Allocate the buffer area for descriptors and rx_buffers. This
	 * is done only once for all interfaces, since only one interface
	 * can be active at a time. Make this area DMA-safe by disabling
	 * the D-cache
	 */

	/* Align buffer area for descs and rx_buffers to 1MiB */
	bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
	mmu_set_region_dcache_behaviour((unsigned long)bd_space,
					BD_SPACE, DCACHE_OFF);

	buffer_loc.aggr_tx_descs = (struct mvpp2_tx_desc *)bd_space;
	size += MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE;

	buffer_loc.tx_descs =
		(struct mvpp2_tx_desc *)((unsigned long)bd_space + size);
	size += MVPP2_MAX_TXD * MVPP2_DESC_ALIGNED_SIZE;

	buffer_loc.rx_descs =
		(struct mvpp2_rx_desc *)((unsigned long)bd_space + size);
	size += MVPP2_MAX_RXD * MVPP2_DESC_ALIGNED_SIZE;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		buffer_loc.bm_pool[i] =
			(unsigned long *)((unsigned long)bd_space + size);
		if (priv->hw_version == MVPP21)
			size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u32);
		else
			size += MVPP2_BM_POOL_SIZE_MAX * 2 * sizeof(u64);
	}

	for (i = 0; i < MVPP2_BM_LONG_BUF_NUM; i++) {
		buffer_loc.rx_buffer[i] =
			(unsigned long *)((unsigned long)bd_space + size);
		size += RX_BUFFER_SIZE;
	}

	/* Clear the complete area so that all descriptors are cleared */
	memset(bd_space, 0, size);

	/* Save base addresses for later use */
	priv->base = (void *)devfdt_get_addr_index(dev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	if (priv->hw_version == MVPP21) {
		priv->lms_base = (void *)devfdt_get_addr_index(dev, 1);
		if (IS_ERR(priv->lms_base))
			return PTR_ERR(priv->lms_base);

		priv->mdio_base = priv->lms_base + MVPP21_SMI;
	} else {
		priv->iface_base = (void *)devfdt_get_addr_index(dev, 1);
		if (IS_ERR(priv->iface_base))
			return PTR_ERR(priv->iface_base);

		priv->mdio_base = priv->iface_base + MVPP22_SMI;

		/* Store common base addresses for all ports */
		priv->mpcs_base = priv->iface_base + MVPP22_MPCS;
		priv->xpcs_base = priv->iface_base + MVPP22_XPCS;
		priv->rfu1_base = priv->iface_base + MVPP22_RFU1;
	}

	if (priv->hw_version == MVPP21)
		priv->max_port_rxqs = 8;
	else
		priv->max_port_rxqs = 32;

	/* Finally create and register the MDIO bus driver */
	bus = mdio_alloc();
	if (!bus) {
		printf("Failed to allocate MDIO bus\n");
		return -ENOMEM;
	}

	bus->read = mpp2_mdio_read;
	bus->write = mpp2_mdio_write;
	snprintf(bus->name, sizeof(bus->name), "%s", dev->name);
	bus->priv = (void *)priv;
	priv->bus = bus;

	return mdio_register(bus);
}
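
/*
 * Layout of the uncached bd_space area built up above (offsets are
 * cumulative; actual sizes depend on the MVPP2_* defines):
 *
 *   [aggr TX descs][TX descs][RX descs][BM pool 0..N][RX buf 0..M]
 *
 * Everything is carved out of a single 1MiB-aligned memalign() block
 * so that one mmu_set_region_dcache_behaviour() call makes the whole
 * area DMA-safe.
 */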

static int mvpp2_probe(struct udevice *dev)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2 *priv = dev_get_priv(dev->parent);
	int err;

	/* Only call the probe function for the parent once */
	if (!priv->probe_done) {
		err = mvpp2_base_probe(dev->parent);
		if (err)
			return err;
	}

	port->priv = dev_get_priv(dev->parent);

	err = phy_info_parse(dev, port);
	if (err)
		return err;

	/*
	 * We need the port specific io base addresses at this stage, since
	 * gop_port_init() accesses these registers
	 */
	if (priv->hw_version == MVPP21) {
		int priv_common_regs_num = 2;

		port->base = (void __iomem *)devfdt_get_addr_index(
			dev->parent, priv_common_regs_num + port->id);
		if (IS_ERR(port->base))
			return PTR_ERR(port->base);
	} else {
		port->gop_id = fdtdec_get_int(gd->fdt_blob, dev_of_offset(dev),
					      "gop-port-id", -1);
		if (port->gop_id == -1) {
			dev_err(dev, "missing gop-port-id value\n");
			return -EINVAL;
		}

		port->base = priv->iface_base + MVPP22_PORT_BASE +
			port->gop_id * MVPP22_PORT_OFFSET;

		/* Set phy address of the port */
		if (port->phy_node)
			mvpp22_smi_phy_addr_cfg(port);

		/* GoP Init */
		gop_port_init(port);
	}

	if (!priv->probe_done) {
		/* Initialize network controller */
		err = mvpp2_init(dev, priv);
		if (err < 0) {
			dev_err(dev, "failed to initialize controller\n");
			return err;
		}
		priv->num_ports = 0;
		priv->probe_done = 1;
	}

	err = mvpp2_port_probe(dev, port, dev_of_offset(dev), priv);
	if (err)
		return err;

	if (priv->hw_version == MVPP22) {
		priv->netc_config |= mvpp2_netc_cfg_create(port->gop_id,
							   port->phy_interface);

		/* Netcomplex configurations for all ports */
		gop_netc_init(priv, MV_NETC_FIRST_PHASE);
		gop_netc_init(priv, MV_NETC_SECOND_PHASE);
	}

	return 0;
}
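
/*
 * Probe is effectively two-phase: the first child device to probe also
 * runs mvpp2_base_probe() and mvpp2_init() for the shared controller
 * (guarded by the probe_done flag), after which every child performs
 * only its per-port setup. On PPv2.2, gop_netc_init() is re-run for
 * each port so the accumulated netc_config covers all ports probed so
 * far.
 */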

/*
 * Empty BM pool and stop its activity before the OS is started
 */
static int mvpp2_remove(struct udevice *dev)
{
	struct mvpp2_port *port = dev_get_priv(dev);
	struct mvpp2 *priv = port->priv;
	int i;

	priv->num_ports--;

	if (priv->num_ports)
		return 0;

	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++)
		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);

	return 0;
}

static const struct eth_ops mvpp2_ops = {
	.start		= mvpp2_start,
	.send		= mvpp2_send,
	.recv		= mvpp2_recv,
	.stop		= mvpp2_stop,
};

static struct driver mvpp2_driver = {
	.name	= "mvpp2",
	.id	= UCLASS_ETH,
	.probe	= mvpp2_probe,
	.remove = mvpp2_remove,
	.ops	= &mvpp2_ops,
	.priv_auto_alloc_size = sizeof(struct mvpp2_port),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
	.flags	= DM_FLAG_ACTIVE_DMA,
};

/*
 * Use a MISC device to bind the n instances (child nodes) of the
 * network base controller in UCLASS_ETH.
 */
static int mvpp2_base_bind(struct udevice *parent)
{
	const void *blob = gd->fdt_blob;
	int node = dev_of_offset(parent);
	struct uclass_driver *drv;
	struct udevice *dev;
	struct eth_pdata *plat;
	char *name;
	int subnode;
	u32 id;
	int base_id_add;

	/* Lookup eth driver */
	drv = lists_uclass_lookup(UCLASS_ETH);
	if (!drv) {
		puts("Cannot find eth driver\n");
		return -ENOENT;
	}

	base_id_add = base_id;

	fdt_for_each_subnode(subnode, blob, node) {
		/* Increment base_id for all subnodes, also the disabled ones */
		base_id++;

		/* Skip disabled ports */
		if (!fdtdec_get_is_enabled(blob, subnode))
			continue;

		plat = calloc(1, sizeof(*plat));
		if (!plat)
			return -ENOMEM;

		id = fdtdec_get_int(blob, subnode, "port-id", -1);
		id += base_id_add;

		name = calloc(1, 16);
		if (!name) {
			free(plat);
			return -ENOMEM;
		}
		sprintf(name, "mvpp2-%d", id);

		/* Create child device UCLASS_ETH and bind it */
		device_bind(parent, &mvpp2_driver, name, plat, subnode, &dev);
		dev_set_of_offset(dev, subnode);
	}

	return 0;
}
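
/*
 * Example: with two enabled port subnodes carrying port-id 0 and 1 on
 * the first controller, this bind creates UCLASS_ETH devices named
 * "mvpp2-0" and "mvpp2-1". The global base_id keeps incrementing
 * across controller instances, so names remain unique when a second
 * PPv2 block is present.
 */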

static const struct udevice_id mvpp2_ids[] = {
	{
		.compatible = "marvell,armada-375-pp2",
		.data = MVPP21,
	},
	{
		.compatible = "marvell,armada-7k-pp22",
		.data = MVPP22,
	},
	{ }
};

U_BOOT_DRIVER(mvpp2_base) = {
	.name	= "mvpp2_base",
	.id	= UCLASS_MISC,
	.of_match = mvpp2_ids,
	.bind	= mvpp2_base_bind,
	.priv_auto_alloc_size = sizeof(struct mvpp2),
};