1 /* $OpenBSD: if_mvpp.c,v 1.53 2024/05/13 01:15:50 jsg Exp $ */
2 /*
3 * Copyright (c) 2008, 2019 Mark Kettenis <kettenis@openbsd.org>
4 * Copyright (c) 2017, 2020 Patrick Wildt <patrick@blueri.se>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18 /*
19 * Copyright (C) 2016 Marvell International Ltd.
20 *
21 * Marvell BSD License Option
22 *
23 * If you received this File from Marvell, you may opt to use, redistribute
24 * and/or modify this File under the following licensing terms.
25 * Redistribution and use in source and binary forms, with or without
26 * modification, are permitted provided that the following conditions are met:
27 *
28 * * Redistributions of source code must retain the above copyright notice,
29 * this list of conditions and the following disclaimer.
30 *
31 * * Redistributions in binary form must reproduce the above copyright
32 * notice, this list of conditions and the following disclaimer in the
33 * documentation and/or other materials provided with the distribution.
34 *
35 * * Neither the name of Marvell nor the names of its contributors may be
36 * used to endorse or promote products derived from this software without
37 * specific prior written permission.
38 *
39 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
40 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
41 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
42 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
43 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
44 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
45 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
46 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
47 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
48 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
49 * POSSIBILITY OF SUCH DAMAGE.
50 */
51
52 #include "bpfilter.h"
53
54 #include <sys/param.h>
55 #include <sys/systm.h>
56 #include <sys/device.h>
57 #include <sys/kernel.h>
58 #include <sys/malloc.h>
59 #include <sys/mbuf.h>
60 #include <sys/queue.h>
61 #include <sys/socket.h>
62 #include <sys/sockio.h>
63 #include <sys/timeout.h>
64
65 #include <uvm/uvm_extern.h>
66
67 #include <machine/cpufunc.h>
68 #include <machine/bus.h>
69 #include <machine/fdt.h>
70
71 #include <net/if.h>
72 #include <net/if_media.h>
73 #include <net/ppp_defs.h>
74
75 #include <dev/ofw/openfirm.h>
76 #include <dev/ofw/ofw_clock.h>
77 #include <dev/ofw/ofw_gpio.h>
78 #include <dev/ofw/ofw_misc.h>
79 #include <dev/ofw/ofw_pinctrl.h>
80 #include <dev/ofw/ofw_regulator.h>
81 #include <dev/ofw/fdt.h>
82
83 #include <dev/mii/mii.h>
84 #include <dev/mii/miivar.h>
85
86 #if NBPFILTER > 0
87 #include <net/bpf.h>
88 #endif
89
90 #include <netinet/in.h>
91 #include <netinet/ip.h>
92 #include <netinet/if_ether.h>
93
94 #include <netinet6/in6_var.h>
95 #include <netinet/ip6.h>
96
97 #include <dev/fdt/if_mvppreg.h>
98
/*
 * One DMA-mapped packet buffer; mb_m is NULL while the slot holds
 * no mbuf.
 */
struct mvpp2_buf {
	bus_dmamap_t		 mb_map;
	struct mbuf		*mb_m;
};

/* Ring geometry: descriptors per queue, max DMA segments per tx packet. */
#define MVPP2_NTXDESC	512
#define MVPP2_NTXSEGS	16
#define MVPP2_NRXDESC	512
107
/*
 * Software state for one hardware buffer manager (BM) pool.  The
 * freelist ring hands out (pool id, rxbuf index) cookies that stand
 * in for the "virtual" buffer pointers given to the hardware.
 */
struct mvpp2_bm_pool {
	struct mvpp2_dmamem	*bm_mem;	/* BM pointer ring memory */
	struct mvpp2_buf	*rxbuf;		/* MVPP2_BM_SIZE buffers */
	uint32_t		*freelist;	/* cookie ring; -1 = in use */
	int			 free_prod;
	int			 free_cons;
};

#define MVPP2_BM_SIZE		64	/* buffers per pool */
#define MVPP2_BM_POOL_PTR_ALIGN	128	/* alignment of the pointer ring */
#define MVPP2_BM_POOLS_NUM	8	/* pools implemented in hardware */
#define MVPP2_BM_ALIGN		32
120
/*
 * Software state for one transmit queue; used for both per-port
 * queues and the aggregated tx queue(s).
 */
struct mvpp2_tx_queue {
	uint8_t			 id;		/* physical queue id */
	uint8_t			 log_id;	/* per-port logical id */
	struct mvpp2_dmamem	*ring;		/* descriptor ring memory */
	struct mvpp2_buf	*buf;		/* per-descriptor mbuf state */
	struct mvpp2_tx_desc	*descs;		/* KVA of the ring */
	int			 prod;
	int			 cons;

	uint32_t		 done_pkts_coal; /* tx-done packet coalescing */
};
132
/* Software state for one receive queue. */
struct mvpp2_rx_queue {
	uint8_t			 id;		/* physical queue id */
	struct mvpp2_dmamem	*ring;		/* descriptor ring memory */
	struct mvpp2_rx_desc	*descs;		/* KVA of the ring */
	int			 prod;
	struct if_rxring	 rxring;
	int			 cons;

	uint32_t		 pkts_coal;	/* rx packet coalescing */
	uint32_t		 time_coal;	/* rx time coalescing */
};
144
/* A bus_dma allocation: map, backing segment, size and mapped KVA. */
struct mvpp2_dmamem {
	bus_dmamap_t		 mdm_map;
	bus_dma_segment_t	 mdm_seg;
	size_t			 mdm_size;
	caddr_t			 mdm_kva;
};
/* Accessors for map, length, device (DMA) address and kernel address. */
#define MVPP2_DMA_MAP(_mdm)	((_mdm)->mdm_map)
#define MVPP2_DMA_LEN(_mdm)	((_mdm)->mdm_size)
#define MVPP2_DMA_DVA(_mdm)	((_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MVPP2_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
155
struct mvpp2_port;

/*
 * Softc of the packet processor itself (the "mvppc" device); the
 * per-port "mvpp" network devices attach below it.
 */
struct mvpp2_softc {
	struct device		sc_dev;
	int			sc_node;	/* FDT node */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh_base;	/* packet processor regs */
	bus_space_handle_t	sc_ioh_iface;	/* interface (GOP) regs */
	paddr_t			sc_ioh_paddr;	/* PA of the base regs */
	bus_size_t		sc_iosize_base;
	bus_size_t		sc_iosize_iface;
	bus_dma_tag_t		sc_dmat;
	struct regmap		*sc_rm;		/* system-controller regmap */

	uint32_t		sc_tclk;	/* "pp_clk" frequency in Hz */

	struct mvpp2_bm_pool	*sc_bm_pools;	/* one per CPU (capped) */
	int			sc_npools;

	struct mvpp2_prs_shadow	*sc_prs_shadow;	/* parser TCAM shadow */
	uint8_t			*sc_prs_double_vlans;

	int			sc_aggr_ntxq;	/* number of aggregated txqs */
	struct mvpp2_tx_queue	*sc_aggr_txqs;

	struct mvpp2_port	**sc_ports;
};
182
/* Softc for one network port ("mvpp") hanging off the packet processor. */
struct mvpp2_port {
	struct device		sc_dev;
	struct mvpp2_softc	*sc;		/* parent mvppc softc */
	int			sc_node;	/* FDT node of the port */
	bus_dma_tag_t		sc_dmat;
	int			sc_id;		/* port index */
	int			sc_gop_id;	/* group-of-ports (GOP) index */

	struct arpcom		sc_ac;
#define sc_lladdr	sc_ac.ac_enaddr
	struct mii_data		sc_mii;
#define sc_media	sc_mii.mii_media
	struct mii_bus		*sc_mdio;

	/*
	 * PHY connection type; presumably parsed from the FDT
	 * "phy-mode" property in port attach (outside this chunk).
	 */
	enum {
		PHY_MODE_XAUI,
		PHY_MODE_10GBASER,
		PHY_MODE_2500BASEX,
		PHY_MODE_1000BASEX,
		PHY_MODE_SGMII,
		PHY_MODE_RGMII,
		PHY_MODE_RGMII_ID,
		PHY_MODE_RGMII_RXID,
		PHY_MODE_RGMII_TXID,
	} sc_phy_mode;
	int			sc_fixed_link;
	int			sc_inband_status; /* use in-band link status */
	int			sc_link;
	int			sc_phyloc;
	int			sc_sfp;		/* SFP handle, if any */

	int			sc_ntxq;
	int			sc_nrxq;

	struct mvpp2_tx_queue	*sc_txqs;
	struct mvpp2_rx_queue	*sc_rxqs;

	struct timeout		sc_tick;

	uint32_t		sc_tx_time_coal;
};

#define MVPP2_MAX_PORTS		4

/* Attach arguments handed from mvppc to each mvpp child. */
struct mvpp2_attach_args {
	int			ma_node;
	bus_dma_tag_t		ma_dmat;
};

#define DEVNAME(_s)	((_s)->sc_dev.dv_xname)
233
/* NOTE(review): presumably serializes SFF/SFP module access; confirm users. */
static struct rwlock mvpp2_sff_lock = RWLOCK_INITIALIZER("mvpp2sff");

int	mvpp2_match(struct device *, void *, void *);
void	mvpp2_attach(struct device *, struct device *, void *);
void	mvpp2_attach_deferred(struct device *);

/* Autoconf glue for the packet processor ("mvppc") pseudo-parent device. */
const struct cfattach mvppc_ca = {
	sizeof(struct mvpp2_softc), mvpp2_match, mvpp2_attach
};

struct cfdriver mvppc_cd = {
	NULL, "mvppc", DV_DULL
};

int	mvpp2_port_match(struct device *, void *, void *);
void	mvpp2_port_attach(struct device *, struct device *, void *);

/* Autoconf glue for the per-port network interface ("mvpp") devices. */
const struct cfattach mvpp_ca = {
	sizeof(struct mvpp2_port), mvpp2_port_match, mvpp2_port_attach
};

struct cfdriver mvpp_cd = {
	NULL, "mvpp", DV_IFNET
};
258
259 void mvpp2_port_attach_sfp(struct device *);
260
261 uint32_t mvpp2_read(struct mvpp2_softc *, bus_addr_t);
262 void mvpp2_write(struct mvpp2_softc *, bus_addr_t, uint32_t);
263 uint32_t mvpp2_gmac_read(struct mvpp2_port *, bus_addr_t);
264 void mvpp2_gmac_write(struct mvpp2_port *, bus_addr_t, uint32_t);
265 uint32_t mvpp2_xlg_read(struct mvpp2_port *, bus_addr_t);
266 void mvpp2_xlg_write(struct mvpp2_port *, bus_addr_t, uint32_t);
267 uint32_t mvpp2_xpcs_read(struct mvpp2_port *, bus_addr_t);
268 void mvpp2_xpcs_write(struct mvpp2_port *, bus_addr_t, uint32_t);
269 uint32_t mvpp2_mpcs_read(struct mvpp2_port *, bus_addr_t);
270 void mvpp2_mpcs_write(struct mvpp2_port *, bus_addr_t, uint32_t);
271
272 int mvpp2_ioctl(struct ifnet *, u_long, caddr_t);
273 void mvpp2_start(struct ifnet *);
274 int mvpp2_rxrinfo(struct mvpp2_port *, struct if_rxrinfo *);
275 void mvpp2_watchdog(struct ifnet *);
276
277 int mvpp2_media_change(struct ifnet *);
278 void mvpp2_media_status(struct ifnet *, struct ifmediareq *);
279
280 int mvpp2_mii_readreg(struct device *, int, int);
281 void mvpp2_mii_writereg(struct device *, int, int, int);
282 void mvpp2_mii_statchg(struct device *);
283 void mvpp2_inband_statchg(struct mvpp2_port *);
284 void mvpp2_port_change(struct mvpp2_port *);
285
286 void mvpp2_tick(void *);
287
288 int mvpp2_link_intr(void *);
289 int mvpp2_intr(void *);
290 void mvpp2_tx_proc(struct mvpp2_port *, uint8_t);
291 void mvpp2_txq_proc(struct mvpp2_port *, struct mvpp2_tx_queue *);
292 void mvpp2_rx_proc(struct mvpp2_port *, uint8_t);
293 void mvpp2_rxq_proc(struct mvpp2_port *, struct mvpp2_rx_queue *);
294 void mvpp2_rx_refill(struct mvpp2_port *);
295
296 void mvpp2_up(struct mvpp2_port *);
297 void mvpp2_down(struct mvpp2_port *);
298 void mvpp2_iff(struct mvpp2_port *);
299
300 void mvpp2_aggr_txq_hw_init(struct mvpp2_softc *, struct mvpp2_tx_queue *);
301 void mvpp2_txq_hw_init(struct mvpp2_port *, struct mvpp2_tx_queue *);
302 void mvpp2_rxq_hw_init(struct mvpp2_port *, struct mvpp2_rx_queue *);
303 void mvpp2_txq_hw_deinit(struct mvpp2_port *, struct mvpp2_tx_queue *);
304 void mvpp2_rxq_hw_drop(struct mvpp2_port *, struct mvpp2_rx_queue *);
305 void mvpp2_rxq_hw_deinit(struct mvpp2_port *, struct mvpp2_rx_queue *);
306 void mvpp2_rxq_long_pool_set(struct mvpp2_port *, int, int);
307 void mvpp2_rxq_short_pool_set(struct mvpp2_port *, int, int);
308
309 void mvpp2_mac_reset_assert(struct mvpp2_port *);
310 void mvpp2_pcs_reset_assert(struct mvpp2_port *);
311 void mvpp2_pcs_reset_deassert(struct mvpp2_port *);
312 void mvpp2_mac_config(struct mvpp2_port *);
313 void mvpp2_xlg_config(struct mvpp2_port *);
314 void mvpp2_gmac_config(struct mvpp2_port *);
315 void mvpp2_comphy_config(struct mvpp2_port *, int);
316 void mvpp2_gop_config(struct mvpp2_port *);
317 void mvpp2_gop_intr_mask(struct mvpp2_port *);
318 void mvpp2_gop_intr_unmask(struct mvpp2_port *);
319
320 struct mvpp2_dmamem *
321 mvpp2_dmamem_alloc(struct mvpp2_softc *, bus_size_t, bus_size_t);
322 void mvpp2_dmamem_free(struct mvpp2_softc *, struct mvpp2_dmamem *);
323 struct mbuf *mvpp2_alloc_mbuf(struct mvpp2_softc *, bus_dmamap_t);
324
325 void mvpp2_interrupts_enable(struct mvpp2_port *, int);
326 void mvpp2_interrupts_disable(struct mvpp2_port *, int);
327 int mvpp2_egress_port(struct mvpp2_port *);
328 int mvpp2_txq_phys(int, int);
329 void mvpp2_defaults_set(struct mvpp2_port *);
330 void mvpp2_ingress_enable(struct mvpp2_port *);
331 void mvpp2_ingress_disable(struct mvpp2_port *);
332 void mvpp2_egress_enable(struct mvpp2_port *);
333 void mvpp2_egress_disable(struct mvpp2_port *);
334 void mvpp2_port_enable(struct mvpp2_port *);
335 void mvpp2_port_disable(struct mvpp2_port *);
336 void mvpp2_rxq_status_update(struct mvpp2_port *, int, int, int);
337 int mvpp2_rxq_received(struct mvpp2_port *, int);
338 void mvpp2_rxq_offset_set(struct mvpp2_port *, int, int);
339 void mvpp2_txp_max_tx_size_set(struct mvpp2_port *);
340 void mvpp2_rx_pkts_coal_set(struct mvpp2_port *, struct mvpp2_rx_queue *,
341 uint32_t);
342 void mvpp2_tx_pkts_coal_set(struct mvpp2_port *, struct mvpp2_tx_queue *,
343 uint32_t);
344 void mvpp2_rx_time_coal_set(struct mvpp2_port *, struct mvpp2_rx_queue *,
345 uint32_t);
346 void mvpp2_tx_time_coal_set(struct mvpp2_port *, uint32_t);
347
348 void mvpp2_axi_config(struct mvpp2_softc *);
349 void mvpp2_bm_pool_init(struct mvpp2_softc *);
350 void mvpp2_rx_fifo_init(struct mvpp2_softc *);
351 void mvpp2_tx_fifo_init(struct mvpp2_softc *);
352 int mvpp2_prs_default_init(struct mvpp2_softc *);
353 void mvpp2_prs_hw_inv(struct mvpp2_softc *, int);
354 void mvpp2_prs_hw_port_init(struct mvpp2_softc *, int, int, int, int);
355 void mvpp2_prs_def_flow_init(struct mvpp2_softc *);
356 void mvpp2_prs_mh_init(struct mvpp2_softc *);
357 void mvpp2_prs_mac_init(struct mvpp2_softc *);
358 void mvpp2_prs_dsa_init(struct mvpp2_softc *);
359 int mvpp2_prs_etype_init(struct mvpp2_softc *);
360 int mvpp2_prs_vlan_init(struct mvpp2_softc *);
361 int mvpp2_prs_pppoe_init(struct mvpp2_softc *);
362 int mvpp2_prs_ip6_init(struct mvpp2_softc *);
363 int mvpp2_prs_ip4_init(struct mvpp2_softc *);
364 void mvpp2_prs_shadow_ri_set(struct mvpp2_softc *, int,
365 uint32_t, uint32_t);
366 void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *, uint32_t);
367 void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *, uint32_t, int);
368 void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *, uint32_t);
369 uint32_t mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *);
370 void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *, uint32_t,
371 uint8_t, uint8_t);
372 void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *, uint32_t,
373 uint8_t *, uint8_t *);
374 int mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *, int, uint16_t);
375 void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *, uint32_t, uint32_t);
376 int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *);
377 int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *);
378 void mvpp2_prs_tcam_data_word_get(struct mvpp2_prs_entry *, uint32_t,
379 uint32_t *, uint32_t *);
380 void mvpp2_prs_match_etype(struct mvpp2_prs_entry *, uint32_t, uint16_t);
381 int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *);
382 void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *, uint32_t, uint32_t);
383 void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *, uint32_t, uint32_t);
384 void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *, uint32_t, uint32_t);
385 void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *, uint32_t, uint32_t);
386 void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *, int, uint32_t);
387 void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *, uint32_t, int,
388 uint32_t);
389 void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *, uint32_t);
390 void mvpp2_prs_shadow_set(struct mvpp2_softc *, int, uint32_t);
391 int mvpp2_prs_hw_write(struct mvpp2_softc *, struct mvpp2_prs_entry *);
392 int mvpp2_prs_hw_read(struct mvpp2_softc *, struct mvpp2_prs_entry *, int);
393 int mvpp2_prs_flow_find(struct mvpp2_softc *, int);
394 int mvpp2_prs_tcam_first_free(struct mvpp2_softc *, uint8_t, uint8_t);
395 void mvpp2_prs_mac_drop_all_set(struct mvpp2_softc *, uint32_t, int);
396 void mvpp2_prs_mac_promisc_set(struct mvpp2_softc *, uint32_t, int, int);
397 void mvpp2_prs_dsa_tag_set(struct mvpp2_softc *, uint32_t, int, int, int);
398 void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2_softc *, uint32_t,
399 int, int, int);
400 struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2_softc *, uint16_t,
401 int);
402 int mvpp2_prs_vlan_add(struct mvpp2_softc *, uint16_t, int, uint32_t);
403 int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2_softc *);
404 struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2_softc *,
405 uint16_t, uint16_t);
406 int mvpp2_prs_double_vlan_add(struct mvpp2_softc *, uint16_t, uint16_t,
407 uint32_t);
408 int mvpp2_prs_ip4_proto(struct mvpp2_softc *, uint16_t, uint32_t, uint32_t);
409 int mvpp2_prs_ip4_cast(struct mvpp2_softc *, uint16_t);
410 int mvpp2_prs_ip6_proto(struct mvpp2_softc *, uint16_t, uint32_t, uint32_t);
411 int mvpp2_prs_ip6_cast(struct mvpp2_softc *, uint16_t);
412 int mvpp2_prs_mac_da_range_find(struct mvpp2_softc *, int, const uint8_t *,
413 uint8_t *, int);
414 int mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *, const uint8_t *,
415 uint8_t *);
416 int mvpp2_prs_mac_da_accept(struct mvpp2_port *, const uint8_t *, int);
417 void mvpp2_prs_mac_del_all(struct mvpp2_port *);
418 int mvpp2_prs_tag_mode_set(struct mvpp2_softc *, int, int);
419 int mvpp2_prs_def_flow(struct mvpp2_port *);
420 void mvpp2_cls_flow_write(struct mvpp2_softc *, struct mvpp2_cls_flow_entry *);
421 void mvpp2_cls_lookup_write(struct mvpp2_softc *, struct mvpp2_cls_lookup_entry *);
422 void mvpp2_cls_init(struct mvpp2_softc *);
423 void mvpp2_cls_port_config(struct mvpp2_port *);
424 void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *);
425
426 int
mvpp2_match(struct device * parent,void * cfdata,void * aux)427 mvpp2_match(struct device *parent, void *cfdata, void *aux)
428 {
429 struct fdt_attach_args *faa = aux;
430
431 return OF_is_compatible(faa->fa_node, "marvell,armada-7k-pp22");
432 }
433
/*
 * Map the two register windows (packet processor base and interface/
 * GOP), enable clocks and defer the rest of the bring-up until all
 * other devices have attached.
 */
void
mvpp2_attach(struct device *parent, struct device *self, void *aux)
{
	struct mvpp2_softc *sc = (void *)self;
	struct fdt_attach_args *faa = aux;

	/* Need both the base and the interface register ranges. */
	if (faa->fa_nreg < 2) {
		printf(": no registers\n");
		return;
	}

	sc->sc_node = faa->fa_node;
	sc->sc_iot = faa->fa_iot;
	sc->sc_dmat = faa->fa_dmat;

	if (bus_space_map(sc->sc_iot, faa->fa_reg[0].addr,
	    faa->fa_reg[0].size, 0, &sc->sc_ioh_base)) {
		printf(": can't map registers\n");
		return;
	}
	sc->sc_iosize_base = faa->fa_reg[0].size;

	/*
	 * Record the physical address of the base register window;
	 * the mmap cookie is masked down to a plain PA.
	 */
	sc->sc_ioh_paddr = bus_space_mmap(sc->sc_iot, faa->fa_reg[0].addr,
	    0, PROT_READ | PROT_WRITE, 0);
	KASSERT(sc->sc_ioh_paddr != -1);
	sc->sc_ioh_paddr &= PMAP_PA_MASK;

	if (bus_space_map(sc->sc_iot, faa->fa_reg[1].addr,
	    faa->fa_reg[1].size, 0, &sc->sc_ioh_iface)) {
		printf(": can't map registers\n");
		/* Unwind the first mapping on failure. */
		bus_space_unmap(sc->sc_iot, sc->sc_ioh_base,
		    sc->sc_iosize_base);
		return;
	}
	sc->sc_iosize_iface = faa->fa_reg[1].size;

	/* Optional system-controller regmap referenced from the FDT. */
	sc->sc_rm = regmap_byphandle(OF_getpropint(faa->fa_node,
	    "marvell,system-controller", 0));

	clock_enable_all(faa->fa_node);
	sc->sc_tclk = clock_get_frequency(faa->fa_node, "pp_clk");

	printf("\n");

	/* Finish initialization once everything else has attached. */
	config_defer(self, mvpp2_attach_deferred);
}
480
/*
 * Deferred half of attach: program AXI attributes, set up the
 * aggregated tx queue(s), FIFOs, buffer manager, parser and
 * classifier, then attach a port device for every child FDT node.
 */
void
mvpp2_attach_deferred(struct device *self)
{
	struct mvpp2_softc *sc = (void *)self;
	struct mvpp2_attach_args maa;
	struct mvpp2_tx_queue *txq;
	int i, node;

	mvpp2_axi_config(sc);

	/* Turn off hardware SMI (MDIO) polling. */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh_iface, MVPP22_SMI_MISC_CFG_REG,
	    bus_space_read_4(sc->sc_iot, sc->sc_ioh_iface,
	    MVPP22_SMI_MISC_CFG_REG) & ~MVPP22_SMI_POLLING_EN);

	/* This driver uses a single aggregated tx queue. */
	sc->sc_aggr_ntxq = 1;
	sc->sc_aggr_txqs = mallocarray(sc->sc_aggr_ntxq,
	    sizeof(*sc->sc_aggr_txqs), M_DEVBUF, M_WAITOK | M_ZERO);

	for (i = 0; i < sc->sc_aggr_ntxq; i++) {
		txq = &sc->sc_aggr_txqs[i];
		txq->id = i;
		mvpp2_aggr_txq_hw_init(sc, txq);
	}

	mvpp2_rx_fifo_init(sc);
	mvpp2_tx_fifo_init(sc);

	mvpp2_write(sc, MVPP2_TX_SNOOP_REG, 0x1);

	mvpp2_bm_pool_init(sc);

	/* Shadow of the parser TCAM, one entry per hardware entry. */
	sc->sc_prs_shadow = mallocarray(MVPP2_PRS_TCAM_SRAM_SIZE,
	    sizeof(*sc->sc_prs_shadow), M_DEVBUF, M_WAITOK | M_ZERO);

	mvpp2_prs_default_init(sc);
	mvpp2_cls_init(sc);

	/* Attach an mvpp port for each child node. */
	memset(&maa, 0, sizeof(maa));
	for (node = OF_child(sc->sc_node); node; node = OF_peer(node)) {
		maa.ma_node = node;
		maa.ma_dmat = sc->sc_dmat;
		config_found(self, &maa, NULL);
	}
}
525
/*
 * Program the AXI attributes and codes used for descriptor and
 * packet data accesses (cacheability and coherency domain).
 */
void
mvpp2_axi_config(struct mvpp2_softc *sc)
{
	uint32_t reg;

	mvpp2_write(sc, MVPP22_BM_ADDR_HIGH_RLS_REG, 0);

	/* Write attributes: cacheable writes, outer domain. */
	reg = (MVPP22_AXI_CODE_CACHE_WR_CACHE << MVPP22_AXI_ATTR_CACHE_OFFS) |
	    (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_ATTR_DOMAIN_OFFS);
	mvpp2_write(sc, MVPP22_AXI_BM_WR_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_RX_DATA_WR_ATTR_REG, reg);

	/* Read attributes: cacheable reads, outer domain. */
	reg = (MVPP22_AXI_CODE_CACHE_RD_CACHE << MVPP22_AXI_ATTR_CACHE_OFFS) |
	    (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_ATTR_DOMAIN_OFFS);
	mvpp2_write(sc, MVPP22_AXI_BM_RD_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_TX_DATA_RD_ATTR_REG, reg);

	/* "Normal" accesses: non-cacheable, system domain. */
	reg = (MVPP22_AXI_CODE_CACHE_NON_CACHE << MVPP22_AXI_CODE_CACHE_OFFS) |
	    (MVPP22_AXI_CODE_DOMAIN_SYSTEM << MVPP22_AXI_CODE_DOMAIN_OFFS);
	mvpp2_write(sc, MVPP22_AXI_RD_NORMAL_CODE_REG, reg);
	mvpp2_write(sc, MVPP22_AXI_WR_NORMAL_CODE_REG, reg);

	/* Snooped reads: cacheable, outer domain. */
	reg = (MVPP22_AXI_CODE_CACHE_RD_CACHE << MVPP22_AXI_CODE_CACHE_OFFS) |
	    (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_CODE_DOMAIN_OFFS);
	mvpp2_write(sc, MVPP22_AXI_RD_SNOOP_CODE_REG, reg);

	/* Snooped writes: cacheable, outer domain. */
	reg = (MVPP22_AXI_CODE_CACHE_WR_CACHE << MVPP22_AXI_CODE_CACHE_OFFS) |
	    (MVPP22_AXI_CODE_DOMAIN_OUTER_DOM << MVPP22_AXI_CODE_DOMAIN_OFFS);
	mvpp2_write(sc, MVPP22_AXI_WR_SNOOP_CODE_REG, reg);
}
560
/*
 * Initialize the buffer manager (BM): allocate one pool per CPU
 * (capped by what the hardware provides), give each pool fresh
 * pointer-ring memory, drain any stale buffers left by firmware
 * and fill the pool with rx mbufs.
 */
void
mvpp2_bm_pool_init(struct mvpp2_softc *sc)
{
	struct mvpp2_bm_pool *bm;
	struct mvpp2_buf *rxb;
	uint64_t phys, virt;
	int i, j, inuse;

	/* Mask and clear BM interrupts for every hardware pool. */
	for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
		mvpp2_write(sc, MVPP2_BM_INTR_MASK_REG(i), 0);
		mvpp2_write(sc, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* One pool per CPU, capped by the number of hardware pools. */
	sc->sc_npools = ncpus;
	sc->sc_npools = min(sc->sc_npools, MVPP2_BM_POOLS_NUM);

	sc->sc_bm_pools = mallocarray(sc->sc_npools, sizeof(*sc->sc_bm_pools),
	    M_DEVBUF, M_WAITOK | M_ZERO);

	for (i = 0; i < sc->sc_npools; i++) {
		bm = &sc->sc_bm_pools[i];
		/* Ring of MVPP2_BM_SIZE (phys, virt) pointer pairs. */
		bm->bm_mem = mvpp2_dmamem_alloc(sc,
		    MVPP2_BM_SIZE * sizeof(uint64_t) * 2,
		    MVPP2_BM_POOL_PTR_ALIGN);
		KASSERT(bm->bm_mem != NULL);
		bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(bm->bm_mem), 0,
		    MVPP2_DMA_LEN(bm->bm_mem),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Stop the pool before repointing its ring memory. */
		mvpp2_write(sc, MVPP2_BM_POOL_CTRL_REG(i),
		    mvpp2_read(sc, MVPP2_BM_POOL_CTRL_REG(i)) |
		    MVPP2_BM_STOP_MASK);

		mvpp2_write(sc, MVPP2_BM_POOL_BASE_REG(i),
		    (uint64_t)MVPP2_DMA_DVA(bm->bm_mem) & 0xffffffff);
		mvpp2_write(sc, MVPP22_BM_POOL_BASE_HIGH_REG,
		    ((uint64_t)MVPP2_DMA_DVA(bm->bm_mem) >> 32)
		    & MVPP22_BM_POOL_BASE_HIGH_MASK);
		mvpp2_write(sc, MVPP2_BM_POOL_SIZE_REG(i),
		    MVPP2_BM_SIZE);

		mvpp2_write(sc, MVPP2_BM_POOL_CTRL_REG(i),
		    mvpp2_read(sc, MVPP2_BM_POOL_CTRL_REG(i)) |
		    MVPP2_BM_START_MASK);

		/*
		 * U-Boot might not have cleaned its pools. The pool needs
		 * to be empty before we fill it, otherwise our packets are
		 * written to wherever U-Boot allocated memory. Cleaning it
		 * up ourselves is worrying as well, since the BM's pages are
		 * probably in our own memory. Best we can do is stop the BM,
		 * set new memory and drain the pool.
		 */
		inuse = mvpp2_read(sc, MVPP2_BM_POOL_PTRS_NUM_REG(i)) &
		    MVPP2_BM_POOL_PTRS_NUM_MASK;
		inuse += mvpp2_read(sc, MVPP2_BM_BPPI_PTRS_NUM_REG(i)) &
		    MVPP2_BM_BPPI_PTRS_NUM_MASK;
		if (inuse)
			inuse++;	/* drain one extra, just in case */
		for (j = 0; j < inuse; j++)
			mvpp2_read(sc, MVPP2_BM_PHY_ALLOC_REG(i));

		mvpp2_write(sc, MVPP2_POOL_BUF_SIZE_REG(i),
		    roundup(MCLBYTES, 1 << MVPP2_POOL_BUF_SIZE_OFFSET));

		bm->rxbuf = mallocarray(MVPP2_BM_SIZE, sizeof(struct mvpp2_buf),
		    M_DEVBUF, M_WAITOK);
		bm->freelist = mallocarray(MVPP2_BM_SIZE, sizeof(*bm->freelist),
		    M_DEVBUF, M_WAITOK | M_ZERO);

		/* Create a DMA map for every buffer slot. */
		for (j = 0; j < MVPP2_BM_SIZE; j++) {
			rxb = &bm->rxbuf[j];
			bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
			    MCLBYTES, 0, BUS_DMA_WAITOK, &rxb->mb_map);
			rxb->mb_m = NULL;
		}

		/* Use pool-id and rxbuf index as cookie. */
		for (j = 0; j < MVPP2_BM_SIZE; j++)
			bm->freelist[j] = (i << 16) | (j << 0);

		for (j = 0; j < MVPP2_BM_SIZE; j++) {
			rxb = &bm->rxbuf[j];
			rxb->mb_m = mvpp2_alloc_mbuf(sc, rxb->mb_map);
			if (rxb->mb_m == NULL)
				break;

			/* Take a cookie off the freelist. */
			KASSERT(bm->freelist[bm->free_cons] != -1);
			virt = bm->freelist[bm->free_cons];
			bm->freelist[bm->free_cons] = -1;
			bm->free_cons = (bm->free_cons + 1) % MVPP2_BM_SIZE;

			/*
			 * Release the buffer to the pool: high address
			 * bits first, then the low 32 bits; the write to
			 * the PHY_RLS register hands the buffer over.
			 */
			phys = rxb->mb_map->dm_segs[0].ds_addr;
			mvpp2_write(sc, MVPP22_BM_ADDR_HIGH_RLS_REG,
			    (((virt >> 32) & MVPP22_ADDR_HIGH_MASK)
			    << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) |
			    ((phys >> 32) & MVPP22_ADDR_HIGH_MASK));
			mvpp2_write(sc, MVPP2_BM_VIRT_RLS_REG,
			    virt & 0xffffffff);
			mvpp2_write(sc, MVPP2_BM_PHY_RLS_REG(i),
			    phys & 0xffffffff);
		}
	}
}
665
/*
 * Partition the shared rx FIFO: port 0 gets 32KB, port 1 8KB and
 * any remaining ports 4KB each, then enable the FIFO.
 */
void
mvpp2_rx_fifo_init(struct mvpp2_softc *sc)
{
	int i;

	mvpp2_write(sc, MVPP2_RX_DATA_FIFO_SIZE_REG(0),
	    MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
	mvpp2_write(sc, MVPP2_RX_ATTR_FIFO_SIZE_REG(0),
	    MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB);

	mvpp2_write(sc, MVPP2_RX_DATA_FIFO_SIZE_REG(1),
	    MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
	mvpp2_write(sc, MVPP2_RX_ATTR_FIFO_SIZE_REG(1),
	    MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB);

	for (i = 2; i < MVPP2_MAX_PORTS; i++) {
		mvpp2_write(sc, MVPP2_RX_DATA_FIFO_SIZE_REG(i),
		    MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
		mvpp2_write(sc, MVPP2_RX_ATTR_FIFO_SIZE_REG(i),
		    MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
	}

	mvpp2_write(sc, MVPP2_RX_MIN_PKT_SIZE_REG, MVPP2_RX_FIFO_PORT_MIN_PKT);
	mvpp2_write(sc, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
691
/*
 * Partition the shared tx FIFO: port 0 gets 10KB, the remaining
 * ports 3KB each, with matching thresholds.
 */
void
mvpp2_tx_fifo_init(struct mvpp2_softc *sc)
{
	int i;

	mvpp2_write(sc, MVPP22_TX_FIFO_SIZE_REG(0),
	    MVPP22_TX_FIFO_DATA_SIZE_10KB);
	mvpp2_write(sc, MVPP22_TX_FIFO_THRESH_REG(0),
	    MVPP2_TX_FIFO_THRESHOLD_10KB);

	for (i = 1; i < MVPP2_MAX_PORTS; i++) {
		mvpp2_write(sc, MVPP22_TX_FIFO_SIZE_REG(i),
		    MVPP22_TX_FIFO_DATA_SIZE_3KB);
		mvpp2_write(sc, MVPP22_TX_FIFO_THRESH_REG(i),
		    MVPP2_TX_FIFO_THRESHOLD_3KB);
	}
}
709
710 int
mvpp2_prs_default_init(struct mvpp2_softc * sc)711 mvpp2_prs_default_init(struct mvpp2_softc *sc)
712 {
713 int i, j, ret;
714
715 mvpp2_write(sc, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
716
717 for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++) {
718 mvpp2_write(sc, MVPP2_PRS_TCAM_IDX_REG, i);
719 for (j = 0; j < MVPP2_PRS_TCAM_WORDS; j++)
720 mvpp2_write(sc, MVPP2_PRS_TCAM_DATA_REG(j), 0);
721
722 mvpp2_write(sc, MVPP2_PRS_SRAM_IDX_REG, i);
723 for (j = 0; j < MVPP2_PRS_SRAM_WORDS; j++)
724 mvpp2_write(sc, MVPP2_PRS_SRAM_DATA_REG(j), 0);
725 }
726
727 for (i = 0; i < MVPP2_PRS_TCAM_SRAM_SIZE; i++)
728 mvpp2_prs_hw_inv(sc, i);
729
730 for (i = 0; i < MVPP2_MAX_PORTS; i++)
731 mvpp2_prs_hw_port_init(sc, i, MVPP2_PRS_LU_MH,
732 MVPP2_PRS_PORT_LU_MAX, 0);
733
734 mvpp2_prs_def_flow_init(sc);
735 mvpp2_prs_mh_init(sc);
736 mvpp2_prs_mac_init(sc);
737 mvpp2_prs_dsa_init(sc);
738 ret = mvpp2_prs_etype_init(sc);
739 if (ret)
740 return ret;
741 ret = mvpp2_prs_vlan_init(sc);
742 if (ret)
743 return ret;
744 ret = mvpp2_prs_pppoe_init(sc);
745 if (ret)
746 return ret;
747 ret = mvpp2_prs_ip6_init(sc);
748 if (ret)
749 return ret;
750 ret = mvpp2_prs_ip4_init(sc);
751 if (ret)
752 return ret;
753
754 return 0;
755 }
756
/*
 * Invalidate parser TCAM entry 'index' by setting the invalid bit
 * in its TCAM data.
 */
void
mvpp2_prs_hw_inv(struct mvpp2_softc *sc, int index)
{
	/* Select the entry first, then write the invalid-bit word. */
	mvpp2_write(sc, MVPP2_PRS_TCAM_IDX_REG, index);
	mvpp2_write(sc, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
	    MVPP2_PRS_TCAM_INV_MASK);
}
764
/*
 * Program the parser defaults for one port: the first lookup id,
 * the maximum number of lookup iterations and the initial packet
 * offset.  Each field lives in a shared register, hence the
 * read-modify-write sequences.
 */
void
mvpp2_prs_hw_port_init(struct mvpp2_softc *sc, int port,
    int lu_first, int lu_max, int offset)
{
	uint32_t reg;

	/* First lookup for this port. */
	reg = mvpp2_read(sc, MVPP2_PRS_INIT_LOOKUP_REG);
	reg &= ~MVPP2_PRS_PORT_LU_MASK(port);
	reg |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
	mvpp2_write(sc, MVPP2_PRS_INIT_LOOKUP_REG, reg);

	/* Maximum lookup iterations. */
	reg = mvpp2_read(sc, MVPP2_PRS_MAX_LOOP_REG(port));
	reg &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
	reg |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
	mvpp2_write(sc, MVPP2_PRS_MAX_LOOP_REG(port), reg);

	/* Initial offset into the packet. */
	reg = mvpp2_read(sc, MVPP2_PRS_INIT_OFFS_REG(port));
	reg &= ~MVPP2_PRS_INIT_OFF_MASK(port);
	reg |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
	mvpp2_write(sc, MVPP2_PRS_INIT_OFFS_REG(port), reg);
}
786
/*
 * Install one default flow entry per port in the FLOWS lookup.
 * NOTE(review): the port map is cleared and the entry is matched
 * via the AI (flow id) bits instead — confirm against the flow
 * lookup users elsewhere in the driver.
 */
void
mvpp2_prs_def_flow_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int i;

	for (i = 0; i < MVPP2_MAX_PORTS; i++) {
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - i;
		mvpp2_prs_tcam_port_map_set(&pe, 0);
		mvpp2_prs_sram_ai_update(&pe, i, MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_hw_write(sc, &pe);
	}
}
804
/*
 * Default Marvell-header entry: skip MVPP2_MH_SIZE bytes and
 * continue with the MAC lookup, for all ports.
 */
void
mvpp2_prs_mh_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;

	memset(&pe, 0, sizeof(pe));
	pe.index = MVPP2_PE_MH_DEFAULT;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MH);
	mvpp2_prs_hw_write(sc, &pe);
}
820
/*
 * Default MAC lookup entries: the non-promiscuous catch-all entry
 * drops unmatched frames; the drop-all and uni/multicast promiscuous
 * entries start out disabled.
 */
void
mvpp2_prs_mac_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;

	/* Catch-all: mark for drop and finish the lookup. */
	memset(&pe, 0, sizeof(pe));
	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
	    MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(sc, &pe);
	/* Drop-all and promiscuous entries, initially disabled. */
	mvpp2_prs_mac_drop_all_set(sc, 0, 0);
	mvpp2_prs_mac_promisc_set(sc, 0, MVPP2_PRS_L2_UNI_CAST, 0);
	mvpp2_prs_mac_promisc_set(sc, 0, MVPP2_PRS_L2_MULTI_CAST, 0);
}
840
/*
 * Default DSA lookup entries: configure the tagged/untagged DSA and
 * EDSA variants (all initially disabled), plus a default entry that
 * falls through to the VLAN lookup without consuming any bytes.
 */
void
mvpp2_prs_dsa_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;

	mvpp2_prs_dsa_tag_set(sc, 0, 0, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
	mvpp2_prs_dsa_tag_set(sc, 0, 0, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
	mvpp2_prs_dsa_tag_set(sc, 0, 0, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
	mvpp2_prs_dsa_tag_set(sc, 0, 0, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
	mvpp2_prs_dsa_tag_ethertype_set(sc, 0, 0, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
	mvpp2_prs_dsa_tag_ethertype_set(sc, 0, 0, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
	mvpp2_prs_dsa_tag_ethertype_set(sc, 0, 1, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
	mvpp2_prs_dsa_tag_ethertype_set(sc, 0, 1, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
	/* Default: no DSA tag, continue with the VLAN lookup. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
	pe.index = MVPP2_PE_DSA_DEFAULT;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_hw_write(sc, &pe);
}
864
/*
 * Populate the parser's L2 ethertype stage: PPPoE, ARP, LBTD, IPv4
 * (with and without options), IPv6, and a catch-all for unknown
 * ethertypes.  Returns 0 on success or the negative value from
 * mvpp2_prs_tcam_first_free() when the TCAM is full.
 */
int
mvpp2_prs_etype_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* Ethertype: PPPoE */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, ETHERTYPE_PPPOE);
	/* Skip the PPPoE header and continue at the PPPoE lookup stage. */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
	    MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 0;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
	    MVPP2_PRS_RI_PPPOE_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Ethertype: ARP */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, ETHERTYPE_ARP);
	/* ARP terminates parsing: generate the flow id immediately. */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 1;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_ARP,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Ethertype: LBTD (loopback detection frames, steered to the CPU) */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
	    MVPP2_PRS_RI_UDF3_RX_SPECIAL, MVPP2_PRS_RI_CPU_CODE_MASK |
	    MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 1;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
	    MVPP2_PRS_RI_UDF3_RX_SPECIAL, MVPP2_PRS_RI_CPU_CODE_MASK |
	    MVPP2_PRS_RI_UDF3_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Ethertype: IPv4 without options (IHL == 5) */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, ETHERTYPE_IP);
	/* Also match version/IHL byte right after the ethertype. */
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
	    MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
	    MVPP2_PRS_IPV4_HEAD_MASK | MVPP2_PRS_IPV4_IHL_MASK);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	/* Skip ethertype plus 4 bytes of the IPv4 header. */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 0;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_IP4,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/*
	 * Ethertype: IPv4 with options.  Deliberately reuses `pe` from the
	 * previous entry (no memset): the ETHERTYPE_IP match, shift and
	 * UDF offset are kept; only the IHL match byte and the RI words
	 * are cleared and rewritten below.
	 */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	pe.index = tid;

	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
	    MVPP2_PRS_IPV4_HEAD, MVPP2_PRS_IPV4_HEAD_MASK);
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 0;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Ethertype: IPv6 without options */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, ETHERTYPE_IPV6);
	/* Skip ethertype, 8 fixed header bytes and the source address. */
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
	    MVPP2_MAX_L3_ADDR_SIZE, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 0;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_IP6,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
	pe.index = MVPP2_PE_ETH_TYPE_UN;
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_L2);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
	sc->sc_prs_shadow[pe.index].finish = 1;
	mvpp2_prs_shadow_ri_set(sc, pe.index, MVPP2_PRS_RI_L3_UN,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1029
/*
 * Populate the parser's VLAN stage: pre-installed double-tag
 * combinations, single-tag entries for 802.1Q and 802.1ad, a default
 * for double-tagged frames and a default for untagged frames.
 * Returns 0 on success, non-zero on failure from the add helpers.
 */
int
mvpp2_prs_vlan_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int ret;

	/* Shadow table tracking which double-VLAN pairs are installed. */
	sc->sc_prs_double_vlans = mallocarray(MVPP2_PRS_DBL_VLANS_MAX,
	    sizeof(*sc->sc_prs_double_vlans), M_DEVBUF, M_WAITOK | M_ZERO);

	ret = mvpp2_prs_double_vlan_add(sc, ETHERTYPE_VLAN, ETHERTYPE_QINQ,
	    MVPP2_PRS_PORT_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_double_vlan_add(sc, ETHERTYPE_VLAN, ETHERTYPE_VLAN,
	    MVPP2_PRS_PORT_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_vlan_add(sc, ETHERTYPE_QINQ, MVPP2_PRS_SINGLE_VLAN_AI,
	    MVPP2_PRS_PORT_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_vlan_add(sc, ETHERTYPE_VLAN, MVPP2_PRS_SINGLE_VLAN_AI,
	    MVPP2_PRS_PORT_MASK);
	if (ret)
		return ret;

	/* Default for double-tagged frames (AI bit set by the pair entry). */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_DBL;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
	    MVPP2_PRS_RI_VLAN_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
	    MVPP2_PRS_DBL_VLAN_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default for untagged frames: pass straight to the L2 stage. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
	pe.index = MVPP2_PE_VLAN_NONE;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
	    MVPP2_PRS_RI_VLAN_MASK);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_VLAN);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1081
/*
 * Populate the parser's PPPoE stage: PPP-encapsulated IPv4 (with and
 * without options), PPP-encapsulated IPv6, and a catch-all for other
 * PPP protocols.  Returns 0 on success or the negative value from
 * mvpp2_prs_tcam_first_free() when the TCAM is full.
 */
int
mvpp2_prs_pppoe_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int tid;

	/* PPP protocol: IPv4.  Marked IP4_OPT; the IHL==5 entry below
	 * overrides it with plain IP4 for option-less headers. */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, PPP_IP);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(sc, &pe);

	/*
	 * PPP IPv4 without options (IHL == 5).  Deliberately reuses `pe`
	 * from the entry above (no memset): only the IHL match byte is
	 * added and the RI words are cleared and rewritten.
	 */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	pe.index = tid;
	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
	    MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
	    MVPP2_PRS_IPV4_HEAD_MASK | MVPP2_PRS_IPV4_IHL_MASK);
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(sc, &pe);

	/* PPP protocol: IPv6 */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;
	mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default: unknown PPP protocol, finish parsing as L3 unknown. */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
	pe.index = tid;
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
	    MVPP2_PRS_RI_L3_PROTO_MASK);
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
	    MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_PPPOE);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1160
/*
 * Populate the parser's IPv6 stage: L4 protocol entries (TCP, UDP,
 * ICMPv6, IPIP), multicast classification, a drop entry for zero hop
 * limit, and defaults for unknown protocols and addresses.  Returns 0
 * on success or a negative/non-zero error from the helpers.
 */
int
mvpp2_prs_ip6_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int tid, ret;

	ret = mvpp2_prs_ip6_proto(sc, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip6_proto(sc, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	if (ret)
		return ret;
	/* ICMPv6 is steered to the CPU as a special frame. */
	ret = mvpp2_prs_ip6_proto(sc, IPPROTO_ICMPV6,
	    MVPP2_PRS_RI_CPU_CODE_RX_SPEC | MVPP2_PRS_RI_UDF3_RX_SPECIAL,
	    MVPP2_PRS_RI_CPU_CODE_MASK | MVPP2_PRS_RI_UDF3_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip6_proto(sc, IPPROTO_IPIP, MVPP2_PRS_RI_UDF7_IP6_LITE,
	    MVPP2_PRS_RI_UDF7_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip6_cast(sc, MVPP2_PRS_L3_MULTI_CAST);
	if (ret)
		return ret;

	/* Drop entry: IPv6 packets with a hop limit of zero. */
	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe,
	    MVPP2_PRS_RI_L3_UN | MVPP2_PRS_RI_DROP_MASK,
	    MVPP2_PRS_RI_L3_PROTO_MASK | MVPP2_PRS_RI_DROP_MASK);
	mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/*
	 * NOTE(review): shadow entries here and below are recorded as
	 * LU_IP4 rather than LU_IP6; matches the vendor/Linux driver —
	 * confirm intentional.
	 */
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default: unknown L4 protocol, no extension headers. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_PROTO_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
	    sizeof(struct ip6_hdr) - 6, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default: unknown L4 protocol behind extension headers. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default: unknown (unicast) address, second IP6 pass. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = MVPP2_PE_IP6_ADDR_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
	    MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Shift back to the start of the IPv6 header for the next pass. */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1251
/*
 * Populate the parser's IPv4 stage: L4 protocol entries (TCP, UDP,
 * IGMP), broadcast/multicast classification, and defaults for unknown
 * protocols and addresses.  Returns 0 on success or a non-zero error
 * from the helpers.
 */
int
mvpp2_prs_ip4_init(struct mvpp2_softc *sc)
{
	struct mvpp2_prs_entry pe;
	int ret;

	ret = mvpp2_prs_ip4_proto(sc, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip4_proto(sc, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	if (ret)
		return ret;
	/* IGMP is steered to the CPU as a special frame. */
	ret = mvpp2_prs_ip4_proto(sc, IPPROTO_IGMP,
	    MVPP2_PRS_RI_CPU_CODE_RX_SPEC | MVPP2_PRS_RI_UDF3_RX_SPECIAL,
	    MVPP2_PRS_RI_CPU_CODE_MASK | MVPP2_PRS_RI_UDF3_MASK);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip4_cast(sc, MVPP2_PRS_L3_BROAD_CAST);
	if (ret)
		return ret;
	ret = mvpp2_prs_ip4_cast(sc, MVPP2_PRS_L3_MULTI_CAST);
	if (ret)
		return ret;

	/*
	 * Default: unknown L4 protocol.  Loops back through the IP4
	 * stage with the DIP AI bit set for destination address lookup.
	 */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_PROTO_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
	    sizeof(struct ip) - 4, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
	    MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
	    MVPP2_PRS_RI_L4_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	/* Default: unknown (unicast) destination address, finish parsing. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = MVPP2_PE_IP4_ADDR_UN;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
	    MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
	    MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
1309
1310 int
mvpp2_port_match(struct device * parent,void * cfdata,void * aux)1311 mvpp2_port_match(struct device *parent, void *cfdata, void *aux)
1312 {
1313 struct mvpp2_attach_args *maa = aux;
1314 char buf[32];
1315
1316 if (OF_getprop(maa->ma_node, "status", buf, sizeof(buf)) > 0 &&
1317 strcmp(buf, "disabled") == 0)
1318 return 0;
1319
1320 return 1;
1321 }
1322
1323 void
mvpp2_port_attach(struct device * parent,struct device * self,void * aux)1324 mvpp2_port_attach(struct device *parent, struct device *self, void *aux)
1325 {
1326 struct mvpp2_port *sc = (void *)self;
1327 struct mvpp2_attach_args *maa = aux;
1328 struct mvpp2_tx_queue *txq;
1329 struct mvpp2_rx_queue *rxq;
1330 struct ifnet *ifp;
1331 uint32_t phy, reg;
1332 int i, idx, len, node;
1333 int mii_flags = 0;
1334 char *phy_mode;
1335 char *managed;
1336
1337 sc->sc = (void *)parent;
1338 sc->sc_node = maa->ma_node;
1339 sc->sc_dmat = maa->ma_dmat;
1340
1341 sc->sc_id = OF_getpropint(sc->sc_node, "port-id", 0);
1342 sc->sc_gop_id = OF_getpropint(sc->sc_node, "gop-port-id", 0);
1343 sc->sc_sfp = OF_getpropint(sc->sc_node, "sfp", 0);
1344
1345 len = OF_getproplen(sc->sc_node, "phy-mode");
1346 if (len <= 0) {
1347 printf("%s: cannot extract phy-mode\n", self->dv_xname);
1348 return;
1349 }
1350
1351 phy_mode = malloc(len, M_TEMP, M_WAITOK);
1352 OF_getprop(sc->sc_node, "phy-mode", phy_mode, len);
1353 if (!strncmp(phy_mode, "10gbase-r", strlen("10gbase-r")))
1354 sc->sc_phy_mode = PHY_MODE_10GBASER;
1355 else if (!strncmp(phy_mode, "10gbase-kr", strlen("10gbase-kr")))
1356 sc->sc_phy_mode = PHY_MODE_10GBASER;
1357 else if (!strncmp(phy_mode, "2500base-x", strlen("2500base-x")))
1358 sc->sc_phy_mode = PHY_MODE_2500BASEX;
1359 else if (!strncmp(phy_mode, "1000base-x", strlen("1000base-x")))
1360 sc->sc_phy_mode = PHY_MODE_1000BASEX;
1361 else if (!strncmp(phy_mode, "sgmii", strlen("sgmii")))
1362 sc->sc_phy_mode = PHY_MODE_SGMII;
1363 else if (!strncmp(phy_mode, "rgmii-rxid", strlen("rgmii-rxid")))
1364 sc->sc_phy_mode = PHY_MODE_RGMII_RXID;
1365 else if (!strncmp(phy_mode, "rgmii-txid", strlen("rgmii-txid")))
1366 sc->sc_phy_mode = PHY_MODE_RGMII_TXID;
1367 else if (!strncmp(phy_mode, "rgmii-id", strlen("rgmii-id")))
1368 sc->sc_phy_mode = PHY_MODE_RGMII_ID;
1369 else if (!strncmp(phy_mode, "rgmii", strlen("rgmii")))
1370 sc->sc_phy_mode = PHY_MODE_RGMII;
1371 else {
1372 printf("%s: cannot use phy-mode %s\n", self->dv_xname,
1373 phy_mode);
1374 return;
1375 }
1376 free(phy_mode, M_TEMP, len);
1377
1378 /* Lookup PHY. */
1379 phy = OF_getpropint(sc->sc_node, "phy", 0);
1380 if (phy) {
1381 node = OF_getnodebyphandle(phy);
1382 if (!node) {
1383 printf(": no phy\n");
1384 return;
1385 }
1386 sc->sc_mdio = mii_byphandle(phy);
1387 sc->sc_phyloc = OF_getpropint(node, "reg", MII_PHY_ANY);
1388 sc->sc_sfp = OF_getpropint(node, "sfp", sc->sc_sfp);
1389 sc->sc_mii.mii_node = node;
1390 }
1391
1392 if (sc->sc_sfp)
1393 config_mountroot(self, mvpp2_port_attach_sfp);
1394
1395 if ((len = OF_getproplen(sc->sc_node, "managed")) >= 0) {
1396 managed = malloc(len, M_TEMP, M_WAITOK);
1397 OF_getprop(sc->sc_node, "managed", managed, len);
1398 if (!strncmp(managed, "in-band-status",
1399 strlen("in-band-status")))
1400 sc->sc_inband_status = 1;
1401 free(managed, M_TEMP, len);
1402 }
1403
1404 if (OF_getprop(sc->sc_node, "local-mac-address",
1405 &sc->sc_lladdr, ETHER_ADDR_LEN) != ETHER_ADDR_LEN)
1406 memset(sc->sc_lladdr, 0xff, sizeof(sc->sc_lladdr));
1407 printf(": address %s\n", ether_sprintf(sc->sc_lladdr));
1408
1409 sc->sc_ntxq = sc->sc_nrxq = 1;
1410 sc->sc_txqs = mallocarray(sc->sc_ntxq, sizeof(*sc->sc_txqs),
1411 M_DEVBUF, M_WAITOK | M_ZERO);
1412 sc->sc_rxqs = mallocarray(sc->sc_nrxq, sizeof(*sc->sc_rxqs),
1413 M_DEVBUF, M_WAITOK | M_ZERO);
1414
1415 for (i = 0; i < sc->sc_ntxq; i++) {
1416 txq = &sc->sc_txqs[i];
1417 txq->id = mvpp2_txq_phys(sc->sc_id, i);
1418 txq->log_id = i;
1419 txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
1420 }
1421
1422 sc->sc_tx_time_coal = MVPP2_TXDONE_COAL_USEC;
1423
1424 for (i = 0; i < sc->sc_nrxq; i++) {
1425 rxq = &sc->sc_rxqs[i];
1426 rxq->id = sc->sc_id * 32 + i;
1427 rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
1428 rxq->time_coal = MVPP2_RX_COAL_USEC;
1429 }
1430
1431 mvpp2_egress_disable(sc);
1432 mvpp2_port_disable(sc);
1433
1434 mvpp2_write(sc->sc, MVPP2_ISR_RXQ_GROUP_INDEX_REG,
1435 sc->sc_id << MVPP2_ISR_RXQ_GROUP_INDEX_GROUP_SHIFT |
1436 0 /* queue vector id */);
1437 mvpp2_write(sc->sc, MVPP2_ISR_RXQ_SUB_GROUP_CONFIG_REG,
1438 sc->sc_nrxq << MVPP2_ISR_RXQ_SUB_GROUP_CONFIG_SIZE_SHIFT |
1439 0 /* first rxq */);
1440
1441 mvpp2_ingress_disable(sc);
1442 mvpp2_defaults_set(sc);
1443
1444 mvpp2_cls_oversize_rxq_set(sc);
1445 mvpp2_cls_port_config(sc);
1446
1447 /*
1448 * We have one pool per core, so all RX queues on a specific
1449 * core share that pool. Also long and short uses the same
1450 * pool.
1451 */
1452 for (i = 0; i < sc->sc_nrxq; i++) {
1453 mvpp2_rxq_long_pool_set(sc, i, i);
1454 mvpp2_rxq_short_pool_set(sc, i, i);
1455 }
1456
1457 mvpp2_mac_reset_assert(sc);
1458 mvpp2_pcs_reset_assert(sc);
1459
1460 timeout_set(&sc->sc_tick, mvpp2_tick, sc);
1461
1462 ifp = &sc->sc_ac.ac_if;
1463 ifp->if_softc = sc;
1464 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
1465 ifp->if_ioctl = mvpp2_ioctl;
1466 ifp->if_start = mvpp2_start;
1467 ifp->if_watchdog = mvpp2_watchdog;
1468 ifq_init_maxlen(&ifp->if_snd, MVPP2_NTXDESC - 1);
1469 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);
1470
1471 ifp->if_capabilities = IFCAP_VLAN_MTU;
1472
1473 sc->sc_mii.mii_ifp = ifp;
1474 sc->sc_mii.mii_readreg = mvpp2_mii_readreg;
1475 sc->sc_mii.mii_writereg = mvpp2_mii_writereg;
1476 sc->sc_mii.mii_statchg = mvpp2_mii_statchg;
1477
1478 ifmedia_init(&sc->sc_media, 0, mvpp2_media_change, mvpp2_media_status);
1479
1480 if (sc->sc_mdio) {
1481 switch (sc->sc_phy_mode) {
1482 case PHY_MODE_1000BASEX:
1483 mii_flags |= MIIF_IS_1000X;
1484 break;
1485 case PHY_MODE_SGMII:
1486 mii_flags |= MIIF_SGMII;
1487 break;
1488 case PHY_MODE_RGMII_ID:
1489 mii_flags |= MIIF_RXID | MIIF_TXID;
1490 break;
1491 case PHY_MODE_RGMII_RXID:
1492 mii_flags |= MIIF_RXID;
1493 break;
1494 case PHY_MODE_RGMII_TXID:
1495 mii_flags |= MIIF_TXID;
1496 break;
1497 default:
1498 break;
1499 }
1500 mii_attach(self, &sc->sc_mii, 0xffffffff, sc->sc_phyloc,
1501 (sc->sc_phyloc == MII_PHY_ANY) ? 0 : MII_OFFSET_ANY,
1502 mii_flags);
1503 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
1504 printf("%s: no PHY found!\n", self->dv_xname);
1505 ifmedia_add(&sc->sc_mii.mii_media,
1506 IFM_ETHER|IFM_MANUAL, 0, NULL);
1507 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_MANUAL);
1508 } else
1509 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1510 } else {
1511 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO, 0, NULL);
1512 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
1513
1514 if (sc->sc_inband_status) {
1515 switch (sc->sc_phy_mode) {
1516 case PHY_MODE_1000BASEX:
1517 sc->sc_mii.mii_media_active =
1518 IFM_ETHER|IFM_1000_KX|IFM_FDX;
1519 break;
1520 case PHY_MODE_2500BASEX:
1521 sc->sc_mii.mii_media_active =
1522 IFM_ETHER|IFM_2500_KX|IFM_FDX;
1523 break;
1524 case PHY_MODE_10GBASER:
1525 sc->sc_mii.mii_media_active =
1526 IFM_ETHER|IFM_10G_KR|IFM_FDX;
1527 break;
1528 default:
1529 break;
1530 }
1531 mvpp2_inband_statchg(sc);
1532 } else {
1533 sc->sc_mii.mii_media_status = IFM_AVALID|IFM_ACTIVE;
1534 sc->sc_mii.mii_media_active = IFM_ETHER|IFM_1000_T|IFM_FDX;
1535 mvpp2_mii_statchg(self);
1536 }
1537
1538 ifp->if_baudrate = ifmedia_baudrate(sc->sc_mii.mii_media_active);
1539 ifp->if_link_state = LINK_STATE_FULL_DUPLEX;
1540 }
1541
1542 if_attach(ifp);
1543 ether_ifattach(ifp);
1544
1545 if (sc->sc_phy_mode == PHY_MODE_2500BASEX ||
1546 sc->sc_phy_mode == PHY_MODE_1000BASEX ||
1547 sc->sc_phy_mode == PHY_MODE_SGMII ||
1548 sc->sc_phy_mode == PHY_MODE_RGMII ||
1549 sc->sc_phy_mode == PHY_MODE_RGMII_ID ||
1550 sc->sc_phy_mode == PHY_MODE_RGMII_RXID ||
1551 sc->sc_phy_mode == PHY_MODE_RGMII_TXID) {
1552 reg = mvpp2_gmac_read(sc, MVPP2_GMAC_INT_MASK_REG);
1553 reg |= MVPP2_GMAC_INT_CAUSE_LINK_CHANGE;
1554 mvpp2_gmac_write(sc, MVPP2_GMAC_INT_MASK_REG, reg);
1555 }
1556
1557 if (sc->sc_gop_id == 0) {
1558 reg = mvpp2_xlg_read(sc, MV_XLG_INTERRUPT_MASK_REG);
1559 reg |= MV_XLG_INTERRUPT_LINK_CHANGE;
1560 mvpp2_xlg_write(sc, MV_XLG_INTERRUPT_MASK_REG, reg);
1561 }
1562
1563 mvpp2_gop_intr_unmask(sc);
1564
1565 idx = OF_getindex(sc->sc_node, "link", "interrupt-names");
1566 if (idx >= 0)
1567 fdt_intr_establish_idx(sc->sc_node, idx, IPL_NET,
1568 mvpp2_link_intr, sc, sc->sc_dev.dv_xname);
1569 idx = OF_getindex(sc->sc_node, "hif0", "interrupt-names");
1570 if (idx < 0)
1571 idx = OF_getindex(sc->sc_node, "tx-cpu0", "interrupt-names");
1572 if (idx >= 0)
1573 fdt_intr_establish_idx(sc->sc_node, idx, IPL_NET,
1574 mvpp2_intr, sc, sc->sc_dev.dv_xname);
1575 }
1576
/*
 * Deferred (post-mountroot) SFP probe: read the module's supported
 * media and, for known subtypes, override the phy-mode from the
 * device tree and switch to in-band link status.
 */
void
mvpp2_port_attach_sfp(struct device *self)
{
	struct mvpp2_port *sc = (struct mvpp2_port *)self;
	uint32_t reg;

	rw_enter(&mvpp2_sff_lock, RW_WRITE);
	sfp_disable(sc->sc_sfp);
	sfp_add_media(sc->sc_sfp, &sc->sc_mii);
	rw_exit(&mvpp2_sff_lock);

	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
	case IFM_10G_SR:
	case IFM_10G_LR:
	case IFM_10G_LRM:
	case IFM_10G_ER:
	case IFM_10G_SFP_CU:
		sc->sc_phy_mode = PHY_MODE_10GBASER;
		sc->sc_mii.mii_media_status = IFM_AVALID;
		sc->sc_inband_status = 1;
		break;
	case IFM_2500_SX:
		sc->sc_phy_mode = PHY_MODE_2500BASEX;
		sc->sc_mii.mii_media_status = IFM_AVALID;
		sc->sc_inband_status = 1;
		break;
	case IFM_1000_CX:
	case IFM_1000_LX:
	case IFM_1000_SX:
	case IFM_1000_T:
		sc->sc_phy_mode = PHY_MODE_1000BASEX;
		sc->sc_mii.mii_media_status = IFM_AVALID;
		sc->sc_inband_status = 1;
		break;
	}

	/* In-band status needs GMAC link-change interrupts enabled. */
	if (sc->sc_inband_status) {
		reg = mvpp2_gmac_read(sc, MVPP2_GMAC_INT_MASK_REG);
		reg |= MVPP2_GMAC_INT_CAUSE_LINK_CHANGE;
		mvpp2_gmac_write(sc, MVPP2_GMAC_INT_MASK_REG, reg);
	}
}
1619
1620 uint32_t
mvpp2_read(struct mvpp2_softc * sc,bus_addr_t addr)1621 mvpp2_read(struct mvpp2_softc *sc, bus_addr_t addr)
1622 {
1623 return bus_space_read_4(sc->sc_iot, sc->sc_ioh_base, addr);
1624 }
1625
1626 void
mvpp2_write(struct mvpp2_softc * sc,bus_addr_t addr,uint32_t data)1627 mvpp2_write(struct mvpp2_softc *sc, bus_addr_t addr, uint32_t data)
1628 {
1629 bus_space_write_4(sc->sc_iot, sc->sc_ioh_base, addr, data);
1630 }
1631
1632 uint32_t
mvpp2_gmac_read(struct mvpp2_port * sc,bus_addr_t addr)1633 mvpp2_gmac_read(struct mvpp2_port *sc, bus_addr_t addr)
1634 {
1635 return bus_space_read_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1636 MVPP22_GMAC_OFFSET + sc->sc_gop_id * MVPP22_GMAC_REG_SIZE + addr);
1637 }
1638
1639 void
mvpp2_gmac_write(struct mvpp2_port * sc,bus_addr_t addr,uint32_t data)1640 mvpp2_gmac_write(struct mvpp2_port *sc, bus_addr_t addr, uint32_t data)
1641 {
1642 bus_space_write_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1643 MVPP22_GMAC_OFFSET + sc->sc_gop_id * MVPP22_GMAC_REG_SIZE + addr,
1644 data);
1645 }
1646
1647 uint32_t
mvpp2_xlg_read(struct mvpp2_port * sc,bus_addr_t addr)1648 mvpp2_xlg_read(struct mvpp2_port *sc, bus_addr_t addr)
1649 {
1650 return bus_space_read_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1651 MVPP22_XLG_OFFSET + sc->sc_gop_id * MVPP22_XLG_REG_SIZE + addr);
1652 }
1653
1654 void
mvpp2_xlg_write(struct mvpp2_port * sc,bus_addr_t addr,uint32_t data)1655 mvpp2_xlg_write(struct mvpp2_port *sc, bus_addr_t addr, uint32_t data)
1656 {
1657 bus_space_write_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1658 MVPP22_XLG_OFFSET + sc->sc_gop_id * MVPP22_XLG_REG_SIZE + addr,
1659 data);
1660 }
1661
1662 uint32_t
mvpp2_mpcs_read(struct mvpp2_port * sc,bus_addr_t addr)1663 mvpp2_mpcs_read(struct mvpp2_port *sc, bus_addr_t addr)
1664 {
1665 return bus_space_read_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1666 MVPP22_MPCS_OFFSET + sc->sc_gop_id * MVPP22_MPCS_REG_SIZE + addr);
1667 }
1668
1669 void
mvpp2_mpcs_write(struct mvpp2_port * sc,bus_addr_t addr,uint32_t data)1670 mvpp2_mpcs_write(struct mvpp2_port *sc, bus_addr_t addr, uint32_t data)
1671 {
1672 bus_space_write_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1673 MVPP22_MPCS_OFFSET + sc->sc_gop_id * MVPP22_MPCS_REG_SIZE + addr,
1674 data);
1675 }
1676
1677 uint32_t
mvpp2_xpcs_read(struct mvpp2_port * sc,bus_addr_t addr)1678 mvpp2_xpcs_read(struct mvpp2_port *sc, bus_addr_t addr)
1679 {
1680 return bus_space_read_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1681 MVPP22_XPCS_OFFSET + sc->sc_gop_id * MVPP22_XPCS_REG_SIZE + addr);
1682 }
1683
1684 void
mvpp2_xpcs_write(struct mvpp2_port * sc,bus_addr_t addr,uint32_t data)1685 mvpp2_xpcs_write(struct mvpp2_port *sc, bus_addr_t addr, uint32_t data)
1686 {
1687 bus_space_write_4(sc->sc->sc_iot, sc->sc->sc_ioh_iface,
1688 MVPP22_XPCS_OFFSET + sc->sc_gop_id * MVPP22_XPCS_REG_SIZE + addr,
1689 data);
1690 }
1691
1692 static inline int
mvpp2_load_mbuf(bus_dma_tag_t dmat,bus_dmamap_t map,struct mbuf * m)1693 mvpp2_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m)
1694 {
1695 int error;
1696
1697 error = bus_dmamap_load_mbuf(dmat, map, m, BUS_DMA_NOWAIT);
1698 if (error != EFBIG)
1699 return (error);
1700
1701 error = m_defrag(m, M_DONTWAIT);
1702 if (error != 0)
1703 return (error);
1704
1705 return bus_dmamap_load_mbuf(dmat, map, m, BUS_DMA_NOWAIT);
1706 }
1707
/*
 * Transmit start routine.  Dequeues packets from the interface send
 * queue, DMA-maps them, and fills descriptors on the shared
 * aggregated TX queue (sc_aggr_txqs[0]); finally tells the hardware
 * how many descriptors were added via MVPP2_AGGR_TXQ_UPDATE_REG.
 */
void
mvpp2_start(struct ifnet *ifp)
{
	struct mvpp2_port *sc = ifp->if_softc;
	struct mvpp2_tx_queue *txq = &sc->sc->sc_aggr_txqs[0];
	struct mvpp2_tx_desc *txd;
	struct mbuf *m;
	bus_dmamap_t map;
	uint32_t command;
	int i, current, first, last;
	int free, prod, used;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;
	if (ifq_is_oactive(&ifp->if_snd))
		return;
	if (ifq_empty(&ifp->if_snd))
		return;
	if (!sc->sc_link)
		return;

	/* Free descriptor count on the ring: cons - prod, modulo size. */
	used = 0;
	prod = txq->prod;
	free = txq->cons;
	if (free <= prod)
		free += MVPP2_AGGR_TXQ_SIZE;
	free -= prod;

	for (;;) {
		/* Keep headroom for a maximally fragmented packet. */
		if (free <= MVPP2_NTXSEGS) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m = ifq_dequeue(&ifp->if_snd);
		if (m == NULL)
			break;

		first = last = current = prod;
		map = txq->buf[current].mb_map;

		if (mvpp2_load_mbuf(sc->sc_dmat, map, m) != 0) {
			ifp->if_oerrors++;
			m_freem(m);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		command = MVPP2_TXD_L4_CSUM_NOT |
		    MVPP2_TXD_IP_CSUM_DISABLE;
		/* One descriptor per DMA segment; first/last flagged. */
		for (i = 0; i < map->dm_nsegs; i++) {
			txd = &txq->descs[current];
			memset(txd, 0, sizeof(*txd));
			/* Hardware wants a 32-byte aligned address plus
			 * the sub-32-byte offset in a separate field. */
			txd->buf_phys_addr_hw_cmd2 =
			    map->dm_segs[i].ds_addr & ~0x1f;
			txd->packet_offset =
			    map->dm_segs[i].ds_addr & 0x1f;
			txd->data_size = map->dm_segs[i].ds_len;
			txd->phys_txq = sc->sc_txqs[0].id;
			txd->command = command |
			    MVPP2_TXD_PADDING_DISABLE;
			if (i == 0)
				txd->command |= MVPP2_TXD_F_DESC;
			if (i == (map->dm_nsegs - 1))
				txd->command |= MVPP2_TXD_L_DESC;

			bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(txq->ring),
			    current * sizeof(*txd), sizeof(*txd),
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

			last = current;
			current = (current + 1) % MVPP2_AGGR_TXQ_SIZE;
			KASSERT(current != txq->cons);
		}

		/* Park the map/mbuf on the LAST slot so completion of the
		 * last descriptor frees them; the first slot inherits the
		 * last slot's spare map. */
		KASSERT(txq->buf[last].mb_m == NULL);
		txq->buf[first].mb_map = txq->buf[last].mb_map;
		txq->buf[last].mb_map = map;
		txq->buf[last].mb_m = m;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		free -= map->dm_nsegs;
		used += map->dm_nsegs;
		prod = current;
	}

	/* Hand the newly filled descriptors to the hardware. */
	if (used)
		mvpp2_write(sc->sc, MVPP2_AGGR_TXQ_UPDATE_REG, used);

	if (txq->prod != prod)
		txq->prod = prod;
}
1806
/*
 * Interface ioctl handler.  Runs at splnet; unhandled requests fall
 * through to ether_ioctl().  An ENETRESET result from any case is
 * translated into a filter reprogram (mvpp2_iff()) when running.
 */
int
mvpp2_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
{
	struct mvpp2_port *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)addr;
	int error = 0, s;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/* Already running: only filters need updating. */
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				mvpp2_up(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				mvpp2_down(sc);
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCGIFRXR:
		error = mvpp2_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
		break;

	case SIOCGIFSFFPAGE:
		/* Serialize SFP EEPROM access across ports. */
		error = rw_enter(&mvpp2_sff_lock, RW_WRITE|RW_INTR);
		if (error != 0)
			break;

		error = sfp_get_sffpage(sc->sc_sfp, (struct if_sffpage *)addr);
		rw_exit(&mvpp2_sff_lock);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_ac, cmd, addr);
		break;
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			mvpp2_iff(sc);
		error = 0;
	}

	splx(s);
	return (error);
}
1864
1865 int
mvpp2_rxrinfo(struct mvpp2_port * sc,struct if_rxrinfo * ifri)1866 mvpp2_rxrinfo(struct mvpp2_port *sc, struct if_rxrinfo *ifri)
1867 {
1868 struct mvpp2_rx_queue *rxq;
1869 struct if_rxring_info *ifrs, *ifr;
1870 unsigned int i;
1871 int error;
1872
1873 ifrs = mallocarray(sc->sc_nrxq, sizeof(*ifrs), M_TEMP,
1874 M_WAITOK|M_ZERO|M_CANFAIL);
1875 if (ifrs == NULL)
1876 return (ENOMEM);
1877
1878 for (i = 0; i < sc->sc_nrxq; i++) {
1879 rxq = &sc->sc_rxqs[i];
1880 ifr = &ifrs[i];
1881
1882 snprintf(ifr->ifr_name, sizeof(ifr->ifr_name), "%u", i);
1883 ifr->ifr_size = MCLBYTES;
1884 ifr->ifr_info = rxq->rxring;
1885 }
1886
1887 error = if_rxr_info_ioctl(ifri, i, ifrs);
1888 free(ifrs, M_TEMP, i * sizeof(*ifrs));
1889
1890 return (error);
1891 }
1892
/*
 * Watchdog handler; currently a stub that only logs that it fired.
 */
void
mvpp2_watchdog(struct ifnet *ifp)
{
	printf("%s\n", __func__);
}
1898
1899 int
mvpp2_media_change(struct ifnet * ifp)1900 mvpp2_media_change(struct ifnet *ifp)
1901 {
1902 struct mvpp2_port *sc = ifp->if_softc;
1903
1904 if (LIST_FIRST(&sc->sc_mii.mii_phys))
1905 mii_mediachg(&sc->sc_mii);
1906
1907 return (0);
1908 }
1909
1910 void
mvpp2_media_status(struct ifnet * ifp,struct ifmediareq * ifmr)1911 mvpp2_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
1912 {
1913 struct mvpp2_port *sc = ifp->if_softc;
1914
1915 if (LIST_FIRST(&sc->sc_mii.mii_phys))
1916 mii_pollstat(&sc->sc_mii);
1917
1918 ifmr->ifm_active = sc->sc_mii.mii_media_active;
1919 ifmr->ifm_status = sc->sc_mii.mii_media_status;
1920 }
1921
1922 int
mvpp2_mii_readreg(struct device * self,int phy,int reg)1923 mvpp2_mii_readreg(struct device *self, int phy, int reg)
1924 {
1925 struct mvpp2_port *sc = (void *)self;
1926 return sc->sc_mdio->md_readreg(sc->sc_mdio->md_cookie, phy, reg);
1927 }
1928
1929 void
mvpp2_mii_writereg(struct device * self,int phy,int reg,int val)1930 mvpp2_mii_writereg(struct device *self, int phy, int reg, int val)
1931 {
1932 struct mvpp2_port *sc = (void *)self;
1933 return sc->sc_mdio->md_writereg(sc->sc_mdio->md_cookie, phy, reg, val);
1934 }
1935
/*
 * MII status change callback: propagate the new PHY link state to the
 * MAC via mvpp2_port_change().
 */
void
mvpp2_mii_statchg(struct device *self)
{
	struct mvpp2_port *sc = (void *)self;

	mvpp2_port_change(sc);
}
1942
/*
 * Derive link status and media from the MAC's in-band status registers
 * and propagate the result via mvpp2_port_change().  The previously
 * reported media subtype is kept for modes where the MAC does not
 * report a speed (10GbaseR/XAUI and 1000/2500baseX).
 */
void
mvpp2_inband_statchg(struct mvpp2_port *sc)
{
	uint64_t subtype = IFM_SUBTYPE(sc->sc_mii.mii_media_active);
	uint32_t reg;

	sc->sc_mii.mii_media_status = IFM_AVALID;
	sc->sc_mii.mii_media_active = IFM_ETHER;

	if (sc->sc_gop_id == 0 && (sc->sc_phy_mode == PHY_MODE_10GBASER ||
	    sc->sc_phy_mode == PHY_MODE_XAUI)) {
		/* XLG MAC reports only link; duplex is forced to full. */
		reg = mvpp2_xlg_read(sc, MV_XLG_MAC_PORT_STATUS_REG);
		if (reg & MV_XLG_MAC_PORT_STATUS_LINKSTATUS)
			sc->sc_mii.mii_media_status |= IFM_ACTIVE;
		sc->sc_mii.mii_media_active |= IFM_FDX;
		sc->sc_mii.mii_media_active |= subtype;
	} else {
		/* GMAC reports link, duplex and (except baseX) speed. */
		reg = mvpp2_gmac_read(sc, MVPP2_PORT_STATUS0_REG);
		if (reg & MVPP2_PORT_STATUS0_LINKUP)
			sc->sc_mii.mii_media_status |= IFM_ACTIVE;
		if (reg & MVPP2_PORT_STATUS0_FULLDX)
			sc->sc_mii.mii_media_active |= IFM_FDX;
		if (sc->sc_phy_mode == PHY_MODE_2500BASEX)
			sc->sc_mii.mii_media_active |= subtype;
		else if (sc->sc_phy_mode == PHY_MODE_1000BASEX)
			sc->sc_mii.mii_media_active |= subtype;
		else if (reg & MVPP2_PORT_STATUS0_GMIISPEED)
			sc->sc_mii.mii_media_active |= IFM_1000_T;
		else if (reg & MVPP2_PORT_STATUS0_MIISPEED)
			sc->sc_mii.mii_media_active |= IFM_100_TX;
		else
			sc->sc_mii.mii_media_active |= IFM_10_T;
	}

	mvpp2_port_change(sc);
}
1979
/*
 * Apply the current link state to the MAC.  When in-band status is in
 * use the hardware tracks link itself, so only the cached sc_link flag
 * is updated; otherwise link up/down is forced through the XLG or GMAC
 * control registers, along with speed and duplex for the GMAC.
 */
void
mvpp2_port_change(struct mvpp2_port *sc)
{
	uint32_t reg;

	sc->sc_link = !!(sc->sc_mii.mii_media_status & IFM_ACTIVE);

	if (sc->sc_inband_status)
		return;

	if (sc->sc_link) {
		if (sc->sc_phy_mode == PHY_MODE_10GBASER ||
		    sc->sc_phy_mode == PHY_MODE_XAUI) {
			/* Force link up on the XLG MAC. */
			reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
			reg &= ~MV_XLG_MAC_CTRL0_FORCELINKDOWN;
			reg |= MV_XLG_MAC_CTRL0_FORCELINKPASS;
			mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, reg);
		} else {
			/* Force link up and program speed/duplex bits. */
			reg = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);
			reg &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
			reg |= MVPP2_GMAC_FORCE_LINK_PASS;
			reg &= ~MVPP2_GMAC_CONFIG_MII_SPEED;
			reg &= ~MVPP2_GMAC_CONFIG_GMII_SPEED;
			reg &= ~MVPP2_GMAC_CONFIG_FULL_DUPLEX;
			/* All gigabit-class subtypes use the GMII speed bit. */
			if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_2500_KX ||
			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_2500_SX ||
			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_CX ||
			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_LX ||
			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_KX ||
			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_SX ||
			    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_1000_T)
				reg |= MVPP2_GMAC_CONFIG_GMII_SPEED;
			if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_100_TX)
				reg |= MVPP2_GMAC_CONFIG_MII_SPEED;
			if ((sc->sc_mii.mii_media_active & IFM_GMASK) == IFM_FDX)
				reg |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
			mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, reg);
		}
	} else {
		if (sc->sc_phy_mode == PHY_MODE_10GBASER ||
		    sc->sc_phy_mode == PHY_MODE_XAUI) {
			/* Force link down on the XLG MAC. */
			reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
			reg &= ~MV_XLG_MAC_CTRL0_FORCELINKPASS;
			reg |= MV_XLG_MAC_CTRL0_FORCELINKDOWN;
			mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, reg);
		} else {
			/* Force link down on the GMAC. */
			reg = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);
			reg &= ~MVPP2_GMAC_FORCE_LINK_PASS;
			reg |= MVPP2_GMAC_FORCE_LINK_DOWN;
			mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, reg);
		}
	}
}
2033
/*
 * One-second periodic timeout: tick the MII state machine at splnet
 * and reschedule ourselves.
 */
void
mvpp2_tick(void *arg)
{
	struct mvpp2_port *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick, 1);
}
2046
/*
 * Link change interrupt handler.  Reads the XLG or GMAC interrupt
 * cause register for this port (NOTE(review): presumably these cause
 * registers are clear-on-read - confirm against the datasheet) and,
 * if a link change is flagged while in-band status is in use,
 * refreshes the media state.
 */
int
mvpp2_link_intr(void *arg)
{
	struct mvpp2_port *sc = arg;
	uint32_t reg;
	int event = 0;

	if (sc->sc_gop_id == 0 && (sc->sc_phy_mode == PHY_MODE_10GBASER ||
	    sc->sc_phy_mode == PHY_MODE_XAUI)) {
		reg = mvpp2_xlg_read(sc, MV_XLG_INTERRUPT_CAUSE_REG);
		if (reg & MV_XLG_INTERRUPT_LINK_CHANGE)
			event = 1;
	} else if (sc->sc_phy_mode == PHY_MODE_2500BASEX ||
	    sc->sc_phy_mode == PHY_MODE_1000BASEX ||
	    sc->sc_phy_mode == PHY_MODE_SGMII ||
	    sc->sc_phy_mode == PHY_MODE_RGMII ||
	    sc->sc_phy_mode == PHY_MODE_RGMII_ID ||
	    sc->sc_phy_mode == PHY_MODE_RGMII_RXID ||
	    sc->sc_phy_mode == PHY_MODE_RGMII_TXID) {
		reg = mvpp2_gmac_read(sc, MVPP2_GMAC_INT_CAUSE_REG);
		if (reg & MVPP2_GMAC_INT_CAUSE_LINK_CHANGE)
			event = 1;
	}

	if (event && sc->sc_inband_status)
		mvpp2_inband_statchg(sc);

	return (1);
}
2076
/*
 * Main per-port interrupt handler.  Reads the RX/TX cause register and
 * dispatches: misc causes are acknowledged and cleared, TX-done causes
 * go to mvpp2_tx_proc() and RX causes to mvpp2_rx_proc(), each with a
 * bitmask of the queues that need service.
 */
int
mvpp2_intr(void *arg)
{
	struct mvpp2_port *sc = arg;
	uint32_t reg;

	reg = mvpp2_read(sc->sc, MVPP2_ISR_RX_TX_CAUSE_REG(sc->sc_id));
	if (reg & MVPP2_CAUSE_MISC_SUM_MASK) {
		/* Clear misc cause and strip it from the summary. */
		mvpp2_write(sc->sc, MVPP2_ISR_MISC_CAUSE_REG, 0);
		mvpp2_write(sc->sc, MVPP2_ISR_RX_TX_CAUSE_REG(sc->sc_id),
		    reg & ~MVPP2_CAUSE_MISC_SUM_MASK);
	}
	if (reg & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK)
		mvpp2_tx_proc(sc,
		    (reg & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK) >>
		    MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET);

	if (reg & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK)
		mvpp2_rx_proc(sc,
		    reg & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK);

	return (1);
}
2100
2101 void
mvpp2_tx_proc(struct mvpp2_port * sc,uint8_t queues)2102 mvpp2_tx_proc(struct mvpp2_port *sc, uint8_t queues)
2103 {
2104 struct mvpp2_tx_queue *txq;
2105 int i;
2106
2107 for (i = 0; i < sc->sc_ntxq; i++) {
2108 txq = &sc->sc_txqs[i];
2109 if ((queues & (1 << i)) == 0)
2110 continue;
2111 mvpp2_txq_proc(sc, txq);
2112 }
2113 }
2114
/*
 * Reclaim completed transmit descriptors for one logical queue: ask
 * the hardware how many packets were sent, then walk the shared
 * aggregated ring from the consumer index, unloading DMA maps and
 * freeing mbufs, and restart the send queue if it was stalled.
 */
void
mvpp2_txq_proc(struct mvpp2_port *sc, struct mvpp2_tx_queue *txq)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mvpp2_tx_queue *aggr_txq = &sc->sc->sc_aggr_txqs[0];
	struct mvpp2_buf *txb;
	int i, idx, nsent;

	/* XXX: this is a percpu register! */
	nsent = (mvpp2_read(sc->sc, MVPP2_TXQ_SENT_REG(txq->id)) &
	    MVPP2_TRANSMITTED_COUNT_MASK) >>
	    MVPP2_TRANSMITTED_COUNT_OFFSET;

	for (i = 0; i < nsent; i++) {
		idx = aggr_txq->cons;
		KASSERT(idx < MVPP2_AGGR_TXQ_SIZE);

		/* Only the last slot of each packet holds an mbuf. */
		txb = &aggr_txq->buf[idx];
		if (txb->mb_m) {
			bus_dmamap_sync(sc->sc_dmat, txb->mb_map, 0,
			    txb->mb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, txb->mb_map);

			m_freem(txb->mb_m);
			txb->mb_m = NULL;
		}

		aggr_txq->cons = (aggr_txq->cons + 1) % MVPP2_AGGR_TXQ_SIZE;
	}

	/* Descriptors were freed; let mvpp2_start() run again. */
	if (ifq_is_oactive(&ifp->if_snd))
		ifq_restart(&ifp->if_snd);
}
2148
2149 void
mvpp2_rx_proc(struct mvpp2_port * sc,uint8_t queues)2150 mvpp2_rx_proc(struct mvpp2_port *sc, uint8_t queues)
2151 {
2152 struct mvpp2_rx_queue *rxq;
2153 int i;
2154
2155 for (i = 0; i < sc->sc_nrxq; i++) {
2156 rxq = &sc->sc_rxqs[i];
2157 if ((queues & (1 << i)) == 0)
2158 continue;
2159 mvpp2_rxq_proc(sc, rxq);
2160 }
2161
2162 mvpp2_rx_refill(sc);
2163 }
2164
/*
 * Process received frames on one RX queue.  Each descriptor's cookie
 * encodes the buffer-manager pool id (bits 16-31) and the rxbuf index
 * within it (bits 0-15); the mbuf is detached and handed to the
 * network stack, and the rxbuf index is queued on the pool freelist
 * for mvpp2_rx_refill() to replenish.
 */
void
mvpp2_rxq_proc(struct mvpp2_port *sc, struct mvpp2_rx_queue *rxq)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf_list ml = MBUF_LIST_INITIALIZER();
	struct mvpp2_rx_desc *rxd;
	struct mvpp2_bm_pool *bm;
	struct mvpp2_buf *rxb;
	struct mbuf *m;
	uint64_t virt;
	uint32_t i, nrecv, pool;

	nrecv = mvpp2_rxq_received(sc, rxq->id);
	if (!nrecv)
		return;

	/* One BM pool per CPU; we only touch our own. */
	pool = curcpu()->ci_cpuid;
	KASSERT(pool < sc->sc->sc_npools);
	bm = &sc->sc->sc_bm_pools[pool];

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring), 0,
	    MVPP2_DMA_LEN(rxq->ring),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (i = 0; i < nrecv; i++) {
		rxd = &rxq->descs[rxq->cons];
		virt = rxd->buf_cookie_bm_qset_cls_info;
		KASSERT(((virt >> 16) & 0xffff) == pool);
		KASSERT((virt & 0xffff) < MVPP2_BM_SIZE);
		rxb = &bm->rxbuf[virt & 0xffff];
		KASSERT(rxb->mb_m != NULL);

		bus_dmamap_sync(sc->sc_dmat, rxb->mb_map, 0,
		    rxd->data_size, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, rxb->mb_map);

		m = rxb->mb_m;
		rxb->mb_m = NULL;

		/* Strip the Marvell header prepended by the hardware. */
		m->m_pkthdr.len = m->m_len = rxd->data_size;
		m_adj(m, MVPP2_MH_SIZE);
		ml_enqueue(&ml, m);

		/* Remember this slot needs a fresh buffer. */
		KASSERT(bm->freelist[bm->free_prod] == -1);
		bm->freelist[bm->free_prod] = virt & 0xffffffff;
		bm->free_prod = (bm->free_prod + 1) % MVPP2_BM_SIZE;

		rxq->cons = (rxq->cons + 1) % MVPP2_NRXDESC;
	}

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring), 0,
	    MVPP2_DMA_LEN(rxq->ring),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Tell the hardware how many descriptors were consumed. */
	mvpp2_rxq_status_update(sc, rxq->id, nrecv, nrecv);

	if_input(ifp, &ml);
}
2223
2224 /*
2225 * We have a pool per core, and since we should not assume that
2226 * RX buffers are always used in order, keep a list of rxbuf[]
2227 * indices that should be filled with an mbuf, if possible.
2228 */
/*
 * Refill this CPU's buffer-manager pool: for each rxbuf index queued
 * on the freelist, allocate a fresh mbuf and hand the buffer back to
 * the hardware by writing its cookie and physical address (high bits
 * first) to the BM release registers.  Stops early if mbuf allocation
 * fails; the slot stays on the freelist for a later attempt.
 */
void
mvpp2_rx_refill(struct mvpp2_port *sc)
{
	struct mvpp2_bm_pool *bm;
	struct mvpp2_buf *rxb;
	uint64_t phys, virt;
	int pool;

	pool = curcpu()->ci_cpuid;
	KASSERT(pool < sc->sc->sc_npools);
	bm = &sc->sc->sc_bm_pools[pool];

	while (bm->freelist[bm->free_cons] != -1) {
		virt = bm->freelist[bm->free_cons];
		KASSERT(((virt >> 16) & 0xffff) == pool);
		KASSERT((virt & 0xffff) < MVPP2_BM_SIZE);
		rxb = &bm->rxbuf[virt & 0xffff];
		KASSERT(rxb->mb_m == NULL);

		rxb->mb_m = mvpp2_alloc_mbuf(sc->sc, rxb->mb_map);
		if (rxb->mb_m == NULL)
			break;

		bm->freelist[bm->free_cons] = -1;
		bm->free_cons = (bm->free_cons + 1) % MVPP2_BM_SIZE;

		/*
		 * Release the buffer to the pool.  NOTE(review): the
		 * PHY_RLS write presumably triggers the release and must
		 * come last - confirm against the datasheet.
		 */
		phys = rxb->mb_map->dm_segs[0].ds_addr;
		mvpp2_write(sc->sc, MVPP22_BM_ADDR_HIGH_RLS_REG,
		    (((virt >> 32) & MVPP22_ADDR_HIGH_MASK)
		    << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) |
		    ((phys >> 32) & MVPP22_ADDR_HIGH_MASK));
		mvpp2_write(sc->sc, MVPP2_BM_VIRT_RLS_REG,
		    virt & 0xffffffff);
		mvpp2_write(sc->sc, MVPP2_BM_PHY_RLS_REG(pool),
		    phys & 0xffffffff);
	}
}
2266
/*
 * Bring the interface up: enable the SFP (if present), program parser
 * entries for our unicast/broadcast addresses, initialize the TX and
 * RX queues and coalescing, program filters, unmask interrupts,
 * configure and enable the MAC, and start the tick timeout.
 */
void
mvpp2_up(struct mvpp2_port *sc)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	int i;

	if (sc->sc_sfp) {
		rw_enter(&mvpp2_sff_lock, RW_WRITE);
		sfp_enable(sc->sc_sfp);
		rw_exit(&mvpp2_sff_lock);
	}

	/* Accept broadcast and our own station address. */
	mvpp2_prs_mac_da_accept(sc, etherbroadcastaddr, 1);
	mvpp2_prs_mac_da_accept(sc, sc->sc_lladdr, 1);
	mvpp2_prs_tag_mode_set(sc->sc, sc->sc_id, MVPP2_TAG_TYPE_MH);
	mvpp2_prs_def_flow(sc);

	for (i = 0; i < sc->sc_ntxq; i++)
		mvpp2_txq_hw_init(sc, &sc->sc_txqs[i]);

	mvpp2_tx_time_coal_set(sc, sc->sc_tx_time_coal);

	for (i = 0; i < sc->sc_nrxq; i++)
		mvpp2_rxq_hw_init(sc, &sc->sc_rxqs[i]);

	/* FIXME: rx buffer fill */

	/* Configure media. */
	if (LIST_FIRST(&sc->sc_mii.mii_phys))
		mii_mediachg(&sc->sc_mii);

	/* Program promiscuous mode and multicast filters. */
	mvpp2_iff(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	mvpp2_txp_max_tx_size_set(sc);

	/* XXX: single vector */
	mvpp2_write(sc->sc, MVPP2_ISR_RX_TX_MASK_REG(sc->sc_id),
	    MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK |
	    MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK |
	    MVPP2_CAUSE_MISC_SUM_MASK);
	mvpp2_interrupts_enable(sc, (1 << 0));

	mvpp2_mac_config(sc);
	mvpp2_egress_enable(sc);
	mvpp2_ingress_enable(sc);

	timeout_add_sec(&sc->sc_tick, 1);
}
2319
/*
 * Allocate and initialize the shared aggregated TX queue: descriptor
 * ring DMA memory, one DMA map per slot, and the hardware base
 * address/size registers.  The producer index is read back from the
 * hardware rather than assumed to be zero.
 */
void
mvpp2_aggr_txq_hw_init(struct mvpp2_softc *sc, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_buf *txb;
	int i;

	txq->ring = mvpp2_dmamem_alloc(sc,
	    MVPP2_AGGR_TXQ_SIZE * sizeof(struct mvpp2_tx_desc), 32);
	KASSERT(txq->ring != NULL);
	txq->descs = MVPP2_DMA_KVA(txq->ring);

	txq->buf = mallocarray(MVPP2_AGGR_TXQ_SIZE, sizeof(struct mvpp2_buf),
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < MVPP2_AGGR_TXQ_SIZE; i++) {
		txb = &txq->buf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, MVPP2_NTXSEGS,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->mb_map);
		txb->mb_m = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(txq->ring), 0,
	    MVPP2_DMA_LEN(txq->ring), BUS_DMASYNC_PREWRITE);

	txq->prod = mvpp2_read(sc, MVPP2_AGGR_TXQ_INDEX_REG(txq->id));
	mvpp2_write(sc, MVPP2_AGGR_TXQ_DESC_ADDR_REG(txq->id),
	    MVPP2_DMA_DVA(txq->ring) >> MVPP22_DESC_ADDR_OFFS);
	mvpp2_write(sc, MVPP2_AGGR_TXQ_DESC_SIZE_REG(txq->id),
	    MVPP2_AGGR_TXQ_SIZE);
}
2350
/*
 * Allocate and initialize one logical (per-port) TX queue, then
 * program the hardware via the indirect TXQ register window (select
 * the queue with MVPP2_TXQ_NUM_REG, then write its parameters) and
 * set up the descriptor prefetch and WRR/EJP scheduler state.
 */
void
mvpp2_txq_hw_init(struct mvpp2_port *sc, struct mvpp2_tx_queue *txq)
{
	struct mvpp2_buf *txb;
	int desc, desc_per_txq;
	uint32_t reg;
	int i;

	txq->prod = txq->cons = 0;
//	txq->last_desc = txq->size - 1;

	txq->ring = mvpp2_dmamem_alloc(sc->sc,
	    MVPP2_NTXDESC * sizeof(struct mvpp2_tx_desc), 32);
	KASSERT(txq->ring != NULL);
	txq->descs = MVPP2_DMA_KVA(txq->ring);

	txq->buf = mallocarray(MVPP2_NTXDESC, sizeof(struct mvpp2_buf),
	    M_DEVBUF, M_WAITOK);

	for (i = 0; i < MVPP2_NTXDESC; i++) {
		txb = &txq->buf[i];
		bus_dmamap_create(sc->sc_dmat, MCLBYTES, MVPP2_NTXSEGS,
		    MCLBYTES, 0, BUS_DMA_WAITOK, &txb->mb_map);
		txb->mb_m = NULL;
	}

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(txq->ring), 0,
	    MVPP2_DMA_LEN(txq->ring), BUS_DMASYNC_PREWRITE);

	/* Indirect access: select the queue, then program it. */
	mvpp2_write(sc->sc, MVPP2_TXQ_NUM_REG, txq->id);
	mvpp2_write(sc->sc, MVPP2_TXQ_DESC_ADDR_REG,
	    MVPP2_DMA_DVA(txq->ring));
	mvpp2_write(sc->sc, MVPP2_TXQ_DESC_SIZE_REG,
	    MVPP2_NTXDESC & MVPP2_TXQ_DESC_SIZE_MASK);
	mvpp2_write(sc->sc, MVPP2_TXQ_INDEX_REG, 0);
	mvpp2_write(sc->sc, MVPP2_TXQ_RSVD_CLR_REG,
	    txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
	reg = mvpp2_read(sc->sc, MVPP2_TXQ_PENDING_REG);
	reg &= ~MVPP2_TXQ_PENDING_MASK;
	mvpp2_write(sc->sc, MVPP2_TXQ_PENDING_REG, reg);

	/*
	 * Carve out this queue's slice of the shared descriptor
	 * prefetch buffer: 16 descriptors per queue, indexed by port
	 * and logical queue number.
	 */
	desc_per_txq = 16;
	desc = (sc->sc_id * MVPP2_MAX_TXQ * desc_per_txq) +
	    (txq->log_id * desc_per_txq);

	mvpp2_write(sc->sc, MVPP2_TXQ_PREF_BUF_REG,
	    MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
	    MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));

	/* WRR / EJP configuration - indirect access */
	mvpp2_write(sc->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
	    mvpp2_egress_port(sc));

	reg = mvpp2_read(sc->sc, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
	reg &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
	reg |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
	reg |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
	mvpp2_write(sc->sc, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), reg);

	mvpp2_write(sc->sc, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
	    MVPP2_TXQ_TOKEN_SIZE_MAX);

	mvpp2_tx_pkts_coal_set(sc, txq, txq->done_pkts_coal);

	/* Read once to clear the sent-packet counter for this queue. */
	mvpp2_read(sc->sc, MVPP2_TXQ_SENT_REG(txq->id));
}
2417
/*
 * Allocate and initialize one RX queue and program the hardware via
 * the indirect RXQ register window; offsets, coalescing and the
 * initial (all-available) descriptor count are set last.
 */
void
mvpp2_rxq_hw_init(struct mvpp2_port *sc, struct mvpp2_rx_queue *rxq)
{
	rxq->prod = rxq->cons = 0;

	rxq->ring = mvpp2_dmamem_alloc(sc->sc,
	    MVPP2_NRXDESC * sizeof(struct mvpp2_rx_desc), 32);
	KASSERT(rxq->ring != NULL);
	rxq->descs = MVPP2_DMA_KVA(rxq->ring);

	bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring),
	    0, MVPP2_DMA_LEN(rxq->ring),
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Indirect access: select the queue, then program it. */
	mvpp2_write(sc->sc, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
	mvpp2_write(sc->sc, MVPP2_RXQ_NUM_REG, rxq->id);
	mvpp2_write(sc->sc, MVPP2_RXQ_DESC_ADDR_REG,
	    MVPP2_DMA_DVA(rxq->ring) >> MVPP22_DESC_ADDR_OFFS);
	mvpp2_write(sc->sc, MVPP2_RXQ_DESC_SIZE_REG, MVPP2_NRXDESC);
	mvpp2_write(sc->sc, MVPP2_RXQ_INDEX_REG, 0);
	mvpp2_rxq_offset_set(sc, rxq->id, 0);
	mvpp2_rx_pkts_coal_set(sc, rxq, rxq->pkts_coal);
	mvpp2_rx_time_coal_set(sc, rxq, rxq->time_coal);
	mvpp2_rxq_status_update(sc, rxq->id, 0, MVPP2_NRXDESC);
}
2443
/*
 * Put the port MACs into reset: set the GMAC PORTMACRESET bit and, on
 * GOP port 0 (which also has an XLG MAC), clear the active-low XLG
 * MACRESETN bit.
 */
void
mvpp2_mac_reset_assert(struct mvpp2_port *sc)
{
	mvpp2_gmac_write(sc, MVPP2_PORT_CTRL2_REG,
	    mvpp2_gmac_read(sc, MVPP2_PORT_CTRL2_REG) |
	    MVPP2_PORT_CTRL2_PORTMACRESET);
	if (sc->sc_gop_id == 0)
		mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG,
		    mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG) &
		    ~MV_XLG_MAC_CTRL0_MACRESETN);
}
2455
/*
 * Hold both PCS blocks (MPCS and XPCS) in reset.  Only GOP port 0 has
 * these blocks; other ports return immediately.
 */
void
mvpp2_pcs_reset_assert(struct mvpp2_port *sc)
{
	uint32_t reg;

	if (sc->sc_gop_id != 0)
		return;

	/* MPCS: stop the TX/RX serdes and MAC clocks. */
	reg = mvpp2_mpcs_read(sc, MVPP22_MPCS_CLOCK_RESET);
	reg |= MVPP22_MPCS_CLK_DIV_PHASE_SET;
	reg &= ~MVPP22_MPCS_TX_SD_CLK_RESET;
	reg &= ~MVPP22_MPCS_RX_SD_CLK_RESET;
	reg &= ~MVPP22_MPCS_MAC_CLK_RESET;
	mvpp2_mpcs_write(sc, MVPP22_MPCS_CLOCK_RESET, reg);
	/* XPCS: clear the (active-low - TODO confirm) reset bit. */
	reg = mvpp2_xpcs_read(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG);
	reg &= ~MVPP22_XPCS_PCSRESET;
	mvpp2_xpcs_write(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG, reg);
}
2474
/*
 * Release the PCS block matching the configured PHY mode from reset:
 * the MPCS for 10GbaseR, the XPCS for XAUI.  Only GOP port 0 has
 * these blocks.
 */
void
mvpp2_pcs_reset_deassert(struct mvpp2_port *sc)
{
	uint32_t reg;

	if (sc->sc_gop_id != 0)
		return;

	if (sc->sc_phy_mode == PHY_MODE_10GBASER) {
		reg = mvpp2_mpcs_read(sc, MVPP22_MPCS_CLOCK_RESET);
		reg &= ~MVPP22_MPCS_CLK_DIV_PHASE_SET;
		reg |= MVPP22_MPCS_TX_SD_CLK_RESET;
		reg |= MVPP22_MPCS_RX_SD_CLK_RESET;
		reg |= MVPP22_MPCS_MAC_CLK_RESET;
		mvpp2_mpcs_write(sc, MVPP22_MPCS_CLOCK_RESET, reg);
	} else if (sc->sc_phy_mode == PHY_MODE_XAUI) {
		reg = mvpp2_xpcs_read(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG);
		reg |= MVPP22_XPCS_PCSRESET;
		mvpp2_xpcs_write(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG, reg);
	}
}
2496
/*
 * (Re)configure the MAC for the current PHY mode.  Sequence: force
 * link down, disable and reset MAC/PCS, power-cycle and reconfigure
 * the COMPHY lane, program the XLG or GMAC and GOP blocks, release
 * resets, set the maximum frame size, re-enable the port, and restore
 * link handling (in-band, or forced via mvpp2_port_change()).
 */
void
mvpp2_mac_config(struct mvpp2_port *sc)
{
	uint32_t reg;

	/* Force link down while reconfiguring. */
	reg = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);
	reg &= ~MVPP2_GMAC_FORCE_LINK_PASS;
	reg |= MVPP2_GMAC_FORCE_LINK_DOWN;
	mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, reg);
	if (sc->sc_gop_id == 0) {
		reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
		reg &= ~MV_XLG_MAC_CTRL0_FORCELINKPASS;
		reg |= MV_XLG_MAC_CTRL0_FORCELINKDOWN;
		mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, reg);
	}

	mvpp2_port_disable(sc);

	mvpp2_mac_reset_assert(sc);
	mvpp2_pcs_reset_assert(sc);

	mvpp2_gop_intr_mask(sc);
	mvpp2_comphy_config(sc, 0);

	if (sc->sc_gop_id == 0 && (sc->sc_phy_mode == PHY_MODE_10GBASER ||
	    sc->sc_phy_mode == PHY_MODE_XAUI))
		mvpp2_xlg_config(sc);
	else
		mvpp2_gmac_config(sc);

	mvpp2_comphy_config(sc, 1);
	mvpp2_gop_config(sc);

	mvpp2_pcs_reset_deassert(sc);

	/* Select 10G or GMAC mode on the XLG MAC (GOP port 0 only). */
	if (sc->sc_gop_id == 0) {
		reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL3_REG);
		reg &= ~MV_XLG_MAC_CTRL3_MACMODESELECT_MASK;
		if (sc->sc_phy_mode == PHY_MODE_10GBASER ||
		    sc->sc_phy_mode == PHY_MODE_XAUI)
			reg |= MV_XLG_MAC_CTRL3_MACMODESELECT_10G;
		else
			reg |= MV_XLG_MAC_CTRL3_MACMODESELECT_GMAC;
		mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL3_REG, reg);
	}

	/*
	 * Program the maximum RX frame size; the field holds the limit
	 * divided by two (presumably units of two bytes - TODO confirm
	 * against the datasheet).
	 */
	if (sc->sc_gop_id == 0 && (sc->sc_phy_mode == PHY_MODE_10GBASER ||
	    sc->sc_phy_mode == PHY_MODE_XAUI)) {
		reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL1_REG);
		reg &= ~MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_MASK;
		reg |= ((MCLBYTES - MVPP2_MH_SIZE) / 2) <<
		    MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_OFFS;
		mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL1_REG, reg);
	} else {
		reg = mvpp2_gmac_read(sc, MVPP2_GMAC_CTRL_0_REG);
		reg &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
		reg |= ((MCLBYTES - MVPP2_MH_SIZE) / 2) <<
		    MVPP2_GMAC_MAX_RX_SIZE_OFFS;
		mvpp2_gmac_write(sc, MVPP2_GMAC_CTRL_0_REG, reg);
	}

	mvpp2_gop_intr_unmask(sc);

	/* Release the GMAC from reset and wait for it to take effect. */
	if (!(sc->sc_phy_mode == PHY_MODE_10GBASER ||
	    sc->sc_phy_mode == PHY_MODE_XAUI)) {
		mvpp2_gmac_write(sc, MVPP2_PORT_CTRL2_REG,
		    mvpp2_gmac_read(sc, MVPP2_PORT_CTRL2_REG) &
		    ~MVPP2_PORT_CTRL2_PORTMACRESET);
		while (mvpp2_gmac_read(sc, MVPP2_PORT_CTRL2_REG) &
		    MVPP2_PORT_CTRL2_PORTMACRESET)
			;
	}

	mvpp2_port_enable(sc);

	if (sc->sc_inband_status) {
		/* Let the hardware track link; stop forcing it. */
		reg = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);
		reg &= ~MVPP2_GMAC_FORCE_LINK_PASS;
		reg &= ~MVPP2_GMAC_FORCE_LINK_DOWN;
		mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, reg);
		if (sc->sc_gop_id == 0) {
			reg = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
			reg &= ~MV_XLG_MAC_CTRL0_FORCELINKPASS;
			reg &= ~MV_XLG_MAC_CTRL0_FORCELINKDOWN;
			mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, reg);
		}
	} else
		mvpp2_port_change(sc);
}
2586
/*
 * Configure the XLG (10G) MAC: release the MAC reset (MACRESETN is
 * active-low), disable the idle check for link, enable forwarding of
 * PFC and 802.3x flow-control frames, then busy-wait until the reset
 * release is visible.
 */
void
mvpp2_xlg_config(struct mvpp2_port *sc)
{
	uint32_t ctl0, ctl4;

	ctl0 = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG);
	ctl4 = mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL4_REG);

	ctl0 |= MV_XLG_MAC_CTRL0_MACRESETN;
	ctl4 &= ~MV_XLG_MAC_CTRL4_EN_IDLE_CHECK_FOR_LINK;
	ctl4 |= MV_XLG_MAC_CTRL4_FORWARD_PFC_EN;
	ctl4 |= MV_XLG_MAC_CTRL4_FORWARD_802_3X_FC_EN;

	mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL0_REG, ctl0);
	mvpp2_xlg_write(sc, MV_XLG_PORT_MAC_CTRL4_REG, ctl4);

	/* Wait for the MAC to come out of reset. */
	while ((mvpp2_xlg_read(sc, MV_XLG_PORT_MAC_CTRL0_REG) &
	    MV_XLG_MAC_CTRL0_MACRESETN) == 0)
		;
}
2608
/*
 * Configure the GMAC control registers for the selected PHY mode:
 * PCS/in-band-AN enables and the clock/pin selection bits in CTRL4,
 * plus the auto-negotiation word (AN only when in-band status is
 * used; SGMII negotiates speed and duplex, 802.3z modes force
 * gigabit full duplex).
 */
void
mvpp2_gmac_config(struct mvpp2_port *sc)
{
	uint32_t ctl0, ctl2, ctl4, panc;

	/* Setup phy. */
	ctl0 = mvpp2_gmac_read(sc, MVPP2_PORT_CTRL0_REG);
	ctl2 = mvpp2_gmac_read(sc, MVPP2_PORT_CTRL2_REG);
	ctl4 = mvpp2_gmac_read(sc, MVPP2_PORT_CTRL4_REG);
	panc = mvpp2_gmac_read(sc, MVPP2_GMAC_AUTONEG_CONFIG);

	/* Start from a clean slate for mode-dependent bits. */
	ctl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
	ctl2 &= ~(MVPP2_GMAC_PORT_RESET_MASK | MVPP2_GMAC_PCS_ENABLE_MASK |
	    MVPP2_GMAC_INBAND_AN_MASK);
	panc &= ~(MVPP2_GMAC_AN_DUPLEX_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
	    MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FC_ADV_EN |
	    MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
	    MVPP2_GMAC_IN_BAND_AUTONEG);

	switch (sc->sc_phy_mode) {
	case PHY_MODE_XAUI:
	case PHY_MODE_10GBASER:
		break;
	case PHY_MODE_2500BASEX:
	case PHY_MODE_1000BASEX:
		ctl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
		ctl4 &= ~MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL;
		ctl4 |= MVPP2_PORT_CTRL4_SYNC_BYPASS;
		ctl4 |= MVPP2_PORT_CTRL4_DP_CLK_SEL;
		ctl4 |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE;
		break;
	case PHY_MODE_SGMII:
		ctl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
		ctl2 |= MVPP2_GMAC_INBAND_AN_MASK;
		ctl4 &= ~MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL;
		ctl4 |= MVPP2_PORT_CTRL4_SYNC_BYPASS;
		ctl4 |= MVPP2_PORT_CTRL4_DP_CLK_SEL;
		ctl4 |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE;
		break;
	case PHY_MODE_RGMII:
	case PHY_MODE_RGMII_ID:
	case PHY_MODE_RGMII_RXID:
	case PHY_MODE_RGMII_TXID:
		ctl4 &= ~MVPP2_PORT_CTRL4_DP_CLK_SEL;
		ctl4 |= MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL;
		ctl4 |= MVPP2_PORT_CTRL4_SYNC_BYPASS;
		ctl4 |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE;
		break;
	}

	/* Use Auto-Negotiation for Inband Status only */
	if (sc->sc_inband_status) {
		panc &= ~MVPP2_GMAC_CONFIG_MII_SPEED;
		panc &= ~MVPP2_GMAC_CONFIG_GMII_SPEED;
		panc &= ~MVPP2_GMAC_CONFIG_FULL_DUPLEX;
		panc |= MVPP2_GMAC_IN_BAND_AUTONEG;
		/* TODO: read mode from SFP */
		if (sc->sc_phy_mode == PHY_MODE_SGMII) {
			/* SGMII */
			panc |= MVPP2_GMAC_AN_SPEED_EN;
			panc |= MVPP2_GMAC_AN_DUPLEX_EN;
		} else {
			/* 802.3z */
			ctl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
			panc |= MVPP2_GMAC_CONFIG_GMII_SPEED;
			panc |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
		}
	}

	mvpp2_gmac_write(sc, MVPP2_PORT_CTRL0_REG, ctl0);
	mvpp2_gmac_write(sc, MVPP2_PORT_CTRL2_REG, ctl2);
	mvpp2_gmac_write(sc, MVPP2_PORT_CTRL4_REG, ctl4);
	mvpp2_gmac_write(sc, MVPP2_GMAC_AUTONEG_CONFIG, panc);
}
2683
2684 #define COMPHY_BASE 0x120000
2685 #define COMPHY_SIP_POWER_ON 0x82000001
2686 #define COMPHY_SIP_POWER_OFF 0x82000002
2687 #define COMPHY_SPEED(x) ((x) << 2)
2688 #define COMPHY_SPEED_1_25G 0 /* SGMII 1G */
2689 #define COMPHY_SPEED_2_5G 1
2690 #define COMPHY_SPEED_3_125G 2 /* SGMII 2.5G */
2691 #define COMPHY_SPEED_5G 3
2692 #define COMPHY_SPEED_5_15625G 4 /* XFI 5G */
2693 #define COMPHY_SPEED_6G 5
2694 #define COMPHY_SPEED_10_3125G 6 /* XFI 10G */
2695 #define COMPHY_UNIT(x) ((x) << 8)
2696 #define COMPHY_MODE(x) ((x) << 12)
2697 #define COMPHY_MODE_SATA 1
2698 #define COMPHY_MODE_SGMII 2 /* SGMII 1G */
2699 #define COMPHY_MODE_HS_SGMII 3 /* SGMII 2.5G */
2700 #define COMPHY_MODE_USB3H 4
2701 #define COMPHY_MODE_USB3D 5
2702 #define COMPHY_MODE_PCIE 6
2703 #define COMPHY_MODE_RXAUI 7
2704 #define COMPHY_MODE_XFI 8
2705 #define COMPHY_MODE_SFI 9
2706 #define COMPHY_MODE_USB3 10
2707 #define COMPHY_MODE_AP 11
2708
/*
 * Power the port's COMPHY SerDes lane on or off through the secure
 * monitor (SMC) call interface.  The lane index and unit come from
 * the "phys" FDT property; mode and speed are derived from the
 * configured PHY mode.  Unsupported modes are silently ignored.
 */
void
mvpp2_comphy_config(struct mvpp2_port *sc, int on)
{
	int node, phys[2], lane, unit;
	uint32_t mode;

	if (OF_getpropintarray(sc->sc_node, "phys", phys, sizeof(phys)) !=
	    sizeof(phys))
		return;
	node = OF_getnodebyphandle(phys[0]);
	if (!node)
		return;

	lane = OF_getpropint(node, "reg", 0);
	unit = phys[1];

	switch (sc->sc_phy_mode) {
	case PHY_MODE_XAUI:
		mode = COMPHY_MODE(COMPHY_MODE_RXAUI) |
		    COMPHY_UNIT(unit);
		break;
	case PHY_MODE_10GBASER:
		mode = COMPHY_MODE(COMPHY_MODE_XFI) |
		    COMPHY_SPEED(COMPHY_SPEED_10_3125G) |
		    COMPHY_UNIT(unit);
		break;
	case PHY_MODE_2500BASEX:
		mode = COMPHY_MODE(COMPHY_MODE_HS_SGMII) |
		    COMPHY_SPEED(COMPHY_SPEED_3_125G) |
		    COMPHY_UNIT(unit);
		break;
	case PHY_MODE_1000BASEX:
	case PHY_MODE_SGMII:
		mode = COMPHY_MODE(COMPHY_MODE_SGMII) |
		    COMPHY_SPEED(COMPHY_SPEED_1_25G) |
		    COMPHY_UNIT(unit);
		break;
	default:
		return;
	}

	if (on)
		smc_call(COMPHY_SIP_POWER_ON, sc->sc->sc_ioh_paddr + COMPHY_BASE,
		    lane, mode);
	else
		smc_call(COMPHY_SIP_POWER_OFF, sc->sc->sc_ioh_paddr + COMPHY_BASE,
		    lane, 0);
}
2757
2758 void
mvpp2_gop_config(struct mvpp2_port * sc)2759 mvpp2_gop_config(struct mvpp2_port *sc)
2760 {
2761 uint32_t reg;
2762
2763 if (sc->sc->sc_rm == NULL)
2764 return;
2765
2766 if (sc->sc_phy_mode == PHY_MODE_RGMII ||
2767 sc->sc_phy_mode == PHY_MODE_RGMII_ID ||
2768 sc->sc_phy_mode == PHY_MODE_RGMII_RXID ||
2769 sc->sc_phy_mode == PHY_MODE_RGMII_TXID) {
2770 if (sc->sc_gop_id == 0)
2771 return;
2772 reg = regmap_read_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0);
2773 reg |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
2774 regmap_write_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0, reg);
2775 reg = regmap_read_4(sc->sc->sc_rm, GENCONF_CTRL0);
2776 if (sc->sc_gop_id == 2)
2777 reg |= GENCONF_CTRL0_PORT0_RGMII |
2778 GENCONF_CTRL0_PORT1_RGMII;
2779 else if (sc->sc_gop_id == 3)
2780 reg |= GENCONF_CTRL0_PORT1_RGMII_MII;
2781 regmap_write_4(sc->sc->sc_rm, GENCONF_CTRL0, reg);
2782 } else if (sc->sc_phy_mode == PHY_MODE_2500BASEX ||
2783 sc->sc_phy_mode == PHY_MODE_1000BASEX ||
2784 sc->sc_phy_mode == PHY_MODE_SGMII) {
2785 reg = regmap_read_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0);
2786 reg |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
2787 GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
2788 regmap_write_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0, reg);
2789 if (sc->sc_gop_id > 1) {
2790 reg = regmap_read_4(sc->sc->sc_rm, GENCONF_CTRL0);
2791 if (sc->sc_gop_id == 2)
2792 reg &= ~GENCONF_CTRL0_PORT0_RGMII;
2793 else if (sc->sc_gop_id == 3)
2794 reg &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
2795 regmap_write_4(sc->sc->sc_rm, GENCONF_CTRL0, reg);
2796 }
2797 } else if (sc->sc_phy_mode == PHY_MODE_10GBASER) {
2798 if (sc->sc_gop_id != 0)
2799 return;
2800 reg = mvpp2_xpcs_read(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG);
2801 reg &= ~MVPP22_XPCS_PCSMODE_MASK;
2802 reg &= ~MVPP22_XPCS_LANEACTIVE_MASK;
2803 reg |= 2 << MVPP22_XPCS_LANEACTIVE_OFFS;
2804 mvpp2_xpcs_write(sc, MVPP22_XPCS_GLOBAL_CFG_0_REG, reg);
2805 reg = mvpp2_mpcs_read(sc, MVPP22_MPCS40G_COMMON_CONTROL);
2806 reg &= ~MVPP22_MPCS_FORWARD_ERROR_CORRECTION_MASK;
2807 mvpp2_mpcs_write(sc, MVPP22_MPCS40G_COMMON_CONTROL, reg);
2808 reg = mvpp2_mpcs_read(sc, MVPP22_MPCS_CLOCK_RESET);
2809 reg &= ~MVPP22_MPCS_CLK_DIVISION_RATIO_MASK;
2810 reg |= MVPP22_MPCS_CLK_DIVISION_RATIO_DEFAULT;
2811 mvpp2_mpcs_write(sc, MVPP22_MPCS_CLOCK_RESET, reg);
2812 } else
2813 return;
2814
2815 reg = regmap_read_4(sc->sc->sc_rm, GENCONF_PORT_CTRL1);
2816 reg |= GENCONF_PORT_CTRL1_RESET(sc->sc_gop_id) |
2817 GENCONF_PORT_CTRL1_EN(sc->sc_gop_id);
2818 regmap_write_4(sc->sc->sc_rm, GENCONF_PORT_CTRL1, reg);
2819
2820 reg = regmap_read_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0);
2821 reg |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
2822 regmap_write_4(sc->sc->sc_rm, GENCONF_PORT_CTRL0, reg);
2823
2824 reg = regmap_read_4(sc->sc->sc_rm, GENCONF_SOFT_RESET1);
2825 reg |= GENCONF_SOFT_RESET1_GOP;
2826 regmap_write_4(sc->sc->sc_rm, GENCONF_SOFT_RESET1, reg);
2827 }
2828
2829 void
mvpp2_gop_intr_mask(struct mvpp2_port * sc)2830 mvpp2_gop_intr_mask(struct mvpp2_port *sc)
2831 {
2832 uint32_t reg;
2833
2834 if (sc->sc_gop_id == 0) {
2835 reg = mvpp2_xlg_read(sc, MV_XLG_EXTERNAL_INTERRUPT_MASK_REG);
2836 reg &= ~MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_XLG;
2837 reg &= ~MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_GIG;
2838 mvpp2_xlg_write(sc, MV_XLG_EXTERNAL_INTERRUPT_MASK_REG, reg);
2839 }
2840
2841 reg = mvpp2_gmac_read(sc, MVPP2_GMAC_INT_SUM_MASK_REG);
2842 reg &= ~MVPP2_GMAC_INT_SUM_CAUSE_LINK_CHANGE;
2843 mvpp2_gmac_write(sc, MVPP2_GMAC_INT_SUM_MASK_REG, reg);
2844 }
2845
2846 void
mvpp2_gop_intr_unmask(struct mvpp2_port * sc)2847 mvpp2_gop_intr_unmask(struct mvpp2_port *sc)
2848 {
2849 uint32_t reg;
2850
2851 reg = mvpp2_gmac_read(sc, MVPP2_GMAC_INT_SUM_MASK_REG);
2852 reg |= MVPP2_GMAC_INT_SUM_CAUSE_LINK_CHANGE;
2853 mvpp2_gmac_write(sc, MVPP2_GMAC_INT_SUM_MASK_REG, reg);
2854
2855 if (sc->sc_gop_id == 0) {
2856 reg = mvpp2_xlg_read(sc, MV_XLG_EXTERNAL_INTERRUPT_MASK_REG);
2857 if (sc->sc_phy_mode == PHY_MODE_10GBASER ||
2858 sc->sc_phy_mode == PHY_MODE_XAUI)
2859 reg |= MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_XLG;
2860 else
2861 reg |= MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_GIG;
2862 mvpp2_xlg_write(sc, MV_XLG_EXTERNAL_INTERRUPT_MASK_REG, reg);
2863 }
2864 }
2865
2866 void
mvpp2_down(struct mvpp2_port * sc)2867 mvpp2_down(struct mvpp2_port *sc)
2868 {
2869 struct ifnet *ifp = &sc->sc_ac.ac_if;
2870 uint32_t reg;
2871 int i;
2872
2873 timeout_del(&sc->sc_tick);
2874
2875 ifp->if_flags &= ~IFF_RUNNING;
2876 ifq_clr_oactive(&ifp->if_snd);
2877
2878 mvpp2_egress_disable(sc);
2879 mvpp2_ingress_disable(sc);
2880
2881 mvpp2_mac_reset_assert(sc);
2882 mvpp2_pcs_reset_assert(sc);
2883
2884 /* XXX: single vector */
2885 mvpp2_interrupts_disable(sc, (1 << 0));
2886 mvpp2_write(sc->sc, MVPP2_ISR_RX_TX_MASK_REG(sc->sc_id), 0);
2887
2888 reg = mvpp2_read(sc->sc, MVPP2_TX_PORT_FLUSH_REG);
2889 reg |= MVPP2_TX_PORT_FLUSH_MASK(sc->sc_id);
2890 mvpp2_write(sc->sc, MVPP2_TX_PORT_FLUSH_REG, reg);
2891
2892 for (i = 0; i < sc->sc_ntxq; i++)
2893 mvpp2_txq_hw_deinit(sc, &sc->sc_txqs[i]);
2894
2895 reg &= ~MVPP2_TX_PORT_FLUSH_MASK(sc->sc_id);
2896 mvpp2_write(sc->sc, MVPP2_TX_PORT_FLUSH_REG, reg);
2897
2898 for (i = 0; i < sc->sc_nrxq; i++)
2899 mvpp2_rxq_hw_deinit(sc, &sc->sc_rxqs[i]);
2900
2901 if (sc->sc_sfp) {
2902 rw_enter(&mvpp2_sff_lock, RW_WRITE);
2903 sfp_disable(sc->sc_sfp);
2904 rw_exit(&mvpp2_sff_lock);
2905 }
2906 }
2907
2908 void
mvpp2_txq_hw_deinit(struct mvpp2_port * sc,struct mvpp2_tx_queue * txq)2909 mvpp2_txq_hw_deinit(struct mvpp2_port *sc, struct mvpp2_tx_queue *txq)
2910 {
2911 struct mvpp2_buf *txb;
2912 int i, pending;
2913 uint32_t reg;
2914
2915 mvpp2_write(sc->sc, MVPP2_TXQ_NUM_REG, txq->id);
2916 reg = mvpp2_read(sc->sc, MVPP2_TXQ_PREF_BUF_REG);
2917 reg |= MVPP2_TXQ_DRAIN_EN_MASK;
2918 mvpp2_write(sc->sc, MVPP2_TXQ_PREF_BUF_REG, reg);
2919
2920 /*
2921 * the queue has been stopped so wait for all packets
2922 * to be transmitted.
2923 */
2924 i = 0;
2925 do {
2926 if (i >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
2927 printf("%s: port %d: cleaning queue %d timed out\n",
2928 sc->sc_dev.dv_xname, sc->sc_id, txq->log_id);
2929 break;
2930 }
2931 delay(1000);
2932 i++;
2933
2934 pending = mvpp2_read(sc->sc, MVPP2_TXQ_PENDING_REG) &
2935 MVPP2_TXQ_PENDING_MASK;
2936 } while (pending);
2937
2938 reg &= ~MVPP2_TXQ_DRAIN_EN_MASK;
2939 mvpp2_write(sc->sc, MVPP2_TXQ_PREF_BUF_REG, reg);
2940
2941 mvpp2_write(sc->sc, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0);
2942 mvpp2_write(sc->sc, MVPP2_TXQ_NUM_REG, txq->id);
2943 mvpp2_write(sc->sc, MVPP2_TXQ_DESC_ADDR_REG, 0);
2944 mvpp2_write(sc->sc, MVPP2_TXQ_DESC_SIZE_REG, 0);
2945 mvpp2_read(sc->sc, MVPP2_TXQ_SENT_REG(txq->id));
2946
2947 for (i = 0; i < MVPP2_NTXDESC; i++) {
2948 txb = &txq->buf[i];
2949 if (txb->mb_m) {
2950 bus_dmamap_sync(sc->sc_dmat, txb->mb_map, 0,
2951 txb->mb_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2952 bus_dmamap_unload(sc->sc_dmat, txb->mb_map);
2953 m_freem(txb->mb_m);
2954 }
2955 bus_dmamap_destroy(sc->sc_dmat, txb->mb_map);
2956 }
2957
2958 mvpp2_dmamem_free(sc->sc, txq->ring);
2959 free(txq->buf, M_DEVBUF, sizeof(struct mvpp2_buf) *
2960 MVPP2_NTXDESC);
2961 }
2962
2963 void
mvpp2_rxq_hw_drop(struct mvpp2_port * sc,struct mvpp2_rx_queue * rxq)2964 mvpp2_rxq_hw_drop(struct mvpp2_port *sc, struct mvpp2_rx_queue *rxq)
2965 {
2966 struct mvpp2_rx_desc *rxd;
2967 struct mvpp2_bm_pool *bm;
2968 uint64_t phys, virt;
2969 uint32_t i, nrecv, pool;
2970 struct mvpp2_buf *rxb;
2971
2972 nrecv = mvpp2_rxq_received(sc, rxq->id);
2973 if (!nrecv)
2974 return;
2975
2976 bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring), 0,
2977 MVPP2_DMA_LEN(rxq->ring),
2978 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2979
2980 for (i = 0; i < nrecv; i++) {
2981 rxd = &rxq->descs[rxq->cons];
2982 virt = rxd->buf_cookie_bm_qset_cls_info;
2983 pool = (virt >> 16) & 0xffff;
2984 KASSERT(pool < sc->sc->sc_npools);
2985 bm = &sc->sc->sc_bm_pools[pool];
2986 KASSERT((virt & 0xffff) < MVPP2_BM_SIZE);
2987 rxb = &bm->rxbuf[virt & 0xffff];
2988 KASSERT(rxb->mb_m != NULL);
2989 virt &= 0xffffffff;
2990 phys = rxb->mb_map->dm_segs[0].ds_addr;
2991 mvpp2_write(sc->sc, MVPP22_BM_ADDR_HIGH_RLS_REG,
2992 (((virt >> 32) & MVPP22_ADDR_HIGH_MASK)
2993 << MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) |
2994 ((phys >> 32) & MVPP22_ADDR_HIGH_MASK));
2995 mvpp2_write(sc->sc, MVPP2_BM_VIRT_RLS_REG,
2996 virt & 0xffffffff);
2997 mvpp2_write(sc->sc, MVPP2_BM_PHY_RLS_REG(pool),
2998 phys & 0xffffffff);
2999 rxq->cons = (rxq->cons + 1) % MVPP2_NRXDESC;
3000 }
3001
3002 bus_dmamap_sync(sc->sc_dmat, MVPP2_DMA_MAP(rxq->ring), 0,
3003 MVPP2_DMA_LEN(rxq->ring),
3004 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
3005
3006 mvpp2_rxq_status_update(sc, rxq->id, nrecv, nrecv);
3007 }
3008
3009 void
mvpp2_rxq_hw_deinit(struct mvpp2_port * sc,struct mvpp2_rx_queue * rxq)3010 mvpp2_rxq_hw_deinit(struct mvpp2_port *sc, struct mvpp2_rx_queue *rxq)
3011 {
3012 mvpp2_rxq_hw_drop(sc, rxq);
3013
3014 mvpp2_write(sc->sc, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
3015 mvpp2_write(sc->sc, MVPP2_RXQ_NUM_REG, rxq->id);
3016 mvpp2_write(sc->sc, MVPP2_RXQ_DESC_ADDR_REG, 0);
3017 mvpp2_write(sc->sc, MVPP2_RXQ_DESC_SIZE_REG, 0);
3018
3019 mvpp2_dmamem_free(sc->sc, rxq->ring);
3020 }
3021
3022 void
mvpp2_rxq_long_pool_set(struct mvpp2_port * port,int lrxq,int pool)3023 mvpp2_rxq_long_pool_set(struct mvpp2_port *port, int lrxq, int pool)
3024 {
3025 uint32_t val;
3026 int prxq;
3027
3028 /* get queue physical ID */
3029 prxq = port->sc_rxqs[lrxq].id;
3030
3031 val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(prxq));
3032 val &= ~MVPP2_RXQ_POOL_LONG_MASK;
3033 val |= ((pool << MVPP2_RXQ_POOL_LONG_OFFS) & MVPP2_RXQ_POOL_LONG_MASK);
3034
3035 mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(prxq), val);
3036 }
3037
3038 void
mvpp2_rxq_short_pool_set(struct mvpp2_port * port,int lrxq,int pool)3039 mvpp2_rxq_short_pool_set(struct mvpp2_port *port, int lrxq, int pool)
3040 {
3041 uint32_t val;
3042 int prxq;
3043
3044 /* get queue physical ID */
3045 prxq = port->sc_rxqs[lrxq].id;
3046
3047 val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(prxq));
3048 val &= ~MVPP2_RXQ_POOL_SHORT_MASK;
3049 val |= ((pool << MVPP2_RXQ_POOL_SHORT_OFFS) & MVPP2_RXQ_POOL_SHORT_MASK);
3050
3051 mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(prxq), val);
3052 }
3053
3054 void
mvpp2_iff(struct mvpp2_port * sc)3055 mvpp2_iff(struct mvpp2_port *sc)
3056 {
3057 struct arpcom *ac = &sc->sc_ac;
3058 struct ifnet *ifp = &sc->sc_ac.ac_if;
3059 struct ether_multi *enm;
3060 struct ether_multistep step;
3061
3062 ifp->if_flags &= ~IFF_ALLMULTI;
3063
3064 /* Removes all but broadcast and (new) lladdr */
3065 mvpp2_prs_mac_del_all(sc);
3066
3067 if (ifp->if_flags & IFF_PROMISC) {
3068 mvpp2_prs_mac_promisc_set(sc->sc, sc->sc_id,
3069 MVPP2_PRS_L2_UNI_CAST, 1);
3070 mvpp2_prs_mac_promisc_set(sc->sc, sc->sc_id,
3071 MVPP2_PRS_L2_MULTI_CAST, 1);
3072 return;
3073 }
3074
3075 mvpp2_prs_mac_promisc_set(sc->sc, sc->sc_id,
3076 MVPP2_PRS_L2_UNI_CAST, 0);
3077 mvpp2_prs_mac_promisc_set(sc->sc, sc->sc_id,
3078 MVPP2_PRS_L2_MULTI_CAST, 0);
3079
3080 if (ac->ac_multirangecnt > 0 ||
3081 ac->ac_multicnt > MVPP2_PRS_MAC_MC_FILT_MAX) {
3082 ifp->if_flags |= IFF_ALLMULTI;
3083 mvpp2_prs_mac_promisc_set(sc->sc, sc->sc_id,
3084 MVPP2_PRS_L2_MULTI_CAST, 1);
3085 } else {
3086 ETHER_FIRST_MULTI(step, ac, enm);
3087 while (enm != NULL) {
3088 mvpp2_prs_mac_da_accept(sc, enm->enm_addrlo, 1);
3089 ETHER_NEXT_MULTI(step, enm);
3090 }
3091 }
3092 }
3093
3094 struct mvpp2_dmamem *
mvpp2_dmamem_alloc(struct mvpp2_softc * sc,bus_size_t size,bus_size_t align)3095 mvpp2_dmamem_alloc(struct mvpp2_softc *sc, bus_size_t size, bus_size_t align)
3096 {
3097 struct mvpp2_dmamem *mdm;
3098 int nsegs;
3099
3100 mdm = malloc(sizeof(*mdm), M_DEVBUF, M_WAITOK | M_ZERO);
3101 mdm->mdm_size = size;
3102
3103 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
3104 BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
3105 goto mdmfree;
3106
3107 if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &mdm->mdm_seg, 1,
3108 &nsegs, BUS_DMA_WAITOK) != 0)
3109 goto destroy;
3110
3111 if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
3112 &mdm->mdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
3113 goto free;
3114
3115 if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
3116 NULL, BUS_DMA_WAITOK) != 0)
3117 goto unmap;
3118
3119 bzero(mdm->mdm_kva, size);
3120
3121 return (mdm);
3122
3123 unmap:
3124 bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
3125 free:
3126 bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
3127 destroy:
3128 bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
3129 mdmfree:
3130 free(mdm, M_DEVBUF, 0);
3131
3132 return (NULL);
3133 }
3134
3135 void
mvpp2_dmamem_free(struct mvpp2_softc * sc,struct mvpp2_dmamem * mdm)3136 mvpp2_dmamem_free(struct mvpp2_softc *sc, struct mvpp2_dmamem *mdm)
3137 {
3138 bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
3139 bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
3140 bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
3141 free(mdm, M_DEVBUF, 0);
3142 }
3143
3144 struct mbuf *
mvpp2_alloc_mbuf(struct mvpp2_softc * sc,bus_dmamap_t map)3145 mvpp2_alloc_mbuf(struct mvpp2_softc *sc, bus_dmamap_t map)
3146 {
3147 struct mbuf *m = NULL;
3148
3149 m = MCLGETL(NULL, M_DONTWAIT, MCLBYTES);
3150 if (!m)
3151 return (NULL);
3152 m->m_len = m->m_pkthdr.len = MCLBYTES;
3153
3154 if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) {
3155 printf("%s: could not load mbuf DMA map", DEVNAME(sc));
3156 m_freem(m);
3157 return (NULL);
3158 }
3159
3160 bus_dmamap_sync(sc->sc_dmat, map, 0,
3161 m->m_pkthdr.len, BUS_DMASYNC_PREREAD);
3162
3163 return (m);
3164 }
3165
3166 void
mvpp2_interrupts_enable(struct mvpp2_port * port,int cpu_mask)3167 mvpp2_interrupts_enable(struct mvpp2_port *port, int cpu_mask)
3168 {
3169 mvpp2_write(port->sc, MVPP2_ISR_ENABLE_REG(port->sc_id),
3170 MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
3171 }
3172
3173 void
mvpp2_interrupts_disable(struct mvpp2_port * port,int cpu_mask)3174 mvpp2_interrupts_disable(struct mvpp2_port *port, int cpu_mask)
3175 {
3176 mvpp2_write(port->sc, MVPP2_ISR_ENABLE_REG(port->sc_id),
3177 MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
3178 }
3179
3180 int
mvpp2_egress_port(struct mvpp2_port * port)3181 mvpp2_egress_port(struct mvpp2_port *port)
3182 {
3183 return MVPP2_MAX_TCONT + port->sc_id;
3184 }
3185
3186 int
mvpp2_txq_phys(int port,int txq)3187 mvpp2_txq_phys(int port, int txq)
3188 {
3189 return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
3190 }
3191
3192 void
mvpp2_defaults_set(struct mvpp2_port * port)3193 mvpp2_defaults_set(struct mvpp2_port *port)
3194 {
3195 int val, queue;
3196
3197 mvpp2_write(port->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
3198 mvpp2_egress_port(port));
3199 mvpp2_write(port->sc, MVPP2_TXP_SCHED_CMD_1_REG, 0);
3200
3201 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
3202 mvpp2_write(port->sc, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);
3203
3204 mvpp2_write(port->sc, MVPP2_TXP_SCHED_PERIOD_REG, port->sc->sc_tclk /
3205 (1000 * 1000));
3206 val = mvpp2_read(port->sc, MVPP2_TXP_SCHED_REFILL_REG);
3207 val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
3208 val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
3209 val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
3210 mvpp2_write(port->sc, MVPP2_TXP_SCHED_REFILL_REG, val);
3211 val = MVPP2_TXP_TOKEN_SIZE_MAX;
3212 mvpp2_write(port->sc, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
3213
3214 /* set maximum_low_latency_packet_size value to 256 */
3215 mvpp2_write(port->sc, MVPP2_RX_CTRL_REG(port->sc_id),
3216 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
3217 MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
3218
3219 /* mask all interrupts to all present cpus */
3220 mvpp2_interrupts_disable(port, (0xf << 0));
3221 }
3222
3223 void
mvpp2_ingress_enable(struct mvpp2_port * port)3224 mvpp2_ingress_enable(struct mvpp2_port *port)
3225 {
3226 uint32_t val;
3227 int lrxq, queue;
3228
3229 for (lrxq = 0; lrxq < port->sc_nrxq; lrxq++) {
3230 queue = port->sc_rxqs[lrxq].id;
3231 val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(queue));
3232 val &= ~MVPP2_RXQ_DISABLE_MASK;
3233 mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(queue), val);
3234 }
3235 }
3236
3237 void
mvpp2_ingress_disable(struct mvpp2_port * port)3238 mvpp2_ingress_disable(struct mvpp2_port *port)
3239 {
3240 uint32_t val;
3241 int lrxq, queue;
3242
3243 for (lrxq = 0; lrxq < port->sc_nrxq; lrxq++) {
3244 queue = port->sc_rxqs[lrxq].id;
3245 val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(queue));
3246 val |= MVPP2_RXQ_DISABLE_MASK;
3247 mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(queue), val);
3248 }
3249 }
3250
3251 void
mvpp2_egress_enable(struct mvpp2_port * port)3252 mvpp2_egress_enable(struct mvpp2_port *port)
3253 {
3254 struct mvpp2_tx_queue *txq;
3255 uint32_t qmap;
3256 int queue;
3257
3258 qmap = 0;
3259 for (queue = 0; queue < port->sc_ntxq; queue++) {
3260 txq = &port->sc_txqs[queue];
3261
3262 if (txq->descs != NULL) {
3263 qmap |= (1 << queue);
3264 }
3265 }
3266
3267 mvpp2_write(port->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
3268 mvpp2_egress_port(port));
3269 mvpp2_write(port->sc, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
3270 }
3271
3272 void
mvpp2_egress_disable(struct mvpp2_port * port)3273 mvpp2_egress_disable(struct mvpp2_port *port)
3274 {
3275 uint32_t reg_data;
3276 int i;
3277
3278 mvpp2_write(port->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
3279 mvpp2_egress_port(port));
3280 reg_data = (mvpp2_read(port->sc, MVPP2_TXP_SCHED_Q_CMD_REG)) &
3281 MVPP2_TXP_SCHED_ENQ_MASK;
3282 if (reg_data)
3283 mvpp2_write(port->sc, MVPP2_TXP_SCHED_Q_CMD_REG,
3284 reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET);
3285
3286 i = 0;
3287 do {
3288 if (i >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
3289 printf("%s: tx stop timed out, status=0x%08x\n",
3290 port->sc_dev.dv_xname, reg_data);
3291 break;
3292 }
3293 delay(1000);
3294 i++;
3295 reg_data = mvpp2_read(port->sc, MVPP2_TXP_SCHED_Q_CMD_REG);
3296 } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
3297 }
3298
3299 void
mvpp2_port_enable(struct mvpp2_port * port)3300 mvpp2_port_enable(struct mvpp2_port *port)
3301 {
3302 uint32_t val;
3303
3304 if (port->sc_gop_id == 0 && (port->sc_phy_mode == PHY_MODE_10GBASER ||
3305 port->sc_phy_mode == PHY_MODE_XAUI)) {
3306 val = mvpp2_xlg_read(port, MV_XLG_PORT_MAC_CTRL0_REG);
3307 val |= MV_XLG_MAC_CTRL0_PORTEN;
3308 val &= ~MV_XLG_MAC_CTRL0_MIBCNTDIS;
3309 mvpp2_xlg_write(port, MV_XLG_PORT_MAC_CTRL0_REG, val);
3310 } else {
3311 val = mvpp2_gmac_read(port, MVPP2_GMAC_CTRL_0_REG);
3312 val |= MVPP2_GMAC_PORT_EN_MASK;
3313 val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
3314 mvpp2_gmac_write(port, MVPP2_GMAC_CTRL_0_REG, val);
3315 }
3316 }
3317
3318 void
mvpp2_port_disable(struct mvpp2_port * port)3319 mvpp2_port_disable(struct mvpp2_port *port)
3320 {
3321 uint32_t val;
3322
3323 if (port->sc_gop_id == 0 && (port->sc_phy_mode == PHY_MODE_10GBASER ||
3324 port->sc_phy_mode == PHY_MODE_XAUI)) {
3325 val = mvpp2_xlg_read(port, MV_XLG_PORT_MAC_CTRL0_REG);
3326 val &= ~MV_XLG_MAC_CTRL0_PORTEN;
3327 mvpp2_xlg_write(port, MV_XLG_PORT_MAC_CTRL0_REG, val);
3328 }
3329
3330 val = mvpp2_gmac_read(port, MVPP2_GMAC_CTRL_0_REG);
3331 val &= ~MVPP2_GMAC_PORT_EN_MASK;
3332 mvpp2_gmac_write(port, MVPP2_GMAC_CTRL_0_REG, val);
3333 }
3334
3335 int
mvpp2_rxq_received(struct mvpp2_port * port,int rxq_id)3336 mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
3337 {
3338 uint32_t val = mvpp2_read(port->sc, MVPP2_RXQ_STATUS_REG(rxq_id));
3339
3340 return val & MVPP2_RXQ_OCCUPIED_MASK;
3341 }
3342
3343 void
mvpp2_rxq_status_update(struct mvpp2_port * port,int rxq_id,int used_count,int free_count)3344 mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
3345 int used_count, int free_count)
3346 {
3347 uint32_t val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
3348 mvpp2_write(port->sc, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
3349 }
3350
3351 void
mvpp2_rxq_offset_set(struct mvpp2_port * port,int prxq,int offset)3352 mvpp2_rxq_offset_set(struct mvpp2_port *port, int prxq, int offset)
3353 {
3354 uint32_t val;
3355
3356 offset = offset >> 5;
3357 val = mvpp2_read(port->sc, MVPP2_RXQ_CONFIG_REG(prxq));
3358 val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
3359 val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
3360 MVPP2_RXQ_PACKET_OFFSET_MASK);
3361 mvpp2_write(port->sc, MVPP2_RXQ_CONFIG_REG(prxq), val);
3362 }
3363
3364 void
mvpp2_txp_max_tx_size_set(struct mvpp2_port * port)3365 mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
3366 {
3367 uint32_t val, size, mtu;
3368 int txq;
3369
3370 mtu = MCLBYTES * 8;
3371 if (mtu > MVPP2_TXP_MTU_MAX)
3372 mtu = MVPP2_TXP_MTU_MAX;
3373
3374 /* WA for wrong token bucket update: set MTU value = 3*real MTU value */
3375 mtu = 3 * mtu;
3376
3377 /* indirect access to reg_valisters */
3378 mvpp2_write(port->sc, MVPP2_TXP_SCHED_PORT_INDEX_REG,
3379 mvpp2_egress_port(port));
3380
3381 /* set MTU */
3382 val = mvpp2_read(port->sc, MVPP2_TXP_SCHED_MTU_REG);
3383 val &= ~MVPP2_TXP_MTU_MAX;
3384 val |= mtu;
3385 mvpp2_write(port->sc, MVPP2_TXP_SCHED_MTU_REG, val);
3386
3387 /* TXP token size and all TXqs token size must be larger that MTU */
3388 val = mvpp2_read(port->sc, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
3389 size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
3390 if (size < mtu) {
3391 size = mtu;
3392 val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
3393 val |= size;
3394 mvpp2_write(port->sc, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
3395 }
3396
3397 for (txq = 0; txq < port->sc_ntxq; txq++) {
3398 val = mvpp2_read(port->sc, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
3399 size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
3400
3401 if (size < mtu) {
3402 size = mtu;
3403 val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
3404 val |= size;
3405 mvpp2_write(port->sc, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq), val);
3406 }
3407 }
3408 }
3409
3410 void
mvpp2_rx_pkts_coal_set(struct mvpp2_port * port,struct mvpp2_rx_queue * rxq,uint32_t pkts)3411 mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq,
3412 uint32_t pkts)
3413 {
3414 rxq->pkts_coal =
3415 pkts <= MVPP2_OCCUPIED_THRESH_MASK ?
3416 pkts : MVPP2_OCCUPIED_THRESH_MASK;
3417
3418 mvpp2_write(port->sc, MVPP2_RXQ_NUM_REG, rxq->id);
3419 mvpp2_write(port->sc, MVPP2_RXQ_THRESH_REG, rxq->pkts_coal);
3420
3421 }
3422
3423 void
mvpp2_tx_pkts_coal_set(struct mvpp2_port * port,struct mvpp2_tx_queue * txq,uint32_t pkts)3424 mvpp2_tx_pkts_coal_set(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
3425 uint32_t pkts)
3426 {
3427 txq->done_pkts_coal =
3428 pkts <= MVPP2_TRANSMITTED_THRESH_MASK ?
3429 pkts : MVPP2_TRANSMITTED_THRESH_MASK;
3430
3431 mvpp2_write(port->sc, MVPP2_TXQ_NUM_REG, txq->id);
3432 mvpp2_write(port->sc, MVPP2_TXQ_THRESH_REG,
3433 txq->done_pkts_coal << MVPP2_TRANSMITTED_THRESH_OFFSET);
3434 }
3435
3436 void
mvpp2_rx_time_coal_set(struct mvpp2_port * port,struct mvpp2_rx_queue * rxq,uint32_t usec)3437 mvpp2_rx_time_coal_set(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq,
3438 uint32_t usec)
3439 {
3440 uint32_t val;
3441
3442 val = (port->sc->sc_tclk / (1000 * 1000)) * usec;
3443 mvpp2_write(port->sc, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
3444
3445 rxq->time_coal = usec;
3446 }
3447
3448 void
mvpp2_tx_time_coal_set(struct mvpp2_port * port,uint32_t usec)3449 mvpp2_tx_time_coal_set(struct mvpp2_port *port, uint32_t usec)
3450 {
3451 uint32_t val;
3452
3453 val = (port->sc->sc_tclk / (1000 * 1000)) * usec;
3454 mvpp2_write(port->sc, MVPP2_ISR_TX_THRESHOLD_REG(port->sc_id), val);
3455
3456 port->sc_tx_time_coal = usec;
3457 }
3458
3459 void
mvpp2_prs_shadow_ri_set(struct mvpp2_softc * sc,int index,uint32_t ri,uint32_t ri_mask)3460 mvpp2_prs_shadow_ri_set(struct mvpp2_softc *sc, int index,
3461 uint32_t ri, uint32_t ri_mask)
3462 {
3463 sc->sc_prs_shadow[index].ri_mask = ri_mask;
3464 sc->sc_prs_shadow[index].ri = ri;
3465 }
3466
3467 void
mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry * pe,uint32_t lu)3468 mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, uint32_t lu)
3469 {
3470 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
3471
3472 pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
3473 pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
3474 }
3475
3476 void
mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry * pe,uint32_t port,int add)3477 mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe, uint32_t port, int add)
3478 {
3479 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
3480
3481 if (add)
3482 pe->tcam.byte[enable_off] &= ~(1 << port);
3483 else
3484 pe->tcam.byte[enable_off] |= (1 << port);
3485 }
3486
3487 void
mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry * pe,uint32_t port_mask)3488 mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe, uint32_t port_mask)
3489 {
3490 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
3491 uint8_t mask = MVPP2_PRS_PORT_MASK;
3492
3493 pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
3494 pe->tcam.byte[enable_off] &= ~mask;
3495 pe->tcam.byte[enable_off] |= ~port_mask & MVPP2_PRS_PORT_MASK;
3496 }
3497
3498 uint32_t
mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry * pe)3499 mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
3500 {
3501 int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
3502
3503 return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
3504 }
3505
3506 void
mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry * pe,uint32_t offs,uint8_t byte,uint8_t enable)3507 mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe, uint32_t offs,
3508 uint8_t byte, uint8_t enable)
3509 {
3510 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
3511 pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
3512 }
3513
3514 void
mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry * pe,uint32_t offs,uint8_t * byte,uint8_t * enable)3515 mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe, uint32_t offs,
3516 uint8_t *byte, uint8_t *enable)
3517 {
3518 *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
3519 *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
3520 }
3521
3522 int
mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry * pe,int offset,uint16_t data)3523 mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offset, uint16_t data)
3524 {
3525 int byte_offset = MVPP2_PRS_TCAM_DATA_BYTE(offset);
3526 uint16_t tcam_data;
3527
3528 tcam_data = (pe->tcam.byte[byte_offset + 1] << 8) |
3529 pe->tcam.byte[byte_offset];
3530 return tcam_data == data;
3531 }
3532
3533 void
mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry * pe,uint32_t bits,uint32_t enable)3534 mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe, uint32_t bits, uint32_t enable)
3535 {
3536 int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
3537
3538 for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
3539 if (!(enable & BIT(i)))
3540 continue;
3541
3542 if (bits & BIT(i))
3543 pe->tcam.byte[ai_idx] |= BIT(i);
3544 else
3545 pe->tcam.byte[ai_idx] &= ~BIT(i);
3546 }
3547
3548 pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
3549 }
3550
3551 int
mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry * pe)3552 mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
3553 {
3554 return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
3555 }
3556
3557 void
mvpp2_prs_tcam_data_word_get(struct mvpp2_prs_entry * pe,uint32_t data_offset,uint32_t * word,uint32_t * enable)3558 mvpp2_prs_tcam_data_word_get(struct mvpp2_prs_entry *pe, uint32_t data_offset,
3559 uint32_t *word, uint32_t *enable)
3560 {
3561 int index, position;
3562 uint8_t byte, mask;
3563
3564 for (index = 0; index < 4; index++) {
3565 position = (data_offset * sizeof(int)) + index;
3566 mvpp2_prs_tcam_data_byte_get(pe, position, &byte, &mask);
3567 ((uint8_t *)word)[index] = byte;
3568 ((uint8_t *)enable)[index] = mask;
3569 }
3570 }
3571
3572 void
mvpp2_prs_match_etype(struct mvpp2_prs_entry * pe,uint32_t offs,uint16_t ether_type)3573 mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, uint32_t offs,
3574 uint16_t ether_type)
3575 {
3576 mvpp2_prs_tcam_data_byte_set(pe, offs + 0, ether_type >> 8, 0xff);
3577 mvpp2_prs_tcam_data_byte_set(pe, offs + 1, ether_type & 0xff, 0xff);
3578 }
3579
3580 void
mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry * pe,uint32_t bit,uint32_t val)3581 mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, uint32_t bit, uint32_t val)
3582 {
3583 pe->sram.byte[bit / 8] |= (val << (bit % 8));
3584 }
3585
3586 void
mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry * pe,uint32_t bit,uint32_t val)3587 mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, uint32_t bit, uint32_t val)
3588 {
3589 pe->sram.byte[bit / 8] &= ~(val << (bit % 8));
3590 }
3591
3592 void
mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry * pe,uint32_t bits,uint32_t mask)3593 mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe, uint32_t bits, uint32_t mask)
3594 {
3595 int i;
3596
3597 for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
3598 if (!(mask & BIT(i)))
3599 continue;
3600
3601 if (bits & BIT(i))
3602 mvpp2_prs_sram_bits_set(pe,
3603 MVPP2_PRS_SRAM_RI_OFFS + i, 1);
3604 else
3605 mvpp2_prs_sram_bits_clear(pe,
3606 MVPP2_PRS_SRAM_RI_OFFS + i, 1);
3607
3608 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
3609 }
3610 }
3611
3612 int
mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry * pe)3613 mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
3614 {
3615 return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
3616 }
3617
3618 void
mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry * pe,uint32_t bits,uint32_t mask)3619 mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe, uint32_t bits, uint32_t mask)
3620 {
3621 int i;
3622
3623 for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
3624 if (!(mask & BIT(i)))
3625 continue;
3626
3627 if (bits & BIT(i))
3628 mvpp2_prs_sram_bits_set(pe,
3629 MVPP2_PRS_SRAM_AI_OFFS + i, 1);
3630 else
3631 mvpp2_prs_sram_bits_clear(pe,
3632 MVPP2_PRS_SRAM_AI_OFFS + i, 1);
3633
3634 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
3635 }
3636 }
3637
3638 int
mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry * pe)3639 mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
3640 {
3641 uint8_t bits;
3642 int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
3643 int ai_en_off = ai_off + 1;
3644 int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
3645
3646 bits = (pe->sram.byte[ai_off] >> ai_shift) |
3647 (pe->sram.byte[ai_en_off] << (8 - ai_shift));
3648
3649 return bits;
3650 }
3651
3652 void
mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry * pe,int shift,uint32_t op)3653 mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift, uint32_t op)
3654 {
3655 if (shift < 0) {
3656 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
3657 shift = -shift;
3658 } else {
3659 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
3660 }
3661
3662 pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] |=
3663 shift & MVPP2_PRS_SRAM_SHIFT_MASK;
3664 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
3665 MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
3666 mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
3667 mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
3668 }
3669
/*
 * Program the user-defined offset (UDF) field, its type and its
 * operation selector in the SRAM portion of a parser entry.  Both the
 * UDF and the op-select fields straddle a byte boundary, so the bits
 * that spill into the following byte are patched in by hand after
 * mvpp2_prs_sram_bits_set() has written the low part.
 */
void
mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe, uint32_t type, int offset,
    uint32_t op)
{
	uint8_t udf_byte, udf_byte_offset;
	uint8_t op_sel_udf_byte, op_sel_udf_byte_offset;

	/*
	 * Byte holding the spill-over part of each field, and the shift
	 * needed to extract that part from the full field value.
	 */
	udf_byte = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
	    MVPP2_PRS_SRAM_UDF_BITS);
	udf_byte_offset = (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8));
	op_sel_udf_byte = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
	    MVPP2_PRS_SRAM_OP_SEL_UDF_BITS);
	op_sel_udf_byte_offset = (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8));

	/* Negative offsets are stored as sign bit plus magnitude. */
	if (offset < 0) {
		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
		offset = -offset;
	} else {
		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
	}

	/* UDF offset value, including the bits in the following byte. */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
	    MVPP2_PRS_SRAM_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
	pe->sram.byte[udf_byte] &= ~(MVPP2_PRS_SRAM_UDF_MASK >> udf_byte_offset);
	pe->sram.byte[udf_byte] |= (offset >> udf_byte_offset);

	/* UDF type selector (callers pass e.g. MVPP2_PRS_SRAM_UDF_TYPE_L4). */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
	    MVPP2_PRS_SRAM_UDF_TYPE_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);

	/* Operation selector, again patching the spill-over byte. */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
	    MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
	pe->sram.byte[op_sel_udf_byte] &= ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
	    op_sel_udf_byte_offset);
	pe->sram.byte[op_sel_udf_byte] |= (op >> op_sel_udf_byte_offset);

	/* Clear the op-select base bit. */
	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
}
3707
3708 void
mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry * pe,uint32_t lu)3709 mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe, uint32_t lu)
3710 {
3711 int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
3712
3713 mvpp2_prs_sram_bits_clear(pe, sram_next_off, MVPP2_PRS_SRAM_NEXT_LU_MASK);
3714 mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
3715 }
3716
3717 void
mvpp2_prs_shadow_set(struct mvpp2_softc * sc,int index,uint32_t lu)3718 mvpp2_prs_shadow_set(struct mvpp2_softc *sc, int index, uint32_t lu)
3719 {
3720 sc->sc_prs_shadow[index].valid = 1;
3721 sc->sc_prs_shadow[index].lu = lu;
3722 }
3723
3724 int
mvpp2_prs_hw_write(struct mvpp2_softc * sc,struct mvpp2_prs_entry * pe)3725 mvpp2_prs_hw_write(struct mvpp2_softc *sc, struct mvpp2_prs_entry *pe)
3726 {
3727 int i;
3728
3729 if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
3730 return EINVAL;
3731
3732 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
3733 mvpp2_write(sc, MVPP2_PRS_TCAM_IDX_REG, pe->index);
3734 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
3735 mvpp2_write(sc, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
3736 mvpp2_write(sc, MVPP2_PRS_SRAM_IDX_REG, pe->index);
3737 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
3738 mvpp2_write(sc, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
3739
3740 return 0;
3741 }
3742
3743 int
mvpp2_prs_hw_read(struct mvpp2_softc * sc,struct mvpp2_prs_entry * pe,int tid)3744 mvpp2_prs_hw_read(struct mvpp2_softc *sc, struct mvpp2_prs_entry *pe, int tid)
3745 {
3746 int i;
3747
3748 if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
3749 return EINVAL;
3750
3751 memset(pe, 0, sizeof(*pe));
3752 pe->index = tid;
3753
3754 mvpp2_write(sc, MVPP2_PRS_TCAM_IDX_REG, pe->index);
3755 pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] =
3756 mvpp2_read(sc, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
3757 if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
3758 return EINVAL;
3759 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
3760 pe->tcam.word[i] =
3761 mvpp2_read(sc, MVPP2_PRS_TCAM_DATA_REG(i));
3762
3763 mvpp2_write(sc, MVPP2_PRS_SRAM_IDX_REG, pe->index);
3764 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
3765 pe->sram.word[i] =
3766 mvpp2_read(sc, MVPP2_PRS_SRAM_DATA_REG(i));
3767
3768 return 0;
3769 }
3770
3771 int
mvpp2_prs_flow_find(struct mvpp2_softc * sc,int flow)3772 mvpp2_prs_flow_find(struct mvpp2_softc *sc, int flow)
3773 {
3774 struct mvpp2_prs_entry pe;
3775 uint8_t bits;
3776 int tid;
3777
3778 for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
3779 if (!sc->sc_prs_shadow[tid].valid ||
3780 sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
3781 continue;
3782
3783 mvpp2_prs_hw_read(sc, &pe, tid);
3784 bits = mvpp2_prs_sram_ai_get(&pe);
3785
3786 if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
3787 return tid;
3788 }
3789
3790 return -1;
3791 }
3792
3793 int
mvpp2_prs_tcam_first_free(struct mvpp2_softc * sc,uint8_t start,uint8_t end)3794 mvpp2_prs_tcam_first_free(struct mvpp2_softc *sc, uint8_t start, uint8_t end)
3795 {
3796 uint8_t tmp;
3797 int tid;
3798
3799 if (start > end) {
3800 tmp = end;
3801 end = start;
3802 start = tmp;
3803 }
3804
3805 for (tid = start; tid <= end; tid++) {
3806 if (!sc->sc_prs_shadow[tid].valid)
3807 return tid;
3808 }
3809
3810 return -1;
3811 }
3812
/*
 * Enable/disable dropping of all traffic from 'port' by toggling the
 * port's bit in the shared "drop all" MAC parser entry.  The entry is
 * created on first use.
 */
void
mvpp2_prs_mac_drop_all_set(struct mvpp2_softc *sc, uint32_t port, int add)
{
	struct mvpp2_prs_entry pe;

	if (sc->sc_prs_shadow[MVPP2_PE_DROP_ALL].valid) {
		/* Entry exists; only the port map changes below. */
		mvpp2_prs_hw_read(sc, &pe, MVPP2_PE_DROP_ALL);
	} else {
		/* First use: build the drop-all entry from scratch. */
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = MVPP2_PE_DROP_ALL;
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
		    MVPP2_PRS_RI_DROP_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
		/* Start with no ports; the caller's port is set just below. */
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	mvpp2_prs_tcam_port_set(&pe, port, add);
	mvpp2_prs_hw_write(sc, &pe);
}
3835
/*
 * Enable/disable promiscuous reception of unicast or multicast frames
 * for 'port' by toggling its bit in the shared uc/mc promiscuous MAC
 * parser entry, creating the entry on first use.
 */
void
mvpp2_prs_mac_promisc_set(struct mvpp2_softc *sc, uint32_t port, int l2_cast,
    int add)
{
	struct mvpp2_prs_entry pe;
	uint8_t cast_match;
	uint32_t ri;
	int tid;

	/* Pick match value, entry index and result info per cast type. */
	if (l2_cast == MVPP2_PRS_L2_UNI_CAST) {
		cast_match = MVPP2_PRS_UCAST_VAL;
		tid = MVPP2_PE_MAC_UC_PROMISCUOUS;
		ri = MVPP2_PRS_RI_L2_UCAST;
	} else {
		cast_match = MVPP2_PRS_MCAST_VAL;
		tid = MVPP2_PE_MAC_MC_PROMISCUOUS;
		ri = MVPP2_PRS_RI_L2_MCAST;
	}

	if (sc->sc_prs_shadow[tid].valid) {
		/* Entry exists; only the port map changes below. */
		mvpp2_prs_hw_read(sc, &pe, tid);
	} else {
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
		pe.index = tid;
		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
		mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK);
		/* Match on the uc/mc bit of the first DA byte. */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, cast_match,
		    MVPP2_PRS_CAST_MASK);
		/* Skip source and destination MAC addresses. */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETHER_ADDR_LEN,
		    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_tcam_port_map_set(&pe, 0);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
	}

	mvpp2_prs_tcam_port_set(&pe, port, add);
	mvpp2_prs_hw_write(sc, &pe);
}
3874
/*
 * Add or remove 'port' from one of the four shared DSA-tag parser
 * entries (tagged/untagged x DSA/EDSA), creating the entry on first
 * use.
 */
void
mvpp2_prs_dsa_tag_set(struct mvpp2_softc *sc, uint32_t port, int add,
    int tagged, int extend)
{
	struct mvpp2_prs_entry pe;
	int32_t tid, shift;

	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		shift = 8;	/* EDSA tag: 8 bytes to skip */
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		shift = 4;	/* DSA tag: 4 bytes to skip */
	}

	if (sc->sc_prs_shadow[tid].valid) {
		/* Entry exists; only the port map changes below. */
		mvpp2_prs_hw_read(sc, &pe, tid);
	} else {
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;
		/* Skip over the DSA/EDSA tag. */
		mvpp2_prs_sram_shift_set(&pe, shift,
		    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_DSA);
		if (tagged) {
			/* Tagged frame: continue with VLAN parsing. */
			mvpp2_prs_tcam_data_byte_set(&pe, 0,
			    MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
			    MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Untagged: mark "no VLAN" and go straight to L2. */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
			    MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	}

	mvpp2_prs_tcam_port_set(&pe, port, add);
	mvpp2_prs_hw_write(sc, &pe);
}
3916
/*
 * Same as mvpp2_prs_dsa_tag_set(), but for DSA/EDSA tags preceded by
 * an ethertype; matches ethertype 0xdada (presumably the Marvell DSA
 * ethertype — see the chip documentation) followed by a zero word.
 */
void
mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2_softc *sc, uint32_t port,
    int add, int tagged, int extend)
{
	struct mvpp2_prs_entry pe;
	int32_t tid, shift, port_mask;

	if (extend) {
		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
		port_mask = 0;
		shift = 8;	/* EDSA tag: 8 bytes */
	} else {
		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
		port_mask = MVPP2_PRS_PORT_MASK;
		shift = 4;	/* DSA tag: 4 bytes */
	}

	if (sc->sc_prs_shadow[tid].valid) {
		/* Entry exists; only the port map changes below. */
		mvpp2_prs_hw_read(sc, &pe, tid);
	} else {
		memset(&pe, 0, sizeof(pe));
		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
		pe.index = tid;
		/* Match the DSA ethertype (0xdada) and a zero word. */
		mvpp2_prs_match_etype(&pe, 0, 0xdada);
		mvpp2_prs_match_etype(&pe, 2, 0);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
		    MVPP2_PRS_RI_DSA_MASK);
		/* Skip MAC addresses plus the tag itself. */
		mvpp2_prs_sram_shift_set(&pe, 2 * ETHER_ADDR_LEN + shift,
		    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_DSA);
		if (tagged) {
			/* Tagged frame: continue with VLAN parsing. */
			mvpp2_prs_tcam_data_byte_set(&pe,
			    MVPP2_ETH_TYPE_LEN + 2 + 3,
			    MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
			    MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
			mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
		} else {
			/* Untagged: mark "no VLAN" and go straight to L2. */
			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
			    MVPP2_PRS_RI_VLAN_MASK);
			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
		}
		mvpp2_prs_tcam_port_map_set(&pe, port_mask);
	}

	mvpp2_prs_tcam_port_set(&pe, port, add);
	mvpp2_prs_hw_write(sc, &pe);
}
3965
/*
 * Find the single/triple VLAN parser entry matching 'tpid' and 'ai'.
 * Returns a malloc(9)'d copy of the entry — ownership passes to the
 * caller, who must free it — or NULL when no entry matches or the
 * allocation fails.
 */
struct mvpp2_prs_entry *
mvpp2_prs_vlan_find(struct mvpp2_softc *sc, uint16_t tpid, int ai)
{
	struct mvpp2_prs_entry *pe;
	uint32_t ri_bits, ai_bits;
	int match, tid;

	pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
	if (pe == NULL)
		return NULL;

	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);

	for (tid = MVPP2_PE_FIRST_FREE_TID; tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		if (!sc->sc_prs_shadow[tid].valid ||
		    sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;
		mvpp2_prs_hw_read(sc, pe, tid);
		/* TPID is stored in network byte order in the TCAM. */
		match = mvpp2_prs_tcam_data_cmp(pe, 0, swap16(tpid));
		if (!match)
			continue;
		ri_bits = mvpp2_prs_sram_ri_get(pe);
		ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
		/* Compare AI without the double-VLAN marker bit. */
		ai_bits = mvpp2_prs_tcam_ai_get(pe);
		ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
		if (ai != ai_bits)
			continue;
		/* Only single/triple VLAN entries qualify. */
		if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
		    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
			return pe;
	}

	free(pe, M_TEMP, sizeof(*pe));
	return NULL;
}
4001
/*
 * Add (or update the port map of) a single/triple VLAN parser entry
 * for 'tpid'/'ai'.  New entries must be allocated after the last
 * double-VLAN entry so lookup ordering stays correct.  Returns 0 on
 * success, a negative tid on allocation failure in the TCAM, or an
 * errno value.
 */
int
mvpp2_prs_vlan_add(struct mvpp2_softc *sc, uint16_t tpid, int ai, uint32_t port_map)
{
	struct mvpp2_prs_entry *pe;
	uint32_t ri_bits;
	int tid_aux, tid;
	int ret = 0;

	pe = mvpp2_prs_vlan_find(sc, tpid, ai);
	if (pe == NULL) {
		/* No existing entry: grab a free TCAM slot. */
		tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_LAST_FREE_TID,
		    MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
		if (pe == NULL)
			return ENOMEM;

		/* get last double vlan tid */
		for (tid_aux = MVPP2_PE_LAST_FREE_TID;
		    tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
			if (!sc->sc_prs_shadow[tid_aux].valid ||
			    sc->sc_prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;
			mvpp2_prs_hw_read(sc, pe, tid_aux);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
			    MVPP2_PRS_RI_VLAN_DOUBLE)
				break;
		}

		/* The new entry must come after every double-VLAN entry. */
		if (tid <= tid_aux) {
			ret = EINVAL;
			goto error;
		}

		memset(pe, 0, sizeof(*pe));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;
		mvpp2_prs_match_etype(pe, 0, tpid);
		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
		/* Skip the VLAN tag. */
		mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
		    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);
		if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
			    MVPP2_PRS_RI_VLAN_MASK);
		} else {
			/* Inner tag of a double-tagged frame => triple VLAN. */
			ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
			    MVPP2_PRS_RI_VLAN_MASK);
		}
		mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);
		mvpp2_prs_shadow_set(sc, pe->index, MVPP2_PRS_LU_VLAN);
	}

	mvpp2_prs_tcam_port_map_set(pe, port_map);
	mvpp2_prs_hw_write(sc, pe);

error:
	free(pe, M_TEMP, sizeof(*pe));
	return ret;
}
4066
4067 int
mvpp2_prs_double_vlan_ai_free_get(struct mvpp2_softc * sc)4068 mvpp2_prs_double_vlan_ai_free_get(struct mvpp2_softc *sc)
4069 {
4070 int i;
4071
4072 for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++)
4073 if (!sc->sc_prs_double_vlans[i])
4074 return i;
4075
4076 return -1;
4077 }
4078
/*
 * Find the double-VLAN parser entry matching the outer/inner TPID
 * pair.  Returns a malloc(9)'d copy of the entry — ownership passes
 * to the caller, who must free it — or NULL when no entry matches or
 * the allocation fails.
 */
struct mvpp2_prs_entry *
mvpp2_prs_double_vlan_find(struct mvpp2_softc *sc, uint16_t tpid1, uint16_t tpid2)
{
	struct mvpp2_prs_entry *pe;
	uint32_t ri_mask;
	int match, tid;

	pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
	if (pe == NULL)
		return NULL;

	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);

	for (tid = MVPP2_PE_FIRST_FREE_TID; tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
		if (!sc->sc_prs_shadow[tid].valid ||
		    sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
			continue;

		mvpp2_prs_hw_read(sc, pe, tid);
		/* Outer TPID at offset 0, inner TPID at offset 4. */
		match = mvpp2_prs_tcam_data_cmp(pe, 0, swap16(tpid1)) &&
		    mvpp2_prs_tcam_data_cmp(pe, 4, swap16(tpid2));
		if (!match)
			continue;
		ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
		if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
			return pe;
	}

	free(pe, M_TEMP, sizeof(*pe));
	return NULL;
}
4110
/*
 * Add (or update the port map of) a double-VLAN parser entry for the
 * outer/inner TPID pair.  New entries must be allocated before the
 * first single/triple VLAN entry so lookup ordering stays correct.
 * Returns 0 on success, a negative tid on TCAM allocation failure, or
 * an errno value.
 */
int
mvpp2_prs_double_vlan_add(struct mvpp2_softc *sc, uint16_t tpid1, uint16_t tpid2,
    uint32_t port_map)
{
	struct mvpp2_prs_entry *pe;
	int tid_aux, tid, ai, ret = 0;
	uint32_t ri_bits;

	pe = mvpp2_prs_double_vlan_find(sc, tpid1, tpid2);
	if (pe == NULL) {
		/* No existing entry: grab a free TCAM slot. */
		tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
		    MVPP2_PE_LAST_FREE_TID);
		if (tid < 0)
			return tid;

		pe = malloc(sizeof(*pe), M_TEMP, M_NOWAIT);
		if (pe == NULL)
			return ENOMEM;

		/* Reserve a free double-VLAN AI value. */
		ai = mvpp2_prs_double_vlan_ai_free_get(sc);
		if (ai < 0) {
			ret = ai;
			goto error;
		}

		/* Locate the first single/triple VLAN entry. */
		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
		    tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
			if (!sc->sc_prs_shadow[tid_aux].valid ||
			    sc->sc_prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
				continue;
			mvpp2_prs_hw_read(sc, pe, tid_aux);
			ri_bits = mvpp2_prs_sram_ri_get(pe);
			ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
			if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
			    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
				break;
		}

		/* The new entry must come before any single/triple entry. */
		if (tid >= tid_aux) {
			ret = ERANGE;
			goto error;
		}

		memset(pe, 0, sizeof(*pe));
		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
		pe->index = tid;
		sc->sc_prs_double_vlans[ai] = 1;
		mvpp2_prs_match_etype(pe, 0, tpid1);
		mvpp2_prs_match_etype(pe, 4, tpid2);
		/* The inner tag is parsed by a second pass of the VLAN LU. */
		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
		/* Skip both VLAN tags. */
		mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
		    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
		mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
		    MVPP2_PRS_RI_VLAN_MASK);
		mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
		    MVPP2_PRS_SRAM_AI_MASK);
		mvpp2_prs_shadow_set(sc, pe->index, MVPP2_PRS_LU_VLAN);
	}

	mvpp2_prs_tcam_port_map_set(pe, port_map);
	mvpp2_prs_hw_write(sc, pe);

error:
	free(pe, M_TEMP, sizeof(*pe));
	return ret;
}
4177
/*
 * Install two IPv4 parser entries for 'proto' (TCP, UDP or IGMP):
 * the first matches non-fragmented packets (fragment-related header
 * bytes forced to zero), the second matches the remainder and tags
 * them with MVPP2_PRS_RI_IP_FRAG_MASK.  Returns 0 on success, EINVAL
 * for an unsupported protocol, or a negative tid when the TCAM is
 * full.
 */
int
mvpp2_prs_ip4_proto(struct mvpp2_softc *sc, uint16_t proto, uint32_t ri,
    uint32_t ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_IGMP))
		return EINVAL;

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	/* First entry: not a fragment. */
	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Record the L4 offset (end of the fixed IPv4 header). */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
	    sizeof(struct ip) - 4, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
	    MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
	/* Bytes 2/3 (fragment fields) must be zero, byte 5 is the proto. */
	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	/* Second entry: reuse pe, but match fragments and mark them. */
	pe.index = tid;
	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
	mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
	    ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
	/* Don't-care the fragment bytes this time. */
	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
4230
/*
 * Install an IPv4 parser entry matching multicast or broadcast
 * destination addresses and tagging the corresponding result-info
 * bits.  Returns 0 on success, EINVAL for an unsupported cast type,
 * or a negative tid when the TCAM is full.
 */
int
mvpp2_prs_ip4_cast(struct mvpp2_softc *sc, uint16_t l3_cast)
{
	struct mvpp2_prs_entry pe;
	int mask, tid;

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
	pe.index = tid;

	switch (l3_cast) {
	case MVPP2_PRS_L3_MULTI_CAST:
		/* Match the multicast prefix on the first DIP byte. */
		mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
		    MVPP2_PRS_IPV4_MC_MASK);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
		    MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	case  MVPP2_PRS_L3_BROAD_CAST:
		/* All four DIP bytes must be 0xff. */
		mask = MVPP2_PRS_IPV4_BC_MASK;
		mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
		mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
		    MVPP2_PRS_RI_L3_ADDR_MASK);
		break;
	default:
		return EINVAL;
	}

	/* Parsing ends here; hand off to the flow stage. */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
	    MVPP2_PRS_IPV4_DIP_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP4);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
4276
/*
 * Install an IPv6 parser entry for 'proto' (TCP, UDP, ICMPv6 or
 * IP-in-IP) matching the next-header byte when no extension headers
 * are present.  Returns 0 on success, EINVAL for an unsupported
 * protocol, or a negative tid when the TCAM is full.
 */
int
mvpp2_prs_ip6_proto(struct mvpp2_softc *sc, uint16_t proto, uint32_t ri,
    uint32_t ri_mask)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
	    (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
		return EINVAL;

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;
	/* Parsing ends here; hand off to the flow stage. */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
	/* Record the L4 offset (end of the fixed IPv6 header). */
	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
	    sizeof(struct ip6_hdr) - 6, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
	/* Match the next-header byte; no-extension AI bit must be set. */
	mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
4310
/*
 * Install an IPv6 parser entry matching multicast destination
 * addresses (the only cast type supported here) and tagging the
 * L3-multicast result-info bit.  Returns 0 on success, EINVAL for any
 * other cast type, or a negative tid when the TCAM is full.
 */
int
mvpp2_prs_ip6_cast(struct mvpp2_softc *sc, uint16_t l3_cast)
{
	struct mvpp2_prs_entry pe;
	int tid;

	if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
		return EINVAL;

	tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_FIRST_FREE_TID,
	    MVPP2_PE_LAST_FREE_TID);
	if (tid < 0)
		return tid;

	memset(&pe, 0, sizeof(pe));
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
	pe.index = tid;
	/* Continue IPv6 parsing after this entry matches. */
	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
	    MVPP2_PRS_RI_L3_ADDR_MASK);
	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
	    MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	/* Rewind the header pointer 18 bytes for the next lookup pass. */
	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	/* Match the IPv6 multicast prefix (ff00::/8). */
	mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
	    MVPP2_PRS_IPV6_MC_MASK);
	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_IP6);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
4343
4344 int
mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry * pe,const uint8_t * da,uint8_t * mask)4345 mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe, const uint8_t *da,
4346 uint8_t *mask)
4347 {
4348 uint8_t tcam_byte, tcam_mask;
4349 int index;
4350
4351 for (index = 0; index < ETHER_ADDR_LEN; index++) {
4352 mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte,
4353 &tcam_mask);
4354 if (tcam_mask != mask[index])
4355 return 0;
4356 if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
4357 return 0;
4358 }
4359
4360 return 1;
4361 }
4362
4363 int
mvpp2_prs_mac_da_range_find(struct mvpp2_softc * sc,int pmap,const uint8_t * da,uint8_t * mask,int udf_type)4364 mvpp2_prs_mac_da_range_find(struct mvpp2_softc *sc, int pmap, const uint8_t *da,
4365 uint8_t *mask, int udf_type)
4366 {
4367 struct mvpp2_prs_entry pe;
4368 int tid;
4369
4370 for (tid = MVPP2_PE_MAC_RANGE_START; tid <= MVPP2_PE_MAC_RANGE_END;
4371 tid++) {
4372 uint32_t entry_pmap;
4373
4374 if (!sc->sc_prs_shadow[tid].valid ||
4375 (sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
4376 (sc->sc_prs_shadow[tid].udf != udf_type))
4377 continue;
4378
4379 mvpp2_prs_hw_read(sc, &pe, tid);
4380 entry_pmap = mvpp2_prs_tcam_port_map_get(&pe);
4381 if (mvpp2_prs_mac_range_equals(&pe, da, mask) &&
4382 entry_pmap == pmap)
4383 return tid;
4384 }
4385
4386 return -1;
4387 }
4388
/*
 * Add or remove a MAC destination-address filter entry for 'port'.
 * Entries are shared between ports via the TCAM port map; the entry
 * is invalidated once no port uses it.  Returns 0 on success, -1 when
 * the MAC range is full on add, or a negative tid from the allocator.
 */
int
mvpp2_prs_mac_da_accept(struct mvpp2_port *port, const uint8_t *da, int add)
{
	struct mvpp2_softc *sc = port->sc;
	struct mvpp2_prs_entry pe;
	uint32_t pmap, len, ri;
	/* Exact-match mask: all 48 DA bits significant. */
	uint8_t mask[ETHER_ADDR_LEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int tid;

	memset(&pe, 0, sizeof(pe));

	/* Is there already an entry for this DA on this port? */
	tid = mvpp2_prs_mac_da_range_find(sc, BIT(port->sc_id), da, mask,
	    MVPP2_PRS_UDF_MAC_DEF);
	if (tid < 0) {
		/* Removing a nonexistent entry is a no-op. */
		if (!add)
			return 0;

		tid = mvpp2_prs_tcam_first_free(sc, MVPP2_PE_MAC_RANGE_START,
		    MVPP2_PE_MAC_RANGE_END);
		if (tid < 0)
			return tid;

		pe.index = tid;
		mvpp2_prs_tcam_port_map_set(&pe, 0);
	} else {
		mvpp2_prs_hw_read(sc, &pe, tid);
	}

	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);

	mvpp2_prs_tcam_port_set(&pe, port->sc_id, add);

	/* invalidate the entry if no ports are left enabled */
	pmap = mvpp2_prs_tcam_port_map_get(&pe);
	if (pmap == 0) {
		if (add)
			return -1;
		mvpp2_prs_hw_inv(sc, pe.index);
		sc->sc_prs_shadow[pe.index].valid = 0;
		return 0;
	}

	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);

	/* Exact-match the full destination address. */
	len = ETHER_ADDR_LEN;
	while (len--)
		mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);

	/* Classify the address for the result info. */
	if (ETHER_IS_BROADCAST(da))
		ri = MVPP2_PRS_RI_L2_BCAST;
	else if (ETHER_IS_MULTICAST(da))
		ri = MVPP2_PRS_RI_L2_MCAST;
	else
		ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;

	mvpp2_prs_sram_ri_update(&pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
	    MVPP2_PRS_RI_MAC_ME_MASK);
	mvpp2_prs_shadow_ri_set(sc, pe.index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
	    MVPP2_PRS_RI_MAC_ME_MASK);
	/* Skip source and destination MAC addresses. */
	mvpp2_prs_sram_shift_set(&pe, 2 * ETHER_ADDR_LEN,
	    MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
	sc->sc_prs_shadow[pe.index].udf = MVPP2_PRS_UDF_MAC_DEF;
	mvpp2_prs_shadow_set(sc, pe.index, MVPP2_PRS_LU_MAC);
	mvpp2_prs_hw_write(sc, &pe);

	return 0;
}
4456
/*
 * Remove every MAC filter entry belonging to 'port', except the
 * broadcast entry and the port's own station address.
 */
void
mvpp2_prs_mac_del_all(struct mvpp2_port *port)
{
	struct mvpp2_softc *sc = port->sc;
	struct mvpp2_prs_entry pe;
	uint32_t pmap;
	int index, tid;

	for (tid = MVPP2_PE_MAC_RANGE_START; tid <= MVPP2_PE_MAC_RANGE_END;
	    tid++) {
		uint8_t da[ETHER_ADDR_LEN], da_mask[ETHER_ADDR_LEN];

		/* Only default-UDF MAC entries are candidates. */
		if (!sc->sc_prs_shadow[tid].valid ||
		    (sc->sc_prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
		    (sc->sc_prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
			continue;

		mvpp2_prs_hw_read(sc, &pe, tid);
		pmap = mvpp2_prs_tcam_port_map_get(&pe);

		/* Skip entries this port does not use. */
		if (!(pmap & (1 << port->sc_id)))
			continue;

		/* Extract the DA from the TCAM match data. */
		for (index = 0; index < ETHER_ADDR_LEN; index++)
			mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
			    &da_mask[index]);

		/* Keep broadcast and our own station address. */
		if (ETHER_IS_BROADCAST(da) || ETHER_IS_EQ(da, port->sc_lladdr))
			continue;

		/* Drop this port from the entry (may invalidate it). */
		mvpp2_prs_mac_da_accept(port, da, 0);
	}
}
4490
/*
 * Configure the DSA tagging mode of a port by adding it to the parser
 * entries of the selected mode and removing it from the others.
 * Returns EINVAL for an unknown tag type, 0 otherwise.
 */
int
mvpp2_prs_tag_mode_set(struct mvpp2_softc *sc, int port_id, int type)
{
	switch (type) {
	case MVPP2_TAG_TYPE_EDSA:
		/* Accept EDSA frames, stop accepting plain DSA. */
		mvpp2_prs_dsa_tag_set(sc, port_id, 1, MVPP2_PRS_TAGGED,
		    MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(sc, port_id, 1, MVPP2_PRS_UNTAGGED,
		    MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_TAGGED,
		    MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_UNTAGGED,
		    MVPP2_PRS_DSA);
		break;
	case MVPP2_TAG_TYPE_DSA:
		/* Accept DSA frames, stop accepting EDSA. */
		mvpp2_prs_dsa_tag_set(sc, port_id, 1, MVPP2_PRS_TAGGED,
		    MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(sc, port_id, 1, MVPP2_PRS_UNTAGGED,
		    MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_TAGGED,
		    MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_UNTAGGED,
		    MVPP2_PRS_EDSA);
		break;
	case MVPP2_TAG_TYPE_MH:
	case MVPP2_TAG_TYPE_NONE:
		/* No DSA tagging: remove the port from all four entries. */
		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_TAGGED,
		    MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_UNTAGGED,
		    MVPP2_PRS_DSA);
		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_TAGGED,
		    MVPP2_PRS_EDSA);
		mvpp2_prs_dsa_tag_set(sc, port_id, 0, MVPP2_PRS_UNTAGGED,
		    MVPP2_PRS_EDSA);
		break;
	default:
		if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
			return EINVAL;
		break;
	}

	return 0;
}
4534
/*
 * Install (or update) the default flow entry for 'port': a flow-stage
 * parser entry whose AI carries the port id as flow ID.  Returns 0 on
 * success or a negative tid when the TCAM is full.
 */
int
mvpp2_prs_def_flow(struct mvpp2_port *port)
{
	struct mvpp2_prs_entry pe;
	int tid;

	memset(&pe, 0, sizeof(pe));

	tid = mvpp2_prs_flow_find(port->sc, port->sc_id);
	if (tid < 0) {
		/* No entry yet: allocate one and set the flow ID. */
		tid = mvpp2_prs_tcam_first_free(port->sc,
		    MVPP2_PE_LAST_FREE_TID, MVPP2_PE_FIRST_FREE_TID);
		if (tid < 0)
			return tid;

		pe.index = tid;
		mvpp2_prs_sram_ai_update(&pe, port->sc_id,
		    MVPP2_PRS_FLOW_ID_MASK);
		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
		mvpp2_prs_shadow_set(port->sc, pe.index, MVPP2_PRS_LU_FLOWS);
	} else {
		mvpp2_prs_hw_read(port->sc, &pe, tid);
	}

	/* Restrict the entry to this port only. */
	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
	mvpp2_prs_tcam_port_map_set(&pe, (1 << port->sc_id));
	mvpp2_prs_hw_write(port->sc, &pe);
	return 0;
}
4564
/*
 * Program one classifier flow table entry: select the index, then
 * write the entry's three data words.
 */
void
mvpp2_cls_flow_write(struct mvpp2_softc *sc, struct mvpp2_cls_flow_entry *fe)
{
	mvpp2_write(sc, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
	mvpp2_write(sc, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]);
	mvpp2_write(sc, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]);
	mvpp2_write(sc, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]);
}
4573
4574 void
mvpp2_cls_lookup_write(struct mvpp2_softc * sc,struct mvpp2_cls_lookup_entry * le)4575 mvpp2_cls_lookup_write(struct mvpp2_softc *sc, struct mvpp2_cls_lookup_entry *le)
4576 {
4577 uint32_t val;
4578
4579 val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
4580 mvpp2_write(sc, MVPP2_CLS_LKP_INDEX_REG, val);
4581 mvpp2_write(sc, MVPP2_CLS_LKP_TBL_REG, le->data);
4582 }
4583
4584 void
mvpp2_cls_init(struct mvpp2_softc * sc)4585 mvpp2_cls_init(struct mvpp2_softc *sc)
4586 {
4587 struct mvpp2_cls_lookup_entry le;
4588 struct mvpp2_cls_flow_entry fe;
4589 int index;
4590
4591 mvpp2_write(sc, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
4592 memset(&fe.data, 0, sizeof(fe.data));
4593 for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
4594 fe.index = index;
4595 mvpp2_cls_flow_write(sc, &fe);
4596 }
4597 le.data = 0;
4598 for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
4599 le.lkpid = index;
4600 le.way = 0;
4601 mvpp2_cls_lookup_write(sc, &le);
4602 le.way = 1;
4603 mvpp2_cls_lookup_write(sc, &le);
4604 }
4605 }
4606
/*
 * Per-port classifier setup: select lookup way 0 for the port and
 * program its lookup ID table entry with the port's first RX queue
 * and all classification engines disabled.
 */
void
mvpp2_cls_port_config(struct mvpp2_port *port)
{
	struct mvpp2_cls_lookup_entry le;
	uint32_t val;

	/* set way for the port */
	val = mvpp2_read(port->sc, MVPP2_CLS_PORT_WAY_REG);
	val &= ~MVPP2_CLS_PORT_WAY_MASK(port->sc_id);
	mvpp2_write(port->sc, MVPP2_CLS_PORT_WAY_REG, val);

	/*
	 * pick the entry to be accessed in lookup ID decoding table
	 * according to the way and lkpid.
	 */
	le.lkpid = port->sc_id;
	le.way = 0;
	le.data = 0;

	/* set initial CPU queue for receiving packets */
	le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
	le.data |= (port->sc_id * 32);	/* 32 RX queues per port */

	/* disable classification engines */
	le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;

	/* update lookup ID table entry */
	mvpp2_cls_lookup_write(port->sc, &le);
}
4636
/*
 * Direct oversize packets of this port to its first RX queue: program
 * the queue number (split over a low register and a high register)
 * and clear the port's software-forwarding control bit.
 */
void
mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
{
	uint32_t val;

	/* Low bits of the queue number (port's first RX queue). */
	mvpp2_write(port->sc, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->sc_id),
	    (port->sc_id * 32) & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
	/* Remaining high bits. */
	mvpp2_write(port->sc, MVPP2_CLS_SWFWD_P2HQ_REG(port->sc_id),
	    (port->sc_id * 32) >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS);
	val = mvpp2_read(port->sc, MVPP2_CLS_SWFWD_PCTRL_REG);
	val &= ~MVPP2_CLS_SWFWD_PCTRL_MASK(port->sc_id);
	mvpp2_write(port->sc, MVPP2_CLS_SWFWD_PCTRL_REG, val);
}
4650