// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019-2021 NXP Semiconductors
 *
 * This is an umbrella module for all network switches that are
 * register-compatible with Ocelot and that perform I/O to their host CPU
 * through an NPI (Node Processor Interface) Ethernet port.
 */
8 #include <uapi/linux/if_bridge.h>
9 #include <soc/mscc/ocelot_vcap.h>
10 #include <soc/mscc/ocelot_qsys.h>
11 #include <soc/mscc/ocelot_sys.h>
12 #include <soc/mscc/ocelot_dev.h>
13 #include <soc/mscc/ocelot_ana.h>
14 #include <soc/mscc/ocelot_ptp.h>
15 #include <soc/mscc/ocelot.h>
16 #include <linux/dsa/8021q.h>
17 #include <linux/dsa/ocelot.h>
18 #include <linux/platform_device.h>
19 #include <linux/ptp_classify.h>
20 #include <linux/module.h>
21 #include <linux/of_net.h>
22 #include <linux/pci.h>
23 #include <linux/of.h>
24 #include <linux/pcs-lynx.h>
25 #include <net/pkt_sched.h>
26 #include <net/dsa.h>
27 #include "felix.h"
28
/* Install a VCAP ES0 rule that pushes the tag_8021q rx VLAN @vid (unique to
 * this user port) onto frames forwarded from @port towards the upstream
 * (CPU-facing) port. Only the pvid needs this treatment, since that is the
 * VLAN used when sending towards the CPU.
 * Returns 0 on success or a negative error code.
 */
static int felix_tag_8021q_rxvlan_add(struct felix *felix, int port, u16 vid,
				      bool pvid, bool untagged)
{
	struct ocelot_vcap_filter *outer_tagging_rule;
	struct ocelot *ocelot = &felix->ocelot;
	struct dsa_switch *ds = felix->ds;
	int key_length, upstream, err;

	/* We don't need to install the rxvlan into the other ports' filtering
	 * tables, because we're just pushing the rxvlan when sending towards
	 * the CPU
	 */
	if (!pvid)
		return 0;

	/* Width in bits of the ES0 ingress port key field, needed to build
	 * an all-ones (exact) match mask below.
	 */
	key_length = ocelot->vcap[VCAP_ES0].keys[VCAP_ES0_IGR_PORT].length;
	upstream = dsa_upstream_port(ds, port);

	outer_tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter),
				     GFP_KERNEL);
	if (!outer_tagging_rule)
		return -ENOMEM;

	outer_tagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
	outer_tagging_rule->prio = 1;
	/* The cookie is the port number, so the rule can be found again at
	 * deletion time (see felix_tag_8021q_rxvlan_del).
	 */
	outer_tagging_rule->id.cookie = port;
	outer_tagging_rule->id.tc_offload = false;
	outer_tagging_rule->block_id = VCAP_ES0;
	outer_tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	outer_tagging_rule->lookup = 0;
	/* Match exactly: ingress on @port, egress on the upstream port */
	outer_tagging_rule->ingress_port.value = port;
	outer_tagging_rule->ingress_port.mask = GENMASK(key_length - 1, 0);
	outer_tagging_rule->egress_port.value = upstream;
	outer_tagging_rule->egress_port.mask = GENMASK(key_length - 1, 0);
	/* Action: push an outer tag with the 802.1ad (S-tag) TPID carrying
	 * @vid as the VLAN ID.
	 */
	outer_tagging_rule->action.push_outer_tag = OCELOT_ES0_TAG;
	outer_tagging_rule->action.tag_a_tpid_sel = OCELOT_TAG_TPID_SEL_8021AD;
	outer_tagging_rule->action.tag_a_vid_sel = 1;
	outer_tagging_rule->action.vid_a_val = vid;

	err = ocelot_vcap_filter_add(ocelot, outer_tagging_rule, NULL);
	if (err)
		kfree(outer_tagging_rule);

	return err;
}
74
/* For frames injected by software on the tag_8021q CPU port towards @port
 * (identified by tx VLAN @vid), install a rule pair:
 * - a VCAP IS1 rule on the upstream port that pops the tag_8021q VLAN tag
 *   and classifies the frame to a per-port PAG (Policy Action Group), and
 * - a VCAP IS2 rule keyed on that PAG which redirects the frame to @port.
 * Both rules use @port as cookie so they can be found again at deletion.
 * Returns 0 on success or a negative error code.
 */
static int felix_tag_8021q_txvlan_add(struct felix *felix, int port, u16 vid,
				      bool pvid, bool untagged)
{
	struct ocelot_vcap_filter *untagging_rule, *redirect_rule;
	struct ocelot *ocelot = &felix->ocelot;
	struct dsa_switch *ds = felix->ds;
	int upstream, err;

	/* tag_8021q.c assumes we are implementing this via port VLAN
	 * membership, which we aren't. So we don't need to add any VCAP filter
	 * for the CPU port.
	 */
	if (ocelot->ports[port]->is_dsa_8021q_cpu)
		return 0;

	untagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
	if (!untagging_rule)
		return -ENOMEM;

	redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
	if (!redirect_rule) {
		kfree(untagging_rule);
		return -ENOMEM;
	}

	upstream = dsa_upstream_port(ds, port);

	/* IS1: match @vid on frames ingressing the upstream port, pop one
	 * VLAN tag and set the PAG to the target port number.
	 */
	untagging_rule->key_type = OCELOT_VCAP_KEY_ANY;
	untagging_rule->ingress_port_mask = BIT(upstream);
	untagging_rule->vlan.vid.value = vid;
	untagging_rule->vlan.vid.mask = VLAN_VID_MASK;
	untagging_rule->prio = 1;
	untagging_rule->id.cookie = port;
	untagging_rule->id.tc_offload = false;
	untagging_rule->block_id = VCAP_IS1;
	untagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	untagging_rule->lookup = 0;
	untagging_rule->action.vlan_pop_cnt_ena = true;
	untagging_rule->action.vlan_pop_cnt = 1;
	untagging_rule->action.pag_override_mask = 0xff;
	untagging_rule->action.pag_val = port;

	err = ocelot_vcap_filter_add(ocelot, untagging_rule, NULL);
	if (err) {
		kfree(untagging_rule);
		kfree(redirect_rule);
		return err;
	}

	/* IS2: match the PAG set by the IS1 rule above and redirect the frame
	 * to the target port.
	 */
	redirect_rule->key_type = OCELOT_VCAP_KEY_ANY;
	redirect_rule->ingress_port_mask = BIT(upstream);
	redirect_rule->pag = port;
	redirect_rule->prio = 1;
	redirect_rule->id.cookie = port;
	redirect_rule->id.tc_offload = false;
	redirect_rule->block_id = VCAP_IS2;
	redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	redirect_rule->lookup = 0;
	redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
	redirect_rule->action.port_mask = BIT(port);

	err = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL);
	if (err) {
		/* Roll back the already-installed IS1 rule and free the IS2
		 * rule which was never handed to the VCAP core.
		 */
		ocelot_vcap_filter_del(ocelot, untagging_rule);
		kfree(redirect_rule);
		return err;
	}

	return 0;
}
145
/* tag_8021q callback: dispatch the VLAN install to the rx or tx handler
 * depending on which half of the tag_8021q VID space @vid belongs to.
 */
static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
				    u16 flags)
{
	struct felix *felix = ocelot_to_felix(ds->priv);
	bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool pvid = flags & BRIDGE_VLAN_INFO_PVID;

	if (vid_is_dsa_8021q_rxvlan(vid))
		return felix_tag_8021q_rxvlan_add(felix, port, vid, pvid,
						  untagged);

	if (vid_is_dsa_8021q_txvlan(vid))
		return felix_tag_8021q_txvlan_add(felix, port, vid, pvid,
						  untagged);

	return 0;
}
163
/* Remove the ES0 outer-tagging rule installed by felix_tag_8021q_rxvlan_add,
 * identified by the @port cookie.
 */
static int felix_tag_8021q_rxvlan_del(struct felix *felix, int port, u16 vid)
{
	struct ocelot *ocelot = &felix->ocelot;
	struct ocelot_vcap_filter *rule;

	rule = ocelot_vcap_block_find_filter_by_id(&ocelot->block[VCAP_ES0],
						   port, false);
	/* In rxvlan_add, we had the "if (!pvid) return 0" logic to avoid
	 * installing outer tagging ES0 rules where they weren't needed.
	 * But in rxvlan_del, the API doesn't give us the "flags" anymore,
	 * so that forces us to be slightly sloppy here, and just assume that
	 * if we didn't find an outer_tagging_rule it means that there was
	 * none in the first place, i.e. rxvlan_del is called on a non-pvid
	 * port. This is most probably true though.
	 */
	if (!rule)
		return 0;

	return ocelot_vcap_filter_del(ocelot, rule);
}
187
/* Remove the IS1 + IS2 rule pair installed by felix_tag_8021q_txvlan_add,
 * both identified by the @port cookie. The IS1 rule is deleted first,
 * mirroring the install order.
 */
static int felix_tag_8021q_txvlan_del(struct felix *felix, int port, u16 vid)
{
	struct ocelot *ocelot = &felix->ocelot;
	struct ocelot_vcap_filter *rule;
	int ret;

	/* No rules were installed for the tag_8021q CPU port */
	if (ocelot->ports[port]->is_dsa_8021q_cpu)
		return 0;

	rule = ocelot_vcap_block_find_filter_by_id(&ocelot->block[VCAP_IS1],
						   port, false);
	if (!rule)
		return 0;

	ret = ocelot_vcap_filter_del(ocelot, rule);
	if (ret)
		return ret;

	rule = ocelot_vcap_block_find_filter_by_id(&ocelot->block[VCAP_IS2],
						   port, false);
	if (!rule)
		return 0;

	return ocelot_vcap_filter_del(ocelot, rule);
}
218
/* tag_8021q callback: dispatch the VLAN removal to the rx or tx handler
 * depending on which half of the tag_8021q VID space @vid belongs to.
 */
static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
{
	struct felix *felix = ocelot_to_felix(ds->priv);

	if (vid_is_dsa_8021q_rxvlan(vid))
		return felix_tag_8021q_rxvlan_del(felix, port, vid);

	if (vid_is_dsa_8021q_txvlan(vid))
		return felix_tag_8021q_txvlan_del(felix, port, vid);

	return 0;
}
233
/* Callbacks through which tag_8021q.c asks this driver to commit its VLANs
 * to hardware (implemented here via VCAP filters, not port VLAN membership).
 */
static const struct dsa_8021q_ops felix_tag_8021q_ops = {
	.vlan_add = felix_tag_8021q_vlan_add,
	.vlan_del = felix_tag_8021q_vlan_del,
};
238
/* Alternatively to using the NPI functionality, that same hardware MAC
 * connected internally to the enetc or fman DSA master can be configured to
 * use the software-defined tag_8021q frame format. As far as the hardware is
 * concerned, it thinks it is a "dumb switch" - the queues of the CPU port
 * module are now disconnected from it, but can still be accessed through
 * register-based MMIO.
 */
/* Turn @port into the tag_8021q CPU port: host traffic flows through this
 * regular port using pushed VLANs rather than through the NPI port.
 */
static void felix_8021q_cpu_port_init(struct ocelot *ocelot, int port)
{
	ocelot->ports[port]->is_dsa_8021q_cpu = true;
	/* There is no NPI port while in tag_8021q mode */
	ocelot->npi = -1;

	/* Overwrite PGID_CPU with the non-tagging port */
	ocelot_write_rix(ocelot, BIT(port), ANA_PGID_PGID, PGID_CPU);

	/* Recompute forwarding masks now that the CPU port changed */
	ocelot_apply_bridge_fwd_mask(ocelot);
}
256
/* Undo felix_8021q_cpu_port_init: @port stops being the tag_8021q CPU port
 * and PGID_CPU points back at the CPU port module.
 */
static void felix_8021q_cpu_port_deinit(struct ocelot *ocelot, int port)
{
	ocelot->ports[port]->is_dsa_8021q_cpu = false;

	/* Restore PGID_CPU (the CPU port module sits at index
	 * num_phys_ports in the port mask)
	 */
	ocelot_write_rix(ocelot, BIT(ocelot->num_phys_ports), ANA_PGID_PGID,
			 PGID_CPU);

	ocelot_apply_bridge_fwd_mask(ocelot);
}
267
/* Set up a VCAP IS2 rule for delivering PTP frames to the CPU port module.
 * If the quirk_no_xtr_irq is in place, then also copy those PTP frames to the
 * tag_8021q CPU port.
 */
/* Install the IS1 + IS2 rule pair that traps ETH_P_1588 (PTP over L2) frames
 * from user ports towards the CPU port module. Both rules use
 * ocelot->num_phys_ports as cookie, so felix_teardown_mmio_filtering() can
 * find them again. Returns 0 on success or a negative error code.
 */
static int felix_setup_mmio_filtering(struct felix *felix)
{
	unsigned long user_ports = 0, cpu_ports = 0;
	struct ocelot_vcap_filter *redirect_rule;
	struct ocelot_vcap_filter *tagging_rule;
	struct ocelot *ocelot = &felix->ocelot;
	struct dsa_switch *ds = felix->ds;
	int port, ret;

	tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
	if (!tagging_rule)
		return -ENOMEM;

	redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL);
	if (!redirect_rule) {
		kfree(tagging_rule);
		return -ENOMEM;
	}

	/* Build the source (user) and destination (CPU) port masks */
	for (port = 0; port < ocelot->num_phys_ports; port++) {
		if (dsa_is_user_port(ds, port))
			user_ports |= BIT(port);
		if (dsa_is_cpu_port(ds, port))
			cpu_ports |= BIT(port);
	}

	/* IS1: match EtherType 0x88f7 on user ports and classify those frames
	 * to a dedicated PAG, which the IS2 rule below keys on.
	 */
	tagging_rule->key_type = OCELOT_VCAP_KEY_ETYPE;
	*(__be16 *)tagging_rule->key.etype.etype.value = htons(ETH_P_1588);
	*(__be16 *)tagging_rule->key.etype.etype.mask = htons(0xffff);
	tagging_rule->ingress_port_mask = user_ports;
	tagging_rule->prio = 1;
	tagging_rule->id.cookie = ocelot->num_phys_ports;
	tagging_rule->id.tc_offload = false;
	tagging_rule->block_id = VCAP_IS1;
	tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	tagging_rule->lookup = 0;
	tagging_rule->action.pag_override_mask = 0xff;
	tagging_rule->action.pag_val = ocelot->num_phys_ports;

	ret = ocelot_vcap_filter_add(ocelot, tagging_rule, NULL);
	if (ret) {
		kfree(tagging_rule);
		kfree(redirect_rule);
		return ret;
	}

	/* IS2: act on frames classified by the rule above */
	redirect_rule->key_type = OCELOT_VCAP_KEY_ANY;
	redirect_rule->ingress_port_mask = user_ports;
	redirect_rule->pag = ocelot->num_phys_ports;
	redirect_rule->prio = 1;
	redirect_rule->id.cookie = ocelot->num_phys_ports;
	redirect_rule->id.tc_offload = false;
	redirect_rule->block_id = VCAP_IS2;
	redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD;
	redirect_rule->lookup = 0;
	redirect_rule->action.cpu_copy_ena = true;
	if (felix->info->quirk_no_xtr_irq) {
		/* Redirect to the tag_8021q CPU but also copy PTP packets to
		 * the CPU port module
		 */
		redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
		redirect_rule->action.port_mask = cpu_ports;
	} else {
		/* Trap PTP packets only to the CPU port module (which is
		 * redirected to the NPI port)
		 */
		redirect_rule->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
		redirect_rule->action.port_mask = 0;
	}

	ret = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL);
	if (ret) {
		/* Roll back the IS1 rule and free the never-added IS2 rule */
		ocelot_vcap_filter_del(ocelot, tagging_rule);
		kfree(redirect_rule);
		return ret;
	}

	/* The ownership of the CPU port module's queues might have just been
	 * transferred to the tag_8021q tagger from the NPI-based tagger.
	 * So there might still be all sorts of crap in the queues. On the
	 * other hand, the MMIO-based matching of PTP frames is very brittle,
	 * so we need to be careful that there are no extra frames to be
	 * dequeued over MMIO, since we would never know to discard them.
	 */
	ocelot_drain_cpu_queue(ocelot, 0);

	return 0;
}
360
felix_teardown_mmio_filtering(struct felix * felix)361 static int felix_teardown_mmio_filtering(struct felix *felix)
362 {
363 struct ocelot_vcap_filter *tagging_rule, *redirect_rule;
364 struct ocelot_vcap_block *block_vcap_is1;
365 struct ocelot_vcap_block *block_vcap_is2;
366 struct ocelot *ocelot = &felix->ocelot;
367 int err;
368
369 block_vcap_is1 = &ocelot->block[VCAP_IS1];
370 block_vcap_is2 = &ocelot->block[VCAP_IS2];
371
372 tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1,
373 ocelot->num_phys_ports,
374 false);
375 if (!tagging_rule)
376 return -ENOENT;
377
378 err = ocelot_vcap_filter_del(ocelot, tagging_rule);
379 if (err)
380 return err;
381
382 redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2,
383 ocelot->num_phys_ports,
384 false);
385 if (!redirect_rule)
386 return -ENOENT;
387
388 return ocelot_vcap_filter_del(ocelot, redirect_rule);
389 }
390
/* Switch the hardware into tag_8021q mode: @cpu becomes the tag_8021q CPU
 * port, BPDU redirection to the CPU port module is disabled, flooding to the
 * CPU port module is cut off, the tag_8021q VLANs are installed and the PTP
 * MMIO trap is set up. Unwinds everything on failure.
 */
static int felix_setup_tag_8021q(struct dsa_switch *ds, int cpu)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	unsigned long cpu_flood;
	int port, err;

	felix_8021q_cpu_port_init(ocelot, cpu);

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_is_unused_port(ds, port))
			continue;

		/* This overwrites ocelot_init():
		 * Do not forward BPDU frames to the CPU port module,
		 * for 2 reasons:
		 * - When these packets are injected from the tag_8021q
		 *   CPU port, we want them to go out, not loop back
		 *   into the system.
		 * - STP traffic ingressing on a user port should go to
		 *   the tag_8021q CPU port, not to the hardware CPU
		 *   port module.
		 */
		ocelot_write_gix(ocelot,
				 ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0),
				 ANA_PORT_CPU_FWD_BPDU_CFG, port);
	}

	/* In tag_8021q mode, the CPU port module is unused, except for PTP
	 * frames. So we want to disable flooding of any kind to the CPU port
	 * module, since packets going there will end in a black hole.
	 */
	cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
	ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_UC);
	ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_MC);
	ocelot_rmw_rix(ocelot, 0, cpu_flood, ANA_PGID_PGID, PGID_BC);

	felix->dsa_8021q_ctx = kzalloc(sizeof(*felix->dsa_8021q_ctx),
				       GFP_KERNEL);
	if (!felix->dsa_8021q_ctx)
		return -ENOMEM;

	/* tag_8021q pushes its VLANs with the 802.1ad (S-tag) EtherType */
	felix->dsa_8021q_ctx->ops = &felix_tag_8021q_ops;
	felix->dsa_8021q_ctx->proto = htons(ETH_P_8021AD);
	felix->dsa_8021q_ctx->ds = ds;

	err = dsa_8021q_setup(felix->dsa_8021q_ctx, true);
	if (err)
		goto out_free_dsa_8021_ctx;

	err = felix_setup_mmio_filtering(felix);
	if (err)
		goto out_teardown_dsa_8021q;

	return 0;

out_teardown_dsa_8021q:
	dsa_8021q_setup(felix->dsa_8021q_ctx, false);
out_free_dsa_8021_ctx:
	kfree(felix->dsa_8021q_ctx);
	return err;
}
453
felix_teardown_tag_8021q(struct dsa_switch * ds,int cpu)454 static void felix_teardown_tag_8021q(struct dsa_switch *ds, int cpu)
455 {
456 struct ocelot *ocelot = ds->priv;
457 struct felix *felix = ocelot_to_felix(ocelot);
458 int err, port;
459
460 err = felix_teardown_mmio_filtering(felix);
461 if (err)
462 dev_err(ds->dev, "felix_teardown_mmio_filtering returned %d",
463 err);
464
465 err = dsa_8021q_setup(felix->dsa_8021q_ctx, false);
466 if (err)
467 dev_err(ds->dev, "dsa_8021q_setup returned %d", err);
468
469 kfree(felix->dsa_8021q_ctx);
470
471 for (port = 0; port < ds->num_ports; port++) {
472 if (dsa_is_unused_port(ds, port))
473 continue;
474
475 /* Restore the logic from ocelot_init:
476 * do not forward BPDU frames to the front ports.
477 */
478 ocelot_write_gix(ocelot,
479 ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff),
480 ANA_PORT_CPU_FWD_BPDU_CFG,
481 port);
482 }
483
484 felix_8021q_cpu_port_deinit(ocelot, cpu);
485 }
486
/* The CPU port module is connected to the Node Processor Interface (NPI). This
 * is the mode through which frames can be injected from and extracted to an
 * external CPU, over Ethernet. In NXP SoCs, the "external CPU" is the ARM CPU
 * running Linux, and this forms a DSA setup together with the enetc or fman
 * DSA master.
 */
/* Configure @port as the NPI port: the CPU port module's queues are drained
 * to / filled from it, and extraction/injection headers are enabled so DSA
 * tags reach the host.
 */
static void felix_npi_port_init(struct ocelot *ocelot, int port)
{
	ocelot->npi = port;

	ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M |
		     QSYS_EXT_CPU_CFG_EXT_CPU_PORT(port),
		     QSYS_EXT_CPU_CFG);

	/* NPI port Injection/Extraction configuration */
	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
			    ocelot->npi_xtr_prefix);
	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
			    ocelot->npi_inj_prefix);

	/* Disable transmission of pause frames */
	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
}
510
/* Undo felix_npi_port_init: point the external CPU port selection at a
 * nonexistent port and strip the injection/extraction headers from @port.
 */
static void felix_npi_port_deinit(struct ocelot *ocelot, int port)
{
	/* Restore hardware defaults */
	int unused_port = ocelot->num_phys_ports + 2;

	ocelot->npi = -1;

	/* Deliberately out of range, so no physical port acts as the
	 * external CPU port anymore
	 */
	ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPU_PORT(unused_port),
		     QSYS_EXT_CPU_CFG);

	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
			    OCELOT_TAG_PREFIX_DISABLED);
	ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
			    OCELOT_TAG_PREFIX_DISABLED);

	/* Enable transmission of pause frames */
	ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1);
}
529
/* Enable the NPI-based tagger on @cpu and open up flooding towards the CPU
 * port module. Always succeeds (returns 0).
 */
static int felix_setup_tag_npi(struct dsa_switch *ds, int cpu)
{
	struct ocelot *ocelot = ds->priv;
	unsigned long cpu_flood;

	felix_npi_port_init(ocelot, cpu);

	/* Include the CPU port module (and indirectly, the NPI port)
	 * in the forwarding mask for unknown unicast - the hardware
	 * default value for ANA_FLOODING_FLD_UNICAST excludes
	 * BIT(ocelot->num_phys_ports), and so does ocelot_init,
	 * since Ocelot relies on whitelisting MAC addresses towards
	 * PGID_CPU.
	 * We do this because DSA does not yet perform RX filtering,
	 * and the NPI port does not perform source address learning,
	 * so traffic sent to Linux is effectively unknown from the
	 * switch's perspective.
	 */
	cpu_flood = ANA_PGID_PGID_PGID(BIT(ocelot->num_phys_ports));
	ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_UC);
	ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_MC);
	ocelot_rmw_rix(ocelot, cpu_flood, cpu_flood, ANA_PGID_PGID, PGID_BC);

	return 0;
}
555
felix_teardown_tag_npi(struct dsa_switch * ds,int cpu)556 static void felix_teardown_tag_npi(struct dsa_switch *ds, int cpu)
557 {
558 struct ocelot *ocelot = ds->priv;
559
560 felix_npi_port_deinit(ocelot, cpu);
561 }
562
felix_set_tag_protocol(struct dsa_switch * ds,int cpu,enum dsa_tag_protocol proto)563 static int felix_set_tag_protocol(struct dsa_switch *ds, int cpu,
564 enum dsa_tag_protocol proto)
565 {
566 int err;
567
568 switch (proto) {
569 case DSA_TAG_PROTO_SEVILLE:
570 case DSA_TAG_PROTO_OCELOT:
571 err = felix_setup_tag_npi(ds, cpu);
572 break;
573 case DSA_TAG_PROTO_OCELOT_8021Q:
574 err = felix_setup_tag_8021q(ds, cpu);
575 break;
576 default:
577 err = -EPROTONOSUPPORT;
578 }
579
580 return err;
581 }
582
felix_del_tag_protocol(struct dsa_switch * ds,int cpu,enum dsa_tag_protocol proto)583 static void felix_del_tag_protocol(struct dsa_switch *ds, int cpu,
584 enum dsa_tag_protocol proto)
585 {
586 switch (proto) {
587 case DSA_TAG_PROTO_SEVILLE:
588 case DSA_TAG_PROTO_OCELOT:
589 felix_teardown_tag_npi(ds, cpu);
590 break;
591 case DSA_TAG_PROTO_OCELOT_8021Q:
592 felix_teardown_tag_8021q(ds, cpu);
593 break;
594 default:
595 break;
596 }
597 }
598
/* This always leaves the switch in a consistent state, because although the
 * tag_8021q setup can fail, the NPI setup can't. So either the change is made,
 * or the restoration is guaranteed to work.
 */
felix_change_tag_protocol(struct dsa_switch * ds,int cpu,enum dsa_tag_protocol proto)603 static int felix_change_tag_protocol(struct dsa_switch *ds, int cpu,
604 enum dsa_tag_protocol proto)
605 {
606 struct ocelot *ocelot = ds->priv;
607 struct felix *felix = ocelot_to_felix(ocelot);
608 enum dsa_tag_protocol old_proto = felix->tag_proto;
609 int err;
610
611 if (proto != DSA_TAG_PROTO_SEVILLE &&
612 proto != DSA_TAG_PROTO_OCELOT &&
613 proto != DSA_TAG_PROTO_OCELOT_8021Q)
614 return -EPROTONOSUPPORT;
615
616 felix_del_tag_protocol(ds, cpu, old_proto);
617
618 err = felix_set_tag_protocol(ds, cpu, proto);
619 if (err) {
620 felix_set_tag_protocol(ds, cpu, old_proto);
621 return err;
622 }
623
624 felix->tag_proto = proto;
625
626 return 0;
627 }
628
felix_get_tag_protocol(struct dsa_switch * ds,int port,enum dsa_tag_protocol mp)629 static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds,
630 int port,
631 enum dsa_tag_protocol mp)
632 {
633 struct ocelot *ocelot = ds->priv;
634 struct felix *felix = ocelot_to_felix(ocelot);
635
636 return felix->tag_proto;
637 }
638
felix_set_ageing_time(struct dsa_switch * ds,unsigned int ageing_time)639 static int felix_set_ageing_time(struct dsa_switch *ds,
640 unsigned int ageing_time)
641 {
642 struct ocelot *ocelot = ds->priv;
643
644 ocelot_set_ageing_time(ocelot, ageing_time);
645
646 return 0;
647 }
648
/* DSA .port_fdb_dump: report the hardware FDB entries of @port via @cb */
static int felix_fdb_dump(struct dsa_switch *ds, int port,
			  dsa_fdb_dump_cb_t *cb, void *data)
{
	return ocelot_fdb_dump(ds->priv, port, cb, data);
}
656
/* DSA .port_fdb_add: install a static FDB entry on @port */
static int felix_fdb_add(struct dsa_switch *ds, int port,
			 const unsigned char *addr, u16 vid)
{
	return ocelot_fdb_add(ds->priv, port, addr, vid);
}
664
/* DSA .port_fdb_del: remove a static FDB entry from @port */
static int felix_fdb_del(struct dsa_switch *ds, int port,
			 const unsigned char *addr, u16 vid)
{
	return ocelot_fdb_del(ds->priv, port, addr, vid);
}
672
felix_mdb_add(struct dsa_switch * ds,int port,const struct switchdev_obj_port_mdb * mdb)673 static int felix_mdb_add(struct dsa_switch *ds, int port,
674 const struct switchdev_obj_port_mdb *mdb)
675 {
676 struct ocelot *ocelot = ds->priv;
677
678 return ocelot_port_mdb_add(ocelot, port, mdb);
679 }
680
felix_mdb_del(struct dsa_switch * ds,int port,const struct switchdev_obj_port_mdb * mdb)681 static int felix_mdb_del(struct dsa_switch *ds, int port,
682 const struct switchdev_obj_port_mdb *mdb)
683 {
684 struct ocelot *ocelot = ds->priv;
685
686 return ocelot_port_mdb_del(ocelot, port, mdb);
687 }
688
/* DSA .port_stp_state_set: propagate the STP state of @port to hardware */
static void felix_bridge_stp_state_set(struct dsa_switch *ds, int port,
				       u8 state)
{
	struct ocelot *ocelot = ds->priv;

	/* Don't "return" a void expression; just call it */
	ocelot_bridge_stp_state_set(ocelot, port, state);
}
696
felix_pre_bridge_flags(struct dsa_switch * ds,int port,struct switchdev_brport_flags val,struct netlink_ext_ack * extack)697 static int felix_pre_bridge_flags(struct dsa_switch *ds, int port,
698 struct switchdev_brport_flags val,
699 struct netlink_ext_ack *extack)
700 {
701 struct ocelot *ocelot = ds->priv;
702
703 return ocelot_port_pre_bridge_flags(ocelot, port, val);
704 }
705
felix_bridge_flags(struct dsa_switch * ds,int port,struct switchdev_brport_flags val,struct netlink_ext_ack * extack)706 static int felix_bridge_flags(struct dsa_switch *ds, int port,
707 struct switchdev_brport_flags val,
708 struct netlink_ext_ack *extack)
709 {
710 struct ocelot *ocelot = ds->priv;
711
712 ocelot_port_bridge_flags(ocelot, port, val);
713
714 return 0;
715 }
716
felix_bridge_join(struct dsa_switch * ds,int port,struct net_device * br)717 static int felix_bridge_join(struct dsa_switch *ds, int port,
718 struct net_device *br)
719 {
720 struct ocelot *ocelot = ds->priv;
721
722 ocelot_port_bridge_join(ocelot, port, br);
723
724 return 0;
725 }
726
felix_bridge_leave(struct dsa_switch * ds,int port,struct net_device * br)727 static void felix_bridge_leave(struct dsa_switch *ds, int port,
728 struct net_device *br)
729 {
730 struct ocelot *ocelot = ds->priv;
731
732 ocelot_port_bridge_leave(ocelot, port, br);
733 }
734
felix_lag_join(struct dsa_switch * ds,int port,struct net_device * bond,struct netdev_lag_upper_info * info)735 static int felix_lag_join(struct dsa_switch *ds, int port,
736 struct net_device *bond,
737 struct netdev_lag_upper_info *info)
738 {
739 struct ocelot *ocelot = ds->priv;
740
741 return ocelot_port_lag_join(ocelot, port, bond, info);
742 }
743
felix_lag_leave(struct dsa_switch * ds,int port,struct net_device * bond)744 static int felix_lag_leave(struct dsa_switch *ds, int port,
745 struct net_device *bond)
746 {
747 struct ocelot *ocelot = ds->priv;
748
749 ocelot_port_lag_leave(ocelot, port, bond);
750
751 return 0;
752 }
753
felix_lag_change(struct dsa_switch * ds,int port)754 static int felix_lag_change(struct dsa_switch *ds, int port)
755 {
756 struct dsa_port *dp = dsa_to_port(ds, port);
757 struct ocelot *ocelot = ds->priv;
758
759 ocelot_port_lag_change(ocelot, port, dp->lag_tx_enabled);
760
761 return 0;
762 }
763
felix_vlan_prepare(struct dsa_switch * ds,int port,const struct switchdev_obj_port_vlan * vlan)764 static int felix_vlan_prepare(struct dsa_switch *ds, int port,
765 const struct switchdev_obj_port_vlan *vlan)
766 {
767 struct ocelot *ocelot = ds->priv;
768 u16 flags = vlan->flags;
769
770 /* Ocelot switches copy frames as-is to the CPU, so the flags:
771 * egress-untagged or not, pvid or not, make no difference. This
772 * behavior is already better than what DSA just tries to approximate
773 * when it installs the VLAN with the same flags on the CPU port.
774 * Just accept any configuration, and don't let ocelot deny installing
775 * multiple native VLANs on the NPI port, because the switch doesn't
776 * look at the port tag settings towards the NPI interface anyway.
777 */
778 if (port == ocelot->npi)
779 return 0;
780
781 return ocelot_vlan_prepare(ocelot, port, vlan->vid,
782 flags & BRIDGE_VLAN_INFO_PVID,
783 flags & BRIDGE_VLAN_INFO_UNTAGGED);
784 }
785
/* DSA .port_vlan_filtering: toggle VLAN awareness on @port */
static int felix_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
				struct netlink_ext_ack *extack)
{
	return ocelot_port_vlan_filtering(ds->priv, port, enabled);
}
793
felix_vlan_add(struct dsa_switch * ds,int port,const struct switchdev_obj_port_vlan * vlan,struct netlink_ext_ack * extack)794 static int felix_vlan_add(struct dsa_switch *ds, int port,
795 const struct switchdev_obj_port_vlan *vlan,
796 struct netlink_ext_ack *extack)
797 {
798 struct ocelot *ocelot = ds->priv;
799 u16 flags = vlan->flags;
800 int err;
801
802 err = felix_vlan_prepare(ds, port, vlan);
803 if (err)
804 return err;
805
806 return ocelot_vlan_add(ocelot, port, vlan->vid,
807 flags & BRIDGE_VLAN_INFO_PVID,
808 flags & BRIDGE_VLAN_INFO_UNTAGGED);
809 }
810
felix_vlan_del(struct dsa_switch * ds,int port,const struct switchdev_obj_port_vlan * vlan)811 static int felix_vlan_del(struct dsa_switch *ds, int port,
812 const struct switchdev_obj_port_vlan *vlan)
813 {
814 struct ocelot *ocelot = ds->priv;
815
816 return ocelot_vlan_del(ocelot, port, vlan->vid);
817 }
818
felix_port_enable(struct dsa_switch * ds,int port,struct phy_device * phy)819 static int felix_port_enable(struct dsa_switch *ds, int port,
820 struct phy_device *phy)
821 {
822 struct ocelot *ocelot = ds->priv;
823
824 ocelot_port_enable(ocelot, port, phy);
825
826 return 0;
827 }
828
felix_port_disable(struct dsa_switch * ds,int port)829 static void felix_port_disable(struct dsa_switch *ds, int port)
830 {
831 struct ocelot *ocelot = ds->priv;
832
833 return ocelot_port_disable(ocelot, port);
834 }
835
felix_phylink_validate(struct dsa_switch * ds,int port,unsigned long * supported,struct phylink_link_state * state)836 static void felix_phylink_validate(struct dsa_switch *ds, int port,
837 unsigned long *supported,
838 struct phylink_link_state *state)
839 {
840 struct ocelot *ocelot = ds->priv;
841 struct felix *felix = ocelot_to_felix(ocelot);
842
843 if (felix->info->phylink_validate)
844 felix->info->phylink_validate(ocelot, port, supported, state);
845 }
846
felix_phylink_mac_config(struct dsa_switch * ds,int port,unsigned int link_an_mode,const struct phylink_link_state * state)847 static void felix_phylink_mac_config(struct dsa_switch *ds, int port,
848 unsigned int link_an_mode,
849 const struct phylink_link_state *state)
850 {
851 struct ocelot *ocelot = ds->priv;
852 struct felix *felix = ocelot_to_felix(ocelot);
853 struct dsa_port *dp = dsa_to_port(ds, port);
854
855 if (felix->pcs[port])
856 phylink_set_pcs(dp->pl, &felix->pcs[port]->pcs);
857 }
858
/* DSA .phylink_mac_link_down: stop the MAC, flush the port's queues and hold
 * the port in reset until the next link-up event.
 */
static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
					unsigned int link_an_mode,
					phy_interface_t interface)
{
	struct ocelot *ocelot = ds->priv;
	struct ocelot_port *ocelot_port = ocelot->ports[port];
	int err;

	/* Disable MAC reception */
	ocelot_port_rmwl(ocelot_port, 0, DEV_MAC_ENA_CFG_RX_ENA,
			 DEV_MAC_ENA_CFG);

	/* Disable the port in the switch core */
	ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0);

	/* Flush frames still queued on the port; best effort, only log on
	 * failure since there is nothing else to do from here.
	 */
	err = ocelot_port_flush(ocelot, port);
	if (err)
		dev_err(ocelot->dev, "failed to flush port %d: %d\n",
			port, err);

	/* Put the port in reset. */
	ocelot_port_writel(ocelot_port,
			   DEV_CLOCK_CFG_MAC_TX_RST |
			   DEV_CLOCK_CFG_MAC_RX_RST |
			   DEV_CLOCK_CFG_LINK_SPEED(OCELOT_SPEED_1000),
			   DEV_CLOCK_CFG);
}
884
/* DSA .phylink_mac_link_up: take the port out of reset, program flow control
 * for the negotiated @speed / pause settings, re-enable the MAC and the
 * switch core port, and let the switch-model hook adjust the TAS/scheduler
 * speed if it has one.
 */
static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
				      unsigned int link_an_mode,
				      phy_interface_t interface,
				      struct phy_device *phydev,
				      int speed, int duplex,
				      bool tx_pause, bool rx_pause)
{
	struct ocelot *ocelot = ds->priv;
	struct ocelot_port *ocelot_port = ocelot->ports[port];
	struct felix *felix = ocelot_to_felix(ocelot);
	u32 mac_fc_cfg;

	/* Take port out of reset by clearing the MAC_TX_RST, MAC_RX_RST and
	 * PORT_RST bits in DEV_CLOCK_CFG. Note that the way this system is
	 * integrated is that the MAC speed is fixed and it's the PCS who is
	 * performing the rate adaptation, so we have to write "1000Mbps" into
	 * the LINK_SPEED field of DEV_CLOCK_CFG (which is also its default
	 * value).
	 */
	ocelot_port_writel(ocelot_port,
			   DEV_CLOCK_CFG_LINK_SPEED(OCELOT_SPEED_1000),
			   DEV_CLOCK_CFG);

	/* Map the negotiated speed to the FC_LINK_SPEED encoding */
	switch (speed) {
	case SPEED_10:
		mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(3);
		break;
	case SPEED_100:
		mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(2);
		break;
	case SPEED_1000:
	case SPEED_2500:
		mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(1);
		break;
	default:
		dev_err(ocelot->dev, "Unsupported speed on port %d: %d\n",
			port, speed);
		return;
	}

	/* handle Rx pause in all cases, with 2500base-X this is used for rate
	 * adaptation.
	 */
	mac_fc_cfg |= SYS_MAC_FC_CFG_RX_FC_ENA;

	if (tx_pause)
		mac_fc_cfg |= SYS_MAC_FC_CFG_TX_FC_ENA |
			      SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) |
			      SYS_MAC_FC_CFG_FC_LATENCY_CFG(0x7) |
			      SYS_MAC_FC_CFG_ZERO_PAUSE_ENA;

	/* Flow control. Link speed is only used here to evaluate the time
	 * specification in incoming pause frames.
	 */
	ocelot_write_rix(ocelot, mac_fc_cfg, SYS_MAC_FC_CFG, port);

	/* Clear the flow control policer */
	ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port);

	/* Undo the effects of felix_phylink_mac_link_down:
	 * enable MAC module
	 */
	ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA |
			   DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG);

	/* Enable receiving frames on the port, and activate auto-learning of
	 * MAC addresses.
	 */
	ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_LEARNAUTO |
			 ANA_PORT_PORT_CFG_RECV_ENA |
			 ANA_PORT_PORT_CFG_PORTID_VAL(port),
			 ANA_PORT_PORT_CFG, port);

	/* Core: Enable port for frame transfer */
	ocelot_fields_write(ocelot, port,
			    QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);

	/* e.g. Felix TSN: adjust the time-aware shaper to the link speed */
	if (felix->info->port_sched_speed_set)
		felix->info->port_sched_speed_set(ocelot, port, speed);
}
964
/* Set up the default QoS classification for a port: enable PCP-based
 * classification and program the PCP/DEI -> (QoS class, drop precedence)
 * mapping table.
 */
static void felix_port_qos_map_init(struct ocelot *ocelot, int port)
{
	int i;

	/* Classify on the PCP bits of the VLAN tag */
	ocelot_rmw_gix(ocelot,
		       ANA_PORT_QOS_CFG_QOS_PCP_ENA,
		       ANA_PORT_QOS_CFG_QOS_PCP_ENA,
		       ANA_PORT_QOS_CFG,
		       port);

	/* One table entry per (PCP, DEI) combination: 8 PCP values times
	 * 2 DEI values. Each entry derives both the drop precedence bit
	 * and the QoS class from the index i.
	 */
	for (i = 0; i < OCELOT_NUM_TC * 2; i++) {
		ocelot_rmw_ix(ocelot,
			      (ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL & i) |
			      ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL(i),
			      ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL |
			      ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL_M,
			      ANA_PORT_PCP_DEI_MAP,
			      port, i);
	}
}
985
/* ethtool -S string names; delegated to the common Ocelot library.
 *
 * Note: the previous `return ocelot_get_strings(...)` returned a void
 * expression from a void function, which is a constraint violation in
 * ISO C (C11 6.8.6.4p1); call the function as a plain statement instead.
 */
static void felix_get_strings(struct dsa_switch *ds, int port,
			      u32 stringset, u8 *data)
{
	struct ocelot *ocelot = ds->priv;

	ocelot_get_strings(ocelot, port, stringset, data);
}
993
/* ethtool -S counter values; delegated to the common Ocelot library */
static void felix_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
{
	ocelot_get_ethtool_stats(ds->priv, port, data);
}
1000
felix_get_sset_count(struct dsa_switch * ds,int port,int sset)1001 static int felix_get_sset_count(struct dsa_switch *ds, int port, int sset)
1002 {
1003 struct ocelot *ocelot = ds->priv;
1004
1005 return ocelot_get_sset_count(ocelot, port, sset);
1006 }
1007
felix_get_ts_info(struct dsa_switch * ds,int port,struct ethtool_ts_info * info)1008 static int felix_get_ts_info(struct dsa_switch *ds, int port,
1009 struct ethtool_ts_info *info)
1010 {
1011 struct ocelot *ocelot = ds->priv;
1012
1013 return ocelot_get_ts_info(ocelot, port, info);
1014 }
1015
/* Walk the DT "ports" container and record the phy-mode of each port into
 * port_phy_modes[], validating it against the SoC's capabilities.
 *
 * Returns 0 on success or a negative errno; the child node reference is
 * dropped on every error path before returning.
 *
 * Fixes: "port" is a u32, so print it with %u instead of %d; also keep
 * user-visible strings unsplit so they remain greppable (kernel convention).
 */
static int felix_parse_ports_node(struct felix *felix,
				  struct device_node *ports_node,
				  phy_interface_t *port_phy_modes)
{
	struct ocelot *ocelot = &felix->ocelot;
	struct device *dev = felix->ocelot.dev;
	struct device_node *child;

	for_each_available_child_of_node(ports_node, child) {
		phy_interface_t phy_mode;
		u32 port;
		int err;

		/* Get switch port number from DT */
		if (of_property_read_u32(child, "reg", &port) < 0) {
			dev_err(dev, "Port number not defined in device tree (property \"reg\")\n");
			of_node_put(child);
			return -ENODEV;
		}

		/* Get PHY mode from DT */
		err = of_get_phy_mode(child, &phy_mode);
		if (err) {
			dev_err(dev, "Failed to read phy-mode or phy-interface-type property for port %u\n",
				port);
			of_node_put(child);
			return -ENODEV;
		}

		/* Reject modes the SerDes/PCS of this SoC cannot support */
		err = felix->info->prevalidate_phy_mode(ocelot, port, phy_mode);
		if (err < 0) {
			dev_err(dev, "Unsupported PHY mode %s on port %u\n",
				phy_modes(phy_mode), port);
			of_node_put(child);
			return err;
		}

		port_phy_modes[port] = phy_mode;
	}

	return 0;
}
1060
/* Locate the mandatory "ports" container in the switch's DT node and parse
 * the per-port phy-modes out of it. Returns 0 or a negative errno.
 */
static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes)
{
	struct device *dev = felix->ocelot.dev;
	struct device_node *ports_node;
	int err;

	ports_node = of_get_child_by_name(dev->of_node, "ports");
	if (!ports_node) {
		dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
		return -ENODEV;
	}

	err = felix_parse_ports_node(felix, ports_node, port_phy_modes);
	of_node_put(ports_node);

	return err;
}
1081
felix_init_structs(struct felix * felix,int num_phys_ports)1082 static int felix_init_structs(struct felix *felix, int num_phys_ports)
1083 {
1084 struct ocelot *ocelot = &felix->ocelot;
1085 phy_interface_t *port_phy_modes;
1086 struct resource res;
1087 int port, i, err;
1088
1089 ocelot->num_phys_ports = num_phys_ports;
1090 ocelot->ports = devm_kcalloc(ocelot->dev, num_phys_ports,
1091 sizeof(struct ocelot_port *), GFP_KERNEL);
1092 if (!ocelot->ports)
1093 return -ENOMEM;
1094
1095 ocelot->map = felix->info->map;
1096 ocelot->stats_layout = felix->info->stats_layout;
1097 ocelot->num_stats = felix->info->num_stats;
1098 ocelot->num_mact_rows = felix->info->num_mact_rows;
1099 ocelot->vcap = felix->info->vcap;
1100 ocelot->ops = felix->info->ops;
1101 ocelot->npi_inj_prefix = OCELOT_TAG_PREFIX_SHORT;
1102 ocelot->npi_xtr_prefix = OCELOT_TAG_PREFIX_SHORT;
1103 ocelot->devlink = felix->ds->devlink;
1104
1105 port_phy_modes = kcalloc(num_phys_ports, sizeof(phy_interface_t),
1106 GFP_KERNEL);
1107 if (!port_phy_modes)
1108 return -ENOMEM;
1109
1110 err = felix_parse_dt(felix, port_phy_modes);
1111 if (err) {
1112 kfree(port_phy_modes);
1113 return err;
1114 }
1115
1116 for (i = 0; i < TARGET_MAX; i++) {
1117 struct regmap *target;
1118
1119 if (!felix->info->target_io_res[i].name)
1120 continue;
1121
1122 memcpy(&res, &felix->info->target_io_res[i], sizeof(res));
1123 res.flags = IORESOURCE_MEM;
1124 res.start += felix->switch_base;
1125 res.end += felix->switch_base;
1126
1127 target = ocelot_regmap_init(ocelot, &res);
1128 if (IS_ERR(target)) {
1129 dev_err(ocelot->dev,
1130 "Failed to map device memory space\n");
1131 kfree(port_phy_modes);
1132 return PTR_ERR(target);
1133 }
1134
1135 ocelot->targets[i] = target;
1136 }
1137
1138 err = ocelot_regfields_init(ocelot, felix->info->regfields);
1139 if (err) {
1140 dev_err(ocelot->dev, "failed to init reg fields map\n");
1141 kfree(port_phy_modes);
1142 return err;
1143 }
1144
1145 for (port = 0; port < num_phys_ports; port++) {
1146 struct ocelot_port *ocelot_port;
1147 struct regmap *target;
1148
1149 ocelot_port = devm_kzalloc(ocelot->dev,
1150 sizeof(struct ocelot_port),
1151 GFP_KERNEL);
1152 if (!ocelot_port) {
1153 dev_err(ocelot->dev,
1154 "failed to allocate port memory\n");
1155 kfree(port_phy_modes);
1156 return -ENOMEM;
1157 }
1158
1159 memcpy(&res, &felix->info->port_io_res[port], sizeof(res));
1160 res.flags = IORESOURCE_MEM;
1161 res.start += felix->switch_base;
1162 res.end += felix->switch_base;
1163
1164 target = ocelot_regmap_init(ocelot, &res);
1165 if (IS_ERR(target)) {
1166 dev_err(ocelot->dev,
1167 "Failed to map memory space for port %d\n",
1168 port);
1169 kfree(port_phy_modes);
1170 return PTR_ERR(target);
1171 }
1172
1173 ocelot_port->phy_mode = port_phy_modes[port];
1174 ocelot_port->ocelot = ocelot;
1175 ocelot_port->target = target;
1176 ocelot->ports[port] = ocelot_port;
1177 }
1178
1179 kfree(port_phy_modes);
1180
1181 if (felix->info->mdio_bus_alloc) {
1182 err = felix->info->mdio_bus_alloc(ocelot);
1183 if (err < 0)
1184 return err;
1185 }
1186
1187 return 0;
1188 }
1189
1190 /* Hardware initialization done here so that we can allocate structures with
1191 * devm without fear of dsa_register_switch returning -EPROBE_DEFER and causing
1192 * us to allocate structures twice (leak memory) and map PCI memory twice
1193 * (which will not work).
1194 */
static int felix_setup(struct dsa_switch *ds)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	int port, err;

	err = felix_init_structs(felix, ds->num_ports);
	if (err)
		return err;

	err = ocelot_init(ocelot);
	if (err)
		goto out_mdiobus_free;

	/* PTP is optional: a timestamping init failure degrades the driver
	 * to non-PTP operation instead of failing the whole setup.
	 */
	if (ocelot->ptp) {
		err = ocelot_init_timestamp(ocelot, felix->info->ptp_caps);
		if (err) {
			dev_err(ocelot->dev,
				"Timestamp initialization failed\n");
			ocelot->ptp = 0;
		}
	}

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_is_unused_port(ds, port))
			continue;

		ocelot_init_port(ocelot, port);

		/* Set the default QoS Classification based on PCP and DEI
		 * bits of vlan tag.
		 */
		felix_port_qos_map_init(ocelot, port);
	}

	err = ocelot_devlink_sb_register(ocelot);
	if (err)
		goto out_deinit_ports;

	for (port = 0; port < ds->num_ports; port++) {
		if (!dsa_is_cpu_port(ds, port))
			continue;

		/* The initial tag protocol is NPI which always returns 0, so
		 * there's no real point in checking for errors.
		 */
		felix_set_tag_protocol(ds, port, felix->tag_proto);
	}

	ds->mtu_enforcement_ingress = true;
	ds->assisted_learning_on_cpu_port = true;

	return 0;

	/* Error unwinding, in reverse order of initialization */
out_deinit_ports:
	for (port = 0; port < ocelot->num_phys_ports; port++) {
		if (dsa_is_unused_port(ds, port))
			continue;

		ocelot_deinit_port(ocelot, port);
	}

	ocelot_deinit_timestamp(ocelot);
	ocelot_deinit(ocelot);

out_mdiobus_free:
	if (felix->info->mdio_bus_free)
		felix->info->mdio_bus_free(ocelot);

	return err;
}
1266
/* Undo felix_setup in reverse order: detach the tagging protocol from the
 * CPU port(s), unregister devlink shared buffers, tear down timestamping and
 * the ocelot core, deinit each used port, and free the SoC MDIO bus.
 */
static void felix_teardown(struct dsa_switch *ds)
{
	struct ocelot *ocelot = ds->priv;
	struct felix *felix = ocelot_to_felix(ocelot);
	int port;

	for (port = 0; port < ds->num_ports; port++) {
		if (!dsa_is_cpu_port(ds, port))
			continue;

		felix_del_tag_protocol(ds, port, felix->tag_proto);
	}

	ocelot_devlink_sb_unregister(ocelot);
	ocelot_deinit_timestamp(ocelot);
	ocelot_deinit(ocelot);

	for (port = 0; port < ocelot->num_phys_ports; port++) {
		if (dsa_is_unused_port(ds, port))
			continue;

		ocelot_deinit_port(ocelot, port);
	}

	if (felix->info->mdio_bus_free)
		felix->info->mdio_bus_free(ocelot);
}
1294
felix_hwtstamp_get(struct dsa_switch * ds,int port,struct ifreq * ifr)1295 static int felix_hwtstamp_get(struct dsa_switch *ds, int port,
1296 struct ifreq *ifr)
1297 {
1298 struct ocelot *ocelot = ds->priv;
1299
1300 return ocelot_hwstamp_get(ocelot, port, ifr);
1301 }
1302
felix_hwtstamp_set(struct dsa_switch * ds,int port,struct ifreq * ifr)1303 static int felix_hwtstamp_set(struct dsa_switch *ds, int port,
1304 struct ifreq *ifr)
1305 {
1306 struct ocelot *ocelot = ds->priv;
1307
1308 return ocelot_hwstamp_set(ocelot, port, ifr);
1309 }
1310
felix_check_xtr_pkt(struct ocelot * ocelot,unsigned int ptp_type)1311 static bool felix_check_xtr_pkt(struct ocelot *ocelot, unsigned int ptp_type)
1312 {
1313 struct felix *felix = ocelot_to_felix(ocelot);
1314 int err, grp = 0;
1315
1316 if (felix->tag_proto != DSA_TAG_PROTO_OCELOT_8021Q)
1317 return false;
1318
1319 if (!felix->info->quirk_no_xtr_irq)
1320 return false;
1321
1322 if (ptp_type == PTP_CLASS_NONE)
1323 return false;
1324
1325 while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
1326 struct sk_buff *skb;
1327 unsigned int type;
1328
1329 err = ocelot_xtr_poll_frame(ocelot, grp, &skb);
1330 if (err)
1331 goto out;
1332
1333 /* We trap to the CPU port module all PTP frames, but
1334 * felix_rxtstamp() only gets called for event frames.
1335 * So we need to avoid sending duplicate general
1336 * message frames by running a second BPF classifier
1337 * here and dropping those.
1338 */
1339 __skb_push(skb, ETH_HLEN);
1340
1341 type = ptp_classify_raw(skb);
1342
1343 __skb_pull(skb, ETH_HLEN);
1344
1345 if (type == PTP_CLASS_NONE) {
1346 kfree_skb(skb);
1347 continue;
1348 }
1349
1350 netif_rx(skb);
1351 }
1352
1353 out:
1354 if (err < 0)
1355 ocelot_drain_cpu_queue(ocelot, 0);
1356
1357 return true;
1358 }
1359
/* DSA .port_rxtstamp hook: reconstruct the full 64-bit RX timestamp from the
 * 32-bit hardware value in the extraction header, using the current PTP time
 * as the reference for the upper 32 bits. Returns false to let DSA deliver
 * the skb normally, or true if the skb was consumed (no-XTR-IRQ quirk path).
 */
static bool felix_rxtstamp(struct dsa_switch *ds, int port,
			   struct sk_buff *skb, unsigned int type)
{
	/* The extraction frame header sits just before the MAC header */
	u8 *extraction = skb->data - ETH_HLEN - OCELOT_TAG_LEN;
	struct skb_shared_hwtstamps *shhwtstamps;
	struct ocelot *ocelot = ds->priv;
	u32 tstamp_lo, tstamp_hi;
	struct timespec64 ts;
	u64 tstamp, val;

	/* If the "no XTR IRQ" workaround is in use, tell DSA to defer this skb
	 * for RX timestamping. Then free it, and poll for its copy through
	 * MMIO in the CPU port module, and inject that into the stack from
	 * ocelot_xtr_poll().
	 */
	if (felix_check_xtr_pkt(ocelot, type)) {
		kfree_skb(skb);
		return true;
	}

	/* Read the current full PTP time as a reference */
	ocelot_ptp_gettime64(&ocelot->ptp_info, &ts);
	tstamp = ktime_set(ts.tv_sec, ts.tv_nsec);

	/* The hardware stamps only the low 32 bits into the rewriter field */
	ocelot_xfh_get_rew_val(extraction, &val);
	tstamp_lo = (u32)val;

	/* Combine the hardware's low 32 bits with the reference's high 32
	 * bits, compensating for a possible wraparound between the moment
	 * the frame was stamped and now.
	 */
	tstamp_hi = tstamp >> 32;
	if ((tstamp & 0xffffffff) < tstamp_lo)
		tstamp_hi--;

	tstamp = ((u64)tstamp_hi << 32) | tstamp_lo;

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
	shhwtstamps->hwtstamp = tstamp;
	return false;
}
1397
felix_txtstamp(struct dsa_switch * ds,int port,struct sk_buff * skb)1398 static void felix_txtstamp(struct dsa_switch *ds, int port,
1399 struct sk_buff *skb)
1400 {
1401 struct ocelot *ocelot = ds->priv;
1402 struct sk_buff *clone = NULL;
1403
1404 if (!ocelot->ptp)
1405 return;
1406
1407 if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone))
1408 return;
1409
1410 if (clone)
1411 OCELOT_SKB_CB(skb)->clone = clone;
1412 }
1413
felix_change_mtu(struct dsa_switch * ds,int port,int new_mtu)1414 static int felix_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
1415 {
1416 struct ocelot *ocelot = ds->priv;
1417
1418 ocelot_port_set_maxlen(ocelot, port, new_mtu);
1419
1420 return 0;
1421 }
1422
felix_get_max_mtu(struct dsa_switch * ds,int port)1423 static int felix_get_max_mtu(struct dsa_switch *ds, int port)
1424 {
1425 struct ocelot *ocelot = ds->priv;
1426
1427 return ocelot_get_max_mtu(ocelot, port);
1428 }
1429
/* tc flower filter offload: install/replace a rule via the Ocelot library */
static int felix_cls_flower_add(struct dsa_switch *ds, int port,
				struct flow_cls_offload *cls, bool ingress)
{
	return ocelot_cls_flower_replace(ds->priv, port, cls, ingress);
}
1437
/* tc flower filter offload: remove a rule via the Ocelot library */
static int felix_cls_flower_del(struct dsa_switch *ds, int port,
				struct flow_cls_offload *cls, bool ingress)
{
	return ocelot_cls_flower_destroy(ds->priv, port, cls, ingress);
}
1445
/* tc flower filter offload: fetch rule statistics via the Ocelot library */
static int felix_cls_flower_stats(struct dsa_switch *ds, int port,
				  struct flow_cls_offload *cls, bool ingress)
{
	return ocelot_cls_flower_stats(ds->priv, port, cls, ingress);
}
1453
/* Offload a tc matchall policer onto the port's hardware policer.
 * The rate is converted from bytes per second to what the ocelot policer
 * expects — presumably kilobits per second (bytes/s / 1000 * 8); TODO
 * confirm against struct ocelot_policer documentation.
 */
static int felix_port_policer_add(struct dsa_switch *ds, int port,
				  struct dsa_mall_policer_tc_entry *policer)
{
	struct ocelot *ocelot = ds->priv;
	struct ocelot_policer pol = {
		.rate = div_u64(policer->rate_bytes_per_sec, 1000) * 8,
		.burst = policer->burst,
	};

	return ocelot_port_policer_add(ocelot, port, &pol);
}
1465
felix_port_policer_del(struct dsa_switch * ds,int port)1466 static void felix_port_policer_del(struct dsa_switch *ds, int port)
1467 {
1468 struct ocelot *ocelot = ds->priv;
1469
1470 ocelot_port_policer_del(ocelot, port);
1471 }
1472
felix_port_setup_tc(struct dsa_switch * ds,int port,enum tc_setup_type type,void * type_data)1473 static int felix_port_setup_tc(struct dsa_switch *ds, int port,
1474 enum tc_setup_type type,
1475 void *type_data)
1476 {
1477 struct ocelot *ocelot = ds->priv;
1478 struct felix *felix = ocelot_to_felix(ocelot);
1479
1480 if (felix->info->port_setup_tc)
1481 return felix->info->port_setup_tc(ds, port, type, type_data);
1482 else
1483 return -EOPNOTSUPP;
1484 }
1485
/* devlink shared buffer: pool info, delegated to the Ocelot library */
static int felix_sb_pool_get(struct dsa_switch *ds, unsigned int sb_index,
			     u16 pool_index,
			     struct devlink_sb_pool_info *pool_info)
{
	return ocelot_sb_pool_get(ds->priv, sb_index, pool_index, pool_info);
}
1494
/* devlink shared buffer: configure a pool, delegated to the Ocelot library */
static int felix_sb_pool_set(struct dsa_switch *ds, unsigned int sb_index,
			     u16 pool_index, u32 size,
			     enum devlink_sb_threshold_type threshold_type,
			     struct netlink_ext_ack *extack)
{
	return ocelot_sb_pool_set(ds->priv, sb_index, pool_index, size,
				  threshold_type, extack);
}
1505
/* devlink shared buffer: per-port pool threshold, read path */
static int felix_sb_port_pool_get(struct dsa_switch *ds, int port,
				  unsigned int sb_index, u16 pool_index,
				  u32 *p_threshold)
{
	return ocelot_sb_port_pool_get(ds->priv, port, sb_index, pool_index,
				       p_threshold);
}
1515
/* devlink shared buffer: per-port pool threshold, write path */
static int felix_sb_port_pool_set(struct dsa_switch *ds, int port,
				  unsigned int sb_index, u16 pool_index,
				  u32 threshold, struct netlink_ext_ack *extack)
{
	return ocelot_sb_port_pool_set(ds->priv, port, sb_index, pool_index,
				       threshold, extack);
}
1525
/* devlink shared buffer: per-TC pool binding, read path */
static int felix_sb_tc_pool_bind_get(struct dsa_switch *ds, int port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u16 *p_pool_index, u32 *p_threshold)
{
	return ocelot_sb_tc_pool_bind_get(ds->priv, port, sb_index, tc_index,
					  pool_type, p_pool_index,
					  p_threshold);
}
1537
/* devlink shared buffer: per-TC pool binding, write path */
static int felix_sb_tc_pool_bind_set(struct dsa_switch *ds, int port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u16 pool_index, u32 threshold,
				     struct netlink_ext_ack *extack)
{
	return ocelot_sb_tc_pool_bind_set(ds->priv, port, sb_index, tc_index,
					  pool_type, pool_index, threshold,
					  extack);
}
1550
felix_sb_occ_snapshot(struct dsa_switch * ds,unsigned int sb_index)1551 static int felix_sb_occ_snapshot(struct dsa_switch *ds,
1552 unsigned int sb_index)
1553 {
1554 struct ocelot *ocelot = ds->priv;
1555
1556 return ocelot_sb_occ_snapshot(ocelot, sb_index);
1557 }
1558
felix_sb_occ_max_clear(struct dsa_switch * ds,unsigned int sb_index)1559 static int felix_sb_occ_max_clear(struct dsa_switch *ds,
1560 unsigned int sb_index)
1561 {
1562 struct ocelot *ocelot = ds->priv;
1563
1564 return ocelot_sb_occ_max_clear(ocelot, sb_index);
1565 }
1566
/* devlink shared buffer: per-port pool occupancy (current and max) */
static int felix_sb_occ_port_pool_get(struct dsa_switch *ds, int port,
				      unsigned int sb_index, u16 pool_index,
				      u32 *p_cur, u32 *p_max)
{
	return ocelot_sb_occ_port_pool_get(ds->priv, port, sb_index, pool_index,
					   p_cur, p_max);
}
1576
/* devlink shared buffer: per-TC occupancy (current and max) */
static int felix_sb_occ_tc_port_bind_get(struct dsa_switch *ds, int port,
					 unsigned int sb_index, u16 tc_index,
					 enum devlink_sb_pool_type pool_type,
					 u32 *p_cur, u32 *p_max)
{
	return ocelot_sb_occ_tc_port_bind_get(ds->priv, port, sb_index,
					      tc_index, pool_type, p_cur,
					      p_max);
}
1587
felix_mrp_add(struct dsa_switch * ds,int port,const struct switchdev_obj_mrp * mrp)1588 static int felix_mrp_add(struct dsa_switch *ds, int port,
1589 const struct switchdev_obj_mrp *mrp)
1590 {
1591 struct ocelot *ocelot = ds->priv;
1592
1593 return ocelot_mrp_add(ocelot, port, mrp);
1594 }
1595
felix_mrp_del(struct dsa_switch * ds,int port,const struct switchdev_obj_mrp * mrp)1596 static int felix_mrp_del(struct dsa_switch *ds, int port,
1597 const struct switchdev_obj_mrp *mrp)
1598 {
1599 struct ocelot *ocelot = ds->priv;
1600
1601 return ocelot_mrp_add(ocelot, port, mrp);
1602 }
1603
1604 static int
felix_mrp_add_ring_role(struct dsa_switch * ds,int port,const struct switchdev_obj_ring_role_mrp * mrp)1605 felix_mrp_add_ring_role(struct dsa_switch *ds, int port,
1606 const struct switchdev_obj_ring_role_mrp *mrp)
1607 {
1608 struct ocelot *ocelot = ds->priv;
1609
1610 return ocelot_mrp_add_ring_role(ocelot, port, mrp);
1611 }
1612
1613 static int
felix_mrp_del_ring_role(struct dsa_switch * ds,int port,const struct switchdev_obj_ring_role_mrp * mrp)1614 felix_mrp_del_ring_role(struct dsa_switch *ds, int port,
1615 const struct switchdev_obj_ring_role_mrp *mrp)
1616 {
1617 struct ocelot *ocelot = ds->priv;
1618
1619 return ocelot_mrp_del_ring_role(ocelot, port, mrp);
1620 }
1621
/* DSA switch operations shared by all Felix-family (Ocelot-compatible)
 * switches; mostly thin wrappers around the common Ocelot switch library.
 */
const struct dsa_switch_ops felix_switch_ops = {
	.get_tag_protocol		= felix_get_tag_protocol,
	.change_tag_protocol		= felix_change_tag_protocol,
	.setup				= felix_setup,
	.teardown			= felix_teardown,
	.set_ageing_time		= felix_set_ageing_time,
	.get_strings			= felix_get_strings,
	.get_ethtool_stats		= felix_get_ethtool_stats,
	.get_sset_count			= felix_get_sset_count,
	.get_ts_info			= felix_get_ts_info,
	.phylink_validate		= felix_phylink_validate,
	.phylink_mac_config		= felix_phylink_mac_config,
	.phylink_mac_link_down		= felix_phylink_mac_link_down,
	.phylink_mac_link_up		= felix_phylink_mac_link_up,
	.port_enable			= felix_port_enable,
	.port_disable			= felix_port_disable,
	.port_fdb_dump			= felix_fdb_dump,
	.port_fdb_add			= felix_fdb_add,
	.port_fdb_del			= felix_fdb_del,
	.port_mdb_add			= felix_mdb_add,
	.port_mdb_del			= felix_mdb_del,
	.port_pre_bridge_flags		= felix_pre_bridge_flags,
	.port_bridge_flags		= felix_bridge_flags,
	.port_bridge_join		= felix_bridge_join,
	.port_bridge_leave		= felix_bridge_leave,
	.port_lag_join			= felix_lag_join,
	.port_lag_leave			= felix_lag_leave,
	.port_lag_change		= felix_lag_change,
	.port_stp_state_set		= felix_bridge_stp_state_set,
	.port_vlan_filtering		= felix_vlan_filtering,
	.port_vlan_add			= felix_vlan_add,
	.port_vlan_del			= felix_vlan_del,
	.port_hwtstamp_get		= felix_hwtstamp_get,
	.port_hwtstamp_set		= felix_hwtstamp_set,
	.port_rxtstamp			= felix_rxtstamp,
	.port_txtstamp			= felix_txtstamp,
	.port_change_mtu		= felix_change_mtu,
	.port_max_mtu			= felix_get_max_mtu,
	.port_policer_add		= felix_port_policer_add,
	.port_policer_del		= felix_port_policer_del,
	.cls_flower_add			= felix_cls_flower_add,
	.cls_flower_del			= felix_cls_flower_del,
	.cls_flower_stats		= felix_cls_flower_stats,
	.port_setup_tc			= felix_port_setup_tc,
	.devlink_sb_pool_get		= felix_sb_pool_get,
	.devlink_sb_pool_set		= felix_sb_pool_set,
	.devlink_sb_port_pool_get	= felix_sb_port_pool_get,
	.devlink_sb_port_pool_set	= felix_sb_port_pool_set,
	.devlink_sb_tc_pool_bind_get	= felix_sb_tc_pool_bind_get,
	.devlink_sb_tc_pool_bind_set	= felix_sb_tc_pool_bind_set,
	.devlink_sb_occ_snapshot	= felix_sb_occ_snapshot,
	.devlink_sb_occ_max_clear	= felix_sb_occ_max_clear,
	.devlink_sb_occ_port_pool_get	= felix_sb_occ_port_pool_get,
	.devlink_sb_occ_tc_port_bind_get= felix_sb_occ_tc_port_bind_get,
	.port_mrp_add			= felix_mrp_add,
	.port_mrp_del			= felix_mrp_del,
	.port_mrp_add_ring_role		= felix_mrp_add_ring_role,
	.port_mrp_del_ring_role		= felix_mrp_del_ring_role,
};
1681
felix_port_to_netdev(struct ocelot * ocelot,int port)1682 struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
1683 {
1684 struct felix *felix = ocelot_to_felix(ocelot);
1685 struct dsa_switch *ds = felix->ds;
1686
1687 if (!dsa_is_user_port(ds, port))
1688 return NULL;
1689
1690 return dsa_to_port(ds, port)->slave;
1691 }
1692
felix_netdev_to_port(struct net_device * dev)1693 int felix_netdev_to_port(struct net_device *dev)
1694 {
1695 struct dsa_port *dp;
1696
1697 dp = dsa_port_from_netdev(dev);
1698 if (IS_ERR(dp))
1699 return -EINVAL;
1700
1701 return dp->index;
1702 }
1703