// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include "sparx5_main_regs.h"
#include "sparx5_main.h"

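/* Write the port membership bitmap of a VLAN to the three 32-bit
 * ANA_L3 VLAN mask registers.
 */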
static int sparx5_vlant_set_mask(struct sparx5 *sparx5, u16 vid)
{
	u32 mask[3];

	/* Divide up mask in 32 bit words */
	bitmap_to_arr32(mask, sparx5->vlan_mask[vid], SPX5_PORTS);

	/* Output mask to respective registers */
	spx5_wr(mask[0], sparx5, ANA_L3_VLAN_MASK_CFG(vid));
	spx5_wr(mask[1], sparx5, ANA_L3_VLAN_MASK_CFG1(vid));
	spx5_wr(mask[2], sparx5, ANA_L3_VLAN_MASK_CFG2(vid));

	return 0;
}

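/* Enable VLAN processing and set up a one-to-one VLAN to FID mapping
 * for all valid VIDs.
 */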
void sparx5_vlan_init(struct sparx5 *sparx5)
{
	u16 vid;

	spx5_rmw(ANA_L3_VLAN_CTRL_VLAN_ENA_SET(1),
		 ANA_L3_VLAN_CTRL_VLAN_ENA,
		 sparx5,
		 ANA_L3_VLAN_CTRL);

	/* Map VLAN = FID */
	for (vid = NULL_VID; vid < VLAN_N_VID; vid++)
		spx5_rmw(ANA_L3_VLAN_CFG_VLAN_FID_SET(vid),
			 ANA_L3_VLAN_CFG_VLAN_FID,
			 sparx5,
			 ANA_L3_VLAN_CFG(vid));
}

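/* Give a port its initial classifier VLAN configuration: VLAN unaware
 * with the current PVID as port VID.
 */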
void sparx5_vlan_port_setup(struct sparx5 *sparx5, int portno)
{
	struct sparx5_port *port = sparx5->ports[portno];

	/* Configure PVID */
	spx5_rmw(ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_SET(0) |
		 ANA_CL_VLAN_CTRL_PORT_VID_SET(port->pvid),
		 ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA |
		 ANA_CL_VLAN_CTRL_PORT_VID,
		 sparx5,
		 ANA_CL_VLAN_CTRL(port->portno));
}

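/* Add a port to a VLAN: record the untagged (native) VID if requested,
 * update the VLAN port membership mask and, if this is the PVID, the
 * default ingress classification, then apply the port configuration.
 */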
int sparx5_vlan_vid_add(struct sparx5_port *port, u16 vid, bool pvid,
			bool untagged)
{
	struct sparx5 *sparx5 = port->sparx5;
	int ret;

	/* Untagged egress vlan classification */
	if (untagged && port->vid != vid) {
		if (port->vid) {
			netdev_err(port->ndev,
				   "Port already has a native VLAN: %d\n",
				   port->vid);
			return -EBUSY;
		}
		port->vid = vid;
	}

	/* Make the port a member of the VLAN */
	set_bit(port->portno, sparx5->vlan_mask[vid]);
	ret = sparx5_vlant_set_mask(sparx5, vid);
	if (ret)
		return ret;

	/* Default ingress vlan classification */
	if (pvid)
		port->pvid = vid;

	sparx5_vlan_port_apply(sparx5, port);

	return 0;
}

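/* Remove a port from a VLAN and clear any PVID or native VID that
 * referenced it, then apply the port configuration. VID 0 is kept so
 * untagged traffic can still be received.
 */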
int sparx5_vlan_vid_del(struct sparx5_port *port, u16 vid)
{
	struct sparx5 *sparx5 = port->sparx5;
	int ret;

	/* 8021q removes VID 0 on module unload for all interfaces
	 * with VLAN filtering feature. We need to keep it to receive
	 * untagged traffic.
	 */
	if (vid == 0)
		return 0;

	/* Stop the port from being a member of the vlan */
	clear_bit(port->portno, sparx5->vlan_mask[vid]);
	ret = sparx5_vlant_set_mask(sparx5, vid);
	if (ret)
		return ret;

	/* Ingress */
	if (port->pvid == vid)
		port->pvid = 0;

	/* Egress */
	if (port->vid == vid)
		port->vid = 0;

	sparx5_vlan_port_apply(sparx5, port);

	return 0;
}

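/* Set or clear a single port bit in a PGID entry. The port mask spans
 * three 32-bit registers, so the port number selects which register to
 * update.
 */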
void sparx5_pgid_update_mask(struct sparx5_port *port, int pgid, bool enable)
{
	struct sparx5 *sparx5 = port->sparx5;
	u32 val, mask;

	/* mask is spread across 3 registers x 32 bit */
	if (port->portno < 32) {
		mask = BIT(port->portno);
		val = enable ? mask : 0;
		spx5_rmw(val, mask, sparx5, ANA_AC_PGID_CFG(pgid));
	} else if (port->portno < 64) {
		mask = BIT(port->portno - 32);
		val = enable ? mask : 0;
		spx5_rmw(val, mask, sparx5, ANA_AC_PGID_CFG1(pgid));
	} else if (port->portno < SPX5_PORTS) {
		mask = BIT(port->portno - 64);
		val = enable ? mask : 0;
		spx5_rmw(val, mask, sparx5, ANA_AC_PGID_CFG2(pgid));
	} else {
		netdev_err(port->ndev, "Invalid port no: %d\n", port->portno);
	}
}

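/* Clear all port bits in a PGID entry */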
void sparx5_pgid_clear(struct sparx5 *spx5, int pgid)
{
	spx5_wr(0, spx5, ANA_AC_PGID_CFG(pgid));
	spx5_wr(0, spx5, ANA_AC_PGID_CFG1(pgid));
	spx5_wr(0, spx5, ANA_AC_PGID_CFG2(pgid));
}

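/* Read the full port mask of a PGID entry into three 32-bit words */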
void sparx5_pgid_read_mask(struct sparx5 *spx5, int pgid, u32 portmask[3])
{
	portmask[0] = spx5_rd(spx5, ANA_AC_PGID_CFG(pgid));
	portmask[1] = spx5_rd(spx5, ANA_AC_PGID_CFG1(pgid));
	portmask[2] = spx5_rd(spx5, ANA_AC_PGID_CFG2(pgid));
}

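/* Rebuild the forwarding state from the bridge port masks: flood PGIDs
 * follow the bridged ports, each source mask allows forwarding to all
 * bridged ports except the port itself, and automatic learning is
 * enabled only on bridged ports that also have learning enabled.
 */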
void sparx5_update_fwd(struct sparx5 *sparx5)
{
	DECLARE_BITMAP(workmask, SPX5_PORTS);
	u32 mask[3];
	int port;

	/* Divide up fwd mask in 32 bit words */
	bitmap_to_arr32(mask, sparx5->bridge_fwd_mask, SPX5_PORTS);

	/* Update flood masks */
	for (port = PGID_UC_FLOOD; port <= PGID_BCAST; port++) {
		spx5_wr(mask[0], sparx5, ANA_AC_PGID_CFG(port));
		spx5_wr(mask[1], sparx5, ANA_AC_PGID_CFG1(port));
		spx5_wr(mask[2], sparx5, ANA_AC_PGID_CFG2(port));
	}

	/* Update SRC masks */
	for (port = 0; port < SPX5_PORTS; port++) {
		if (test_bit(port, sparx5->bridge_fwd_mask)) {
			/* Allow to send to all bridged but self */
			bitmap_copy(workmask, sparx5->bridge_fwd_mask, SPX5_PORTS);
			clear_bit(port, workmask);
			bitmap_to_arr32(mask, workmask, SPX5_PORTS);
			spx5_wr(mask[0], sparx5, ANA_AC_SRC_CFG(port));
			spx5_wr(mask[1], sparx5, ANA_AC_SRC_CFG1(port));
			spx5_wr(mask[2], sparx5, ANA_AC_SRC_CFG2(port));
		} else {
			spx5_wr(0, sparx5, ANA_AC_SRC_CFG(port));
			spx5_wr(0, sparx5, ANA_AC_SRC_CFG1(port));
			spx5_wr(0, sparx5, ANA_AC_SRC_CFG2(port));
		}
	}

	/* Learning enabled only for bridged ports */
	bitmap_and(workmask, sparx5->bridge_fwd_mask,
		   sparx5->bridge_lrn_mask, SPX5_PORTS);
	bitmap_to_arr32(mask, workmask, SPX5_PORTS);

	/* Apply learning mask */
	spx5_wr(mask[0], sparx5, ANA_L2_AUTO_LRN_CFG);
	spx5_wr(mask[1], sparx5, ANA_L2_AUTO_LRN_CFG1);
	spx5_wr(mask[2], sparx5, ANA_L2_AUTO_LRN_CFG2);
}

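/* Push the current VLAN state of a port to hardware: ingress
 * classification (awareness, pop count and PVID), the tag-required
 * filter for VLAN-aware ports without a PVID, and the egress tagging
 * mode and port VID in the rewriter.
 */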
void sparx5_vlan_port_apply(struct sparx5 *sparx5,
			    struct sparx5_port *port)
{
	u32 val;

	/* Configure PVID, vlan aware */
	val = ANA_CL_VLAN_CTRL_VLAN_AWARE_ENA_SET(port->vlan_aware) |
	      ANA_CL_VLAN_CTRL_VLAN_POP_CNT_SET(port->vlan_aware) |
	      ANA_CL_VLAN_CTRL_PORT_VID_SET(port->pvid);
	spx5_wr(val, sparx5, ANA_CL_VLAN_CTRL(port->portno));

	val = 0;
	if (port->vlan_aware && !port->pvid)
		/* If port is vlan-aware and tagged, drop untagged and
		 * priority tagged frames.
		 */
		val = ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA_SET(1) |
		      ANA_CL_VLAN_FILTER_CTRL_PRIO_CTAG_DIS_SET(1) |
		      ANA_CL_VLAN_FILTER_CTRL_PRIO_STAG_DIS_SET(1);
	spx5_wr(val, sparx5,
		ANA_CL_VLAN_FILTER_CTRL(port->portno, 0));

	/* Egress configuration (REW_TAG_CFG): VLAN tag selected via IFH */
	val = REW_TAG_CTRL_TAG_TPID_CFG_SET(5);
	if (port->vlan_aware) {
		if (port->vid)
			/* Tag all frames except when VID == DEFAULT_VLAN */
			val |= REW_TAG_CTRL_TAG_CFG_SET(1);
		else
			val |= REW_TAG_CTRL_TAG_CFG_SET(3);
	}
	spx5_wr(val, sparx5, REW_TAG_CTRL(port->portno));

	/* Egress VID */
	spx5_rmw(REW_PORT_VLAN_CFG_PORT_VID_SET(port->vid),
		 REW_PORT_VLAN_CFG_PORT_VID,
		 sparx5,
		 REW_PORT_VLAN_CFG(port->portno));
}