1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/types.h>
7 #include <linux/pci.h>
8 #include <linux/netdevice.h>
9 #include <linux/etherdevice.h>
10 #include <linux/ethtool.h>
11 #include <linux/slab.h>
12 #include <linux/device.h>
13 #include <linux/skbuff.h>
14 #include <linux/if_vlan.h>
15 #include <linux/if_bridge.h>
16 #include <linux/workqueue.h>
17 #include <linux/jiffies.h>
18 #include <linux/bitops.h>
19 #include <linux/list.h>
20 #include <linux/notifier.h>
21 #include <linux/dcbnl.h>
22 #include <linux/inetdevice.h>
23 #include <linux/netlink.h>
24 #include <linux/jhash.h>
25 #include <linux/log2.h>
26 #include <linux/refcount.h>
27 #include <linux/rhashtable.h>
28 #include <net/switchdev.h>
29 #include <net/pkt_cls.h>
30 #include <net/netevent.h>
31 #include <net/addrconf.h>
32 #include <linux/ptp_classify.h>
33
34 #include "spectrum.h"
35 #include "pci.h"
36 #include "core.h"
37 #include "core_env.h"
38 #include "reg.h"
39 #include "port.h"
40 #include "trap.h"
41 #include "txheader.h"
42 #include "spectrum_cnt.h"
43 #include "spectrum_dpipe.h"
44 #include "spectrum_acl_flex_actions.h"
45 #include "spectrum_span.h"
46 #include "spectrum_ptp.h"
47 #include "spectrum_trap.h"
48
49 #define MLXSW_SP_FWREV_MINOR 2010
50 #define MLXSW_SP_FWREV_SUBMINOR 1006
51
52 #define MLXSW_SP1_FWREV_MAJOR 13
53 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
54
55 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
56 .major = MLXSW_SP1_FWREV_MAJOR,
57 .minor = MLXSW_SP_FWREV_MINOR,
58 .subminor = MLXSW_SP_FWREV_SUBMINOR,
59 .can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
60 };
61
62 #define MLXSW_SP1_FW_FILENAME \
63 "mellanox/mlxsw_spectrum-" __stringify(MLXSW_SP1_FWREV_MAJOR) \
64 "." __stringify(MLXSW_SP_FWREV_MINOR) \
65 "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"
66
67 #define MLXSW_SP2_FWREV_MAJOR 29
68
69 static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
70 .major = MLXSW_SP2_FWREV_MAJOR,
71 .minor = MLXSW_SP_FWREV_MINOR,
72 .subminor = MLXSW_SP_FWREV_SUBMINOR,
73 };
74
75 #define MLXSW_SP2_FW_FILENAME \
76 "mellanox/mlxsw_spectrum2-" __stringify(MLXSW_SP2_FWREV_MAJOR) \
77 "." __stringify(MLXSW_SP_FWREV_MINOR) \
78 "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"
79
80 #define MLXSW_SP3_FWREV_MAJOR 30
81
82 static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
83 .major = MLXSW_SP3_FWREV_MAJOR,
84 .minor = MLXSW_SP_FWREV_MINOR,
85 .subminor = MLXSW_SP_FWREV_SUBMINOR,
86 };
87
88 #define MLXSW_SP3_FW_FILENAME \
89 "mellanox/mlxsw_spectrum3-" __stringify(MLXSW_SP3_FWREV_MAJOR) \
90 "." __stringify(MLXSW_SP_FWREV_MINOR) \
91 "." __stringify(MLXSW_SP_FWREV_SUBMINOR) ".mfa2"
92
93 #define MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME \
94 "mellanox/lc_ini_bundle_" \
95 __stringify(MLXSW_SP_FWREV_MINOR) "_" \
96 __stringify(MLXSW_SP_FWREV_SUBMINOR) ".bin"
97
98 static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
99 static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
100 static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
101 static const char mlxsw_sp4_driver_name[] = "mlxsw_spectrum4";
102
103 static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
104 0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
105 };
106 static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
107 0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
108 };
109
110 /* tx_hdr_version
111 * Tx header version.
112 * Must be set to 1.
113 */
114 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
115
116 /* tx_hdr_ctl
117 * Packet control type.
118 * 0 - Ethernet control (e.g. EMADs, LACP)
119 * 1 - Ethernet data
120 */
121 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
122
123 /* tx_hdr_proto
124 * Packet protocol type. Must be set to 1 (Ethernet).
125 */
126 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
127
128 /* tx_hdr_rx_is_router
129 * Packet is sent from the router. Valid for data packets only.
130 */
131 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
132
133 /* tx_hdr_fid_valid
134 * Indicates if the 'fid' field is valid and should be used for
135 * forwarding lookup. Valid for data packets only.
136 */
137 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
138
139 /* tx_hdr_swid
140 * Switch partition ID. Must be set to 0.
141 */
142 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
143
144 /* tx_hdr_control_tclass
145 * Indicates if the packet should use the control TClass and not one
146 * of the data TClasses.
147 */
148 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
149
150 /* tx_hdr_etclass
151 * Egress TClass to be used on the egress device on the egress port.
152 */
153 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
154
155 /* tx_hdr_port_mid
156 * Destination local port for unicast packets.
157 * Destination multicast ID for multicast packets.
158 *
159 * Control packets are directed to a specific egress port, while data
160 * packets are transmitted through the CPU port (0) into the switch partition,
161 * where forwarding rules are applied.
162 */
163 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
164
165 /* tx_hdr_fid
166 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
167 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
168 * Valid for data packets only.
169 */
170 MLXSW_ITEM32(tx, hdr, fid, 0x08, 16, 16);
171
172 /* tx_hdr_type
173 * 0 - Data packets
174 * 6 - Control packets
175 */
176 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
177
/* Read the packet and byte counts of flow counter @counter_index via the
 * MGPC register. When @clear is set, the hardware counter is zeroed as part
 * of the same query. Either output pointer may be NULL when the caller is
 * not interested in that value. Returns 0 on success or a negative errno.
 */
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
			      unsigned int counter_index, bool clear,
			      u64 *packets, u64 *bytes)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];
	enum mlxsw_reg_mgpc_opcode opcode;
	int err;

	opcode = clear ? MLXSW_REG_MGPC_OPCODE_CLEAR :
			 MLXSW_REG_MGPC_OPCODE_NOP;
	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, opcode,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
	if (err)
		return err;

	if (packets)
		*packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
	if (bytes)
		*bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);

	return 0;
}
198
/* Zero flow counter @counter_index by writing the MGPC register with the
 * CLEAR opcode. Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
				       unsigned int counter_index)
{
	char mgpc_pl[MLXSW_REG_MGPC_LEN];

	mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
			    MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
}
208
mlxsw_sp_flow_counter_alloc(struct mlxsw_sp * mlxsw_sp,unsigned int * p_counter_index)209 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
210 unsigned int *p_counter_index)
211 {
212 int err;
213
214 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
215 p_counter_index);
216 if (err)
217 return err;
218 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
219 if (err)
220 goto err_counter_clear;
221 return 0;
222
223 err_counter_clear:
224 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
225 *p_counter_index);
226 return err;
227 }
228
/* Return flow counter @counter_index to the flow counter sub-pool. */
void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
				unsigned int counter_index)
{
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
			      counter_index);
}
235
/* Push a Tx header onto @skb and fill it for a control packet directed to
 * the egress port given in @tx_info. The caller must ensure there is
 * MLXSW_TXHDR_LEN bytes of headroom; the header is later consumed by the
 * device on transmission.
 */
void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
			      const struct mlxsw_tx_info *tx_info)
{
	char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);

	/* Start from an all-zero header; fields not set below stay 0. */
	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_swid_set(txhdr, 0);
	mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
	mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
251
252 int
mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core * mlxsw_core,struct mlxsw_sp_port * mlxsw_sp_port,struct sk_buff * skb,const struct mlxsw_tx_info * tx_info)253 mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
254 struct mlxsw_sp_port *mlxsw_sp_port,
255 struct sk_buff *skb,
256 const struct mlxsw_tx_info *tx_info)
257 {
258 char *txhdr;
259 u16 max_fid;
260 int err;
261
262 if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
263 err = -ENOMEM;
264 goto err_skb_cow_head;
265 }
266
267 if (!MLXSW_CORE_RES_VALID(mlxsw_core, FID)) {
268 err = -EIO;
269 goto err_res_valid;
270 }
271 max_fid = MLXSW_CORE_RES_GET(mlxsw_core, FID);
272
273 txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
274 memset(txhdr, 0, MLXSW_TXHDR_LEN);
275
276 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
277 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
278 mlxsw_tx_hdr_rx_is_router_set(txhdr, true);
279 mlxsw_tx_hdr_fid_valid_set(txhdr, true);
280 mlxsw_tx_hdr_fid_set(txhdr, max_fid + tx_info->local_port - 1);
281 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_DATA);
282 return 0;
283
284 err_res_valid:
285 err_skb_cow_head:
286 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
287 dev_kfree_skb_any(skb);
288 return err;
289 }
290
mlxsw_sp_skb_requires_ts(struct sk_buff * skb)291 static bool mlxsw_sp_skb_requires_ts(struct sk_buff *skb)
292 {
293 unsigned int type;
294
295 if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
296 return false;
297
298 type = ptp_classify_raw(skb);
299 return !!ptp_parse_header(skb, type);
300 }
301
/* Prepend the appropriate Tx header to @skb before transmission. PTP
 * packets that need a time stamp are diverted to the per-ASIC txhdr_construct
 * callback; everything else gets a regular control Tx header. On failure the
 * skb is consumed (dropped and freed). Returns 0 on success or -ENOMEM.
 */
static int mlxsw_sp_txhdr_handle(struct mlxsw_core *mlxsw_core,
				 struct mlxsw_sp_port *mlxsw_sp_port,
				 struct sk_buff *skb,
				 const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	/* In Spectrum-2 and Spectrum-3, PTP events that require a time stamp
	 * need special handling and cannot be transmitted as regular control
	 * packets.
	 */
	if (unlikely(mlxsw_sp_skb_requires_ts(skb)))
		return mlxsw_sp->ptp_ops->txhdr_construct(mlxsw_core,
							  mlxsw_sp_port, skb,
							  tx_info);

	/* Ensure writable headroom for the Tx header. */
	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}

	mlxsw_sp_txhdr_construct(skb, tx_info);
	return 0;
}
327
/* Translate a bridge STP port state (BR_STATE_*) to the matching SPMS
 * register state. Listening, disabled and blocking all map to discarding.
 * Any other value is a driver bug, hence BUG().
 */
enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
{
	switch (state) {
	case BR_STATE_FORWARDING:
		return MLXSW_REG_SPMS_STATE_FORWARDING;
	case BR_STATE_LEARNING:
		return MLXSW_REG_SPMS_STATE_LEARNING;
	case BR_STATE_LISTENING:
	case BR_STATE_DISABLED:
	case BR_STATE_BLOCKING:
		return MLXSW_REG_SPMS_STATE_DISCARDING;
	default:
		BUG();
	}
}
343
/* Set the STP state of VLAN @vid on @mlxsw_sp_port via the SPMS register.
 * The register payload is heap-allocated (NOTE(review): presumably
 * MLXSW_REG_SPMS_LEN is too large for the stack — confirm).
 * Returns 0 on success or a negative errno.
 */
int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			      u8 state)
{
	enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spms_pl;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
	mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);
	return err;
}
362
/* Query the SPAD register and cache the switch base MAC address in
 * mlxsw_sp->base_mac. Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
{
	char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
	int err;

	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
	if (err)
		return err;
	mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
	return 0;
}
374
/* Set the administrative (up/down) status of @mlxsw_sp_port via the PAOS
 * register. Returns 0 on success or a negative errno.
 */
int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
				   bool is_up)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char paos_pl[MLXSW_REG_PAOS_LEN];

	if (is_up)
		mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
				    MLXSW_PORT_ADMIN_STATUS_UP);
	else
		mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
				    MLXSW_PORT_ADMIN_STATUS_DOWN);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}
386
/* Program @addr as the port's hardware MAC address via the PPAD register.
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      const unsigned char *addr)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ppad_pl[MLXSW_REG_PPAD_LEN];

	mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
	mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
}
397
/* Derive the netdev MAC address from the switch base MAC and the port's
 * local port number, then program it to the hardware.
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	eth_hw_addr_gen(mlxsw_sp_port->dev, mlxsw_sp->base_mac,
			mlxsw_sp_port->local_port);
	return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port,
					  mlxsw_sp_port->dev->dev_addr);
}
407
/* Query the maximum MTU supported by the port. Packing PMTU with MTU 0
 * performs a query; the maximum is read back from the response.
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];
	int err;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
	if (err)
		return err;

	*p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
	return 0;
}
422
/* Program @mtu to the port via the PMTU register. The hardware MTU also
 * covers the Tx header and the Ethernet header, so both are added before
 * validating against the cached port maximum.
 * Returns 0 on success, -EINVAL if too large, or a negative errno.
 */
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pmtu_pl[MLXSW_REG_PMTU_LEN];

	mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
	if (mtu > mlxsw_sp_port->max_mtu)
		return -EINVAL;

	mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
435
/* Assign @local_port to switch partition @swid via the PSPA register.
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp,
				  u16 local_port, u8 swid)
{
	char pspa_pl[MLXSW_REG_PSPA_LEN];

	mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}
444
/* Enable or disable virtual-port mode on the port via the SVPE register.
 * Returns 0 on success or a negative errno.
 */
int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char svpe_pl[MLXSW_REG_SVPE_LEN];

	mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
}
453
/* Enable or disable MAC learning for VLAN @vid on the port via the SPVMLR
 * register (packed over the single-VID range @vid..@vid). The payload is
 * heap-allocated. Returns 0 on success or a negative errno.
 */
int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
				   bool learn_enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char *spvmlr_pl;
	int err;

	spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
	if (!spvmlr_pl)
		return -ENOMEM;
	mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
			      learn_enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
	kfree(spvmlr_pl);
	return err;
}
470
/* Enable or disable port security via the SPFSR register. The current
 * state is cached in mlxsw_sp_port->security, so a write is issued only on
 * an actual change. Returns 0 on success or a negative errno.
 */
int mlxsw_sp_port_security_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spfsr_pl[MLXSW_REG_SPFSR_LEN];
	int err;

	/* Nothing to do when the requested state is already programmed. */
	if (mlxsw_sp_port->security == enable)
		return 0;

	mlxsw_reg_spfsr_pack(spfsr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spfsr), spfsr_pl);
	if (err)
		return err;

	mlxsw_sp_port->security = enable;
	return 0;
}
488
/* Map a VLAN Ethertype to the 'sver' type expected by the SPEVET/SPVID
 * registers: 0 for 802.1Q (C-tag), 1 for 802.1AD (S-tag).
 * Returns 0 on success or -EINVAL for any other Ethertype.
 */
int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type)
{
	if (ethtype == ETH_P_8021Q) {
		*p_sver_type = 0;
		return 0;
	}

	if (ethtype == ETH_P_8021AD) {
		*p_sver_type = 1;
		return 0;
	}

	return -EINVAL;
}
504
/* Set the egress VLAN Ethertype of the port via the SPEVET register.
 * Only 802.1Q and 802.1AD are accepted (see
 * mlxsw_sp_ethtype_to_sver_type()). Returns 0 on success or a negative
 * errno.
 */
int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 ethtype)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spevet_pl[MLXSW_REG_SPEVET_LEN];
	u8 sver_type;
	int err;

	err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
	if (err)
		return err;

	mlxsw_reg_spevet_pack(spevet_pl, mlxsw_sp_port->local_port, sver_type);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spevet), spevet_pl);
}
520
/* Program PVID @vid with tagging Ethertype @ethtype on the port via the
 * SPVID register. Does not update the cached pvid; callers do that (see
 * mlxsw_sp_port_pvid_set()). Returns 0 on success or a negative errno.
 */
static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid, u16 ethtype)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvid_pl[MLXSW_REG_SPVID_LEN];
	u8 sver_type;
	int err;

	err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
	if (err)
		return err;

	mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid,
			     sver_type);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}
538
/* Allow or disallow untagged frames on the port via the SPAFT register.
 * Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    bool allow)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spaft_pl[MLXSW_REG_SPAFT_LEN];

	mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
}
548
/* Set the port's PVID and cache it in mlxsw_sp_port->pvid. A zero @vid
 * means "no PVID": untagged frames are disallowed instead of programming a
 * new SPVID. If allowing untagged frames fails after the SPVID write, the
 * previous PVID is restored. Returns 0 on success or a negative errno.
 */
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			   u16 ethtype)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
		mlxsw_sp_port->pvid = 0;
		return 0;
	}

	err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid, ethtype);
	if (err)
		return err;

	err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
	if (err) {
		/* Roll back to the previously cached PVID; best effort. */
		__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid,
					 ethtype);
		return err;
	}

	mlxsw_sp_port->pvid = vid;
	return 0;
}
574
/* Create the system-port to local-port mapping via the SSPR register.
 * Returns 0 on success or a negative errno.
 */
static int
mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char sspr_pl[MLXSW_REG_SSPR_LEN];

	mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
}
584
/* Parse a PMLP register payload into @port_mapping, validating that the
 * lane layout is one the driver supports: a power-of-two width, a single
 * module and slot index across all lanes, matching Rx/Tx lanes when they
 * are programmed separately, and sequential lane numbers starting from the
 * first lane. Returns 0 on success or -EINVAL with a logged reason.
 */
static int
mlxsw_sp_port_module_info_parse(struct mlxsw_sp *mlxsw_sp,
				u16 local_port, char *pmlp_pl,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	bool separate_rxtx;
	u8 first_lane;
	u8 slot_index;
	u8 module;
	u8 width;
	int i;

	/* Lane 0 establishes the reference values the rest must match. */
	module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	slot_index = mlxsw_reg_pmlp_slot_index_get(pmlp_pl, 0);
	width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);
	first_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);

	if (width && !is_power_of_2(width)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
			local_port);
		return -EINVAL;
	}

	for (i = 0; i < width; i++) {
		if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_slot_index_get(pmlp_pl, i) != slot_index) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple slot indexes\n",
				local_port);
			return -EINVAL;
		}
		if (separate_rxtx &&
		    mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
		    mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i + first_lane) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
				local_port);
			return -EINVAL;
		}
	}

	port_mapping->module = module;
	port_mapping->slot_index = slot_index;
	port_mapping->width = width;
	port_mapping->module_width = width;
	port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}
641
/* Query the PMLP register for @local_port and parse the result into
 * @port_mapping. Returns 0 on success or a negative errno.
 */
static int
mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
			      struct mlxsw_sp_port_mapping *port_mapping)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int err;

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		return err;
	return mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
					       pmlp_pl, port_mapping);
}
656
657 static int
mlxsw_sp_port_module_map(struct mlxsw_sp * mlxsw_sp,u16 local_port,const struct mlxsw_sp_port_mapping * port_mapping)658 mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port,
659 const struct mlxsw_sp_port_mapping *port_mapping)
660 {
661 char pmlp_pl[MLXSW_REG_PMLP_LEN];
662 int i, err;
663
664 mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->slot_index,
665 port_mapping->module);
666
667 mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
668 mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
669 for (i = 0; i < port_mapping->width; i++) {
670 mlxsw_reg_pmlp_slot_index_set(pmlp_pl, i,
671 port_mapping->slot_index);
672 mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
673 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
674 }
675
676 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
677 if (err)
678 goto err_pmlp_write;
679 return 0;
680
681 err_pmlp_write:
682 mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->slot_index,
683 port_mapping->module);
684 return err;
685 }
686
/* Undo mlxsw_sp_port_module_map(): clear the PMLP lane layout by writing
 * width 0, then release the mapping in the environment layer. The PMLP
 * write status is intentionally ignored (best-effort teardown).
 */
static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				       u8 slot_index, u8 module)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	mlxsw_env_module_port_unmap(mlxsw_sp->core, slot_index, module);
}
697
mlxsw_sp_port_open(struct net_device * dev)698 static int mlxsw_sp_port_open(struct net_device *dev)
699 {
700 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
701 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
702 int err;
703
704 err = mlxsw_env_module_port_up(mlxsw_sp->core,
705 mlxsw_sp_port->mapping.slot_index,
706 mlxsw_sp_port->mapping.module);
707 if (err)
708 return err;
709 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
710 if (err)
711 goto err_port_admin_status_set;
712 netif_start_queue(dev);
713 return 0;
714
715 err_port_admin_status_set:
716 mlxsw_env_module_port_down(mlxsw_sp->core,
717 mlxsw_sp_port->mapping.slot_index,
718 mlxsw_sp_port->mapping.module);
719 return err;
720 }
721
/* ndo_stop handler: stop the Tx queue, set the port administratively down
 * and release the module reference taken in mlxsw_sp_port_open(). Errors
 * from the admin-status write are ignored on teardown. Always returns 0.
 */
static int mlxsw_sp_port_stop(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	netif_stop_queue(dev);
	mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	mlxsw_env_module_port_down(mlxsw_sp->core,
				   mlxsw_sp_port->mapping.slot_index,
				   mlxsw_sp_port->mapping.module);
	return 0;
}
734
/* ndo_start_xmit handler: pad the frame to minimum length, prepend the Tx
 * header and hand the skb to the core for transmission, updating per-CPU
 * stats. The skb is consumed on every path except NETDEV_TX_BUSY.
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

	/* The stack will retry the skb later; do not consume it. */
	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	/* eth_skb_pad() frees the skb on failure. */
	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	/* On failure the skb was already dropped and freed by the helper. */
	err = mlxsw_sp_txhdr_handle(mlxsw_sp->core, mlxsw_sp_port, skb,
				    &tx_info);
	if (err)
		return NETDEV_TX_OK;

	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
785
/* ndo_set_rx_mode handler: intentionally a no-op for this device. */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
789
/* ndo_set_mac_address handler: validate the requested address, program it
 * to the hardware first and only then update the netdev copy, so the two
 * never disagree on failure. Returns 0 on success or a negative errno.
 */
static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
	if (err)
		return err;
	eth_hw_addr_set(dev, addr->sa_data);
	return 0;
}
805
/* ndo_change_mtu handler: resize the port headroom buffers for the new
 * MTU, then program the MTU itself. If the MTU write fails, the original
 * headroom configuration is restored. Returns 0 on success or a negative
 * errno.
 */
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_hdroom orig_hdroom;
	struct mlxsw_sp_hdroom hdroom;
	int err;

	/* Keep a copy for rollback on failure. */
	orig_hdroom = *mlxsw_sp_port->hdroom;

	hdroom = orig_hdroom;
	hdroom.mtu = mtu;
	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

	err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	WRITE_ONCE(dev->mtu, mtu);
	return 0;

err_port_mtu_set:
	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
	return err;
}
835
/* Sum the per-CPU software counters of the port into @stats, using the
 * u64_stats seqcount to get a consistent snapshot of each CPU's values.
 * Always returns 0.
 */
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped += p->tx_dropped;
	}
	stats->tx_dropped = tx_dropped;
	return 0;
}
867
mlxsw_sp_port_has_offload_stats(const struct net_device * dev,int attr_id)868 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
869 {
870 switch (attr_id) {
871 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
872 return true;
873 }
874
875 return false;
876 }
877
/* ndo_get_offload_stats handler: CPU-hit statistics come from the software
 * counters. Returns 0 on success or -EINVAL for unsupported attributes.
 */
static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
					   void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlxsw_sp_port_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}
888
/* Query one PPCNT counter group @grp (at priority/TC @prio) for the port
 * and leave the raw register payload in @ppcnt_pl for the caller to decode.
 * Returns 0 on success or a negative errno.
 */
int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}
898
mlxsw_sp_port_get_hw_stats(struct net_device * dev,struct rtnl_link_stats64 * stats)899 static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
900 struct rtnl_link_stats64 *stats)
901 {
902 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
903 int err;
904
905 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
906 0, ppcnt_pl);
907 if (err)
908 goto out;
909
910 stats->tx_packets =
911 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
912 stats->rx_packets =
913 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
914 stats->tx_bytes =
915 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
916 stats->rx_bytes =
917 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
918 stats->multicast =
919 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
920
921 stats->rx_crc_errors =
922 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
923 stats->rx_frame_errors =
924 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
925
926 stats->rx_length_errors = (
927 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
928 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
929 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
930
931 stats->rx_errors = (stats->rx_crc_errors +
932 stats->rx_frame_errors + stats->rx_length_errors);
933
934 out:
935 return err;
936 }
937
/* Fill @xstats from several PPCNT counter groups: extended (ECN), per-TC
 * congestion, per-TC queue/drop, and per-priority counters. Each query is
 * best-effort: on failure that group's fields are simply left untouched.
 * Note the unusual flow around the tc_cnt label — a failed TC-congestion
 * query still proceeds to the TC counters of the same index.
 */
static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_CNT,
						  i, ppcnt_pl);
		if (err)
			goto tc_cnt;

		xstats->wred_drop[i] =
			mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
		xstats->tc_ecn[i] = mlxsw_reg_ppcnt_ecn_marked_tc_get(ppcnt_pl);

tc_cnt:
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}
983
update_stats_cache(struct work_struct * work)984 static void update_stats_cache(struct work_struct *work)
985 {
986 struct mlxsw_sp_port *mlxsw_sp_port =
987 container_of(work, struct mlxsw_sp_port,
988 periodic_hw_stats.update_dw.work);
989
990 if (!netif_carrier_ok(mlxsw_sp_port->dev))
991 /* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
992 * necessary when port goes down.
993 */
994 goto out;
995
996 mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
997 &mlxsw_sp_port->periodic_hw_stats.stats);
998 mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
999 &mlxsw_sp_port->periodic_hw_stats.xstats);
1000
1001 out:
1002 mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
1003 MLXSW_HW_STATS_UPDATE_TIME);
1004 }
1005
1006 /* Return the stats from a cache that is updated periodically,
1007 * as this function might get called in an atomic context.
1008 */
1009 static void
mlxsw_sp_port_get_stats64(struct net_device * dev,struct rtnl_link_stats64 * stats)1010 mlxsw_sp_port_get_stats64(struct net_device *dev,
1011 struct rtnl_link_stats64 *stats)
1012 {
1013 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1014
1015 memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
1016 }
1017
/* Program membership (and untagged state) of a contiguous VID range on
 * the port via a single SPVM write.
 */
static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    u16 vid_begin, u16 vid_end,
				    bool is_member, bool untagged)
{
	char *spvm_pl;
	int err;

	/* SPVM is too large for the stack; allocate it dynamically. */
	spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
	if (!spvm_pl)
		return -ENOMEM;

	mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
			    vid_end, is_member, untagged);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(spvm),
			      spvm_pl);
	kfree(spvm_pl);
	return err;
}
1036
/* Program membership of [vid_begin, vid_end] on the port, walking the
 * range in chunks of at most MLXSW_REG_SPVM_REC_MAX_COUNT records per
 * SPVM write.
 */
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
			   u16 vid_end, bool is_member, bool untagged)
{
	u16 first = vid_begin;

	while (first <= vid_end) {
		u16 last = min((u16) (first + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
			       vid_end);
		int err;

		err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, first, last,
					       is_member, untagged);
		if (err)
			return err;

		first = last + 1;
	}

	return 0;
}
1056
/* Destroy all {port, VID} entries of the port. The default VID's entry
 * is kept unless @flush_default is set. Uses the _safe iterator since
 * entries are unlinked while walking the list.
 */
static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
				     bool flush_default)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;

	list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
				 &mlxsw_sp_port->vlans_list, list) {
		if (!flush_default &&
		    mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
			continue;
		mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	}
}
1070
1071 static void
mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan * mlxsw_sp_port_vlan)1072 mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1073 {
1074 if (mlxsw_sp_port_vlan->bridge_port)
1075 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
1076 else if (mlxsw_sp_port_vlan->fid)
1077 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
1078 }
1079
/* Create a {port, VID} entry: program VID membership in the device,
 * then allocate and link the tracking structure. The VID is installed
 * untagged only when it is the default VID.
 *
 * Returns the new entry, ERR_PTR(-EEXIST) if the VID already has an
 * entry on this port, or another ERR_PTR on failure. On allocation
 * failure the hardware membership programmed earlier is rolled back.
 */
struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	bool untagged = vid == MLXSW_SP_DEFAULT_VID;
	int err;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		return ERR_PTR(-EEXIST);

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
	if (err)
		return ERR_PTR(err);

	mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
	if (!mlxsw_sp_port_vlan) {
		err = -ENOMEM;
		goto err_port_vlan_alloc;
	}

	mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
	mlxsw_sp_port_vlan->vid = vid;
	list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);

	return mlxsw_sp_port_vlan;

err_port_vlan_alloc:
	/* Undo the hardware VID membership programmed above. */
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	return ERR_PTR(err);
}
1111
/* Destroy a {port, VID} entry: detach it from any bridge/router
 * construct first, then unlink and free it, and finally remove the VID
 * membership from the device. The VID is cached up front since the
 * structure is freed before the final hardware write.
 */
void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}
1122
/* ndo_vlan_rx_add_vid handler. */
static int mlxsw_sp_port_add_vid(struct net_device *dev,
				 __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is added to HW filter when device goes up, but it is
	 * reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
	return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan);
}
1136
/* ndo_vlan_rx_kill_vid handler. Destroying a non-existent VID entry is
 * not an error.
 */
int mlxsw_sp_port_kill_vid(struct net_device *dev,
			   __be16 __always_unused proto, u16 vid)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	/* VLAN 0 is removed from HW filter when device goes down, but
	 * it is reserved in our case, so simply return.
	 */
	if (!vid)
		return 0;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan)
		mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);

	return 0;
}
1156
/* Dispatch a flow block bind/unbind request to the handler matching its
 * binder type: clsact ingress/egress, or RED qevents (early_drop/mark).
 */
static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
				   struct flow_block_offload *f)
{
	switch (f->binder_type) {
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
		return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
	case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
		return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
	case FLOW_BLOCK_BINDER_TYPE_RED_MARK:
		return mlxsw_sp_setup_tc_block_qevent_mark(mlxsw_sp_port, f);
	default:
		return -EOPNOTSUPP;
	}
}
1173
/* ndo_setup_tc handler. Dispatch tc offload setup to the per-type
 * handler; unsupported types return -EOPNOTSUPP.
 */
static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_RED:
		return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_PRIO:
		return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_ETS:
		return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_TBF:
		return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
	case TC_SETUP_QDISC_FIFO:
		return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
1196
/* Toggle the NETIF_F_HW_TC feature. Disabling is refused while
 * offloaded tc filters are installed; otherwise a disable reference is
 * taken (or dropped, on enable) on both flow blocks.
 */
static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	if (enable) {
		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
		mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
		return 0;
	}

	if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
	    mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
		netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
		return -EINVAL;
	}

	mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
	mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
	return 0;
}
1215
/* Toggle physical loopback via the PPLR register. If the netdev is
 * running, the port is administratively taken down around the register
 * write and brought back up afterwards regardless of the write result.
 */
static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	char pplr_pl[MLXSW_REG_PPLR_LEN];
	int err;

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}
1234
1235 typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
1236
/* Apply a single feature toggle: if @feature differs between
 * @wanted_features and dev->features, invoke @feature_handler and, on
 * success, commit the new state to dev->features.
 */
static int mlxsw_sp_handle_feature(struct net_device *dev,
				   netdev_features_t wanted_features,
				   netdev_features_t feature,
				   mlxsw_sp_feature_handler feature_handler)
{
	bool enable = !!(wanted_features & feature);
	int err;

	/* Nothing to do when the feature bit is not changing. */
	if (!((wanted_features ^ dev->features) & feature))
		return 0;

	err = feature_handler(dev, enable);
	if (err) {
		netdev_err(dev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	if (enable)
		dev->features |= feature;
	else
		dev->features &= ~feature;

	return 0;
}
/* ndo_set_features handler. Apply each supported feature toggle.
 * Individual error codes are OR-ed together only as a failure flag; on
 * any failure dev->features is restored to its previous value and a
 * plain -EINVAL is returned (the combined value is not a valid errno).
 */
static int mlxsw_sp_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	netdev_features_t oper_features = dev->features;
	int err = 0;

	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
				       mlxsw_sp_feature_hw_tc);
	err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
				       mlxsw_sp_feature_loopback);

	if (err) {
		dev->features = oper_features;
		return -EINVAL;
	}

	return 0;
}
1281
/* SIOCSHWTSTAMP: copy the hwtstamp_config from user space, apply it via
 * the per-ASIC ptp_ops, and copy the (possibly adjusted) config back so
 * user space sees the effective settings.
 */
static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}
1301
/* SIOCGHWTSTAMP: fetch the current hwtstamp_config via the per-ASIC
 * ptp_ops and copy it to user space.
 */
static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}
1318
/* Reset the port's PTP timestamping configuration to "off" (all-zero
 * hwtstamp_config). The return value is deliberately ignored; this is
 * best-effort cleanup.
 */
static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct hwtstamp_config config = {0};

	mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
}
1325
/* ndo_eth_ioctl handler. Only the hardware timestamping ioctls are
 * supported.
 */
static int
mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
	case SIOCGHWTSTAMP:
		return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
	default:
		return -EOPNOTSUPP;
	}
}
1340
/* netdev operations for a Spectrum front-panel port. */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc           = mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_set_features	= mlxsw_sp_set_features,
	.ndo_eth_ioctl		= mlxsw_sp_port_ioctl,
};
1357
/* Advertise the intersection of driver- and device-supported speeds:
 * query PTYS for the device's Ethernet protocol capabilities, mask them
 * with the driver-supported set and write them back as the admin
 * (advertised) protocols, honoring the port's autoneg setting.
 */
static int
mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
	const struct mlxsw_sp_port_type_speed_ops *ops;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_cap_masked;
	int err;

	ops = mlxsw_sp->port_type_speed_ops;

	/* Set advertised speeds to speeds supported by both the driver
	 * and the device.
	 */
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       0, false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;

	ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
				 &eth_proto_admin, &eth_proto_oper);
	eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap);
	ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
			       eth_proto_cap_masked,
			       mlxsw_sp_port->link.autoneg);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
1387
/* Return the port's current operational speed in @speed, derived from
 * the operational protocol bits reported by PTYS.
 */
int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
{
	const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char ptys_pl[MLXSW_REG_PTYS_LEN];
	u32 eth_proto_oper;
	int err;

	port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
	port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
					       mlxsw_sp_port->local_port, 0,
					       false);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
	if (err)
		return err;
	port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
						 &eth_proto_oper);
	*speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
	return 0;
}
1408
/* Configure an ETS element via QEEC: link element @index at hierarchy
 * level @hr to parent @next_index and set its DWRR arbitration mode and
 * weight.
 */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(qeec),
			       qeec_pl);
}
1423
/* Configure the maximum shaper of an ETS element via QEEC: rate
 * @maxrate with burst size @burst_size on element @index at hierarchy
 * level @hr (parent @next_index).
 */
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate, u8 burst_size)
{
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(qeec),
			       qeec_pl);
}
1438
/* Configure the minimum shaper (guaranteed bandwidth) of an ETS element
 * via QEEC.
 */
static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    enum mlxsw_reg_qeec_hr hr, u8 index,
				    u8 next_index, u32 minrate)
{
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mise_set(qeec_pl, true);
	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);
	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(qeec),
			       qeec_pl);
}
1453
/* Map switch priority @switch_prio to traffic class @tclass on the port
 * via the QTCT register.
 */
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(qtct),
			       qtct_pl);
}
1464
/* Program the port's default ETS configuration: build the scheduling
 * hierarchy (port -> group -> subgroups -> TCs, with a second set of
 * TCs at i + 8 for multicast), disable all max shapers, give multicast
 * TCs a minimum shaper, and map every priority to TC 0.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all member in the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC, i, i,
					    false, 0);
		if (err)
			return err;

		/* TCs 8..15 (i + 8) are the multicast companions of TCs
		 * 0..7, attached to the same subgroup with DWRR enabled.
		 */
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that support
	 * it. Note that this disables ptps (PTP shaper), but that is intended
	 * for the initial configuration.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}

	/* Configure the min shaper for multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HR_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
1550
/* Enable or disable multicast-aware traffic class mode on the port via
 * the QTCTM register.
 */
static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
					bool enable)
{
	char qtctm_pl[MLXSW_REG_QTCTM_LEN];

	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(qtctm),
			       qtctm_pl);
}
1560
mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port * mlxsw_sp_port)1561 static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
1562 {
1563 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1564 u8 slot_index = mlxsw_sp_port->mapping.slot_index;
1565 u8 module = mlxsw_sp_port->mapping.module;
1566 u64 overheat_counter;
1567 int err;
1568
1569 err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, slot_index,
1570 module, &overheat_counter);
1571 if (err)
1572 return err;
1573
1574 mlxsw_sp_port->module_overheat_initial_val = overheat_counter;
1575 return 0;
1576 }
1577
1578 int
mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port * mlxsw_sp_port,bool is_8021ad_tagged,bool is_8021q_tagged)1579 mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
1580 bool is_8021ad_tagged,
1581 bool is_8021q_tagged)
1582 {
1583 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1584 char spvc_pl[MLXSW_REG_SPVC_LEN];
1585
1586 mlxsw_reg_spvc_pack(spvc_pl, mlxsw_sp_port->local_port,
1587 is_8021ad_tagged, is_8021q_tagged);
1588 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);
1589 }
1590
/* Query the front-panel label of @local_port via the PLLP register:
 * port number, split sub-port number and line-card slot index.
 */
static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp,
					u16 local_port, u8 *port_number,
					u8 *split_port_subnumber,
					u8 *slot_index)
{
	char pllp_pl[MLXSW_REG_PLLP_LEN];
	int err;

	mlxsw_reg_pllp_pack(pllp_pl, local_port);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pllp), pllp_pl);
	if (err)
		return err;

	mlxsw_reg_pllp_unpack(pllp_pl, port_number, split_port_subnumber,
			      slot_index);
	return 0;
}
1607
/* Create and register a netdev for @local_port.
 *
 * The sequence: map the port to its module, set the SWID, init core
 * port state, allocate the netdev, program default speeds/MTU/QoS/VLAN
 * configuration, create the default VLAN entry, register the netdev and
 * start the periodic stats work. On failure, completed steps are
 * unwound in reverse order via the error labels.
 */
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				bool split,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u32 lanes = port_mapping->width;
	u8 split_port_subnumber;
	struct net_device *dev;
	u8 port_number;
	u8 slot_index;
	bool splittable;
	int err;

	err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, port_mapping);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			local_port);
		return err;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_label_info_get(mlxsw_sp, local_port, &port_number,
					   &split_port_subnumber, &slot_index);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get port label information\n",
			local_port);
		goto err_port_label_info_get;
	}

	/* A port can be split further only if it uses more than one lane
	 * and is not already a split port.
	 */
	splittable = lanes > 1 && !split;
	err = mlxsw_core_port_init(mlxsw_sp->core, local_port, slot_index,
				   port_number, split, split_port_subnumber,
				   splittable, lanes, mlxsw_sp->base_mac,
				   sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		goto err_core_port_init;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_core_port_netdev_link(mlxsw_sp->core, local_port,
				    mlxsw_sp_port, dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping = *port_mapping;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	/* Periodic refresh of the HW stats cache; first run is scheduled
	 * after register_netdev() below.
	 */
	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port,
							    &mlxsw_sp_port->max_speed);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n",
			mlxsw_sp_port->local_port);
		goto err_max_speed_get;
	}

	err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_max_mtu_get;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	/* Start with a clean slate: remove the port from all VLANs. */
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
				     false);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
			mlxsw_sp_port->local_port);
		goto err_port_vlan_clear;
	}

	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
			mlxsw_sp_port->local_port);
		goto err_port_nve_init;
	}

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
				     ETH_P_8021Q);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_set;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
						       MLXSW_SP_DEFAULT_VID);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_create;
	}
	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;

	/* Set SPVC.et0=true and SPVC.et1=false to make the local port to treat
	 * only packets with 802.1q header as tagged packets.
	 */
	err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set default VLAN classification\n",
			local_port);
		goto err_port_vlan_classification_set;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
			  mlxsw_sp->ptp_ops->shaper_work);

	mlxsw_sp->ports[local_port] = mlxsw_sp_port;

	err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n",
			mlxsw_sp_port->local_port);
		goto err_port_overheat_init_val_set;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	/* Kick off the periodic stats cache refresh immediately. */
	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

err_register_netdev:
err_port_overheat_init_val_set:
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
err_port_vlan_classification_set:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
err_port_pvid_set:
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
err_port_vlan_clear:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_max_mtu_get:
err_max_speed_get:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
err_core_port_init:
err_port_label_info_get:
	mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
			       MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port,
				   port_mapping->slot_index,
				   port_mapping->module);
	return err;
}
1900
/* Tear down a front-panel port, reversing mlxsw_sp_port_create() step by
 * step. The port must currently be registered in mlxsw_sp->ports[].
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	/* Cache the module mapping now; mlxsw_sp_port is freed (via
	 * free_netdev()) before the final module unmap below.
	 */
	u8 slot_index = mlxsw_sp_port->mapping.slot_index;
	u8 module = mlxsw_sp_port->mapping.module;

	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	/* All port VLANs should have been destroyed by the flush above. */
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
			       MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, slot_index, module);
}
1928
mlxsw_sp_cpu_port_create(struct mlxsw_sp * mlxsw_sp)1929 static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
1930 {
1931 struct mlxsw_sp_port *mlxsw_sp_port;
1932 int err;
1933
1934 mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
1935 if (!mlxsw_sp_port)
1936 return -ENOMEM;
1937
1938 mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1939 mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;
1940
1941 err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
1942 mlxsw_sp_port,
1943 mlxsw_sp->base_mac,
1944 sizeof(mlxsw_sp->base_mac));
1945 if (err) {
1946 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
1947 goto err_core_cpu_port_init;
1948 }
1949
1950 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
1951 return 0;
1952
1953 err_core_cpu_port_init:
1954 kfree(mlxsw_sp_port);
1955 return err;
1956 }
1957
mlxsw_sp_cpu_port_remove(struct mlxsw_sp * mlxsw_sp)1958 static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
1959 {
1960 struct mlxsw_sp_port *mlxsw_sp_port =
1961 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];
1962
1963 mlxsw_core_cpu_port_fini(mlxsw_sp->core);
1964 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
1965 kfree(mlxsw_sp_port);
1966 }
1967
mlxsw_sp_local_port_valid(u16 local_port)1968 static bool mlxsw_sp_local_port_valid(u16 local_port)
1969 {
1970 return local_port != MLXSW_PORT_CPU_PORT;
1971 }
1972
mlxsw_sp_port_created(struct mlxsw_sp * mlxsw_sp,u16 local_port)1973 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port)
1974 {
1975 if (!mlxsw_sp_local_port_valid(local_port))
1976 return false;
1977 return mlxsw_sp->ports[local_port] != NULL;
1978 }
1979
/* Enable or disable generation of PMLPE port mapping change events for
 * one local port via the PMECR register.
 */
static int mlxsw_sp_port_mapping_event_set(struct mlxsw_sp *mlxsw_sp,
					   u16 local_port, bool enable)
{
	char pmecr_pl[MLXSW_REG_PMECR_LEN];

	if (enable)
		mlxsw_reg_pmecr_pack(pmecr_pl, local_port,
				     MLXSW_REG_PMECR_E_GENERATE_EVENT);
	else
		mlxsw_reg_pmecr_pack(pmecr_pl, local_port,
				     MLXSW_REG_PMECR_E_DO_NOT_GENERATE_EVENT);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmecr), pmecr_pl);
}
1990
/* One queued PMLP port mapping change event, created in atomic context by
 * the trap handler and consumed by the events work item.
 */
struct mlxsw_sp_port_mapping_event {
	struct list_head list;	/* Node in port_mapping_events::queue. */
	char pmlp_pl[MLXSW_REG_PMLP_LEN];	/* Copy of the PMLP payload. */
};
1995
/* Process queued port mapping change events in process context. For each
 * event, parse the new module mapping and create the corresponding port;
 * an event for an already-created port is unexpected and only warns.
 */
static void mlxsw_sp_port_mapping_events_work(struct work_struct *work)
{
	struct mlxsw_sp_port_mapping_event *event, *next_event;
	struct mlxsw_sp_port_mapping_events *events;
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp *mlxsw_sp;
	struct devlink *devlink;
	LIST_HEAD(event_queue);
	u16 local_port;
	int err;

	events = container_of(work, struct mlxsw_sp_port_mapping_events, work);
	mlxsw_sp = container_of(events, struct mlxsw_sp, port_mapping_events);
	devlink = priv_to_devlink(mlxsw_sp->core);

	/* Splice the whole queue onto a local list so the spinlock is held
	 * only briefly; the trap handler may still append new events.
	 */
	spin_lock_bh(&events->queue_lock);
	list_splice_init(&events->queue, &event_queue);
	spin_unlock_bh(&events->queue_lock);

	list_for_each_entry_safe(event, next_event, &event_queue, list) {
		local_port = mlxsw_reg_pmlp_local_port_get(event->pmlp_pl);
		err = mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
						      event->pmlp_pl, &port_mapping);
		if (err)
			goto out;

		if (WARN_ON_ONCE(!port_mapping.width))
			goto out;

		devl_lock(devlink);

		if (!mlxsw_sp_port_created(mlxsw_sp, local_port))
			mlxsw_sp_port_create(mlxsw_sp, local_port,
					     false, &port_mapping);
		else
			WARN_ON_ONCE(1);

		devl_unlock(devlink);

		mlxsw_sp->port_mapping[local_port] = port_mapping;

out:
		/* The label acts as a per-iteration "continue"; the event
		 * is freed whether or not it was processed successfully.
		 */
		kfree(event);
	}
}
2041
2042 static void
mlxsw_sp_port_mapping_listener_func(const struct mlxsw_reg_info * reg,char * pmlp_pl,void * priv)2043 mlxsw_sp_port_mapping_listener_func(const struct mlxsw_reg_info *reg,
2044 char *pmlp_pl, void *priv)
2045 {
2046 struct mlxsw_sp_port_mapping_events *events;
2047 struct mlxsw_sp_port_mapping_event *event;
2048 struct mlxsw_sp *mlxsw_sp = priv;
2049 u16 local_port;
2050
2051 local_port = mlxsw_reg_pmlp_local_port_get(pmlp_pl);
2052 if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
2053 return;
2054
2055 events = &mlxsw_sp->port_mapping_events;
2056 event = kmalloc(sizeof(*event), GFP_ATOMIC);
2057 if (!event)
2058 return;
2059 memcpy(event->pmlp_pl, pmlp_pl, sizeof(event->pmlp_pl));
2060 spin_lock(&events->queue_lock);
2061 list_add_tail(&event->list, &events->queue);
2062 spin_unlock(&events->queue_lock);
2063 mlxsw_core_schedule_work(&events->work);
2064 }
2065
2066 static void
__mlxsw_sp_port_mapping_events_cancel(struct mlxsw_sp * mlxsw_sp)2067 __mlxsw_sp_port_mapping_events_cancel(struct mlxsw_sp *mlxsw_sp)
2068 {
2069 struct mlxsw_sp_port_mapping_event *event, *next_event;
2070 struct mlxsw_sp_port_mapping_events *events;
2071
2072 events = &mlxsw_sp->port_mapping_events;
2073
2074 /* Caller needs to make sure that no new event is going to appear. */
2075 cancel_work_sync(&events->work);
2076 list_for_each_entry_safe(event, next_event, &events->queue, list) {
2077 list_del(&event->list);
2078 kfree(event);
2079 }
2080 }
2081
mlxsw_sp_ports_remove(struct mlxsw_sp * mlxsw_sp)2082 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
2083 {
2084 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
2085 int i;
2086
2087 for (i = 1; i < max_ports; i++)
2088 mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
2089 /* Make sure all scheduled events are processed */
2090 __mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
2091
2092 for (i = 1; i < max_ports; i++)
2093 if (mlxsw_sp_port_created(mlxsw_sp, i))
2094 mlxsw_sp_port_remove(mlxsw_sp, i);
2095 mlxsw_sp_cpu_port_remove(mlxsw_sp);
2096 kfree(mlxsw_sp->ports);
2097 mlxsw_sp->ports = NULL;
2098 }
2099
2100 static void
mlxsw_sp_ports_remove_selected(struct mlxsw_core * mlxsw_core,bool (* selector)(void * priv,u16 local_port),void * priv)2101 mlxsw_sp_ports_remove_selected(struct mlxsw_core *mlxsw_core,
2102 bool (*selector)(void *priv, u16 local_port),
2103 void *priv)
2104 {
2105 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2106 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_core);
2107 int i;
2108
2109 for (i = 1; i < max_ports; i++)
2110 if (mlxsw_sp_port_created(mlxsw_sp, i) && selector(priv, i))
2111 mlxsw_sp_port_remove(mlxsw_sp, i);
2112 }
2113
/* Create the CPU port and every front-panel port that currently has a
 * module mapped, after enabling port mapping change events for all ports.
 */
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping_events *events;
	struct mlxsw_sp_port_mapping *port_mapping;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	events = &mlxsw_sp->port_mapping_events;
	INIT_LIST_HEAD(&events->queue);
	spin_lock_init(&events->queue_lock);
	INIT_WORK(&events->work, mlxsw_sp_port_mapping_events_work);

	/* Index 0 is the CPU port; events apply to front-panel ports only. */
	for (i = 1; i < max_ports; i++) {
		err = mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, true);
		if (err)
			goto err_event_enable;
	}

	err = mlxsw_sp_cpu_port_create(mlxsw_sp);
	if (err)
		goto err_cpu_port_create;

	for (i = 1; i < max_ports; i++) {
		port_mapping = &mlxsw_sp->port_mapping[i];
		if (!port_mapping->width)
			continue;	/* No module mapped to this port. */
		err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	/* Reset 'i' so the event-disable rollback below covers all ports,
	 * since event generation was already enabled for every port.
	 */
	i = max_ports;
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
err_cpu_port_create:
err_event_enable:
	for (i--; i >= 1; i--)
		mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
	/* Make sure all scheduled events are processed */
	__mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
	kfree(mlxsw_sp->ports);
	mlxsw_sp->ports = NULL;
	return err;
}
2169
mlxsw_sp_port_module_info_init(struct mlxsw_sp * mlxsw_sp)2170 static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
2171 {
2172 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
2173 struct mlxsw_sp_port_mapping *port_mapping;
2174 int i;
2175 int err;
2176
2177 mlxsw_sp->port_mapping = kcalloc(max_ports,
2178 sizeof(struct mlxsw_sp_port_mapping),
2179 GFP_KERNEL);
2180 if (!mlxsw_sp->port_mapping)
2181 return -ENOMEM;
2182
2183 for (i = 1; i < max_ports; i++) {
2184 port_mapping = &mlxsw_sp->port_mapping[i];
2185 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, port_mapping);
2186 if (err)
2187 goto err_port_module_info_get;
2188 }
2189 return 0;
2190
2191 err_port_module_info_get:
2192 kfree(mlxsw_sp->port_mapping);
2193 return err;
2194 }
2195
/* Free the module mapping array allocated by
 * mlxsw_sp_port_module_info_init().
 */
static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->port_mapping);
}
2200
/* Create 'count' split ports over the lanes of the original port, each
 * using an equal share of its width. The local port numbers come from
 * the PMTDB query result. Already-created split ports are removed again
 * on failure.
 */
static int
mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_port_mapping *port_mapping,
			   unsigned int count, const char *pmtdb_pl)
{
	struct mlxsw_sp_port_mapping split_port_mapping;
	int err, i;

	split_port_mapping = *port_mapping;
	split_port_mapping.width /= count;
	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (!mlxsw_sp_local_port_valid(s_local_port))
			continue;

		err = mlxsw_sp_port_create(mlxsw_sp, s_local_port,
					   true, &split_port_mapping);
		if (err)
			goto err_port_create;
		/* Advance to the first lane of the next split port. */
		split_port_mapping.lane += split_port_mapping.width;
	}

	return 0;

err_port_create:
	for (i--; i >= 0; i--) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}
	return err;
}
2235
mlxsw_sp_port_unsplit_create(struct mlxsw_sp * mlxsw_sp,unsigned int count,const char * pmtdb_pl)2236 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
2237 unsigned int count,
2238 const char *pmtdb_pl)
2239 {
2240 struct mlxsw_sp_port_mapping *port_mapping;
2241 int i;
2242
2243 /* Go over original unsplit ports in the gap and recreate them. */
2244 for (i = 0; i < count; i++) {
2245 u16 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
2246
2247 port_mapping = &mlxsw_sp->port_mapping[local_port];
2248 if (!port_mapping->width || !mlxsw_sp_local_port_valid(local_port))
2249 continue;
2250 mlxsw_sp_port_create(mlxsw_sp, local_port,
2251 false, port_mapping);
2252 }
2253 }
2254
2255 static struct mlxsw_sp_port *
mlxsw_sp_port_get_by_local_port(struct mlxsw_sp * mlxsw_sp,u16 local_port)2256 mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u16 local_port)
2257 {
2258 if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
2259 return mlxsw_sp->ports[local_port];
2260 return NULL;
2261 }
2262
/* devlink port split handler: validate the request against PMTDB, remove
 * the ports occupying the target lanes and create the split ports in
 * their place. On failure the removed unsplit ports are recreated.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pmtdb_status status;
	char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (mlxsw_sp_port->split) {
		NL_SET_ERR_MSG_MOD(extack, "Port is already split");
		return -EINVAL;
	}

	/* Ask the device which local ports a split of this module into
	 * 'count' ports of reduced width would occupy.
	 */
	mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
			     mlxsw_sp_port->mapping.module,
			     mlxsw_sp_port->mapping.module_width / count,
			     count);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
		return err;
	}

	status = mlxsw_reg_pmtdb_status_get(pmtdb_pl);
	if (status != MLXSW_REG_PMTDB_STATUS_SUCCESS) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported split configuration");
		return -EINVAL;
	}

	/* Copy the mapping before removing the port, which frees it. */
	port_mapping = mlxsw_sp_port->mapping;

	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}

	err = mlxsw_sp_port_split_create(mlxsw_sp, &port_mapping,
					 count, pmtdb_pl);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	/* Restore the unsplit ports that were removed above. */
	mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);

	return err;
}
2327
/* devlink port unsplit handler: remove the split ports sharing this
 * port's module and recreate the original unsplit ports.
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
	unsigned int count;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	/* Number of split ports currently sharing the module. */
	count = mlxsw_sp_port->mapping.module_width /
		mlxsw_sp_port->mapping.width;

	mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
			     mlxsw_sp_port->mapping.module,
			     mlxsw_sp_port->mapping.module_width / count,
			     count);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
		return err;
	}

	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}

	mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);

	return 0;
}
2375
2376 static void
mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port * mlxsw_sp_port)2377 mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
2378 {
2379 int i;
2380
2381 for (i = 0; i < TC_MAX_QUEUE; i++)
2382 mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
2383 }
2384
mlxsw_sp_pude_event_func(const struct mlxsw_reg_info * reg,char * pude_pl,void * priv)2385 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2386 char *pude_pl, void *priv)
2387 {
2388 struct mlxsw_sp *mlxsw_sp = priv;
2389 struct mlxsw_sp_port *mlxsw_sp_port;
2390 enum mlxsw_reg_pude_oper_status status;
2391 u16 local_port;
2392
2393 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2394
2395 if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
2396 return;
2397 mlxsw_sp_port = mlxsw_sp->ports[local_port];
2398 if (!mlxsw_sp_port)
2399 return;
2400
2401 status = mlxsw_reg_pude_oper_status_get(pude_pl);
2402 if (status == MLXSW_PORT_OPER_STATUS_UP) {
2403 netdev_info(mlxsw_sp_port->dev, "link up\n");
2404 netif_carrier_on(mlxsw_sp_port->dev);
2405 mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
2406 } else {
2407 netdev_info(mlxsw_sp_port->dev, "link down\n");
2408 netif_carrier_off(mlxsw_sp_port->dev);
2409 mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
2410 }
2411 }
2412
/* Unpack every timestamp record carried by an MTPPTR FIFO event and hand
 * each one to the Spectrum-1 PTP code for matching against queued skbs.
 *
 * Fix: the address-of operator in the unpack call had been corrupted to
 * the mis-encoded token "×tamp"; restore "&timestamp".
 */
static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
					  char *mtpptr_pl, bool ingress)
{
	u16 local_port;
	u8 num_rec;
	int i;

	local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
	num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
	for (i = 0; i < num_rec; i++) {
		u8 domain_number;
		u8 message_type;
		u16 sequence_id;
		u64 timestamp;

		mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
					&domain_number, &sequence_id,
					&timestamp);
		mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
					    message_type, domain_number,
					    sequence_id, timestamp);
	}
}
2436
mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info * reg,char * mtpptr_pl,void * priv)2437 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
2438 char *mtpptr_pl, void *priv)
2439 {
2440 struct mlxsw_sp *mlxsw_sp = priv;
2441
2442 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
2443 }
2444
mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info * reg,char * mtpptr_pl,void * priv)2445 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
2446 char *mtpptr_pl, void *priv)
2447 {
2448 struct mlxsw_sp *mlxsw_sp = priv;
2449
2450 mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
2451 }
2452
/* Deliver a trapped packet to the stack via the port's netdev, updating
 * the per-CPU RX counters. Packets for ports that no longer exist are
 * dropped with a rate-limited warning.
 */
void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
				       u16 local_port, void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;

	if (unlikely(!mlxsw_sp_port)) {
		dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
				     local_port);
		return;
	}

	skb->dev = mlxsw_sp_port->dev;

	/* Counter updates are guarded by the u64_stats sync sequence so
	 * readers on 32-bit systems see consistent 64-bit values.
	 */
	pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	skb->protocol = eth_type_trans(skb, skb->dev);
	netif_receive_skb(skb);
}
2477
/* Like mlxsw_sp_rx_listener_no_mark_func(), but first mark the skb as
 * already forwarded in hardware so the bridge does not forward it again.
 *
 * Fix: drop the "return <void expression>;" form, which is a constraint
 * violation in ISO C (C11 6.8.6.4p1) and only compiles as a GNU extension.
 */
static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port,
					   void *priv)
{
	skb->offload_fwd_mark = 1;
	mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}
2484
/* Like mlxsw_sp_rx_listener_no_mark_func(), but mark the skb as both
 * L2- and L3-forwarded in hardware.
 *
 * Fix: drop the "return <void expression>;" form, which is a constraint
 * violation in ISO C (C11 6.8.6.4p1) and only compiles as a GNU extension.
 */
static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
					      u16 local_port, void *priv)
{
	skb->offload_l3_fwd_mark = 1;
	skb->offload_fwd_mark = 1;
	mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}
2492
/* Dispatch a trapped PTP packet to the ASIC-generation-specific handler. */
void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
			  u16 local_port)
{
	mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
}
2498
/* RX trap delivered to the CPU without any offload marking. */
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

/* RX trap whose packets are marked as already forwarded in hardware. */
#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

/* RX trap whose packets are marked as L2- and L3-forwarded in hardware. */
#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)	\
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action,	\
		  _is_ctrl, SP_##_trap_group, DISCARD)

/* Event listener registered in the SP_EVENT trap group. */
#define MLXSW_SP_EVENTL(_func, _trap_id)		\
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
2513
/* Traps and events common to all Spectrum ASIC generations. */
static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false),
	/* L3 traps */
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
			     ROUTER_EXP, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
};
2539
/* Spectrum-1-only listeners: PTP timestamp FIFO events. */
static const struct mlxsw_listener mlxsw_sp1_listener[] = {
	/* Events */
	MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
	MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};
2545
/* Spectrum-2-and-later listeners: port mapping change (PMLPE) events. */
static const struct mlxsw_listener mlxsw_sp2_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_port_mapping_listener_func, PMLPE),
};
2550
/* Configure the CPU policers that rate-limit the trap groups used by
 * this file's listeners, and record them as used in the policers bitmap.
 */
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	char qpcr_pl[MLXSW_REG_QPCR_LEN];
	enum mlxsw_reg_qpcr_ir_units ir_units;
	int max_cpu_policers;
	bool is_bytes;
	u8 burst_size;
	u32 rate;
	int i, err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
		return -EIO;

	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
	for (i = 0; i < max_cpu_policers; i++) {
		is_bytes = false;
		/* Policer index i corresponds to trap group i. */
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			rate = 1024;
			burst_size = 7;
			break;
		default:
			continue;	/* Group not policed here. */
		}

		__set_bit(i, mlxsw_sp->trap->policers_usage);
		mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
				    burst_size);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
		if (err)
			return err;
	}

	return 0;
}
2591
/* Bind the trap groups used in this file to their CPU traffic class,
 * priority and policer via the HTGT register.
 */
static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	enum mlxsw_reg_htgt_trap_group i;
	int max_cpu_policers;
	int max_trap_groups;
	u8 priority, tc;
	u16 policer_id;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
		return -EIO;

	max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
	max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);

	for (i = 0; i < max_trap_groups; i++) {
		/* By convention, trap group i uses policer i (see
		 * mlxsw_sp_cpu_policers_set()), except event groups.
		 */
		policer_id = i;
		switch (i) {
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
			priority = 1;
			tc = 1;
			break;
		case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
			priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
			tc = MLXSW_REG_HTGT_DEFAULT_TC;
			policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
			break;
		default:
			continue;	/* Group not configured here. */
		}

		/* A referenced policer must exist in hardware. */
		if (max_cpu_policers <= policer_id &&
		    policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
			return -EIO;

		mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
		err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
		if (err)
			return err;
	}

	return 0;
}
2638
/* Allocate the trap bookkeeping structure, configure CPU policers and
 * trap groups, then register the common and per-generation listeners.
 */
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_trap *trap;
	u64 max_policers;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS))
		return -EIO;
	max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS);
	/* The trap struct ends with a policer-usage bitmap sized at
	 * run time from the device's policer count.
	 */
	trap = kzalloc(struct_size(trap, policers_usage,
				   BITS_TO_LONGS(max_policers)), GFP_KERNEL);
	if (!trap)
		return -ENOMEM;
	trap->max_policers = max_policers;
	mlxsw_sp->trap = trap;

	err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
	if (err)
		goto err_cpu_policers_set;

	err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
	if (err)
		goto err_trap_groups_set;

	err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp_listener,
					ARRAY_SIZE(mlxsw_sp_listener),
					mlxsw_sp);
	if (err)
		goto err_traps_register;

	/* Per-ASIC-generation listeners selected at probe time. */
	err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp->listeners,
					mlxsw_sp->listeners_count, mlxsw_sp);
	if (err)
		goto err_extra_traps_init;

	return 0;

err_extra_traps_init:
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
				    ARRAY_SIZE(mlxsw_sp_listener),
				    mlxsw_sp);
err_traps_register:
err_trap_groups_set:
err_cpu_policers_set:
	kfree(trap);
	return err;
}
2686
/* Unregister listeners in reverse registration order and free the trap
 * bookkeeping structure.
 */
static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp->listeners,
				    mlxsw_sp->listeners_count,
				    mlxsw_sp);
	mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
				    ARRAY_SIZE(mlxsw_sp_listener), mlxsw_sp);
	kfree(mlxsw_sp->trap);
}
2696
/* In software LAG mode, reserve a PGT region for the LAG table and tell
 * the device its base address via SGCR. A no-op in firmware LAG mode.
 */
static int mlxsw_sp_lag_pgt_init(struct mlxsw_sp *mlxsw_sp)
{
	char sgcr_pl[MLXSW_REG_SGCR_LEN];
	int err;

	if (mlxsw_core_lag_mode(mlxsw_sp->core) !=
	    MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW)
		return 0;

	/* In DDD mode, which we by default use, each LAG entry is 8 PGT
	 * entries. The LAG table address needs to be 8-aligned, but that ought
	 * to be the case, since the LAG table is allocated first.
	 */
	err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp, &mlxsw_sp->lag_pgt_base,
					   mlxsw_sp->max_lag * 8);
	if (err)
		return err;
	if (WARN_ON_ONCE(mlxsw_sp->lag_pgt_base % 8)) {
		err = -EINVAL;
		goto err_mid_alloc_range;
	}

	mlxsw_reg_sgcr_pack(sgcr_pl, mlxsw_sp->lag_pgt_base);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sgcr), sgcr_pl);
	if (err)
		goto err_mid_alloc_range;

	return 0;

err_mid_alloc_range:
	mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base,
				    mlxsw_sp->max_lag * 8);
	return err;
}
2731
mlxsw_sp_lag_pgt_fini(struct mlxsw_sp * mlxsw_sp)2732 static void mlxsw_sp_lag_pgt_fini(struct mlxsw_sp *mlxsw_sp)
2733 {
2734 if (mlxsw_core_lag_mode(mlxsw_sp->core) !=
2735 MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW)
2736 return;
2737
2738 mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base,
2739 mlxsw_sp->max_lag * 8);
2740 }
2741
2742 #define MLXSW_SP_LAG_SEED_INIT 0xcafecafe
2743
/* Software state for one hardware LAG entry. */
struct mlxsw_sp_lag {
	struct net_device *dev;	/* Upper LAG netdevice. */
	refcount_t ref_count;	/* Member ports referencing this entry. */
	u16 lag_id;		/* Hardware LAG identifier. */
};
2749
/* Configure the LAG hash (SLCR), query device LAG limits, reserve the
 * LAG PGT region and allocate the software LAG table.
 */
static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
{
	char slcr_pl[MLXSW_REG_SLCR_LEN];
	u32 seed;
	int err;

	/* Derive a per-device hash seed from the base MAC so different
	 * switches spread flows differently.
	 */
	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
		     MLXSW_SP_LAG_SEED_INIT);
	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
				     MLXSW_REG_SLCR_LAG_HASH_SIP |
				     MLXSW_REG_SLCR_LAG_HASH_DIP |
				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
	if (err)
		return err;

	err = mlxsw_core_max_lag(mlxsw_sp->core, &mlxsw_sp->max_lag);
	if (err)
		return err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
		return -EIO;

	err = mlxsw_sp_lag_pgt_init(mlxsw_sp);
	if (err)
		return err;

	mlxsw_sp->lags = kcalloc(mlxsw_sp->max_lag, sizeof(struct mlxsw_sp_lag),
				 GFP_KERNEL);
	if (!mlxsw_sp->lags) {
		err = -ENOMEM;
		goto err_kcalloc;
	}

	return 0;

err_kcalloc:
	mlxsw_sp_lag_pgt_fini(mlxsw_sp);
	return err;
}
2795
/* Tear down LAG state: release the LAG PGT region and free the per-LAG
 * tracking array allocated in mlxsw_sp_lag_init().
 */
static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_lag_pgt_fini(mlxsw_sp);
	kfree(mlxsw_sp->lags);
}
2801
/* PTP callbacks for Spectrum-1 ASICs. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
	.clock_init	= mlxsw_sp1_ptp_clock_init,
	.clock_fini	= mlxsw_sp1_ptp_clock_fini,
	.init		= mlxsw_sp1_ptp_init,
	.fini		= mlxsw_sp1_ptp_fini,
	.receive	= mlxsw_sp1_ptp_receive,
	.transmitted	= mlxsw_sp1_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp1_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp1_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp1_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp1_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp1_get_stats_count,
	.get_stats_strings = mlxsw_sp1_get_stats_strings,
	.get_stats	= mlxsw_sp1_get_stats,
	.txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};
2818
/* PTP callbacks for Spectrum-2/3 ASICs. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
	.init		= mlxsw_sp2_ptp_init,
	.fini		= mlxsw_sp2_ptp_fini,
	.receive	= mlxsw_sp2_ptp_receive,
	.transmitted	= mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats	= mlxsw_sp2_get_stats,
	.txhdr_construct = mlxsw_sp2_ptp_txhdr_construct,
};
2835
/* PTP callbacks for Spectrum-4 ASICs. Identical to mlxsw_sp2_ptp_ops
 * except for txhdr_construct, which uses the common (Spectrum-1 style)
 * TX header constructor.
 */
static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
	.init		= mlxsw_sp2_ptp_init,
	.fini		= mlxsw_sp2_ptp_fini,
	.receive	= mlxsw_sp2_ptp_receive,
	.transmitted	= mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats	= mlxsw_sp2_get_stats,
	.txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};
2852
/* One packet-sampling trigger and its parameters, keyed by 'trigger' in
 * the sample_trigger_ht rhashtable. Reference-counted so multiple users
 * can share a trigger; freed via RCU since lookups run under RCU.
 */
struct mlxsw_sp_sample_trigger_node {
	struct mlxsw_sp_sample_trigger trigger;	/* hash key */
	struct mlxsw_sp_sample_params params;
	struct rhash_head ht_node;
	struct rcu_head rcu;
	refcount_t refcount;
};
2860
/* Hashtable parameters for sample_trigger_ht. Note that key_len covers
 * the entire trigger struct, so keys must be fully zero-initialized
 * before the relevant fields are set (see
 * mlxsw_sp_sample_trigger_key_init()).
 */
static const struct rhashtable_params mlxsw_sp_sample_trigger_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_sample_trigger_node, trigger),
	.head_offset = offsetof(struct mlxsw_sp_sample_trigger_node, ht_node),
	.key_len = sizeof(struct mlxsw_sp_sample_trigger),
	.automatic_shrinking = true,
};
2867
/* Build a canonical hash key from a trigger. The memset() is required
 * because the hashtable hashes the whole struct (including padding and
 * fields not copied here), so every byte must be deterministic.
 */
static void
mlxsw_sp_sample_trigger_key_init(struct mlxsw_sp_sample_trigger *key,
				 const struct mlxsw_sp_sample_trigger *trigger)
{
	memset(key, 0, sizeof(*key));
	key->type = trigger->type;
	key->local_port = trigger->local_port;
}
2876
2877 /* RCU read lock must be held */
2878 struct mlxsw_sp_sample_params *
mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp * mlxsw_sp,const struct mlxsw_sp_sample_trigger * trigger)2879 mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp,
2880 const struct mlxsw_sp_sample_trigger *trigger)
2881 {
2882 struct mlxsw_sp_sample_trigger_node *trigger_node;
2883 struct mlxsw_sp_sample_trigger key;
2884
2885 mlxsw_sp_sample_trigger_key_init(&key, trigger);
2886 trigger_node = rhashtable_lookup(&mlxsw_sp->sample_trigger_ht, &key,
2887 mlxsw_sp_sample_trigger_ht_params);
2888 if (!trigger_node)
2889 return NULL;
2890
2891 return &trigger_node->params;
2892 }
2893
/* Allocate a new trigger node with an initial reference and insert it
 * into the sampling hashtable. Returns 0 or a negative errno; on
 * insertion failure the node is freed.
 */
static int
mlxsw_sp_sample_trigger_node_init(struct mlxsw_sp *mlxsw_sp,
				  const struct mlxsw_sp_sample_trigger *trigger,
				  const struct mlxsw_sp_sample_params *params)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	int err;

	trigger_node = kzalloc(sizeof(*trigger_node), GFP_KERNEL);
	if (!trigger_node)
		return -ENOMEM;

	trigger_node->trigger = *trigger;
	trigger_node->params = *params;
	refcount_set(&trigger_node->refcount, 1);

	err = rhashtable_insert_fast(&mlxsw_sp->sample_trigger_ht,
				     &trigger_node->ht_node,
				     mlxsw_sp_sample_trigger_ht_params);
	if (err)
		goto err_rhashtable_insert;

	return 0;

err_rhashtable_insert:
	kfree(trigger_node);
	return err;
}
2922
/* Remove a trigger node from the hashtable and free it. kfree_rcu() is
 * used because mlxsw_sp_sample_trigger_params_lookup() runs under RCU
 * and may still hold a reference to the node during a grace period.
 */
static void
mlxsw_sp_sample_trigger_node_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_sample_trigger_node *trigger_node)
{
	rhashtable_remove_fast(&mlxsw_sp->sample_trigger_ht,
			       &trigger_node->ht_node,
			       mlxsw_sp_sample_trigger_ht_params);
	kfree_rcu(trigger_node, rcu);
}
2932
/* Associate sampling parameters with a trigger, creating the trigger
 * node on first use or taking another reference when a compatible
 * trigger already exists. Returns 0 or a negative errno (with extack
 * set). Caller must hold RTNL.
 */
int
mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_sp_sample_trigger *trigger,
				   const struct mlxsw_sp_sample_params *params,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	struct mlxsw_sp_sample_trigger key;

	ASSERT_RTNL();

	mlxsw_sp_sample_trigger_key_init(&key, trigger);

	trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
					      &key,
					      mlxsw_sp_sample_trigger_ht_params);
	if (!trigger_node)
		return mlxsw_sp_sample_trigger_node_init(mlxsw_sp, &key,
							 params);

	/* A per-port trigger cannot be shared. */
	if (trigger_node->trigger.local_port) {
		NL_SET_ERR_MSG_MOD(extack, "Sampling already enabled on port");
		return -EINVAL;
	}

	/* Sharing is only allowed when all parameters agree. */
	if (trigger_node->params.psample_group != params->psample_group ||
	    trigger_node->params.truncate != params->truncate ||
	    trigger_node->params.rate != params->rate ||
	    trigger_node->params.trunc_size != params->trunc_size) {
		NL_SET_ERR_MSG_MOD(extack, "Sampling parameters do not match for an existing sampling trigger");
		return -EINVAL;
	}

	refcount_inc(&trigger_node->refcount);

	return 0;
}
2970
/* Drop one reference on a trigger's parameters, destroying the trigger
 * node when the last reference goes away. Caller must hold RTNL.
 */
void
mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_sample_trigger *trigger)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	struct mlxsw_sp_sample_trigger key;

	ASSERT_RTNL();

	mlxsw_sp_sample_trigger_key_init(&key, trigger);

	trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
					      &key,
					      mlxsw_sp_sample_trigger_ht_params);
	if (!trigger_node)
		return;

	if (!refcount_dec_and_test(&trigger_node->refcount))
		return;

	mlxsw_sp_sample_trigger_node_fini(mlxsw_sp, trigger_node);
}
2993
2994 static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
2995 unsigned long event, void *ptr);
2996
2997 #define MLXSW_SP_DEFAULT_PARSING_DEPTH 96
2998 #define MLXSW_SP_INCREASED_PARSING_DEPTH 128
2999 #define MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT 4789
3000
mlxsw_sp_parsing_init(struct mlxsw_sp * mlxsw_sp)3001 static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp)
3002 {
3003 refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 0);
3004 mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;
3005 mlxsw_sp->parsing.vxlan_udp_dport = MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT;
3006 mutex_init(&mlxsw_sp->parsing.lock);
3007 }
3008
/* Destroy parser state; warn if any user still holds a reference to an
 * increased parsing depth (would indicate an unbalanced get/put).
 */
static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp)
{
	mutex_destroy(&mlxsw_sp->parsing.lock);
	WARN_ON_ONCE(refcount_read(&mlxsw_sp->parsing.parsing_depth_ref));
}
3014
/* Maps an IPv6 address to its KVDL entry, deduplicated via refcount so
 * the same address occupies a single KVDL slot.
 */
struct mlxsw_sp_ipv6_addr_node {
	struct in6_addr key;	/* hash key: the IPv6 address */
	struct rhash_head ht_node;
	u32 kvdl_index;		/* KVDL slot holding the address */
	refcount_t refcount;
};
3021
/* Hashtable parameters for ipv6_addr_ht; keyed by the full in6_addr. */
static const struct rhashtable_params mlxsw_sp_ipv6_addr_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, key),
	.head_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, ht_node),
	.key_len = sizeof(struct in6_addr),
	.automatic_shrinking = true,
};
3028
/* Allocate a KVDL entry for an IPv6 address, program it via the RIPS
 * register and track it in ipv6_addr_ht with an initial reference.
 * On success *p_kvdl_index holds the new KVDL index. Returns 0 or a
 * negative errno. Caller must hold ipv6_addr_ht_lock.
 */
static int
mlxsw_sp_ipv6_addr_init(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6,
			u32 *p_kvdl_index)
{
	struct mlxsw_sp_ipv6_addr_node *node;
	char rips_pl[MLXSW_REG_RIPS_LEN];
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp,
				  MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
				  p_kvdl_index);
	if (err)
		return err;

	mlxsw_reg_rips_pack(rips_pl, *p_kvdl_index, addr6);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl);
	if (err)
		goto err_rips_write;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node) {
		err = -ENOMEM;
		goto err_node_alloc;
	}

	node->key = *addr6;
	node->kvdl_index = *p_kvdl_index;
	refcount_set(&node->refcount, 1);

	err = rhashtable_insert_fast(&mlxsw_sp->ipv6_addr_ht,
				     &node->ht_node,
				     mlxsw_sp_ipv6_addr_ht_params);
	if (err)
		goto err_rhashtable_insert;

	return 0;

err_rhashtable_insert:
	kfree(node);
	/* Both labels below unwind to the same KVDL free; the entry may
	 * already have been written to hardware but is unused.
	 */
err_node_alloc:
err_rips_write:
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
			   *p_kvdl_index);
	return err;
}
3074
/* Remove an IPv6 address node from the hashtable and release its KVDL
 * entry. The index is saved before kfree() since the node is gone by
 * the time the KVDL is freed. Caller must hold ipv6_addr_ht_lock.
 */
static void mlxsw_sp_ipv6_addr_fini(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_ipv6_addr_node *node)
{
	u32 kvdl_index = node->kvdl_index;

	rhashtable_remove_fast(&mlxsw_sp->ipv6_addr_ht, &node->ht_node,
			       mlxsw_sp_ipv6_addr_ht_params);
	kfree(node);
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
			   kvdl_index);
}
3086
/* Get (or create) the KVDL index for an IPv6 address. If the address is
 * already tracked, take another reference and return the existing
 * index; otherwise allocate and program a new entry. Returns 0 or a
 * negative errno. Must be balanced by mlxsw_sp_ipv6_addr_put().
 */
int mlxsw_sp_ipv6_addr_kvdl_index_get(struct mlxsw_sp *mlxsw_sp,
				      const struct in6_addr *addr6,
				      u32 *p_kvdl_index)
{
	struct mlxsw_sp_ipv6_addr_node *node;
	int err = 0;	/* the lookup-hit path returns success */

	mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
	node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
				      mlxsw_sp_ipv6_addr_ht_params);
	if (node) {
		refcount_inc(&node->refcount);
		*p_kvdl_index = node->kvdl_index;
		goto out_unlock;
	}

	err = mlxsw_sp_ipv6_addr_init(mlxsw_sp, addr6, p_kvdl_index);

out_unlock:
	mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
	return err;
}
3109
/* Drop one reference on an IPv6 address's KVDL entry, releasing the
 * entry when the last reference goes away. WARNs if the address is not
 * tracked (unbalanced put).
 */
void
mlxsw_sp_ipv6_addr_put(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6)
{
	struct mlxsw_sp_ipv6_addr_node *node;

	mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
	node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
				      mlxsw_sp_ipv6_addr_ht_params);
	if (WARN_ON(!node))
		goto out_unlock;

	if (!refcount_dec_and_test(&node->refcount))
		goto out_unlock;

	mlxsw_sp_ipv6_addr_fini(mlxsw_sp, node);

out_unlock:
	mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
}
3129
mlxsw_sp_ipv6_addr_ht_init(struct mlxsw_sp * mlxsw_sp)3130 static int mlxsw_sp_ipv6_addr_ht_init(struct mlxsw_sp *mlxsw_sp)
3131 {
3132 int err;
3133
3134 err = rhashtable_init(&mlxsw_sp->ipv6_addr_ht,
3135 &mlxsw_sp_ipv6_addr_ht_params);
3136 if (err)
3137 return err;
3138
3139 mutex_init(&mlxsw_sp->ipv6_addr_ht_lock);
3140 return 0;
3141 }
3142
/* Destroy the IPv6 address hashtable and its mutex. */
static void mlxsw_sp_ipv6_addr_ht_fini(struct mlxsw_sp *mlxsw_sp)
{
	mutex_destroy(&mlxsw_sp->ipv6_addr_ht_lock);
	rhashtable_destroy(&mlxsw_sp->ipv6_addr_ht);
}
3148
/* Common driver initialization shared by all Spectrum generations.
 * Brings up every subsystem in dependency order; the error path unwinds
 * in exact reverse order via the label ladder, and mlxsw_sp_fini() must
 * mirror it. Returns 0 or a negative errno.
 *
 * NOTE(review): the first two failure paths (base_mac, kvdl) return
 * without undoing mlxsw_sp_parsing_init(); parsing_init only sets
 * fields and a mutex, so this appears intentional — confirm.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	mlxsw_sp_parsing_init(mlxsw_sp);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_pgt_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PGT\n");
		goto err_pgt_init;
	}

	/* Initialize before FIDs so that the LAG table is at the start of PGT
	 * and 8-aligned without overallocation.
	 */
	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp->fid_core_ops->init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fid_core_init;
	}

	err = mlxsw_sp_policers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n");
		goto err_policers_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
		goto err_devlink_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	/* Initialize SPAN before router and switchdev, so that those components
	 * can call mlxsw_sp_span_respin().
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_ipv6_addr_ht_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize hash table for IPv6 addresses\n");
		goto err_ipv6_addr_ht_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_port_range_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize port ranges\n");
		goto err_port_range_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp, extack);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	/* PTP clock (and hence PTP support) only when the bus can read it. */
	if (mlxsw_sp->bus_info->read_clock_capable) {
		/* NULL is a valid return value from clock_init */
		mlxsw_sp->clock =
			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
						      mlxsw_sp->bus_info->dev);
		if (IS_ERR(mlxsw_sp->clock)) {
			err = PTR_ERR(mlxsw_sp->clock);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
			goto err_ptp_clock_init;
		}
	}

	if (mlxsw_sp->clock) {
		/* NULL is a valid return value from ptp_ops->init */
		mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
		if (IS_ERR(mlxsw_sp->ptp_state)) {
			err = PTR_ERR(mlxsw_sp->ptp_state);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
			goto err_ptp_init;
		}
	}

	/* Initialize netdevice notifier after SPAN is initialized, so that the
	 * event handler can call SPAN respin.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					      &mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_port_module_info_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
		goto err_port_module_info_init;
	}

	err = rhashtable_init(&mlxsw_sp->sample_trigger_ht,
			      &mlxsw_sp_sample_trigger_ht_params);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init sampling trigger hashtable\n");
		goto err_sample_trigger_init;
	}

	/* Ports come last: once created, traffic and events can flow. */
	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

err_ports_create:
	rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
err_sample_trigger_init:
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
err_port_module_info_init:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
err_ptp_init:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
err_ptp_clock_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_port_range_fini(mlxsw_sp);
err_port_range_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
err_ipv6_addr_ht_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
err_devlink_traps_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_policers_fini(mlxsw_sp);
err_policers_init:
	mlxsw_sp->fid_core_ops->fini(mlxsw_sp);
err_fid_core_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_pgt_fini(mlxsw_sp);
err_pgt_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	mlxsw_sp_parsing_fini(mlxsw_sp);
	return err;
}
3387
/* Spectrum-1 entry point: select generation-specific ops tables and
 * parameters, then run the common mlxsw_sp_init().
 */
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp1_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp1_router_ops;
	mlxsw_sp->listeners = mlxsw_sp1_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
	mlxsw_sp->fid_core_ops = &mlxsw_sp1_fid_core_ops;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
	mlxsw_sp->pgt_smpe_index_valid = true;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
3420
/* Spectrum-2 entry point: select generation-specific ops tables and
 * parameters, then run the common mlxsw_sp_init().
 */
static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->listeners = mlxsw_sp2_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
	mlxsw_sp->fid_core_ops = &mlxsw_sp2_fid_core_ops;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
	mlxsw_sp->pgt_smpe_index_valid = false;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
3454
/* Spectrum-3 entry point: reuses most Spectrum-2 ops but with SP3
 * shared-buffer ops, SPAN ops and shaper burst size, then runs the
 * common mlxsw_sp_init().
 */
static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->listeners = mlxsw_sp2_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
	mlxsw_sp->fid_core_ops = &mlxsw_sp2_fid_core_ops;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
	mlxsw_sp->pgt_smpe_index_valid = false;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
3488
/* Spectrum-4 entry point: reuses most Spectrum-2/3 ops but with SP4
 * flex-key ops, Bloom-filter ops, PTP ops and shaper burst size, then
 * runs the common mlxsw_sp_init().
 */
static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp4_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->acl_bf_ops = &mlxsw_sp4_acl_bf_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp4_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->listeners = mlxsw_sp2_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
	mlxsw_sp->fid_core_ops = &mlxsw_sp2_fid_core_ops;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4;
	mlxsw_sp->pgt_smpe_index_valid = false;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
3522
/* Common teardown; must mirror mlxsw_sp_init() in exact reverse order
 * (it matches that function's error-unwind ladder).
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
	/* PTP was only initialized when a clock exists. */
	if (mlxsw_sp->clock) {
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
	}
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_port_range_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_policers_fini(mlxsw_sp);
	mlxsw_sp->fid_core_ops->fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_pgt_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	mlxsw_sp_parsing_fini(mlxsw_sp);
}
3556
/* Firmware configuration profile for Spectrum-1. Unlike later
 * generations, SP1 explicitly partitions the KVD into linear, hash
 * single and hash double regions.
 */
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_flood_mode		= 1,
	.flood_mode			= MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_ubridge			= 1,
	.ubridge			= 1,
	.used_kvd_sizes			= 1,
	.kvd_hash_single_parts		= 59,
	.kvd_hash_double_parts		= 41,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
3577
/* Firmware configuration profile for Spectrum-2/3: controlled flood
 * mode, UTC CQE timestamps, and preference for software LAG mode and
 * CFF flood mode where the firmware supports them.
 */
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_flood_mode		= 1,
	.flood_mode			= MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_ubridge			= 1,
	.ubridge			= 1,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.used_cqe_time_stamp_type	= 1,
	.cqe_time_stamp_type		= MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
	.lag_mode_prefer_sw		= true,
	.flood_mode_prefer_cff		= true,
};
3598
3599 /* Reduce number of LAGs from full capacity (256) to the maximum supported LAGs
3600 * in Spectrum-2/3, to avoid regression in number of free entries in the PGT
3601 * table.
3602 */
3603 #define MLXSW_SP4_CONFIG_PROFILE_MAX_LAG 128
3604
/* Firmware configuration profile for Spectrum-4. Same as the SP2/3
 * profile, plus an explicit max_lag cap (see the comment above
 * MLXSW_SP4_CONFIG_PROFILE_MAX_LAG).
 */
static const struct mlxsw_config_profile mlxsw_sp4_config_profile = {
	.used_max_lag			= 1,
	.max_lag			= MLXSW_SP4_CONFIG_PROFILE_MAX_LAG,
	.used_flood_mode		= 1,
	.flood_mode			= MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_ubridge			= 1,
	.ubridge			= 1,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.used_cqe_time_stamp_type	= 1,
	.cqe_time_stamp_type		= MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
	.lag_mode_prefer_sw		= true,
	.flood_mode_prefer_cff		= true,
};
3627
/* Fill devlink size parameters for the KVD and its three sub-resources.
 * The total KVD size is fixed (min == max == hardware KVD_SIZE); each
 * partition's maximum is the total minus the other partitions' minimum
 * sizes, so any valid split fits inside the KVD.
 */
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
				      struct devlink_resource_size_params *kvd_size_params,
				      struct devlink_resource_size_params *linear_size_params,
				      struct devlink_resource_size_params *hash_double_size_params,
				      struct devlink_resource_size_params *hash_single_size_params)
{
	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_SINGLE_MIN_SIZE);
	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_DOUBLE_MIN_SIZE);
	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	u32 linear_size_min = 0;	/* linear region may be empty */

	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(linear_size_params, linear_size_min,
					  kvd_size - single_size_min -
					  double_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_double_size_params,
					  double_size_min,
					  kvd_size - single_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_single_size_params,
					  single_size_min,
					  kvd_size - double_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}
3663
/* Register the Spectrum-1 KVD resource tree with devlink: the KVD root,
 * its linear partition (plus the linear sub-resources registered by
 * mlxsw_sp1_kvdl_resources_register()), and the hash double/single
 * partitions. On failure the caller is responsible for unregistering
 * any resources registered so far (see mlxsw_sp1_resources_register()).
 */
static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_resource_size_params hash_single_size_params;
	struct devlink_resource_size_params hash_double_size_params;
	struct devlink_resource_size_params linear_size_params;
	struct devlink_resource_size_params kvd_size_params;
	u32 kvd_size, single_size, double_size, linear_size;
	const struct mlxsw_config_profile *profile;
	int err;

	profile = &mlxsw_sp1_config_profile;
	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
		return -EIO;

	mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
					      &linear_size_params,
					      &hash_double_size_params,
					      &hash_single_size_params);

	/* Root of the tree: the whole KVD. */
	kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
				     kvd_size, MLXSW_SP_RESOURCE_KVD,
				     DEVLINK_RESOURCE_ID_PARENT_TOP,
				     &kvd_size_params);
	if (err)
		return err;

	/* Linear partition, sized from the config profile default. */
	linear_size = profile->kvd_linear_size;
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
				     linear_size,
				     MLXSW_SP_RESOURCE_KVD_LINEAR,
				     MLXSW_SP_RESOURCE_KVD,
				     &linear_size_params);
	if (err)
		return err;

	err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
	if  (err)
		return err;

	/* The remainder after the linear part is split between the double
	 * and single hash partitions by the profile's parts ratio, with
	 * the double part rounded down to the KVD granularity.
	 */
	double_size = kvd_size - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
				     double_size,
				     MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
				     MLXSW_SP_RESOURCE_KVD,
				     &hash_double_size_params);
	if (err)
		return err;

	/* Single hash partition takes whatever is left. */
	single_size = kvd_size - double_size - linear_size;
	err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
				     single_size,
				     MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
				     MLXSW_SP_RESOURCE_KVD,
				     &hash_single_size_params);
	if (err)
		return err;

	return 0;
}
3729
mlxsw_sp2_resources_kvd_register(struct mlxsw_core * mlxsw_core)3730 static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
3731 {
3732 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3733 struct devlink_resource_size_params kvd_size_params;
3734 u32 kvd_size;
3735
3736 if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
3737 return -EIO;
3738
3739 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
3740 devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
3741 MLXSW_SP_KVD_GRANULARITY,
3742 DEVLINK_RESOURCE_UNIT_ENTRY);
3743
3744 return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
3745 kvd_size, MLXSW_SP_RESOURCE_KVD,
3746 DEVLINK_RESOURCE_ID_PARENT_TOP,
3747 &kvd_size_params);
3748 }
3749
mlxsw_sp_resources_span_register(struct mlxsw_core * mlxsw_core)3750 static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
3751 {
3752 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3753 struct devlink_resource_size_params span_size_params;
3754 u32 max_span;
3755
3756 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
3757 return -EIO;
3758
3759 max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
3760 devlink_resource_size_params_init(&span_size_params, max_span, max_span,
3761 1, DEVLINK_RESOURCE_UNIT_ENTRY);
3762
3763 return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
3764 max_span, MLXSW_SP_RESOURCE_SPAN,
3765 DEVLINK_RESOURCE_ID_PARENT_TOP,
3766 &span_size_params);
3767 }
3768
3769 static int
mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core * mlxsw_core)3770 mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core *mlxsw_core)
3771 {
3772 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3773 struct devlink_resource_size_params size_params;
3774 u8 max_rif_mac_profiles;
3775
3776 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIF_MAC_PROFILES))
3777 max_rif_mac_profiles = 1;
3778 else
3779 max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core,
3780 MAX_RIF_MAC_PROFILES);
3781 devlink_resource_size_params_init(&size_params, max_rif_mac_profiles,
3782 max_rif_mac_profiles, 1,
3783 DEVLINK_RESOURCE_UNIT_ENTRY);
3784
3785 return devl_resource_register(devlink,
3786 "rif_mac_profiles",
3787 max_rif_mac_profiles,
3788 MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
3789 DEVLINK_RESOURCE_ID_PARENT_TOP,
3790 &size_params);
3791 }
3792
mlxsw_sp_resources_rifs_register(struct mlxsw_core * mlxsw_core)3793 static int mlxsw_sp_resources_rifs_register(struct mlxsw_core *mlxsw_core)
3794 {
3795 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3796 struct devlink_resource_size_params size_params;
3797 u64 max_rifs;
3798
3799 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIFS))
3800 return -EIO;
3801
3802 max_rifs = MLXSW_CORE_RES_GET(mlxsw_core, MAX_RIFS);
3803 devlink_resource_size_params_init(&size_params, max_rifs, max_rifs,
3804 1, DEVLINK_RESOURCE_UNIT_ENTRY);
3805
3806 return devl_resource_register(devlink, "rifs", max_rifs,
3807 MLXSW_SP_RESOURCE_RIFS,
3808 DEVLINK_RESOURCE_ID_PARENT_TOP,
3809 &size_params);
3810 }
3811
3812 static int
mlxsw_sp_resources_port_range_register(struct mlxsw_core * mlxsw_core)3813 mlxsw_sp_resources_port_range_register(struct mlxsw_core *mlxsw_core)
3814 {
3815 struct devlink *devlink = priv_to_devlink(mlxsw_core);
3816 struct devlink_resource_size_params size_params;
3817 u64 max;
3818
3819 if (!MLXSW_CORE_RES_VALID(mlxsw_core, ACL_MAX_L4_PORT_RANGE))
3820 return -EIO;
3821
3822 max = MLXSW_CORE_RES_GET(mlxsw_core, ACL_MAX_L4_PORT_RANGE);
3823 devlink_resource_size_params_init(&size_params, max, max, 1,
3824 DEVLINK_RESOURCE_UNIT_ENTRY);
3825
3826 return devl_resource_register(devlink, "port_range_registers", max,
3827 MLXSW_SP_RESOURCE_PORT_RANGE_REGISTERS,
3828 DEVLINK_RESOURCE_ID_PARENT_TOP,
3829 &size_params);
3830 }
3831
/* Register all devlink resources for Spectrum-1. On any failure, every
 * resource registered so far is torn down with a single call to
 * devl_resources_unregister(), which removes the whole tree.
 */
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_policer_resources_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_resources_rifs_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_resources_port_range_register(mlxsw_core);
	if (err)
		goto err_unregister;

	return 0;

err_unregister:
	devl_resources_unregister(priv_to_devlink(mlxsw_core));
	return err;
}
3875
/* Register all devlink resources for Spectrum-2 and later. Identical to
 * the Spectrum-1 flow except for the unpartitioned KVD registration.
 * On any failure all resources registered so far are torn down with one
 * devl_resources_unregister() call.
 */
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_policer_resources_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_resources_rifs_register(mlxsw_core);
	if (err)
		goto err_unregister;

	err = mlxsw_sp_resources_port_range_register(mlxsw_core);
	if (err)
		goto err_unregister;

	return 0;

err_unregister:
	devl_resources_unregister(priv_to_devlink(mlxsw_core));
	return err;
}
3919
/* Determine the KVD partition sizes to configure the device with.
 * User-provided sizes are read via devlink; when a devlink size is not
 * available, fall back to a default derived from the config profile.
 * The fallbacks are order-dependent: double depends on linear, and
 * single depends on both. Returns 0, or -EIO if the required resources
 * are missing or the resulting sizes are invalid.
 */
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_config_profile *profile,
				  u64 *p_single_size, u64 *p_double_size,
				  u64 *p_linear_size)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	u32 double_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
		return -EIO;

	/* The hash part is what is left of the kvd without the
	 * linear part. It is split to the single size and
	 * double size by the parts ratio from the profile.
	 * Both sizes must be a multiplications of the
	 * granularity from the profile. In case the user
	 * provided the sizes they are obtained via devlink.
	 */
	err = devl_resource_size_get(devlink,
				     MLXSW_SP_RESOURCE_KVD_LINEAR,
				     p_linear_size);
	if (err)
		*p_linear_size = profile->kvd_linear_size;

	err = devl_resource_size_get(devlink,
				     MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
				     p_double_size);
	if (err) {
		/* Default: the profile's parts ratio applied to what is
		 * left after the linear part, rounded down to granularity.
		 */
		double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
			      *p_linear_size;
		double_size *= profile->kvd_hash_double_parts;
		double_size /= profile->kvd_hash_double_parts +
			       profile->kvd_hash_single_parts;
		*p_double_size = rounddown(double_size,
					   MLXSW_SP_KVD_GRANULARITY);
	}

	err = devl_resource_size_get(devlink,
				     MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
				     p_single_size);
	if (err)
		/* Default: whatever remains of the KVD. */
		*p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
				 *p_double_size - *p_linear_size;

	/* Check results are legal. */
	if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
	    *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
		return -EIO;

	return 0;
}
3974
/* Core callback invoked when a PTP packet was transmitted: strip the
 * mlxsw TX header that was prepended on transmit and hand the skb to
 * the ASIC-generation-specific PTP implementation.
 */
static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
				     struct sk_buff *skb, u16 local_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	skb_pull(skb, MLXSW_TXHDR_LEN);
	mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
}
3983
/* mlxsw core driver description for Spectrum-1. Notably uses the
 * partitioned-KVD resource registration and is the only generation with
 * a kvd_sizes_get callback; CQEv2 SDQs are not supported.
 */
static struct mlxsw_driver mlxsw_sp1_driver = {
	.kind				= mlxsw_sp1_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.fw_req_rev			= &mlxsw_sp1_fw_rev,
	.fw_filename			= MLXSW_SP1_FW_FILENAME,
	.init				= mlxsw_sp1_init,
	.fini				= mlxsw_sp_fini,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.trap_group_set			= mlxsw_sp_trap_group_set,
	.trap_policer_init		= mlxsw_sp_trap_policer_init,
	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
	.trap_policer_set		= mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp1_resources_register,
	.kvd_sizes_get			= mlxsw_sp_kvd_sizes_get,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp1_config_profile,
	.sdq_supports_cqe_v2		= false,
};
4020
/* mlxsw core driver description for Spectrum-2. Unlike Spectrum-1 it
 * supports selective port removal and CQEv2 SDQs, and registers the KVD
 * as a single unpartitioned resource (no kvd_sizes_get).
 */
static struct mlxsw_driver mlxsw_sp2_driver = {
	.kind				= mlxsw_sp2_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.fw_req_rev			= &mlxsw_sp2_fw_rev,
	.fw_filename			= MLXSW_SP2_FW_FILENAME,
	.init				= mlxsw_sp2_init,
	.fini				= mlxsw_sp_fini,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.ports_remove_selected		= mlxsw_sp_ports_remove_selected,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.trap_group_set			= mlxsw_sp_trap_group_set,
	.trap_policer_init		= mlxsw_sp_trap_policer_init,
	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
	.trap_policer_set		= mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp2_resources_register,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp2_config_profile,
	.sdq_supports_cqe_v2		= true,
};
4057
/* mlxsw core driver description for Spectrum-3. Shares the Spectrum-2
 * resource registration and config profile; only the init callback and
 * firmware identification differ.
 */
static struct mlxsw_driver mlxsw_sp3_driver = {
	.kind				= mlxsw_sp3_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.fw_req_rev			= &mlxsw_sp3_fw_rev,
	.fw_filename			= MLXSW_SP3_FW_FILENAME,
	.init				= mlxsw_sp3_init,
	.fini				= mlxsw_sp_fini,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.ports_remove_selected		= mlxsw_sp_ports_remove_selected,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.trap_group_set			= mlxsw_sp_trap_group_set,
	.trap_policer_init		= mlxsw_sp_trap_policer_init,
	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
	.trap_policer_set		= mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp2_resources_register,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp2_config_profile,
	.sdq_supports_cqe_v2		= true,
};
4094
/* mlxsw core driver description for Spectrum-4.
 * NOTE(review): unlike SP1-SP3 there is no .fw_req_rev/.fw_filename
 * here — presumably firmware validation/flashing for this generation is
 * handled elsewhere; confirm before relying on it.
 */
static struct mlxsw_driver mlxsw_sp4_driver = {
	.kind				= mlxsw_sp4_driver_name,
	.priv_size			= sizeof(struct mlxsw_sp),
	.init				= mlxsw_sp4_init,
	.fini				= mlxsw_sp_fini,
	.port_split			= mlxsw_sp_port_split,
	.port_unsplit			= mlxsw_sp_port_unsplit,
	.ports_remove_selected		= mlxsw_sp_ports_remove_selected,
	.sb_pool_get			= mlxsw_sp_sb_pool_get,
	.sb_pool_set			= mlxsw_sp_sb_pool_set,
	.sb_port_pool_get		= mlxsw_sp_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_sp_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_sp_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_sp_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_sp_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_sp_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_sp_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_sp_sb_occ_tc_port_bind_get,
	.trap_init			= mlxsw_sp_trap_init,
	.trap_fini			= mlxsw_sp_trap_fini,
	.trap_action_set		= mlxsw_sp_trap_action_set,
	.trap_group_init		= mlxsw_sp_trap_group_init,
	.trap_group_set			= mlxsw_sp_trap_group_set,
	.trap_policer_init		= mlxsw_sp_trap_policer_init,
	.trap_policer_fini		= mlxsw_sp_trap_policer_fini,
	.trap_policer_set		= mlxsw_sp_trap_policer_set,
	.trap_policer_counter_get	= mlxsw_sp_trap_policer_counter_get,
	.txhdr_construct		= mlxsw_sp_txhdr_construct,
	.resources_register		= mlxsw_sp2_resources_register,
	.ptp_transmitted		= mlxsw_sp_ptp_transmitted,
	.txhdr_len			= MLXSW_TXHDR_LEN,
	.profile			= &mlxsw_sp4_config_profile,
	.sdq_supports_cqe_v2		= true,
};
4129
/* Return true if @dev is an mlxsw_sp front-panel port netdev,
 * identified by its netdev_ops pointer.
 */
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}
4134
mlxsw_sp_lower_dev_walk(struct net_device * lower_dev,struct netdev_nested_priv * priv)4135 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev,
4136 struct netdev_nested_priv *priv)
4137 {
4138 int ret = 0;
4139
4140 if (mlxsw_sp_port_dev_check(lower_dev)) {
4141 priv->data = (void *)netdev_priv(lower_dev);
4142 ret = 1;
4143 }
4144
4145 return ret;
4146 }
4147
mlxsw_sp_port_dev_lower_find(struct net_device * dev)4148 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
4149 {
4150 struct netdev_nested_priv priv = {
4151 .data = NULL,
4152 };
4153
4154 if (mlxsw_sp_port_dev_check(dev))
4155 return netdev_priv(dev);
4156
4157 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv);
4158
4159 return (struct mlxsw_sp_port *)priv.data;
4160 }
4161
mlxsw_sp_lower_get(struct net_device * dev)4162 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
4163 {
4164 struct mlxsw_sp_port *mlxsw_sp_port;
4165
4166 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
4167 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
4168 }
4169
mlxsw_sp_port_dev_lower_find_rcu(struct net_device * dev)4170 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
4171 {
4172 struct netdev_nested_priv priv = {
4173 .data = NULL,
4174 };
4175
4176 if (mlxsw_sp_port_dev_check(dev))
4177 return netdev_priv(dev);
4178
4179 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
4180 &priv);
4181
4182 return (struct mlxsw_sp_port *)priv.data;
4183 }
4184
/* Take a reference on the increased packet parsing depth. The first
 * user writes the MPRS register to raise the depth; subsequent users
 * only bump the refcount. Serialized by parsing.lock. Returns 0 or a
 * register-write error (in which case the refcount stays at zero).
 */
int mlxsw_sp_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];
	int err = 0;

	mutex_lock(&mlxsw_sp->parsing.lock);

	/* Already increased by another user - just take a reference. */
	if (refcount_inc_not_zero(&mlxsw_sp->parsing.parsing_depth_ref))
		goto out_unlock;

	mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_INCREASED_PARSING_DEPTH,
			    mlxsw_sp->parsing.vxlan_udp_dport);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	if (err)
		goto out_unlock;

	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_INCREASED_PARSING_DEPTH;
	refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 1);

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
	return err;
}
4208
/* Drop a reference on the increased parsing depth; the last user
 * restores the default depth via the MPRS register. The write's return
 * value is ignored - there is no sensible recovery on this path.
 */
void mlxsw_sp_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];

	mutex_lock(&mlxsw_sp->parsing.lock);

	if (!refcount_dec_and_test(&mlxsw_sp->parsing.parsing_depth_ref))
		goto out_unlock;

	mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_DEFAULT_PARSING_DEPTH,
			    mlxsw_sp->parsing.vxlan_udp_dport);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
}
4226
/* Program the UDP destination port the device parses as VXLAN, keeping
 * the currently configured parsing depth. The cached value is only
 * updated after the register write succeeds. Returns 0 or the
 * register-write error.
 */
int mlxsw_sp_parsing_vxlan_udp_dport_set(struct mlxsw_sp *mlxsw_sp,
					 __be16 udp_dport)
{
	char mprs_pl[MLXSW_REG_MPRS_LEN];
	int err;

	mutex_lock(&mlxsw_sp->parsing.lock);

	mlxsw_reg_mprs_pack(mprs_pl, mlxsw_sp->parsing.parsing_depth,
			    be16_to_cpu(udp_dport));
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
	if (err)
		goto out_unlock;

	mlxsw_sp->parsing.vxlan_udp_dport = be16_to_cpu(udp_dport);

out_unlock:
	mutex_unlock(&mlxsw_sp->parsing.lock);
	return err;
}
4247
/* Make the port leave any bridge the LAG device - or one of the LAG's
 * bridged uppers (e.g. VLAN devices) - is a member of. Used when the
 * port leaves the LAG and its bridge memberships become stale.
 */
static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct net_device *lag_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
	struct net_device *upper_dev;
	struct list_head *iter;

	/* The LAG itself may be directly enslaved to a bridge. */
	if (netif_is_bridge_port(lag_dev))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!netif_is_bridge_port(upper_dev))
			continue;
		br_dev = netdev_master_upper_dev_get(upper_dev);
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
	}
}
4266
4267 static struct mlxsw_sp_lag *
mlxsw_sp_lag_create(struct mlxsw_sp * mlxsw_sp,struct net_device * lag_dev,struct netlink_ext_ack * extack)4268 mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev,
4269 struct netlink_ext_ack *extack)
4270 {
4271 char sldr_pl[MLXSW_REG_SLDR_LEN];
4272 struct mlxsw_sp_lag *lag;
4273 u16 lag_id;
4274 int i, err;
4275
4276 for (i = 0; i < mlxsw_sp->max_lag; i++) {
4277 if (!mlxsw_sp->lags[i].dev)
4278 break;
4279 }
4280
4281 if (i == mlxsw_sp->max_lag) {
4282 NL_SET_ERR_MSG_MOD(extack,
4283 "Exceeded number of supported LAG devices");
4284 return ERR_PTR(-EBUSY);
4285 }
4286
4287 lag_id = i;
4288 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
4289 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4290 if (err)
4291 return ERR_PTR(err);
4292
4293 lag = &mlxsw_sp->lags[lag_id];
4294 lag->lag_id = lag_id;
4295 lag->dev = lag_dev;
4296 refcount_set(&lag->ref_count, 1);
4297
4298 return lag;
4299 }
4300
/* Release the LAG slot and destroy the LAG in hardware via the SLDR
 * register. The slot is marked free (dev = NULL) before the register
 * write, so it becomes reusable even if the write fails.
 */
static int
mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_lag *lag)
{
	char sldr_pl[MLXSW_REG_SLDR_LEN];

	lag->dev = NULL;

	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag->lag_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
4311
/* Add the port to the LAG's collector list at @port_index (SLCOR). */
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
				     u16 lag_id, u8 port_index)
{
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
				      lag_id, port_index);
	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core,
			       MLXSW_REG(slcor), slcor_pl);
}
4322
/* Remove the port from the LAG's collector list (SLCOR). */
static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core,
			       MLXSW_REG(slcor), slcor_pl);
}
4333
/* Enable collection on the port within the LAG (SLCOR). */
static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
					u16 lag_id)
{
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
					lag_id);
	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core,
			       MLXSW_REG(slcor), slcor_pl);
}
4344
/* Disable collection on the port within the LAG (SLCOR). */
static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
					 u16 lag_id)
{
	char slcor_pl[MLXSW_REG_SLCOR_LEN];

	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
					 lag_id);
	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core,
			       MLXSW_REG(slcor), slcor_pl);
}
4355
4356 static struct mlxsw_sp_lag *
mlxsw_sp_lag_find(struct mlxsw_sp * mlxsw_sp,struct net_device * lag_dev)4357 mlxsw_sp_lag_find(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev)
4358 {
4359 int i;
4360
4361 for (i = 0; i < mlxsw_sp->max_lag; i++) {
4362 if (!mlxsw_sp->lags[i].dev)
4363 continue;
4364
4365 if (mlxsw_sp->lags[i].dev == lag_dev)
4366 return &mlxsw_sp->lags[i];
4367 }
4368
4369 return NULL;
4370 }
4371
4372 static struct mlxsw_sp_lag *
mlxsw_sp_lag_get(struct mlxsw_sp * mlxsw_sp,struct net_device * lag_dev,struct netlink_ext_ack * extack)4373 mlxsw_sp_lag_get(struct mlxsw_sp *mlxsw_sp, struct net_device *lag_dev,
4374 struct netlink_ext_ack *extack)
4375 {
4376 struct mlxsw_sp_lag *lag;
4377
4378 lag = mlxsw_sp_lag_find(mlxsw_sp, lag_dev);
4379 if (lag) {
4380 refcount_inc(&lag->ref_count);
4381 return lag;
4382 }
4383
4384 return mlxsw_sp_lag_create(mlxsw_sp, lag_dev, extack);
4385 }
4386
4387 static void
mlxsw_sp_lag_put(struct mlxsw_sp * mlxsw_sp,struct mlxsw_sp_lag * lag)4388 mlxsw_sp_lag_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_lag *lag)
4389 {
4390 if (!refcount_dec_and_test(&lag->ref_count))
4391 return;
4392
4393 mlxsw_sp_lag_destroy(mlxsw_sp, lag);
4394 }
4395
4396 static bool
mlxsw_sp_master_lag_check(struct mlxsw_sp * mlxsw_sp,struct net_device * lag_dev,struct netdev_lag_upper_info * lag_upper_info,struct netlink_ext_ack * extack)4397 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
4398 struct net_device *lag_dev,
4399 struct netdev_lag_upper_info *lag_upper_info,
4400 struct netlink_ext_ack *extack)
4401 {
4402 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
4403 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
4404 return false;
4405 }
4406 return true;
4407 }
4408
/* Find a free member slot within the given LAG and return it through
 * @p_port_index. Returns -EBUSY when every slot is occupied.
 */
static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
				       u16 lag_id, u8 *p_port_index)
{
	u64 max_members;
	int i;

	max_members = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG_MEMBERS);
	for (i = 0; i < max_members; i++) {
		if (mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i))
			continue;

		*p_port_index = i;
		return 0;
	}

	return -EBUSY;
}
4425
/* Join the bridges above the LAG and above each of its VLAN uppers on
 * behalf of the port that is being enslaved to the LAG. On failure,
 * roll back exactly the joins performed so far: @done counts successful
 * VLAN-upper joins, and the rollback walk (same deterministic upper
 * iteration order) leaves only that many before breaking out.
 */
static int mlxsw_sp_lag_uppers_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct net_device *lag_dev,
					   struct netlink_ext_ack *extack)
{
	struct net_device *upper_dev;
	struct net_device *master;
	struct list_head *iter;
	int done = 0;
	int err;

	/* Bridge directly above the LAG, if any. */
	master = netdev_master_upper_dev_get(lag_dev);
	if (master && netif_is_bridge_master(master)) {
		err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, lag_dev, master,
						extack);
		if (err)
			return err;
	}

	/* Bridges above each VLAN device on top of the LAG. */
	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!is_vlan_dev(upper_dev))
			continue;

		master = netdev_master_upper_dev_get(upper_dev);
		if (master && netif_is_bridge_master(master)) {
			err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
							upper_dev, master,
							extack);
			if (err)
				goto err_port_bridge_join;
		}

		++done;
	}

	return 0;

err_port_bridge_join:
	netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
		if (!is_vlan_dev(upper_dev))
			continue;

		master = netdev_master_upper_dev_get(upper_dev);
		if (!master || !netif_is_bridge_master(master))
			continue;

		/* Stop once the first 'done' joins have been undone. */
		if (!done--)
			break;

		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, master);
	}

	/* Finally undo the join on the LAG's own bridge. */
	master = netdev_master_upper_dev_get(lag_dev);
	if (master && netif_is_bridge_master(master))
		mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, master);

	return err;
}
4483
4484 static void
mlxsw_sp_lag_uppers_bridge_leave(struct mlxsw_sp_port * mlxsw_sp_port,struct net_device * lag_dev)4485 mlxsw_sp_lag_uppers_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
4486 struct net_device *lag_dev)
4487 {
4488 struct net_device *upper_dev;
4489 struct net_device *master;
4490 struct list_head *iter;
4491
4492 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
4493 if (!is_vlan_dev(upper_dev))
4494 continue;
4495
4496 master = netdev_master_upper_dev_get(upper_dev);
4497 if (!master)
4498 continue;
4499
4500 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, master);
4501 }
4502
4503 master = netdev_master_upper_dev_get(lag_dev);
4504 if (master)
4505 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, master);
4506 }
4507
mlxsw_sp_port_lag_join(struct mlxsw_sp_port * mlxsw_sp_port,struct net_device * lag_dev,struct netlink_ext_ack * extack)4508 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
4509 struct net_device *lag_dev,
4510 struct netlink_ext_ack *extack)
4511 {
4512 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4513 struct mlxsw_sp_lag *lag;
4514 u16 lag_id;
4515 u8 port_index;
4516 int err;
4517
4518 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_dev, extack);
4519 if (IS_ERR(lag))
4520 return PTR_ERR(lag);
4521
4522 lag_id = lag->lag_id;
4523 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
4524 if (err)
4525 return err;
4526
4527 err = mlxsw_sp_lag_uppers_bridge_join(mlxsw_sp_port, lag_dev,
4528 extack);
4529 if (err)
4530 goto err_lag_uppers_bridge_join;
4531
4532 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
4533 if (err)
4534 goto err_col_port_add;
4535
4536 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
4537 mlxsw_sp_port->local_port);
4538 mlxsw_sp_port->lag_id = lag_id;
4539 mlxsw_sp_port->lagged = 1;
4540
4541 err = mlxsw_sp_fid_port_join_lag(mlxsw_sp_port);
4542 if (err)
4543 goto err_fid_port_join_lag;
4544
4545 /* Port is no longer usable as a router interface */
4546 if (mlxsw_sp_port->default_vlan->fid)
4547 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
4548
4549 /* Join a router interface configured on the LAG, if exists */
4550 err = mlxsw_sp_router_port_join_lag(mlxsw_sp_port, lag_dev,
4551 extack);
4552 if (err)
4553 goto err_router_join;
4554
4555 err = mlxsw_sp_netdevice_enslavement_replay(mlxsw_sp, lag_dev, extack);
4556 if (err)
4557 goto err_replay;
4558
4559 return 0;
4560
4561 err_replay:
4562 mlxsw_sp_router_port_leave_lag(mlxsw_sp_port, lag_dev);
4563 err_router_join:
4564 mlxsw_sp_fid_port_leave_lag(mlxsw_sp_port);
4565 err_fid_port_join_lag:
4566 mlxsw_sp_port->lagged = 0;
4567 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
4568 mlxsw_sp_port->local_port);
4569 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
4570 err_col_port_add:
4571 mlxsw_sp_lag_uppers_bridge_leave(mlxsw_sp_port, lag_dev);
4572 err_lag_uppers_bridge_join:
4573 mlxsw_sp_lag_put(mlxsw_sp, lag);
4574 return err;
4575 }
4576
mlxsw_sp_port_lag_leave(struct mlxsw_sp_port * mlxsw_sp_port,struct net_device * lag_dev)4577 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
4578 struct net_device *lag_dev)
4579 {
4580 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4581 u16 lag_id = mlxsw_sp_port->lag_id;
4582 struct mlxsw_sp_lag *lag;
4583
4584 if (!mlxsw_sp_port->lagged)
4585 return;
4586 lag = &mlxsw_sp->lags[lag_id];
4587
4588 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
4589
4590 /* Any VLANs configured on the port are no longer valid */
4591 mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
4592 mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
4593 /* Make the LAG and its directly linked uppers leave bridges they
4594 * are memeber in
4595 */
4596 mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);
4597
4598 mlxsw_sp_fid_port_leave_lag(mlxsw_sp_port);
4599
4600 mlxsw_sp_lag_put(mlxsw_sp, lag);
4601
4602 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
4603 mlxsw_sp_port->local_port);
4604 mlxsw_sp_port->lagged = 0;
4605
4606 /* Make sure untagged frames are allowed to ingress */
4607 mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
4608 ETH_P_8021Q);
4609 }
4610
mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port * mlxsw_sp_port,u16 lag_id)4611 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
4612 u16 lag_id)
4613 {
4614 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4615 char sldr_pl[MLXSW_REG_SLDR_LEN];
4616
4617 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
4618 mlxsw_sp_port->local_port);
4619 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4620 }
4621
mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port * mlxsw_sp_port,u16 lag_id)4622 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
4623 u16 lag_id)
4624 {
4625 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4626 char sldr_pl[MLXSW_REG_SLDR_LEN];
4627
4628 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
4629 mlxsw_sp_port->local_port);
4630 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4631 }
4632
4633 static int
mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port * mlxsw_sp_port)4634 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
4635 {
4636 int err;
4637
4638 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
4639 mlxsw_sp_port->lag_id);
4640 if (err)
4641 return err;
4642
4643 err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4644 if (err)
4645 goto err_dist_port_add;
4646
4647 return 0;
4648
4649 err_dist_port_add:
4650 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4651 return err;
4652 }
4653
4654 static int
mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port * mlxsw_sp_port)4655 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
4656 {
4657 int err;
4658
4659 err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
4660 mlxsw_sp_port->lag_id);
4661 if (err)
4662 return err;
4663
4664 err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
4665 mlxsw_sp_port->lag_id);
4666 if (err)
4667 goto err_col_port_disable;
4668
4669 return 0;
4670
4671 err_col_port_disable:
4672 mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4673 return err;
4674 }
4675
mlxsw_sp_port_lag_changed(struct mlxsw_sp_port * mlxsw_sp_port,struct netdev_lag_lower_state_info * info)4676 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
4677 struct netdev_lag_lower_state_info *info)
4678 {
4679 if (info->tx_enabled)
4680 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
4681 else
4682 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
4683 }
4684
mlxsw_sp_port_stp_set(struct mlxsw_sp_port * mlxsw_sp_port,bool enable)4685 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
4686 bool enable)
4687 {
4688 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4689 enum mlxsw_reg_spms_state spms_state;
4690 char *spms_pl;
4691 u16 vid;
4692 int err;
4693
4694 spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
4695 MLXSW_REG_SPMS_STATE_DISCARDING;
4696
4697 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
4698 if (!spms_pl)
4699 return -ENOMEM;
4700 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
4701
4702 for (vid = 0; vid < VLAN_N_VID; vid++)
4703 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
4704
4705 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
4706 kfree(spms_pl);
4707 return err;
4708 }
4709
mlxsw_sp_port_ovs_join(struct mlxsw_sp_port * mlxsw_sp_port)4710 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
4711 {
4712 u16 vid = 1;
4713 int err;
4714
4715 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
4716 if (err)
4717 return err;
4718 err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
4719 if (err)
4720 goto err_port_stp_set;
4721 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
4722 true, false);
4723 if (err)
4724 goto err_port_vlan_set;
4725
4726 for (; vid <= VLAN_N_VID - 1; vid++) {
4727 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
4728 vid, false);
4729 if (err)
4730 goto err_vid_learning_set;
4731 }
4732
4733 return 0;
4734
4735 err_vid_learning_set:
4736 for (vid--; vid >= 1; vid--)
4737 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
4738 err_port_vlan_set:
4739 mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
4740 err_port_stp_set:
4741 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
4742 return err;
4743 }
4744
mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port * mlxsw_sp_port)4745 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
4746 {
4747 u16 vid;
4748
4749 for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
4750 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
4751 vid, true);
4752
4753 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
4754 false, false);
4755 mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
4756 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
4757 }
4758
mlxsw_sp_bridge_has_multiple_vxlans(struct net_device * br_dev)4759 static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
4760 {
4761 unsigned int num_vxlans = 0;
4762 struct net_device *dev;
4763 struct list_head *iter;
4764
4765 netdev_for_each_lower_dev(br_dev, dev, iter) {
4766 if (netif_is_vxlan(dev))
4767 num_vxlans++;
4768 }
4769
4770 return num_vxlans > 1;
4771 }
4772
mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device * br_dev)4773 static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
4774 {
4775 DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
4776 struct net_device *dev;
4777 struct list_head *iter;
4778
4779 netdev_for_each_lower_dev(br_dev, dev, iter) {
4780 u16 pvid;
4781 int err;
4782
4783 if (!netif_is_vxlan(dev))
4784 continue;
4785
4786 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
4787 if (err || !pvid)
4788 continue;
4789
4790 if (test_and_set_bit(pvid, vlans))
4791 return false;
4792 }
4793
4794 return true;
4795 }
4796
mlxsw_sp_bridge_vxlan_is_valid(struct net_device * br_dev,struct netlink_ext_ack * extack)4797 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
4798 struct netlink_ext_ack *extack)
4799 {
4800 if (br_multicast_enabled(br_dev)) {
4801 NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
4802 return false;
4803 }
4804
4805 if (!br_vlan_enabled(br_dev) &&
4806 mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
4807 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
4808 return false;
4809 }
4810
4811 if (br_vlan_enabled(br_dev) &&
4812 !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
4813 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
4814 return false;
4815 }
4816
4817 return true;
4818 }
4819
mlxsw_sp_netdev_is_master(struct net_device * upper_dev,struct net_device * dev)4820 static bool mlxsw_sp_netdev_is_master(struct net_device *upper_dev,
4821 struct net_device *dev)
4822 {
4823 return upper_dev == netdev_master_upper_dev_get(dev);
4824 }
4825
/* Forward declaration: mlxsw_sp_netdevice_validate_uppers() below replays
 * synthetic notifier events through the main event handler.
 */
static int __mlxsw_sp_netdevice_event(struct mlxsw_sp *mlxsw_sp,
				      unsigned long event, void *ptr,
				      bool process_foreign);
4829
mlxsw_sp_netdevice_validate_uppers(struct mlxsw_sp * mlxsw_sp,struct net_device * dev,struct netlink_ext_ack * extack)4830 static int mlxsw_sp_netdevice_validate_uppers(struct mlxsw_sp *mlxsw_sp,
4831 struct net_device *dev,
4832 struct netlink_ext_ack *extack)
4833 {
4834 struct net_device *upper_dev;
4835 struct list_head *iter;
4836 int err;
4837
4838 netdev_for_each_upper_dev_rcu(dev, upper_dev, iter) {
4839 struct netdev_notifier_changeupper_info info = {
4840 .info = {
4841 .dev = dev,
4842 .extack = extack,
4843 },
4844 .master = mlxsw_sp_netdev_is_master(upper_dev, dev),
4845 .upper_dev = upper_dev,
4846 .linking = true,
4847
4848 /* upper_info is relevant for LAG devices. But we would
4849 * only need this if LAG were a valid upper above
4850 * another upper (e.g. a bridge that is a member of a
4851 * LAG), and that is never a valid configuration. So we
4852 * can keep this as NULL.
4853 */
4854 .upper_info = NULL,
4855 };
4856
4857 err = __mlxsw_sp_netdevice_event(mlxsw_sp,
4858 NETDEV_PRECHANGEUPPER,
4859 &info, true);
4860 if (err)
4861 return err;
4862
4863 err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp, upper_dev,
4864 extack);
4865 if (err)
4866 return err;
4867 }
4868
4869 return 0;
4870 }
4871
mlxsw_sp_netdevice_port_upper_event(struct net_device * lower_dev,struct net_device * dev,unsigned long event,void * ptr,bool replay_deslavement)4872 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
4873 struct net_device *dev,
4874 unsigned long event, void *ptr,
4875 bool replay_deslavement)
4876 {
4877 struct netdev_notifier_changeupper_info *info;
4878 struct mlxsw_sp_port *mlxsw_sp_port;
4879 struct netlink_ext_ack *extack;
4880 struct net_device *upper_dev;
4881 struct mlxsw_sp *mlxsw_sp;
4882 int err = 0;
4883 u16 proto;
4884
4885 mlxsw_sp_port = netdev_priv(dev);
4886 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4887 info = ptr;
4888 extack = netdev_notifier_info_to_extack(&info->info);
4889
4890 switch (event) {
4891 case NETDEV_PRECHANGEUPPER:
4892 upper_dev = info->upper_dev;
4893 if (!is_vlan_dev(upper_dev) &&
4894 !netif_is_lag_master(upper_dev) &&
4895 !netif_is_bridge_master(upper_dev) &&
4896 !netif_is_ovs_master(upper_dev) &&
4897 !netif_is_macvlan(upper_dev) &&
4898 !netif_is_l3_master(upper_dev)) {
4899 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
4900 return -EINVAL;
4901 }
4902 if (!info->linking)
4903 break;
4904 if (netif_is_bridge_master(upper_dev) &&
4905 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
4906 mlxsw_sp_bridge_has_vxlan(upper_dev) &&
4907 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
4908 return -EOPNOTSUPP;
4909 if (netdev_has_any_upper_dev(upper_dev) &&
4910 (!netif_is_bridge_master(upper_dev) ||
4911 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
4912 upper_dev))) {
4913 err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp,
4914 upper_dev,
4915 extack);
4916 if (err)
4917 return err;
4918 }
4919 if (netif_is_lag_master(upper_dev) &&
4920 !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
4921 info->upper_info, extack))
4922 return -EINVAL;
4923 if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
4924 NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
4925 return -EINVAL;
4926 }
4927 if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
4928 !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
4929 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
4930 return -EINVAL;
4931 }
4932 if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
4933 NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
4934 return -EINVAL;
4935 }
4936 if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
4937 NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
4938 return -EINVAL;
4939 }
4940 if (netif_is_bridge_master(upper_dev)) {
4941 br_vlan_get_proto(upper_dev, &proto);
4942 if (br_vlan_enabled(upper_dev) &&
4943 proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
4944 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a bridge with unknown VLAN protocol is not supported");
4945 return -EOPNOTSUPP;
4946 }
4947 if (vlan_uses_dev(lower_dev) &&
4948 br_vlan_enabled(upper_dev) &&
4949 proto == ETH_P_8021AD) {
4950 NL_SET_ERR_MSG_MOD(extack, "Enslaving a port that already has a VLAN upper to an 802.1ad bridge is not supported");
4951 return -EOPNOTSUPP;
4952 }
4953 }
4954 if (netif_is_bridge_port(lower_dev) && is_vlan_dev(upper_dev)) {
4955 struct net_device *br_dev = netdev_master_upper_dev_get(lower_dev);
4956
4957 if (br_vlan_enabled(br_dev)) {
4958 br_vlan_get_proto(br_dev, &proto);
4959 if (proto == ETH_P_8021AD) {
4960 NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a port enslaved to an 802.1ad bridge");
4961 return -EOPNOTSUPP;
4962 }
4963 }
4964 }
4965 if (is_vlan_dev(upper_dev) &&
4966 ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
4967 NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
4968 return -EOPNOTSUPP;
4969 }
4970 if (is_vlan_dev(upper_dev) && mlxsw_sp_port->security) {
4971 NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a locked port");
4972 return -EOPNOTSUPP;
4973 }
4974 break;
4975 case NETDEV_CHANGEUPPER:
4976 upper_dev = info->upper_dev;
4977 if (netif_is_bridge_master(upper_dev)) {
4978 if (info->linking) {
4979 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
4980 lower_dev,
4981 upper_dev,
4982 extack);
4983 } else {
4984 mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
4985 lower_dev,
4986 upper_dev);
4987 if (!replay_deslavement)
4988 break;
4989 mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp,
4990 lower_dev);
4991 }
4992 } else if (netif_is_lag_master(upper_dev)) {
4993 if (info->linking) {
4994 err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
4995 upper_dev, extack);
4996 } else {
4997 mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
4998 mlxsw_sp_port_lag_leave(mlxsw_sp_port,
4999 upper_dev);
5000 mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp,
5001 dev);
5002 }
5003 } else if (netif_is_ovs_master(upper_dev)) {
5004 if (info->linking)
5005 err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
5006 else
5007 mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
5008 } else if (netif_is_macvlan(upper_dev)) {
5009 if (!info->linking)
5010 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
5011 } else if (is_vlan_dev(upper_dev)) {
5012 struct net_device *br_dev;
5013
5014 if (!netif_is_bridge_port(upper_dev))
5015 break;
5016 if (info->linking)
5017 break;
5018 br_dev = netdev_master_upper_dev_get(upper_dev);
5019 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
5020 br_dev);
5021 }
5022 break;
5023 }
5024
5025 return err;
5026 }
5027
mlxsw_sp_netdevice_port_lower_event(struct net_device * dev,unsigned long event,void * ptr)5028 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
5029 unsigned long event, void *ptr)
5030 {
5031 struct netdev_notifier_changelowerstate_info *info;
5032 struct mlxsw_sp_port *mlxsw_sp_port;
5033 int err;
5034
5035 mlxsw_sp_port = netdev_priv(dev);
5036 info = ptr;
5037
5038 switch (event) {
5039 case NETDEV_CHANGELOWERSTATE:
5040 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
5041 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
5042 info->lower_state_info);
5043 if (err)
5044 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
5045 }
5046 break;
5047 }
5048
5049 return 0;
5050 }
5051
mlxsw_sp_netdevice_port_event(struct net_device * lower_dev,struct net_device * port_dev,unsigned long event,void * ptr,bool replay_deslavement)5052 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
5053 struct net_device *port_dev,
5054 unsigned long event, void *ptr,
5055 bool replay_deslavement)
5056 {
5057 switch (event) {
5058 case NETDEV_PRECHANGEUPPER:
5059 case NETDEV_CHANGEUPPER:
5060 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
5061 event, ptr,
5062 replay_deslavement);
5063 case NETDEV_CHANGELOWERSTATE:
5064 return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
5065 ptr);
5066 }
5067
5068 return 0;
5069 }
5070
5071 /* Called for LAG or its upper VLAN after the per-LAG-lower processing was done,
5072 * to do any per-LAG / per-LAG-upper processing.
5073 */
mlxsw_sp_netdevice_post_lag_event(struct net_device * dev,unsigned long event,void * ptr)5074 static int mlxsw_sp_netdevice_post_lag_event(struct net_device *dev,
5075 unsigned long event,
5076 void *ptr)
5077 {
5078 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(dev);
5079 struct netdev_notifier_changeupper_info *info = ptr;
5080
5081 if (!mlxsw_sp)
5082 return 0;
5083
5084 switch (event) {
5085 case NETDEV_CHANGEUPPER:
5086 if (info->linking)
5087 break;
5088 if (netif_is_bridge_master(info->upper_dev))
5089 mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp, dev);
5090 break;
5091 }
5092 return 0;
5093 }
5094
mlxsw_sp_netdevice_lag_event(struct net_device * lag_dev,unsigned long event,void * ptr)5095 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
5096 unsigned long event, void *ptr)
5097 {
5098 struct net_device *dev;
5099 struct list_head *iter;
5100 int ret;
5101
5102 netdev_for_each_lower_dev(lag_dev, dev, iter) {
5103 if (mlxsw_sp_port_dev_check(dev)) {
5104 ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
5105 ptr, false);
5106 if (ret)
5107 return ret;
5108 }
5109 }
5110
5111 return mlxsw_sp_netdevice_post_lag_event(lag_dev, event, ptr);
5112 }
5113
mlxsw_sp_netdevice_port_vlan_event(struct net_device * vlan_dev,struct net_device * dev,unsigned long event,void * ptr,u16 vid,bool replay_deslavement)5114 static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
5115 struct net_device *dev,
5116 unsigned long event, void *ptr,
5117 u16 vid, bool replay_deslavement)
5118 {
5119 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
5120 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
5121 struct netdev_notifier_changeupper_info *info = ptr;
5122 struct netlink_ext_ack *extack;
5123 struct net_device *upper_dev;
5124 int err = 0;
5125
5126 extack = netdev_notifier_info_to_extack(&info->info);
5127
5128 switch (event) {
5129 case NETDEV_PRECHANGEUPPER:
5130 upper_dev = info->upper_dev;
5131 if (!netif_is_bridge_master(upper_dev) &&
5132 !netif_is_macvlan(upper_dev) &&
5133 !netif_is_l3_master(upper_dev)) {
5134 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5135 return -EINVAL;
5136 }
5137 if (!info->linking)
5138 break;
5139 if (netif_is_bridge_master(upper_dev) &&
5140 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
5141 mlxsw_sp_bridge_has_vxlan(upper_dev) &&
5142 !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
5143 return -EOPNOTSUPP;
5144 if (netdev_has_any_upper_dev(upper_dev) &&
5145 (!netif_is_bridge_master(upper_dev) ||
5146 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
5147 upper_dev))) {
5148 err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp,
5149 upper_dev,
5150 extack);
5151 if (err)
5152 return err;
5153 }
5154 break;
5155 case NETDEV_CHANGEUPPER:
5156 upper_dev = info->upper_dev;
5157 if (netif_is_bridge_master(upper_dev)) {
5158 if (info->linking) {
5159 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
5160 vlan_dev,
5161 upper_dev,
5162 extack);
5163 } else {
5164 mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
5165 vlan_dev,
5166 upper_dev);
5167 if (!replay_deslavement)
5168 break;
5169 mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp,
5170 vlan_dev);
5171 }
5172 } else if (netif_is_macvlan(upper_dev)) {
5173 if (!info->linking)
5174 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
5175 }
5176 break;
5177 }
5178
5179 return err;
5180 }
5181
mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device * vlan_dev,struct net_device * lag_dev,unsigned long event,void * ptr,u16 vid)5182 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
5183 struct net_device *lag_dev,
5184 unsigned long event,
5185 void *ptr, u16 vid)
5186 {
5187 struct net_device *dev;
5188 struct list_head *iter;
5189 int ret;
5190
5191 netdev_for_each_lower_dev(lag_dev, dev, iter) {
5192 if (mlxsw_sp_port_dev_check(dev)) {
5193 ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
5194 event, ptr,
5195 vid, false);
5196 if (ret)
5197 return ret;
5198 }
5199 }
5200
5201 return mlxsw_sp_netdevice_post_lag_event(vlan_dev, event, ptr);
5202 }
5203
mlxsw_sp_netdevice_bridge_vlan_event(struct mlxsw_sp * mlxsw_sp,struct net_device * vlan_dev,struct net_device * br_dev,unsigned long event,void * ptr,u16 vid,bool process_foreign)5204 static int mlxsw_sp_netdevice_bridge_vlan_event(struct mlxsw_sp *mlxsw_sp,
5205 struct net_device *vlan_dev,
5206 struct net_device *br_dev,
5207 unsigned long event, void *ptr,
5208 u16 vid, bool process_foreign)
5209 {
5210 struct netdev_notifier_changeupper_info *info = ptr;
5211 struct netlink_ext_ack *extack;
5212 struct net_device *upper_dev;
5213
5214 if (!process_foreign && !mlxsw_sp_lower_get(vlan_dev))
5215 return 0;
5216
5217 extack = netdev_notifier_info_to_extack(&info->info);
5218
5219 switch (event) {
5220 case NETDEV_PRECHANGEUPPER:
5221 upper_dev = info->upper_dev;
5222 if (!netif_is_macvlan(upper_dev) &&
5223 !netif_is_l3_master(upper_dev)) {
5224 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5225 return -EOPNOTSUPP;
5226 }
5227 break;
5228 case NETDEV_CHANGEUPPER:
5229 upper_dev = info->upper_dev;
5230 if (info->linking)
5231 break;
5232 if (netif_is_macvlan(upper_dev))
5233 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
5234 break;
5235 }
5236
5237 return 0;
5238 }
5239
mlxsw_sp_netdevice_vlan_event(struct mlxsw_sp * mlxsw_sp,struct net_device * vlan_dev,unsigned long event,void * ptr,bool process_foreign)5240 static int mlxsw_sp_netdevice_vlan_event(struct mlxsw_sp *mlxsw_sp,
5241 struct net_device *vlan_dev,
5242 unsigned long event, void *ptr,
5243 bool process_foreign)
5244 {
5245 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
5246 u16 vid = vlan_dev_vlan_id(vlan_dev);
5247
5248 if (mlxsw_sp_port_dev_check(real_dev))
5249 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
5250 event, ptr, vid,
5251 true);
5252 else if (netif_is_lag_master(real_dev))
5253 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
5254 real_dev, event,
5255 ptr, vid);
5256 else if (netif_is_bridge_master(real_dev))
5257 return mlxsw_sp_netdevice_bridge_vlan_event(mlxsw_sp, vlan_dev,
5258 real_dev, event,
5259 ptr, vid,
5260 process_foreign);
5261
5262 return 0;
5263 }
5264
mlxsw_sp_netdevice_bridge_event(struct mlxsw_sp * mlxsw_sp,struct net_device * br_dev,unsigned long event,void * ptr,bool process_foreign)5265 static int mlxsw_sp_netdevice_bridge_event(struct mlxsw_sp *mlxsw_sp,
5266 struct net_device *br_dev,
5267 unsigned long event, void *ptr,
5268 bool process_foreign)
5269 {
5270 struct netdev_notifier_changeupper_info *info = ptr;
5271 struct netlink_ext_ack *extack;
5272 struct net_device *upper_dev;
5273 u16 proto;
5274
5275 if (!process_foreign && !mlxsw_sp_lower_get(br_dev))
5276 return 0;
5277
5278 extack = netdev_notifier_info_to_extack(&info->info);
5279
5280 switch (event) {
5281 case NETDEV_PRECHANGEUPPER:
5282 upper_dev = info->upper_dev;
5283 if (!is_vlan_dev(upper_dev) &&
5284 !netif_is_macvlan(upper_dev) &&
5285 !netif_is_l3_master(upper_dev)) {
5286 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5287 return -EOPNOTSUPP;
5288 }
5289 if (!info->linking)
5290 break;
5291 if (br_vlan_enabled(br_dev)) {
5292 br_vlan_get_proto(br_dev, &proto);
5293 if (proto == ETH_P_8021AD) {
5294 NL_SET_ERR_MSG_MOD(extack, "Upper devices are not supported on top of an 802.1ad bridge");
5295 return -EOPNOTSUPP;
5296 }
5297 }
5298 if (is_vlan_dev(upper_dev) &&
5299 ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
5300 NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
5301 return -EOPNOTSUPP;
5302 }
5303 break;
5304 case NETDEV_CHANGEUPPER:
5305 upper_dev = info->upper_dev;
5306 if (info->linking)
5307 break;
5308 if (is_vlan_dev(upper_dev))
5309 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
5310 if (netif_is_macvlan(upper_dev))
5311 mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
5312 break;
5313 }
5314
5315 return 0;
5316 }
5317
mlxsw_sp_netdevice_macvlan_event(struct net_device * macvlan_dev,unsigned long event,void * ptr)5318 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
5319 unsigned long event, void *ptr)
5320 {
5321 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
5322 struct netdev_notifier_changeupper_info *info = ptr;
5323 struct netlink_ext_ack *extack;
5324 struct net_device *upper_dev;
5325
5326 if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
5327 return 0;
5328
5329 extack = netdev_notifier_info_to_extack(&info->info);
5330 upper_dev = info->upper_dev;
5331
5332 if (!netif_is_l3_master(upper_dev)) {
5333 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5334 return -EOPNOTSUPP;
5335 }
5336
5337 return 0;
5338 }
5339
mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp * mlxsw_sp,struct net_device * dev,unsigned long event,void * ptr)5340 static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
5341 struct net_device *dev,
5342 unsigned long event, void *ptr)
5343 {
5344 struct netdev_notifier_changeupper_info *cu_info;
5345 struct netdev_notifier_info *info = ptr;
5346 struct netlink_ext_ack *extack;
5347 struct net_device *upper_dev;
5348
5349 extack = netdev_notifier_info_to_extack(info);
5350
5351 switch (event) {
5352 case NETDEV_CHANGEUPPER:
5353 cu_info = container_of(info,
5354 struct netdev_notifier_changeupper_info,
5355 info);
5356 upper_dev = cu_info->upper_dev;
5357 if (!netif_is_bridge_master(upper_dev))
5358 return 0;
5359 if (!mlxsw_sp_lower_get(upper_dev))
5360 return 0;
5361 if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
5362 return -EOPNOTSUPP;
5363 if (cu_info->linking) {
5364 if (!netif_running(dev))
5365 return 0;
5366 /* When the bridge is VLAN-aware, the VNI of the VxLAN
5367 * device needs to be mapped to a VLAN, but at this
5368 * point no VLANs are configured on the VxLAN device
5369 */
5370 if (br_vlan_enabled(upper_dev))
5371 return 0;
5372 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
5373 dev, 0, extack);
5374 } else {
5375 /* VLANs were already flushed, which triggered the
5376 * necessary cleanup
5377 */
5378 if (br_vlan_enabled(upper_dev))
5379 return 0;
5380 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
5381 }
5382 break;
5383 case NETDEV_PRE_UP:
5384 upper_dev = netdev_master_upper_dev_get(dev);
5385 if (!upper_dev)
5386 return 0;
5387 if (!netif_is_bridge_master(upper_dev))
5388 return 0;
5389 if (!mlxsw_sp_lower_get(upper_dev))
5390 return 0;
5391 return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
5392 extack);
5393 case NETDEV_DOWN:
5394 upper_dev = netdev_master_upper_dev_get(dev);
5395 if (!upper_dev)
5396 return 0;
5397 if (!netif_is_bridge_master(upper_dev))
5398 return 0;
5399 if (!mlxsw_sp_lower_get(upper_dev))
5400 return 0;
5401 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
5402 break;
5403 }
5404
5405 return 0;
5406 }
5407
__mlxsw_sp_netdevice_event(struct mlxsw_sp * mlxsw_sp,unsigned long event,void * ptr,bool process_foreign)5408 static int __mlxsw_sp_netdevice_event(struct mlxsw_sp *mlxsw_sp,
5409 unsigned long event, void *ptr,
5410 bool process_foreign)
5411 {
5412 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
5413 struct mlxsw_sp_span_entry *span_entry;
5414 int err = 0;
5415
5416 if (event == NETDEV_UNREGISTER) {
5417 span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
5418 if (span_entry)
5419 mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
5420 }
5421
5422 if (netif_is_vxlan(dev))
5423 err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
5424 else if (mlxsw_sp_port_dev_check(dev))
5425 err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr, true);
5426 else if (netif_is_lag_master(dev))
5427 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
5428 else if (is_vlan_dev(dev))
5429 err = mlxsw_sp_netdevice_vlan_event(mlxsw_sp, dev, event, ptr,
5430 process_foreign);
5431 else if (netif_is_bridge_master(dev))
5432 err = mlxsw_sp_netdevice_bridge_event(mlxsw_sp, dev, event, ptr,
5433 process_foreign);
5434 else if (netif_is_macvlan(dev))
5435 err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);
5436
5437 return err;
5438 }
5439
mlxsw_sp_netdevice_event(struct notifier_block * nb,unsigned long event,void * ptr)5440 static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
5441 unsigned long event, void *ptr)
5442 {
5443 struct mlxsw_sp *mlxsw_sp;
5444 int err;
5445
5446 mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
5447 mlxsw_sp_span_respin(mlxsw_sp);
5448 err = __mlxsw_sp_netdevice_event(mlxsw_sp, event, ptr, false);
5449
5450 return notifier_from_errno(err);
5451 }
5452
/* PCI id tables and driver skeletons for the four Spectrum generations.
 * Only .name and .id_table are filled in here; the remaining pci_driver
 * callbacks are presumably supplied by mlxsw_pci_driver_register() — see
 * the registration calls in mlxsw_sp_module_init().
 */
static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp1_pci_driver = {
	.name = mlxsw_sp1_driver_name,
	.id_table = mlxsw_sp1_pci_id_table,
};

static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
	{0, },
};

static struct pci_driver mlxsw_sp2_pci_driver = {
	.name = mlxsw_sp2_driver_name,
	.id_table = mlxsw_sp2_pci_id_table,
};

static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
	{0, },
};

static struct pci_driver mlxsw_sp3_pci_driver = {
	.name = mlxsw_sp3_driver_name,
	.id_table = mlxsw_sp3_pci_id_table,
};

static const struct pci_device_id mlxsw_sp4_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM4), 0},
	{0, },
};

static struct pci_driver mlxsw_sp4_pci_driver = {
	.name = mlxsw_sp4_driver_name,
	.id_table = mlxsw_sp4_pci_id_table,
};
5492
mlxsw_sp_module_init(void)5493 static int __init mlxsw_sp_module_init(void)
5494 {
5495 int err;
5496
5497 err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
5498 if (err)
5499 return err;
5500
5501 err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
5502 if (err)
5503 goto err_sp2_core_driver_register;
5504
5505 err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
5506 if (err)
5507 goto err_sp3_core_driver_register;
5508
5509 err = mlxsw_core_driver_register(&mlxsw_sp4_driver);
5510 if (err)
5511 goto err_sp4_core_driver_register;
5512
5513 err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
5514 if (err)
5515 goto err_sp1_pci_driver_register;
5516
5517 err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
5518 if (err)
5519 goto err_sp2_pci_driver_register;
5520
5521 err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
5522 if (err)
5523 goto err_sp3_pci_driver_register;
5524
5525 err = mlxsw_pci_driver_register(&mlxsw_sp4_pci_driver);
5526 if (err)
5527 goto err_sp4_pci_driver_register;
5528
5529 return 0;
5530
5531 err_sp4_pci_driver_register:
5532 mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
5533 err_sp3_pci_driver_register:
5534 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
5535 err_sp2_pci_driver_register:
5536 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
5537 err_sp1_pci_driver_register:
5538 mlxsw_core_driver_unregister(&mlxsw_sp4_driver);
5539 err_sp4_core_driver_register:
5540 mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
5541 err_sp3_core_driver_register:
5542 mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
5543 err_sp2_core_driver_register:
5544 mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
5545 return err;
5546 }
5547
mlxsw_sp_module_exit(void)5548 static void __exit mlxsw_sp_module_exit(void)
5549 {
5550 mlxsw_pci_driver_unregister(&mlxsw_sp4_pci_driver);
5551 mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
5552 mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
5553 mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
5554 mlxsw_core_driver_unregister(&mlxsw_sp4_driver);
5555 mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
5556 mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
5557 mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
5558 }
5559
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

/* Module metadata: license, device tables for module autoloading, and the
 * firmware images fetched at probe time (SP4 has no .mfa2 listed here —
 * presumably its firmware is delivered via the linecards bundle; confirm).
 */
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp4_pci_id_table);
MODULE_FIRMWARE(MLXSW_SP1_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP2_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP3_FW_FILENAME);
MODULE_FIRMWARE(MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME);