// SPDX-License-Identifier: GPL-2.0
/*
 * Microchip switch driver main logic
 *
 * Copyright (C) 2017-2019 Microchip Technology Inc.
 */

#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_data/microchip-ksz.h>
#include <linux/phy.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/of_net.h>
#include <net/dsa.h>
#include <net/switchdev.h>

#include "ksz_common.h"

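/* Push the current bridge membership mask to every other bridged port that
 * is forwarding and whose programmed membership has become stale.  The CPU
 * port is left untouched.
 */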
void ksz_update_port_member(struct ksz_device *dev, int port)
{
	struct ksz_port *p;
	int i;

	for (i = 0; i < dev->port_cnt; i++) {
		if (i == port || i == dev->cpu_port)
			continue;
		p = &dev->ports[i];
		if (!(dev->member & (1 << i)))
			continue;

		/* Port is a member of the bridge and is forwarding. */
		if (p->stp_state == BR_STATE_FORWARDING &&
		    p->member != dev->member)
			dev->dev_ops->cfg_port_member(dev, i, dev->member);
	}
}
EXPORT_SYMBOL_GPL(ksz_update_port_member);

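/* Read one port's MIB counters into the software mirror: first the switch
 * counters up to the register limit, then the per-port packet counters,
 * with the dropped count kept in the last storage slot.
 */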
static void port_r_cnt(struct ksz_device *dev, int port)
{
	struct ksz_port_mib *mib = &dev->ports[port].mib;
	u64 *dropped;

	/* Some ports may not have MIB counters before SWITCH_COUNTER_NUM. */
	while (mib->cnt_ptr < dev->reg_mib_cnt) {
		dev->dev_ops->r_mib_cnt(dev, port, mib->cnt_ptr,
					&mib->counters[mib->cnt_ptr]);
		++mib->cnt_ptr;
	}

	/* The dropped-packet count is kept in the last storage slot. */
	dropped = &mib->counters[dev->mib_cnt];

	/* Some ports may not have MIB counters after SWITCH_COUNTER_NUM. */
	while (mib->cnt_ptr < dev->mib_cnt) {
		dev->dev_ops->r_mib_pkt(dev, port, mib->cnt_ptr,
					dropped, &mib->counters[mib->cnt_ptr]);
		++mib->cnt_ptr;
	}
	mib->cnt_ptr = 0;
}

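/* Deferred work that walks every used port and refreshes its MIB counters
 * under the per-port counter mutex.
 */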
static void ksz_mib_read_work(struct work_struct *work)
{
	struct ksz_device *dev = container_of(work, struct ksz_device,
					      mib_read);
	struct ksz_port_mib *mib;
	struct ksz_port *p;
	int i;

	for (i = 0; i < dev->mib_port_cnt; i++) {
		if (dsa_is_unused_port(dev->ds, i))
			continue;

		p = &dev->ports[i];
		mib = &p->mib;
		mutex_lock(&mib->cnt_mutex);

		/* Only read all MIB counters when the port is told to do so.
		 * If not, read only the dropped counters when the link is
		 * not up.
		 */
		if (!p->read) {
			const struct dsa_port *dp = dsa_to_port(dev->ds, i);

			if (!netif_carrier_ok(dp->slave))
				mib->cnt_ptr = dev->reg_mib_cnt;
		}
		port_r_cnt(dev, i);
		p->read = false;
		mutex_unlock(&mib->cnt_mutex);
	}
}

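/* Periodic timer callback: re-arm the timer and schedule the MIB read work. */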
static void mib_monitor(struct timer_list *t)
{
	struct ksz_device *dev = from_timer(dev, t, mib_read_timer);

	mod_timer(&dev->mib_read_timer, jiffies + dev->mib_read_interval);
	schedule_work(&dev->mib_read);
}

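/* Initialize the per-port counters and start the periodic MIB reading. */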
void ksz_init_mib_timer(struct ksz_device *dev)
{
	int i;

	/* Read MIB counters every 30 seconds to avoid overflow. */
	dev->mib_read_interval = msecs_to_jiffies(30000);

	INIT_WORK(&dev->mib_read, ksz_mib_read_work);
	timer_setup(&dev->mib_read_timer, mib_monitor, 0);

	for (i = 0; i < dev->mib_port_cnt; i++)
		dev->dev_ops->port_init_cnt(dev, i);

	/* Start the timer 2 seconds later. */
	dev->mib_read_timer.expires = jiffies + msecs_to_jiffies(2000);
	add_timer(&dev->mib_read_timer);
}
EXPORT_SYMBOL_GPL(ksz_init_mib_timer);

int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
{
	struct ksz_device *dev = ds->priv;
	u16 val = 0xffff;

	dev->dev_ops->r_phy(dev, addr, reg, &val);

	return val;
}
EXPORT_SYMBOL_GPL(ksz_phy_read16);

int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
{
	struct ksz_device *dev = ds->priv;

	dev->dev_ops->w_phy(dev, addr, reg, val);

	return 0;
}
EXPORT_SYMBOL_GPL(ksz_phy_write16);

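/* Link change handler: snapshot the MIB counters when the link goes down and
 * keep the mask of live ports in sync.
 */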
void ksz_adjust_link(struct dsa_switch *ds, int port,
		     struct phy_device *phydev)
{
	struct ksz_device *dev = ds->priv;
	struct ksz_port *p = &dev->ports[port];

	/* Read all MIB counters when the link is going down. */
	if (!phydev->link) {
		p->read = true;
		schedule_work(&dev->mib_read);
	}
	mutex_lock(&dev->dev_mutex);
	if (!phydev->link)
		dev->live_ports &= ~(1 << port);
	else
		/* Remember which port is connected and active. */
		dev->live_ports |= (1 << port) & dev->on_ports;
	mutex_unlock(&dev->dev_mutex);
}
EXPORT_SYMBOL_GPL(ksz_adjust_link);

int ksz_sset_count(struct dsa_switch *ds, int port, int sset)
{
	struct ksz_device *dev = ds->priv;

	if (sset != ETH_SS_STATS)
		return 0;

	return dev->mib_cnt;
}
EXPORT_SYMBOL_GPL(ksz_sset_count);

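/* ethtool statistics: refresh this port's MIB counters and copy them to the
 * caller's buffer.
 */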
void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *buf)
{
	const struct dsa_port *dp = dsa_to_port(ds, port);
	struct ksz_device *dev = ds->priv;
	struct ksz_port_mib *mib;

	mib = &dev->ports[port].mib;
	mutex_lock(&mib->cnt_mutex);

	/* Only read dropped counters if no link. */
	if (!netif_carrier_ok(dp->slave))
		mib->cnt_ptr = dev->reg_mib_cnt;
	port_r_cnt(dev, port);
	memcpy(buf, mib->counters, dev->mib_cnt * sizeof(u64));
	mutex_unlock(&mib->cnt_mutex);
}
EXPORT_SYMBOL_GPL(ksz_get_ethtool_stats);

int ksz_port_bridge_join(struct dsa_switch *ds, int port,
			 struct net_device *br)
{
	struct ksz_device *dev = ds->priv;

	mutex_lock(&dev->dev_mutex);
	dev->br_member |= (1 << port);
	mutex_unlock(&dev->dev_mutex);

	/* port_stp_state_set() will be called afterwards to put the port in
	 * the appropriate state, so there is no need to do anything here.
	 */

	return 0;
}
EXPORT_SYMBOL_GPL(ksz_port_bridge_join);

void ksz_port_bridge_leave(struct dsa_switch *ds, int port,
			   struct net_device *br)
{
	struct ksz_device *dev = ds->priv;

	mutex_lock(&dev->dev_mutex);
	dev->br_member &= ~(1 << port);
	dev->member &= ~(1 << port);
	mutex_unlock(&dev->dev_mutex);

	/* port_stp_state_set() will be called afterwards to put the port in
	 * the forwarding state, so there is no need to do anything here.
	 */
}
EXPORT_SYMBOL_GPL(ksz_port_bridge_leave);

void ksz_port_fast_age(struct dsa_switch *ds, int port)
{
	struct ksz_device *dev = ds->priv;

	dev->dev_ops->flush_dyn_mac_table(dev, port);
}
EXPORT_SYMBOL_GPL(ksz_port_fast_age);

int ksz_port_vlan_prepare(struct dsa_switch *ds, int port,
			  const struct switchdev_obj_port_vlan *vlan)
{
	/* nothing needed */

	return 0;
}
EXPORT_SYMBOL_GPL(ksz_port_vlan_prepare);

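/* Walk the dynamic MAC table and report every entry that includes this port
 * to the caller's callback.
 */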
int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb,
		      void *data)
{
	struct ksz_device *dev = ds->priv;
	int ret = 0;
	u16 i = 0;
	u16 entries = 0;
	u8 timestamp = 0;
	u8 fid;
	u8 member;
	struct alu_struct alu;

	do {
		alu.is_static = false;
		ret = dev->dev_ops->r_dyn_mac_table(dev, i, alu.mac, &fid,
						    &member, &timestamp,
						    &entries);
		if (!ret && (member & BIT(port))) {
			ret = cb(alu.mac, alu.fid, alu.is_static, data);
			if (ret)
				break;
		}
		i++;
	} while (i < entries);
	if (i >= entries)
		ret = 0;

	return ret;
}
EXPORT_SYMBOL_GPL(ksz_port_fdb_dump);

int ksz_port_mdb_prepare(struct dsa_switch *ds, int port,
			 const struct switchdev_obj_port_mdb *mdb)
{
	/* nothing to do */
	return 0;
}
EXPORT_SYMBOL_GPL(ksz_port_mdb_prepare);

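/* Add a port to a multicast group in the static MAC table: reuse a matching
 * entry if one exists, otherwise take the first empty slot, then set the
 * port's bit in the forwarding mask.
 */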
void ksz_port_mdb_add(struct dsa_switch *ds, int port,
		      const struct switchdev_obj_port_mdb *mdb)
{
	struct ksz_device *dev = ds->priv;
	struct alu_struct alu;
	int index;
	int empty = 0;

	alu.port_forward = 0;
	for (index = 0; index < dev->num_statics; index++) {
		if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
			/* Found one already in static MAC table. */
			if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
			    alu.fid == mdb->vid)
				break;
		/* Remember the first empty entry. */
		} else if (!empty) {
			empty = index + 1;
		}
	}

	/* no available entry */
	if (index == dev->num_statics && !empty)
		return;

	/* add entry */
	if (index == dev->num_statics) {
		index = empty - 1;
		memset(&alu, 0, sizeof(alu));
		memcpy(alu.mac, mdb->addr, ETH_ALEN);
		alu.is_static = true;
	}
	alu.port_forward |= BIT(port);
	if (mdb->vid) {
		alu.is_use_fid = true;

		/* Need a way to map VID to FID. */
		alu.fid = mdb->vid;
	}
	dev->dev_ops->w_sta_mac_table(dev, index, &alu);
}
EXPORT_SYMBOL_GPL(ksz_port_mdb_add);

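/* Remove a port from a multicast group: clear the port's bit in the matching
 * static MAC table entry and invalidate the entry once no ports remain.
 */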
int ksz_port_mdb_del(struct dsa_switch *ds, int port,
		     const struct switchdev_obj_port_mdb *mdb)
{
	struct ksz_device *dev = ds->priv;
	struct alu_struct alu;
	int index;
	int ret = 0;

	for (index = 0; index < dev->num_statics; index++) {
		if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) {
			/* Found one already in static MAC table. */
			if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) &&
			    alu.fid == mdb->vid)
				break;
		}
	}

	/* no matching entry found */
	if (index == dev->num_statics)
		goto exit;

	/* clear port */
	alu.port_forward &= ~BIT(port);
	if (!alu.port_forward)
		alu.is_static = false;
	dev->dev_ops->w_sta_mac_table(dev, index, &alu);

exit:
	return ret;
}
EXPORT_SYMBOL_GPL(ksz_port_mdb_del);

int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
{
	struct ksz_device *dev = ds->priv;

	if (!dsa_is_user_port(ds, port))
		return 0;

	/* setup slave port */
	dev->dev_ops->port_setup(dev, port, false);
	if (dev->dev_ops->phy_setup)
		dev->dev_ops->phy_setup(dev, port, phy);

	/* port_stp_state_set() will be called afterwards to enable the port,
	 * so there is no need to do anything here.
	 */

	return 0;
}
EXPORT_SYMBOL_GPL(ksz_enable_port);

void ksz_disable_port(struct dsa_switch *ds, int port)
{
	struct ksz_device *dev = ds->priv;

	if (!dsa_is_user_port(ds, port))
		return;

	dev->on_ports &= ~(1 << port);
	dev->live_ports &= ~(1 << port);

	/* port_stp_state_set() will be called afterwards to disable the port,
	 * so there is no need to do anything here.
	 */
}
EXPORT_SYMBOL_GPL(ksz_disable_port);

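/* Allocate the DSA switch together with the common ksz_device that wraps it;
 * the bus driver's private data is kept in swdev->priv.
 */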
struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
{
	struct dsa_switch *ds;
	struct ksz_device *swdev;

	ds = devm_kzalloc(base, sizeof(*ds), GFP_KERNEL);
	if (!ds)
		return NULL;

	ds->dev = base;
	ds->num_ports = DSA_MAX_PORTS;

	swdev = devm_kzalloc(base, sizeof(*swdev), GFP_KERNEL);
	if (!swdev)
		return NULL;

	ds->priv = swdev;
	swdev->dev = base;

	swdev->ds = ds;
	swdev->priv = priv;

	return swdev;
}
EXPORT_SYMBOL(ksz_switch_alloc);

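/* Common registration path for all KSZ switches: optional hardware reset via
 * the reset GPIO, chip detection and initialization, parsing of the host
 * port interface mode and clock option from the device tree, and finally
 * registration with the DSA core.
 */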
int ksz_switch_register(struct ksz_device *dev,
			const struct ksz_dev_ops *ops)
{
	phy_interface_t interface;
	int ret;

	if (dev->pdata)
		dev->chip_id = dev->pdata->chip_id;

	dev->reset_gpio = devm_gpiod_get_optional(dev->dev, "reset",
						  GPIOD_OUT_LOW);
	if (IS_ERR(dev->reset_gpio))
		return PTR_ERR(dev->reset_gpio);

	if (dev->reset_gpio) {
		gpiod_set_value_cansleep(dev->reset_gpio, 1);
		mdelay(10);
		gpiod_set_value_cansleep(dev->reset_gpio, 0);
	}

	mutex_init(&dev->dev_mutex);
	mutex_init(&dev->regmap_mutex);
	mutex_init(&dev->alu_mutex);
	mutex_init(&dev->vlan_mutex);

	dev->dev_ops = ops;

	if (dev->dev_ops->detect(dev))
		return -EINVAL;

	ret = dev->dev_ops->init(dev);
	if (ret)
		return ret;

	/* The host port interface will be self-detected, or specifically set
	 * in the device tree.
	 */
	if (dev->dev->of_node) {
		ret = of_get_phy_mode(dev->dev->of_node, &interface);
		if (ret == 0)
			dev->interface = interface;
		dev->synclko_125 = of_property_read_bool(dev->dev->of_node,
							 "microchip,synclko-125");
	}

	ret = dsa_register_switch(dev->ds);
	if (ret) {
		dev->dev_ops->exit(dev);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(ksz_switch_register);

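/* Stop the periodic MIB reading, run the chip-specific exit hook, unregister
 * from the DSA core and hold the switch in reset.
 */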
void ksz_switch_remove(struct ksz_device *dev)
{
	/* timer started */
	if (dev->mib_read_timer.expires) {
		del_timer_sync(&dev->mib_read_timer);
		flush_work(&dev->mib_read);
	}

	dev->dev_ops->exit(dev);
	dsa_unregister_switch(dev->ds);

	if (dev->reset_gpio)
		gpiod_set_value_cansleep(dev->reset_gpio, 1);
}
EXPORT_SYMBOL(ksz_switch_remove);

MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>");
MODULE_DESCRIPTION("Microchip KSZ Series Switch DSA Driver");
MODULE_LICENSE("GPL");