// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include <linux/if_bridge.h>
#include <net/switchdev.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"

static struct workqueue_struct *sparx5_owq;

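/* FDB events arrive in atomic context, so they are copied into this work
 * item and completed later on an ordered workqueue, where taking the rtnl
 * lock and updating the MAC table may sleep.
 */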
struct sparx5_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;
	struct sparx5 *sparx5;
	unsigned long event;
};

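/* Reject bridge port flags the hardware cannot offload; only unicast,
 * multicast and broadcast flood control are supported.
 */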
static int sparx5_port_attr_pre_bridge_flags(struct sparx5_port *port,
					     struct switchdev_brport_flags flags)
{
	if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD))
		return -EINVAL;

	return 0;
}

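/* Apply bridge flood flags by adding or removing the port in the
 * corresponding flood PGID (Port Group ID) masks; BR_MCAST_FLOOD covers
 * the whole multicast range from PGID_MC_FLOOD to PGID_IPV6_MC_CTRL.
 */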
static void sparx5_port_attr_bridge_flags(struct sparx5_port *port,
					  struct switchdev_brport_flags flags)
{
	int pgid;

	if (flags.mask & BR_MCAST_FLOOD)
		for (pgid = PGID_MC_FLOOD; pgid <= PGID_IPV6_MC_CTRL; pgid++)
			sparx5_pgid_update_mask(port, pgid, !!(flags.val & BR_MCAST_FLOOD));
	if (flags.mask & BR_FLOOD)
		sparx5_pgid_update_mask(port, PGID_UC_FLOOD, !!(flags.val & BR_FLOOD));
	if (flags.mask & BR_BCAST_FLOOD)
		sparx5_pgid_update_mask(port, PGID_BCAST, !!(flags.val & BR_BCAST_FLOOD));
}

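/* Map the STP state onto the forward and learning port masks:
 * FORWARDING sets both, LEARNING sets only the learning mask, and every
 * other state clears both so the port blocks traffic.
 */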
static void sparx5_attr_stp_state_set(struct sparx5_port *port,
				      u8 state)
{
	struct sparx5 *sparx5 = port->sparx5;

	if (!test_bit(port->portno, sparx5->bridge_mask)) {
		netdev_err(port->ndev,
			   "Controlling non-bridged port %d?\n", port->portno);
		return;
	}

	switch (state) {
	case BR_STATE_FORWARDING:
		set_bit(port->portno, sparx5->bridge_fwd_mask);
		fallthrough;
	case BR_STATE_LEARNING:
		set_bit(port->portno, sparx5->bridge_lrn_mask);
		break;

	default:
		/* All other states treated as blocking */
		clear_bit(port->portno, sparx5->bridge_fwd_mask);
		clear_bit(port->portno, sparx5->bridge_lrn_mask);
		break;
	}

	/* apply the bridge_fwd_mask to all the ports */
	sparx5_update_fwd(sparx5);
}

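/* The bridge layer passes the ageing time in clock_t units; convert it
 * to the milliseconds expected by the switch configuration.
 */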
static void sparx5_port_attr_ageing_set(struct sparx5_port *port,
					unsigned long ageing_clock_t)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies);

	sparx5_set_ageing(port->sparx5, ageing_time);
}

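/* Dispatch switchdev port attribute changes (bridge flags, STP state,
 * ageing time, VLAN filtering) to the helpers above. VLAN filtering is
 * toggled from user space with e.g.
 * "ip link set br0 type bridge vlan_filtering 1".
 */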
static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
				const struct switchdev_attr *attr,
				struct netlink_ext_ack *extack)
{
	struct sparx5_port *port = netdev_priv(dev);

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		return sparx5_port_attr_pre_bridge_flags(port,
							 attr->u.brport_flags);
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		sparx5_port_attr_bridge_flags(port, attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		sparx5_attr_stp_state_set(port, attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		sparx5_port_attr_ageing_set(port, attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		/* Use PVID 1 when default_pvid is 0, to avoid
		 * collision with non-bridged ports.
		 */
		if (port->pvid == 0)
			port->pvid = 1;
		port->vlan_aware = attr->u.vlan_filtering;
		sparx5_vlan_port_apply(port->sparx5, port);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

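/* Called when a port is enslaved under a bridge. Only one offloaded
 * bridge is supported: the first bridged port records the bridge device,
 * and joining any other bridge fails with -ENODEV.
 */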
static int sparx5_port_bridge_join(struct sparx5_port *port,
				   struct net_device *bridge,
				   struct netlink_ext_ack *extack)
{
	struct sparx5 *sparx5 = port->sparx5;
	struct net_device *ndev = port->ndev;
	int err;

	if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
		/* First bridged port */
		sparx5->hw_bridge_dev = bridge;
	else if (sparx5->hw_bridge_dev != bridge)
		/* Adding the port to a second bridge is not supported */
		return -ENODEV;

	set_bit(port->portno, sparx5->bridge_mask);

	err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL,
					    false, extack);
	if (err)
		goto err_switchdev_offload;

	/* Remove standalone port entry */
	sparx5_mact_forget(sparx5, ndev->dev_addr, 0);

	/* The port enters bridge mode, therefore multicast frames no longer
	 * need to be copied to the CPU unless the bridge requests them.
	 */
	__dev_mc_unsync(ndev, sparx5_mc_unsync);

	return 0;

err_switchdev_offload:
	clear_bit(port->portno, sparx5->bridge_mask);
	return err;
}

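/* Undo sparx5_port_bridge_join(): drop the switchdev offload, return the
 * port to VLAN-unaware standalone operation, and restore CPU forwarding
 * of its unicast address and multicast list.
 */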
static void sparx5_port_bridge_leave(struct sparx5_port *port,
				     struct net_device *bridge)
{
	struct sparx5 *sparx5 = port->sparx5;

	switchdev_bridge_port_unoffload(port->ndev, NULL, NULL, NULL);

	clear_bit(port->portno, sparx5->bridge_mask);
	if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
		sparx5->hw_bridge_dev = NULL;

	/* Clear bridge vlan settings before updating the port settings */
	port->vlan_aware = 0;
	port->pvid = NULL_VID;
	port->vid = NULL_VID;

	/* Forward frames to CPU */
	sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, 0);

	/* Port enters host mode, therefore restore the mc list */
	__dev_mc_sync(port->ndev, sparx5_mc_sync, sparx5_mc_unsync);
}

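/* Handle NETDEV_CHANGEUPPER for a bridge upper, i.e. the port being
 * linked or unlinked, e.g. via "ip link set dev <port> master br0".
 * The VLAN configuration is re-applied in both directions since bridge
 * membership changes the port's VLAN awareness.
 */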
static int sparx5_port_changeupper(struct net_device *dev,
				   struct netdev_notifier_changeupper_info *info)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct netlink_ext_ack *extack;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking)
			err = sparx5_port_bridge_join(port, info->upper_dev,
						      extack);
		else
			sparx5_port_bridge_leave(port, info->upper_dev);

		sparx5_vlan_port_apply(port->sparx5, port);
	}

	return err;
}

static int sparx5_port_add_addr(struct net_device *dev, bool up)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *sparx5 = port->sparx5;
	u16 vid = port->pvid;

	if (up)
		sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, vid);
	else
		sparx5_mact_forget(sparx5, port->ndev->dev_addr, vid);

	return 0;
}

static int sparx5_netdevice_port_event(struct net_device *dev,
				       struct notifier_block *nb,
				       unsigned long event, void *ptr)
{
	int err = 0;

	if (!sparx5_netdevice_check(dev))
		return 0;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		err = sparx5_port_changeupper(dev, ptr);
		break;
	case NETDEV_PRE_UP:
		err = sparx5_port_add_addr(dev, true);
		break;
	case NETDEV_DOWN:
		err = sparx5_port_add_addr(dev, false);
		break;
	}

	return err;
}

static int sparx5_netdevice_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int ret;

	ret = sparx5_netdevice_port_event(dev, nb, event, ptr);

	return notifier_from_errno(ret);
}

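/* Deferred FDB worker: addresses reported on a foreign interface are
 * treated as host addresses and learned towards the CPU port, while
 * addresses on a local switch port are learned towards that port.
 */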
static void sparx5_switchdev_bridge_fdb_event_work(struct work_struct *work)
{
	struct sparx5_switchdev_event_work *switchdev_work =
		container_of(work, struct sparx5_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct sparx5_port *port;
	struct sparx5 *sparx5;
	bool host_addr;
	u16 vid;

	rtnl_lock();
	sparx5 = switchdev_work->sparx5;
	if (!sparx5_netdevice_check(dev)) {
		host_addr = true;
	} else {
		host_addr = false;
		port = netdev_priv(dev);
	}

	fdb_info = &switchdev_work->fdb_info;

	/* Use PVID 1 when default_pvid is 0, to avoid
	 * collision with non-bridged ports.
	 */
	if (fdb_info->vid == 0)
		vid = 1;
	else
		vid = fdb_info->vid;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		if (host_addr)
			sparx5_add_mact_entry(sparx5, dev, PGID_CPU,
					      fdb_info->addr, vid);
		else
			sparx5_add_mact_entry(sparx5, port->ndev, port->portno,
					      fdb_info->addr, vid);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		sparx5_del_mact_entry(sparx5, fdb_info->addr, vid);
		break;
	}

	rtnl_unlock();
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}

static void sparx5_schedule_work(struct work_struct *work)
{
	queue_work(sparx5_owq, work);
}

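/* Atomic switchdev notifier. Attribute changes are handled inline; FDB
 * add/del events are copied, including the MAC address which does not
 * outlive the notifier call, and deferred to the ordered workqueue with
 * a reference held on the netdev until the worker has run.
 */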
static int sparx5_switchdev_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct sparx5_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	struct sparx5 *spx5;
	int err;

	spx5 = container_of(nb, struct sparx5, switchdev_nb);

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     sparx5_netdevice_check,
						     sparx5_port_attr_set);
		return notifier_from_errno(err);
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
		if (!switchdev_work)
			return NOTIFY_BAD;

		switchdev_work->dev = dev;
		switchdev_work->event = event;
		switchdev_work->sparx5 = spx5;

		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		INIT_WORK(&switchdev_work->work,
			  sparx5_switchdev_bridge_fdb_event_work);
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;

		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		dev_hold(dev);

		sparx5_schedule_work(&switchdev_work->work);
		break;
	}

	return NOTIFY_DONE;
err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}

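/* Add a VLAN to a switch port, e.g. via "bridge vlan add dev <port>
 * vid <vid>". When the object targets the bridge device itself, the
 * broadcast address is instead learned towards the CPU for that VLAN.
 */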
static int sparx5_handle_port_vlan_add(struct net_device *dev,
				       struct notifier_block *nb,
				       const struct switchdev_obj_port_vlan *v)
{
	struct sparx5_port *port = netdev_priv(dev);

	if (netif_is_bridge_master(dev)) {
		struct sparx5 *sparx5 =
			container_of(nb, struct sparx5,
				     switchdev_blocking_nb);

		/* Flood broadcast to CPU */
		sparx5_mact_learn(sparx5, PGID_BCAST, dev->broadcast,
				  v->vid);
		return 0;
	}

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	return sparx5_vlan_vid_add(port, v->vid,
				  v->flags & BRIDGE_VLAN_INFO_PVID,
				  v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
}

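/* Add an MDB entry, e.g. via "bridge mdb add dev br0 port <port>
 * grp <group>". If a MAC table entry already exists for the group, the
 * port joins its multicast PGID; otherwise a new multicast PGID is
 * allocated and a fresh entry is learned.
 */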
static int sparx5_handle_port_mdb_add(struct net_device *dev,
				      struct notifier_block *nb,
				      const struct switchdev_obj_port_mdb *v)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *spx5 = port->sparx5;
	u16 pgid_idx, vid;
	u32 mact_entry;
	int res, err;

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	if (netif_is_bridge_master(v->obj.orig_dev)) {
		sparx5_mact_learn(spx5, PGID_CPU, v->addr, v->vid);
		return 0;
	}

	/* When the bridge is VLAN unaware, the VLAN value is not parsed and
	 * we receive vid 0. Fall back to bridge vid 1.
	 */
	if (!br_vlan_enabled(spx5->hw_bridge_dev))
		vid = 1;
	else
		vid = v->vid;

	res = sparx5_mact_find(spx5, v->addr, vid, &mact_entry);

	if (res == 0) {
		pgid_idx = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(mact_entry);

		/* MC_IDX starts after the port masks in the PGID table */
		pgid_idx += SPX5_PORTS;
		sparx5_pgid_update_mask(port, pgid_idx, true);
	} else {
		err = sparx5_pgid_alloc_mcast(spx5, &pgid_idx);
		if (err) {
			netdev_warn(dev, "multicast pgid table full\n");
			return err;
		}
		sparx5_pgid_update_mask(port, pgid_idx, true);
		err = sparx5_mact_learn(spx5, pgid_idx, v->addr, vid);
		if (err) {
			netdev_warn(dev, "could not learn mac address %pM\n", v->addr);
			sparx5_pgid_update_mask(port, pgid_idx, false);
			return err;
		}
	}

	return 0;
}

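/* Drop a complete MDB entry: forget the MAC table entry and release its
 * multicast PGID. Called once no port references the group anymore.
 */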
static int sparx5_mdb_del_entry(struct net_device *dev,
				struct sparx5 *spx5,
				const unsigned char mac[ETH_ALEN],
				const u16 vid,
				u16 pgid_idx)
{
	int err;

	err = sparx5_mact_forget(spx5, mac, vid);
	if (err) {
		netdev_warn(dev, "could not forget mac address %pM\n", mac);
		return err;
	}
	err = sparx5_pgid_free(spx5, pgid_idx);
	if (err) {
		netdev_err(dev, "attempted to free already freed pgid\n");
		return err;
	}
	return 0;
}

static int sparx5_handle_port_mdb_del(struct net_device *dev,
				      struct notifier_block *nb,
				      const struct switchdev_obj_port_mdb *v)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *spx5 = port->sparx5;
	u16 pgid_idx, vid;
	u32 mact_entry, pgid_entry[3];
	int res, err;

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	if (netif_is_bridge_master(v->obj.orig_dev)) {
		sparx5_mact_forget(spx5, v->addr, v->vid);
		return 0;
	}

	if (!br_vlan_enabled(spx5->hw_bridge_dev))
		vid = 1;
	else
		vid = v->vid;

	res = sparx5_mact_find(spx5, v->addr, vid, &mact_entry);

	if (res == 0) {
		pgid_idx = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(mact_entry);

		/* MC_IDX starts after the port masks in the PGID table */
		pgid_idx += SPX5_PORTS;
		sparx5_pgid_update_mask(port, pgid_idx, false);

		sparx5_pgid_read_mask(spx5, pgid_idx, pgid_entry);
		if (bitmap_empty((unsigned long *)pgid_entry, SPX5_PORTS)) {
			/* No ports are in MC group. Remove entry */
			err = sparx5_mdb_del_entry(dev, spx5, v->addr, vid, pgid_idx);
			if (err)
				return err;
		}
	}

	return 0;
}

static int sparx5_handle_port_obj_add(struct net_device *dev,
				      struct notifier_block *nb,
				      struct switchdev_notifier_port_obj_info *info)
{
	const struct switchdev_obj *obj = info->obj;
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = sparx5_handle_port_vlan_add(dev, nb,
						  SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = sparx5_handle_port_mdb_add(dev, nb,
						 SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	info->handled = true;
	return err;
}

static int sparx5_handle_port_vlan_del(struct net_device *dev,
				       struct notifier_block *nb,
				       u16 vid)
{
	struct sparx5_port *port = netdev_priv(dev);

	/* Master bridge? */
	if (netif_is_bridge_master(dev)) {
		struct sparx5 *sparx5 =
			container_of(nb, struct sparx5,
				     switchdev_blocking_nb);

		sparx5_mact_forget(sparx5, dev->broadcast, vid);
		return 0;
	}

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	return sparx5_vlan_vid_del(port, vid);
}

static int sparx5_handle_port_obj_del(struct net_device *dev,
				      struct notifier_block *nb,
				      struct switchdev_notifier_port_obj_info *info)
{
	const struct switchdev_obj *obj = info->obj;
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = sparx5_handle_port_vlan_del(dev, nb,
						  SWITCHDEV_OBJ_PORT_VLAN(obj)->vid);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = sparx5_handle_port_mdb_del(dev, nb,
						 SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	info->handled = true;
	return err;
}

static int sparx5_switchdev_blocking_event(struct notifier_block *nb,
					   unsigned long event,
					   void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = sparx5_handle_port_obj_add(dev, nb, ptr);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = sparx5_handle_port_obj_del(dev, nb, ptr);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     sparx5_netdevice_check,
						     sparx5_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

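/* Register the notifier blocks the driver depends on: netdevice events
 * for bridge join/leave, atomic switchdev events for FDB and attribute
 * updates, blocking switchdev events for VLAN/MDB objects, and the
 * ordered workqueue used to defer FDB work. Unwind in reverse on error.
 */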
int sparx5_register_notifier_blocks(struct sparx5 *s5)
{
	int err;

	s5->netdevice_nb.notifier_call = sparx5_netdevice_event;
	err = register_netdevice_notifier(&s5->netdevice_nb);
	if (err)
		return err;

	s5->switchdev_nb.notifier_call = sparx5_switchdev_event;
	err = register_switchdev_notifier(&s5->switchdev_nb);
	if (err)
		goto err_switchdev_nb;

	s5->switchdev_blocking_nb.notifier_call = sparx5_switchdev_blocking_event;
	err = register_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
	if (err)
		goto err_switchdev_blocking_nb;

	sparx5_owq = alloc_ordered_workqueue("sparx5_order", 0);
	if (!sparx5_owq) {
		err = -ENOMEM;
		goto err_owq;
	}

	return 0;

err_owq:
	unregister_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
err_switchdev_blocking_nb:
	unregister_switchdev_notifier(&s5->switchdev_nb);
err_switchdev_nb:
	unregister_netdevice_notifier(&s5->netdevice_nb);

	return err;
}

void sparx5_unregister_notifier_blocks(struct sparx5 *s5)
{
	destroy_workqueue(sparx5_owq);

	unregister_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
	unregister_switchdev_notifier(&s5->switchdev_nb);
	unregister_netdevice_notifier(&s5->netdevice_nb);
}