/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2017 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/if_vlan.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "lio_vf_rep.h"

static int lio_vf_rep_open(struct net_device *ndev);
static int lio_vf_rep_stop(struct net_device *ndev);
static netdev_tx_t lio_vf_rep_pkt_xmit(struct sk_buff *skb,
				       struct net_device *ndev);
static void lio_vf_rep_tx_timeout(struct net_device *netdev, unsigned int txqueue);
static int lio_vf_rep_phys_port_name(struct net_device *dev,
				     char *buf, size_t len);
static void lio_vf_rep_get_stats64(struct net_device *dev,
				   struct rtnl_link_stats64 *stats64);
static int lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu);
static int lio_vf_get_port_parent_id(struct net_device *dev,
				     struct netdev_phys_item_id *ppid);

static const struct net_device_ops lio_vf_rep_ndev_ops = {
	.ndo_open = lio_vf_rep_open,
	.ndo_stop = lio_vf_rep_stop,
	.ndo_start_xmit = lio_vf_rep_pkt_xmit,
	.ndo_tx_timeout = lio_vf_rep_tx_timeout,
	.ndo_get_phys_port_name = lio_vf_rep_phys_port_name,
	.ndo_get_stats64 = lio_vf_rep_get_stats64,
	.ndo_change_mtu = lio_vf_rep_change_mtu,
	.ndo_get_port_parent_id = lio_vf_get_port_parent_id,
};

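/* Send a VF-representor control request to the firmware on instruction
 * queue 0 and wait for completion.  The request is copied into the soft
 * command's DMA-able input buffer; on success, any response payload
 * following the lio_vf_rep_resp header is copied back into @resp.
 */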
static int
lio_vf_rep_send_soft_command(struct octeon_device *oct,
			     void *req, int req_size,
			     void *resp, int resp_size)
{
	int tot_resp_size = sizeof(struct lio_vf_rep_resp) + resp_size;
	struct octeon_soft_command *sc = NULL;
	struct lio_vf_rep_resp *rep_resp;
	void *sc_req;
	int err;

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, req_size,
					  tot_resp_size, 0);
	if (!sc)
		return -ENOMEM;

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	sc_req = (struct lio_vf_rep_req *)sc->virtdptr;
	memcpy(sc_req, req, req_size);

	rep_resp = (struct lio_vf_rep_resp *)sc->virtrptr;
	memset(rep_resp, 0, tot_resp_size);
	WRITE_ONCE(rep_resp->status, 1);

	sc->iq_no = 0;
	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_VF_REP_CMD, 0, 0, 0);

	err = octeon_send_soft_command(oct, sc);
	if (err == IQ_SEND_FAILED)
		goto free_buff;

	err = wait_for_sc_completion_timeout(oct, sc, 0);
	if (err)
		return err;

	err = READ_ONCE(rep_resp->status) ? -EBUSY : 0;
	if (err)
		dev_err(&oct->pci_dev->dev, "VF rep send config failed\n");
	else if (resp)
		memcpy(resp, (rep_resp + 1), resp_size);

	WRITE_ONCE(sc->caller_is_done, true);
	return err;

free_buff:
	octeon_free_soft_command(oct, sc);

	return err;
}

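/* ndo_open: ask the firmware to bring this VF representor up, then mark
 * the interface running and enable carrier and the transmit queue.
 */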
static int
lio_vf_rep_open(struct net_device *ndev)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
	struct lio_vf_rep_req rep_cfg;
	struct octeon_device *oct;
	int ret;

	oct = vf_rep->oct;

	memset(&rep_cfg, 0, sizeof(rep_cfg));
	rep_cfg.req_type = LIO_VF_REP_REQ_STATE;
	rep_cfg.ifidx = vf_rep->ifidx;
	rep_cfg.rep_state.state = LIO_VF_REP_STATE_UP;

	ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
					   sizeof(rep_cfg), NULL, 0);

	if (ret) {
		dev_err(&oct->pci_dev->dev,
			"VF_REP open failed with err %d\n", ret);
		return -EIO;
	}

	atomic_set(&vf_rep->ifstate, (atomic_read(&vf_rep->ifstate) |
				      LIO_IFSTATE_RUNNING));

	netif_carrier_on(ndev);
	netif_start_queue(ndev);

	return 0;
}

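/* ndo_stop: the mirror of lio_vf_rep_open; ask the firmware to bring
 * the representor down, then clear the running state and quiesce the
 * queue and carrier.
 */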
static int
lio_vf_rep_stop(struct net_device *ndev)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
	struct lio_vf_rep_req rep_cfg;
	struct octeon_device *oct;
	int ret;

	oct = vf_rep->oct;

	memset(&rep_cfg, 0, sizeof(rep_cfg));
	rep_cfg.req_type = LIO_VF_REP_REQ_STATE;
	rep_cfg.ifidx = vf_rep->ifidx;
	rep_cfg.rep_state.state = LIO_VF_REP_STATE_DOWN;

	ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
					   sizeof(rep_cfg), NULL, 0);

	if (ret) {
		dev_err(&oct->pci_dev->dev,
			"VF_REP dev stop failed with err %d\n", ret);
		return -EIO;
	}

	atomic_set(&vf_rep->ifstate, (atomic_read(&vf_rep->ifstate) &
				      ~LIO_IFSTATE_RUNNING));

	netif_tx_disable(ndev);
	netif_carrier_off(ndev);

	return 0;
}

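/* ndo_tx_timeout: the transmit path has stalled; record a fresh
 * trans_start timestamp and wake the queue so transmission can retry.
 */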
static void
lio_vf_rep_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	netif_trans_update(ndev);

	netif_wake_queue(ndev);
}

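/* ndo_get_stats64: report the cached firmware stats from the switch
 * port's point of view.
 */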
static void
lio_vf_rep_get_stats64(struct net_device *dev,
		       struct rtnl_link_stats64 *stats64)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);

	/* Swap tx and rx stats as VF rep is a switch port */
	stats64->tx_packets = vf_rep->stats.rx_packets;
	stats64->tx_bytes   = vf_rep->stats.rx_bytes;
	stats64->tx_dropped = vf_rep->stats.rx_dropped;

	stats64->rx_packets = vf_rep->stats.tx_packets;
	stats64->rx_bytes   = vf_rep->stats.tx_bytes;
	stats64->rx_dropped = vf_rep->stats.tx_dropped;
}

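/* ndo_change_mtu: request the new MTU from the firmware and update the
 * netdev only if the firmware accepts it.
 */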
static int
lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
	struct lio_vf_rep_req rep_cfg;
	struct octeon_device *oct;
	int ret;

	oct = vf_rep->oct;

	memset(&rep_cfg, 0, sizeof(rep_cfg));
	rep_cfg.req_type = LIO_VF_REP_REQ_MTU;
	rep_cfg.ifidx = vf_rep->ifidx;
	rep_cfg.rep_mtu.mtu = cpu_to_be32(new_mtu);

	ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
					   sizeof(rep_cfg), NULL, 0);
	if (ret) {
		dev_err(&oct->pci_dev->dev,
			"Change MTU failed with err %d\n", ret);
		return -EIO;
	}

	WRITE_ONCE(ndev->mtu, new_mtu);

	return 0;
}

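/* ndo_get_phys_port_name: report the representor name as "pf<N>vf<M>",
 * derived from the PF number and the VF's index within that PF's block
 * of interface indices.
 */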
static int
lio_vf_rep_phys_port_name(struct net_device *dev,
			  char *buf, size_t len)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);
	struct octeon_device *oct = vf_rep->oct;
	int ret;

	ret = snprintf(buf, len, "pf%dvf%d", oct->pf_num,
		       vf_rep->ifidx - oct->pf_num * 64 - 1);
	if (ret >= len)
		return -EOPNOTSUPP;

	return 0;
}

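/* Map a firmware interface index to its representor netdev.  Each PF
 * owns a block of max_vfs (64) consecutive indices; index 0 of each
 * block belongs to the PF itself, so VF indices start at 1.
 */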
static struct net_device *
lio_vf_rep_get_ndev(struct octeon_device *oct, int ifidx)
{
	int vf_id, max_vfs = CN23XX_MAX_VFS_PER_PF + 1;
	int vfid_mask = max_vfs - 1;

	if (ifidx <= oct->pf_num * max_vfs ||
	    ifidx >= oct->pf_num * max_vfs + max_vfs)
		return NULL;

	/* ifidx 1-63 for PF0 VFs
	 * ifidx 65-127 for PF1 VFs
	 */
	vf_id = (ifidx & vfid_mask) - 1;

	return oct->vf_rep_list.ndev[vf_id];
}

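/* Copy a received packet from its DMA page buffer into the skb.
 * Packets larger than MIN_SKB_SIZE get that many bytes copied into the
 * linear area with the remainder attached as a page fragment; smaller
 * packets are copied whole and the page is released.
 */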
static void
lio_vf_rep_copy_packet(struct octeon_device *oct,
		       struct sk_buff *skb,
		       int len)
{
	if (likely(len > MIN_SKB_SIZE)) {
		struct octeon_skb_page_info *pg_info;
		unsigned char *va;

		pg_info = ((struct octeon_skb_page_info *)(skb->cb));
		if (pg_info->page) {
			va = page_address(pg_info->page) +
				pg_info->page_offset;
			memcpy(skb->data, va, MIN_SKB_SIZE);
			skb_put(skb, MIN_SKB_SIZE);
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					pg_info->page,
					pg_info->page_offset + MIN_SKB_SIZE,
					len - MIN_SKB_SIZE,
					LIO_RXBUFFER_SZ);
		}
	} else {
		struct octeon_skb_page_info *pg_info =
			((struct octeon_skb_page_info *)(skb->cb));

		skb_copy_to_linear_data(skb, page_address(pg_info->page) +
					pg_info->page_offset, len);
		skb_put(skb, len);
		put_page(pg_info->page);
	}
}

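/* Dispatch handler for OPCODE_NIC_VF_REP_PKT: deliver a packet that
 * arrived from a VF to its representor netdev via netif_rx().  Packets
 * are dropped if the representor is not running or the packet spans
 * more than one receive buffer.
 */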
static int
lio_vf_rep_pkt_recv(struct octeon_recv_info *recv_info, void *buf)
{
	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
	struct lio_vf_rep_desc *vf_rep;
	struct net_device *vf_ndev;
	struct octeon_device *oct;
	union octeon_rh *rh;
	struct sk_buff *skb;
	int i, ifidx;

	oct = lio_get_device(recv_pkt->octeon_id);
	if (!oct)
		goto free_buffers;

	skb = recv_pkt->buffer_ptr[0];
	rh = &recv_pkt->rh;
	ifidx = rh->r.ossp;

	vf_ndev = lio_vf_rep_get_ndev(oct, ifidx);
	if (!vf_ndev)
		goto free_buffers;

	vf_rep = netdev_priv(vf_ndev);
	if (!(atomic_read(&vf_rep->ifstate) & LIO_IFSTATE_RUNNING) ||
	    recv_pkt->buffer_count > 1)
		goto free_buffers;

	skb->dev = vf_ndev;

	/* Multiple buffers are not used for vf_rep packets.
	 * So just buffer_size[0] is valid.
	 */
	lio_vf_rep_copy_packet(oct, skb, recv_pkt->buffer_size[0]);

	skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
	skb->protocol = eth_type_trans(skb, skb->dev);
	skb->ip_summed = CHECKSUM_NONE;

	netif_rx(skb);

	octeon_free_recv_info(recv_info);

	return 0;

free_buffers:
	for (i = 0; i < recv_pkt->buffer_count; i++)
		recv_buffer_free(recv_pkt->buffer_ptr[i]);

	octeon_free_recv_info(recv_info);

	return 0;
}

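/* Completion callback for transmitted representor packets: unmap the
 * packet's DMA buffer, free the skb and soft command, and wake the
 * netdev queue once the instruction queue has room again.
 */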
static void
lio_vf_rep_packet_sent_callback(struct octeon_device *oct,
				u32 status, void *buf)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
	struct sk_buff *skb = sc->ctxptr;
	struct net_device *ndev = skb->dev;
	u32 iq_no;

	dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr,
			 sc->datasize, DMA_TO_DEVICE);
	dev_kfree_skb_any(skb);
	iq_no = sc->iq_no;
	octeon_free_soft_command(oct, sc);

	if (octnet_iq_is_full(oct, iq_no))
		return;

	if (netif_queue_stopped(ndev))
		netif_wake_queue(ndev);
}

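/* ndo_start_xmit: forward a packet from the representor to its VF by
 * wrapping it in a soft command and queueing it on the parent
 * interface's transmit queue.  Only linear (non-fragmented) skbs are
 * supported; the skb is freed in the completion callback.
 */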
static netdev_tx_t
lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
	struct net_device *parent_ndev = vf_rep->parent_ndev;
	struct octeon_device *oct = vf_rep->oct;
	struct octeon_instr_pki_ih3 *pki_ih3;
	struct octeon_soft_command *sc;
	struct lio *parent_lio;
	int status;

	parent_lio = GET_LIO(parent_ndev);

	if (!(atomic_read(&vf_rep->ifstate) & LIO_IFSTATE_RUNNING) ||
	    skb->len <= 0)
		goto xmit_failed;

	if (octnet_iq_is_full(vf_rep->oct, parent_lio->txq)) {
		dev_err(&oct->pci_dev->dev, "VF rep: Device IQ full\n");
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, 0, 16, 0);
	if (!sc) {
		dev_err(&oct->pci_dev->dev, "VF rep: Soft command alloc failed\n");
		goto xmit_failed;
	}

	/* Multiple buffers are not used for vf_rep packets. */
	if (skb_shinfo(skb)->nr_frags != 0) {
		dev_err(&oct->pci_dev->dev, "VF rep: nr_frags != 0. Dropping packet\n");
		octeon_free_soft_command(oct, sc);
		goto xmit_failed;
	}

	sc->dmadptr = dma_map_single(&oct->pci_dev->dev,
				     skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&oct->pci_dev->dev, sc->dmadptr)) {
		dev_err(&oct->pci_dev->dev, "VF rep: DMA mapping failed\n");
		octeon_free_soft_command(oct, sc);
		goto xmit_failed;
	}

	sc->virtdptr = skb->data;
	sc->datasize = skb->len;
	sc->ctxptr = skb;
	sc->iq_no = parent_lio->txq;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_VF_REP_PKT,
				    vf_rep->ifidx, 0, 0);
	pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;
	pki_ih3->tagtype = ORDERED_TAG;

	sc->callback = lio_vf_rep_packet_sent_callback;
	sc->callback_arg = sc;

	status = octeon_send_soft_command(oct, sc);
	if (status == IQ_SEND_FAILED) {
		dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr,
				 sc->datasize, DMA_TO_DEVICE);
		octeon_free_soft_command(oct, sc);
		goto xmit_failed;
	}

	if (status == IQ_SEND_STOP)
		netif_stop_queue(ndev);

	netif_trans_update(ndev);

	return NETDEV_TX_OK;

xmit_failed:
	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}

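/* ndo_get_port_parent_id: all representors of a PF share the parent
 * interface's MAC address as their switch ID.
 */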
static int lio_vf_get_port_parent_id(struct net_device *dev,
				     struct netdev_phys_item_id *ppid)
{
	struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);
	struct net_device *parent_ndev = vf_rep->parent_ndev;
	struct lio *lio = GET_LIO(parent_ndev);

	ppid->id_len = ETH_ALEN;
	ether_addr_copy(ppid->id, (void *)&lio->linfo.hw_addr + 2);

	return 0;
}

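/* Delayed work: poll the firmware for this representor's statistics,
 * byte-swap and cache them for ndo_get_stats64, then re-arm the poll.
 */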
static void
lio_vf_rep_fetch_stats(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio_vf_rep_desc *vf_rep = wk->ctxptr;
	struct lio_vf_rep_stats stats;
	struct lio_vf_rep_req rep_cfg;
	struct octeon_device *oct;
	int ret;

	oct = vf_rep->oct;

	memset(&rep_cfg, 0, sizeof(rep_cfg));
	rep_cfg.req_type = LIO_VF_REP_REQ_STATS;
	rep_cfg.ifidx = vf_rep->ifidx;

	ret = lio_vf_rep_send_soft_command(oct, &rep_cfg, sizeof(rep_cfg),
					   &stats, sizeof(stats));

	if (!ret) {
		octeon_swap_8B_data((u64 *)&stats, (sizeof(stats) >> 3));
		memcpy(&vf_rep->stats, &stats, sizeof(stats));
	}

	schedule_delayed_work(&vf_rep->stats_wk.work,
			      msecs_to_jiffies(LIO_VF_REP_STATS_POLL_TIME_MS));
}

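/* Create and register one representor netdev per allocated VF, start
 * its stats poller, and register the dispatch handler for packets
 * arriving from the VFs.  Only active when the eswitch is in switchdev
 * mode and SR-IOV is enabled.
 */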
int
lio_vf_rep_create(struct octeon_device *oct)
{
	struct lio_vf_rep_desc *vf_rep;
	struct net_device *ndev;
	int i, num_vfs;

	if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return 0;

	if (!oct->sriov_info.sriov_enabled)
		return 0;

	num_vfs = oct->sriov_info.num_vfs_alloced;

	oct->vf_rep_list.num_vfs = 0;
	for (i = 0; i < num_vfs; i++) {
		ndev = alloc_etherdev(sizeof(struct lio_vf_rep_desc));

		if (!ndev) {
			dev_err(&oct->pci_dev->dev,
				"VF rep device %d creation failed\n", i);
			goto cleanup;
		}

		ndev->min_mtu = LIO_MIN_MTU_SIZE;
		ndev->max_mtu = LIO_MAX_MTU_SIZE;
		ndev->netdev_ops = &lio_vf_rep_ndev_ops;

		vf_rep = netdev_priv(ndev);
		memset(vf_rep, 0, sizeof(*vf_rep));

		vf_rep->ndev = ndev;
		vf_rep->oct = oct;
		vf_rep->parent_ndev = oct->props[0].netdev;
		vf_rep->ifidx = (oct->pf_num * 64) + i + 1;

		eth_hw_addr_random(ndev);

		if (register_netdev(ndev)) {
			dev_err(&oct->pci_dev->dev, "VF rep netdev registration failed\n");

			free_netdev(ndev);
			goto cleanup;
		}

		netif_carrier_off(ndev);

		INIT_DELAYED_WORK(&vf_rep->stats_wk.work,
				  lio_vf_rep_fetch_stats);
		vf_rep->stats_wk.ctxptr = (void *)vf_rep;
		schedule_delayed_work(&vf_rep->stats_wk.work,
				      msecs_to_jiffies
				      (LIO_VF_REP_STATS_POLL_TIME_MS));
		oct->vf_rep_list.num_vfs++;
		oct->vf_rep_list.ndev[i] = ndev;
	}

	if (octeon_register_dispatch_fn(oct, OPCODE_NIC,
					OPCODE_NIC_VF_REP_PKT,
					lio_vf_rep_pkt_recv, oct)) {
		dev_err(&oct->pci_dev->dev, "VF rep Dispatch func registration failed\n");

		goto cleanup;
	}

	return 0;

cleanup:
	for (i = 0; i < oct->vf_rep_list.num_vfs; i++) {
		ndev = oct->vf_rep_list.ndev[i];
		oct->vf_rep_list.ndev[i] = NULL;
		if (ndev) {
			vf_rep = netdev_priv(ndev);
			cancel_delayed_work_sync
				(&vf_rep->stats_wk.work);
			unregister_netdev(ndev);
			free_netdev(ndev);
		}
	}

	oct->vf_rep_list.num_vfs = 0;

	return -1;
}

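/* Tear down all representor netdevs: stop the stats pollers, quiesce
 * the queues, and unregister and free each device.
 */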
void
lio_vf_rep_destroy(struct octeon_device *oct)
{
	struct lio_vf_rep_desc *vf_rep;
	struct net_device *ndev;
	int i;

	if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return;

	if (!oct->sriov_info.sriov_enabled)
		return;

	for (i = 0; i < oct->vf_rep_list.num_vfs; i++) {
		ndev = oct->vf_rep_list.ndev[i];
		oct->vf_rep_list.ndev[i] = NULL;
		if (ndev) {
			vf_rep = netdev_priv(ndev);
			cancel_delayed_work_sync
				(&vf_rep->stats_wk.work);
			netif_tx_disable(ndev);
			netif_carrier_off(ndev);

			unregister_netdev(ndev);
			free_netdev(ndev);
		}
	}

	oct->vf_rep_list.num_vfs = 0;
}

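/* Netdevice notifier: when a representor netdev is registered or
 * renamed, push its name to the firmware (LIO_VF_REP_REQ_DEVNAME) so
 * the device-side state stays in sync.
 */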
static int
lio_vf_rep_netdev_event(struct notifier_block *nb,
			unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct lio_vf_rep_desc *vf_rep;
	struct lio_vf_rep_req rep_cfg;
	struct octeon_device *oct;
	int ret;

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_CHANGENAME:
		break;

	default:
		return NOTIFY_DONE;
	}

	if (ndev->netdev_ops != &lio_vf_rep_ndev_ops)
		return NOTIFY_DONE;

	vf_rep = netdev_priv(ndev);
	oct = vf_rep->oct;

	if (strlen(ndev->name) > LIO_IF_NAME_SIZE) {
		dev_err(&oct->pci_dev->dev,
			"Device name change sync failed as the size is > %d\n",
			LIO_IF_NAME_SIZE);
		return NOTIFY_DONE;
	}

	memset(&rep_cfg, 0, sizeof(rep_cfg));
	rep_cfg.req_type = LIO_VF_REP_REQ_DEVNAME;
	rep_cfg.ifidx = vf_rep->ifidx;
	strscpy(rep_cfg.rep_name.name, ndev->name,
		sizeof(rep_cfg.rep_name.name));

	ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
					   sizeof(rep_cfg), NULL, 0);
	if (ret)
		dev_err(&oct->pci_dev->dev,
			"vf_rep netdev name change failed with err %d\n", ret);

	return NOTIFY_DONE;
}

static struct notifier_block lio_vf_rep_netdev_notifier = {
	.notifier_call = lio_vf_rep_netdev_event,
};

int
lio_vf_rep_modinit(void)
{
	if (register_netdevice_notifier(&lio_vf_rep_netdev_notifier)) {
		pr_err("netdev notifier registration failed\n");
		return -EFAULT;
	}

	return 0;
}

void
lio_vf_rep_modexit(void)
{
	if (unregister_netdevice_notifier(&lio_vf_rep_netdev_notifier))
		pr_err("netdev notifier unregister failed\n");
}