// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/etherdevice.h>
#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/devlink.h>
#include <net/dst_metadata.h>

#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nffw.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_net_repr.h"
#include "../nfp_port.h"
#include "./cmsg.h"

#define NFP_FLOWER_ALLOWED_VER 0x0001000000010000UL

#define NFP_MIN_INT_PORT_ID	1
#define NFP_MAX_INT_PORT_ID	256

static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn)
{
	return "FLOWER";
}

static enum devlink_eswitch_mode eswitch_mode_get(struct nfp_app *app)
{
	return DEVLINK_ESWITCH_MODE_SWITCHDEV;
}

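/* Walk the internal port IDR under RCU and return the ID mapped to
 * @netdev, or 0 if the netdev has no internal port ID assigned.
 */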
static int
nfp_flower_lookup_internal_port_id(struct nfp_flower_priv *priv,
				   struct net_device *netdev)
{
	struct net_device *entry;
	int i, id = 0;

	rcu_read_lock();
	idr_for_each_entry(&priv->internal_ports.port_ids, entry, i)
		if (entry == netdev) {
			id = i;
			break;
		}
	rcu_read_unlock();

	return id;
}

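/* Return the existing internal port ID for @netdev, or allocate a new
 * one in the [NFP_MIN_INT_PORT_ID, NFP_MAX_INT_PORT_ID) range if the
 * netdev is not yet tracked.
 */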
static int
nfp_flower_get_internal_port_id(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	int id;

	id = nfp_flower_lookup_internal_port_id(priv, netdev);
	if (id > 0)
		return id;

	idr_preload(GFP_ATOMIC);
	spin_lock_bh(&priv->internal_ports.lock);
	id = idr_alloc(&priv->internal_ports.port_ids, netdev,
		       NFP_MIN_INT_PORT_ID, NFP_MAX_INT_PORT_ID, GFP_ATOMIC);
	spin_unlock_bh(&priv->internal_ports.lock);
	idr_preload_end();

	return id;
}

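/* Translate a netdev to the port ID used in control messages: reprs
 * carry their own port ID, offloadable internal ports get an ID from
 * the internal port IDR, and all other netdevs map to 0.
 */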
u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app,
				       struct net_device *netdev)
{
	int ext_port;

	if (nfp_netdev_is_nfp_repr(netdev)) {
		return nfp_repr_get_port_id(netdev);
	} else if (nfp_flower_internal_port_can_offload(app, netdev)) {
		ext_port = nfp_flower_get_internal_port_id(app, netdev);
		if (ext_port < 0)
			return 0;

		return nfp_flower_internal_port_get_port_id(ext_port);
	}

	return 0;
}

static struct net_device *
nfp_flower_get_netdev_from_internal_port_id(struct nfp_app *app, int port_id)
{
	struct nfp_flower_priv *priv = app->priv;
	struct net_device *netdev;

	rcu_read_lock();
	netdev = idr_find(&priv->internal_ports.port_ids, port_id);
	rcu_read_unlock();

	return netdev;
}

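/* Release the internal port ID held by @netdev, if it has one. */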
static void
nfp_flower_free_internal_port_id(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	int id;

	id = nfp_flower_lookup_internal_port_id(priv, netdev);
	if (!id)
		return;

	spin_lock_bh(&priv->internal_ports.lock);
	idr_remove(&priv->internal_ports.port_ids, id);
	spin_unlock_bh(&priv->internal_ports.lock);
}

static int
nfp_flower_internal_port_event_handler(struct nfp_app *app,
				       struct net_device *netdev,
				       unsigned long event)
{
	if (event == NETDEV_UNREGISTER &&
	    nfp_flower_internal_port_can_offload(app, netdev))
		nfp_flower_free_internal_port_id(app, netdev);

	return NOTIFY_OK;
}

static void nfp_flower_internal_port_init(struct nfp_flower_priv *priv)
{
	spin_lock_init(&priv->internal_ports.lock);
	idr_init(&priv->internal_ports.port_ids);
}

static void nfp_flower_internal_port_cleanup(struct nfp_flower_priv *priv)
{
	idr_destroy(&priv->internal_ports.port_ids);
}

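/* Private data for non-repr netdevs is kept on a refcounted list;
 * lookups rely on the RTNL lock for protection.
 */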
static struct nfp_flower_non_repr_priv *
nfp_flower_non_repr_priv_lookup(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_flower_non_repr_priv *entry;

	ASSERT_RTNL();

	list_for_each_entry(entry, &priv->non_repr_priv, list)
		if (entry->netdev == netdev)
			return entry;

	return NULL;
}

void
__nfp_flower_non_repr_priv_get(struct nfp_flower_non_repr_priv *non_repr_priv)
{
	non_repr_priv->ref_count++;
}

struct nfp_flower_non_repr_priv *
nfp_flower_non_repr_priv_get(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_flower_non_repr_priv *entry;

	entry = nfp_flower_non_repr_priv_lookup(app, netdev);
	if (entry)
		goto inc_ref;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	entry->netdev = netdev;
	list_add(&entry->list, &priv->non_repr_priv);

inc_ref:
	__nfp_flower_non_repr_priv_get(entry);
	return entry;
}

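/* Drop one reference; when the count reaches zero, unlink and free the
 * entry.
 */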
void
__nfp_flower_non_repr_priv_put(struct nfp_flower_non_repr_priv *non_repr_priv)
{
	if (--non_repr_priv->ref_count)
		return;

	list_del(&non_repr_priv->list);
	kfree(non_repr_priv);
}

void
nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_non_repr_priv *entry;

	entry = nfp_flower_non_repr_priv_lookup(app, netdev);
	if (!entry)
		return;

	__nfp_flower_non_repr_priv_put(entry);
}

static enum nfp_repr_type
nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
{
	switch (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id)) {
	case NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT:
		*port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM,
				  port_id);
		return NFP_REPR_TYPE_PHYS_PORT;

	case NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT:
		*port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port_id);
		if (FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC_TYPE, port_id) ==
		    NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF)
			return NFP_REPR_TYPE_PF;
		else
			return NFP_REPR_TYPE_VF;
	}

	return __NFP_REPR_TYPE_MAX;
}

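/* Resolve a control message port ID to a netdev.  Internal ports are
 * looked up in the internal port IDR; all other port types are
 * resolved through the repr tables under RCU.
 */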
static struct net_device *
nfp_flower_dev_get(struct nfp_app *app, u32 port_id, bool *redir_egress)
{
	enum nfp_repr_type repr_type;
	struct nfp_reprs *reprs;
	u8 port = 0;

	/* Check if the port is internal. */
	if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id) ==
	    NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT) {
		if (redir_egress)
			*redir_egress = true;
		port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, port_id);
		return nfp_flower_get_netdev_from_internal_port_id(app, port);
	}

	repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port);
	if (repr_type > NFP_REPR_TYPE_MAX)
		return NULL;

	reprs = rcu_dereference(app->reprs[repr_type]);
	if (!reprs)
		return NULL;

	if (port >= reprs->num_reprs)
		return NULL;

	return rcu_dereference(reprs->reprs[port]);
}

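/* Send a REIFY control message for each repr of @type, telling the
 * firmware whether the reprs exist.  Returns the number of messages
 * sent or a negative error code.
 */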
static int
nfp_flower_reprs_reify(struct nfp_app *app, enum nfp_repr_type type,
		       bool exists)
{
	struct nfp_reprs *reprs;
	int i, err, count = 0;

	reprs = rcu_dereference_protected(app->reprs[type],
					  lockdep_is_held(&app->pf->lock));
	if (!reprs)
		return 0;

	for (i = 0; i < reprs->num_reprs; i++) {
		struct net_device *netdev;

		netdev = nfp_repr_get_locked(app, reprs, i);
		if (netdev) {
			struct nfp_repr *repr = netdev_priv(netdev);

			err = nfp_flower_cmsg_portreify(repr, exists);
			if (err)
				return err;
			count++;
		}
	}

	return count;
}

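/* Wait for the firmware to acknowledge @tot_repl REIFY messages,
 * timing out after NFP_FL_REPLY_TIMEOUT.
 */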
static int
nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!tot_repl)
		return 0;

	lockdep_assert_held(&app->pf->lock);
	if (!wait_event_timeout(priv->reify_wait_queue,
				atomic_read(replies) >= tot_repl,
				NFP_FL_REPLY_TIMEOUT)) {
		nfp_warn(app->cpp, "Not all reprs responded to reify\n");
		return -EIO;
	}

	return 0;
}

static int
nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
{
	int err;

	err = nfp_flower_cmsg_portmod(repr, true, repr->netdev->mtu, false);
	if (err)
		return err;

	netif_tx_wake_all_queues(repr->netdev);

	return 0;
}

static int
nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
{
	netif_tx_disable(repr->netdev);

	return nfp_flower_cmsg_portmod(repr, false, repr->netdev->mtu, false);
}

static void
nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);

	kfree(repr->app_priv);
}

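/* Notify the firmware that a repr is being destroyed and wait for its
 * single REIFY reply before the netdev is unregistered.
 */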
static void
nfp_flower_repr_netdev_preclean(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	struct nfp_flower_priv *priv = app->priv;
	atomic_t *replies = &priv->reify_replies;
	int err;

	atomic_set(replies, 0);
	err = nfp_flower_cmsg_portreify(repr, false);
	if (err) {
		nfp_warn(app->cpp, "Failed to notify firmware about repr destruction\n");
		return;
	}

	nfp_flower_wait_repr_reify(app, replies, 1);
}

static void nfp_flower_sriov_disable(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!priv->nn)
		return;

	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF);
}

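/* Create @cnt PF or VF representors, register them with the app and
 * announce them to the firmware via REIFY messages.  Any reprs already
 * allocated are cleaned up on failure.
 */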
static int
nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
			    enum nfp_flower_cmsg_port_vnic_type vnic_type,
			    enum nfp_repr_type repr_type, unsigned int cnt)
{
	u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp);
	struct nfp_flower_priv *priv = app->priv;
	atomic_t *replies = &priv->reify_replies;
	struct nfp_flower_repr_priv *repr_priv;
	enum nfp_port_type port_type;
	struct nfp_repr *nfp_repr;
	struct nfp_reprs *reprs;
	int i, err, reify_cnt;
	const u8 queue = 0;

	port_type = repr_type == NFP_REPR_TYPE_PF ? NFP_PORT_PF_PORT :
						    NFP_PORT_VF_PORT;

	reprs = nfp_reprs_alloc(cnt);
	if (!reprs)
		return -ENOMEM;

	for (i = 0; i < cnt; i++) {
		struct net_device *repr;
		struct nfp_port *port;
		u32 port_id;

		repr = nfp_repr_alloc(app);
		if (!repr) {
			err = -ENOMEM;
			goto err_reprs_clean;
		}

		repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
		if (!repr_priv) {
			err = -ENOMEM;
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		nfp_repr = netdev_priv(repr);
		nfp_repr->app_priv = repr_priv;
		repr_priv->nfp_repr = nfp_repr;

		/* For now we only support 1 PF */
		WARN_ON(repr_type == NFP_REPR_TYPE_PF && i);

		port = nfp_port_alloc(app, port_type, repr);
		if (IS_ERR(port)) {
			err = PTR_ERR(port);
			kfree(repr_priv);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}
		if (repr_type == NFP_REPR_TYPE_PF) {
			port->pf_id = i;
			port->vnic = priv->nn->dp.ctrl_bar;
		} else {
			port->pf_id = 0;
			port->vf_id = i;
			port->vnic =
				app->pf->vf_cfg_mem + i * NFP_NET_CFG_BAR_SZ;
		}

		eth_hw_addr_random(repr);

		port_id = nfp_flower_cmsg_pcie_port(nfp_pcie, vnic_type,
						    i, queue);
		err = nfp_repr_init(app, repr,
				    port_id, port, priv->nn->dp.netdev);
		if (err) {
			kfree(repr_priv);
			nfp_port_free(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		RCU_INIT_POINTER(reprs->reprs[i], repr);
		nfp_info(app->cpp, "%s%d Representor(%s) created\n",
			 repr_type == NFP_REPR_TYPE_PF ? "PF" : "VF", i,
			 repr->name);
	}

	nfp_app_reprs_set(app, repr_type, reprs);

	atomic_set(replies, 0);
	reify_cnt = nfp_flower_reprs_reify(app, repr_type, true);
	if (reify_cnt < 0) {
		err = reify_cnt;
		nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
		goto err_reprs_remove;
	}

	err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
	if (err)
		goto err_reprs_remove;

	return 0;
err_reprs_remove:
	reprs = nfp_app_reprs_set(app, repr_type, NULL);
err_reprs_clean:
	nfp_reprs_clean_and_free(app, reprs);
	return err;
}

static int nfp_flower_sriov_enable(struct nfp_app *app, int num_vfs)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!priv->nn)
		return 0;

	return nfp_flower_spawn_vnic_reprs(app,
					   NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
					   NFP_REPR_TYPE_VF, num_vfs);
}

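/* Create a representor for each physical port in the eth table,
 * register them with the app and announce them to the firmware via
 * MAC_REPR and REIFY control messages.
 */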
static int
nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
{
	struct nfp_eth_table *eth_tbl = app->pf->eth_tbl;
	atomic_t *replies = &priv->reify_replies;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_repr *nfp_repr;
	struct sk_buff *ctrl_skb;
	struct nfp_reprs *reprs;
	int err, reify_cnt;
	unsigned int i;

	ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count);
	if (!ctrl_skb)
		return -ENOMEM;

	reprs = nfp_reprs_alloc(eth_tbl->max_index + 1);
	if (!reprs) {
		err = -ENOMEM;
		goto err_free_ctrl_skb;
	}

	for (i = 0; i < eth_tbl->count; i++) {
		unsigned int phys_port = eth_tbl->ports[i].index;
		struct net_device *repr;
		struct nfp_port *port;
		u32 cmsg_port_id;

		repr = nfp_repr_alloc(app);
		if (!repr) {
			err = -ENOMEM;
			goto err_reprs_clean;
		}

		repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
		if (!repr_priv) {
			err = -ENOMEM;
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		nfp_repr = netdev_priv(repr);
		nfp_repr->app_priv = repr_priv;
		repr_priv->nfp_repr = nfp_repr;

		port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
		if (IS_ERR(port)) {
			err = PTR_ERR(port);
			kfree(repr_priv);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}
		err = nfp_port_init_phy_port(app->pf, app, port, i);
		if (err) {
			kfree(repr_priv);
			nfp_port_free(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		SET_NETDEV_DEV(repr, &priv->nn->pdev->dev);
		nfp_net_get_mac_addr(app->pf, repr, port);

		cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port);
		err = nfp_repr_init(app, repr,
				    cmsg_port_id, port, priv->nn->dp.netdev);
		if (err) {
			kfree(repr_priv);
			nfp_port_free(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		nfp_flower_cmsg_mac_repr_add(ctrl_skb, i,
					     eth_tbl->ports[i].nbi,
					     eth_tbl->ports[i].base,
					     phys_port);

		RCU_INIT_POINTER(reprs->reprs[phys_port], repr);
		nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n",
			 phys_port, repr->name);
	}

	nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);

	/* The REIFY/MAC_REPR control messages should be sent after the MAC
	 * representors are registered using nfp_app_reprs_set().  This is
	 * because the firmware may respond with control messages for the
	 * MAC representors, e.g. to provide the driver with information
	 * about their state, and without registration the driver will drop
	 * any such messages.
	 */
	atomic_set(replies, 0);
	reify_cnt = nfp_flower_reprs_reify(app, NFP_REPR_TYPE_PHYS_PORT, true);
	if (reify_cnt < 0) {
		err = reify_cnt;
		nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
		goto err_reprs_remove;
	}

	err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
	if (err)
		goto err_reprs_remove;

	nfp_ctrl_tx(app->ctrl, ctrl_skb);

	return 0;
err_reprs_remove:
	reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, NULL);
err_reprs_clean:
	nfp_reprs_clean_and_free(app, reprs);
err_free_ctrl_skb:
	kfree_skb(ctrl_skb);
	return err;
}

static int nfp_flower_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
				 unsigned int id)
{
	if (id > 0) {
		nfp_warn(app->cpp, "FlowerNIC doesn't support more than one data vNIC\n");
		goto err_invalid_port;
	}

	eth_hw_addr_random(nn->dp.netdev);
	netif_keep_dst(nn->dp.netdev);
	nn->vnic_no_name = true;

	return 0;

err_invalid_port:
	nn->port = nfp_port_alloc(app, NFP_PORT_INVALID, nn->dp.netdev);
	return PTR_ERR_OR_ZERO(nn->port);
}

static void nfp_flower_vnic_clean(struct nfp_app *app, struct nfp_net *nn)
{
	struct nfp_flower_priv *priv = app->priv;

	if (app->pf->num_vfs)
		nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF);
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);

	priv->nn = NULL;
}

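/* Once the data vNIC comes up, spawn the physical port, PF and, if
 * SR-IOV is already enabled, VF representors.
 */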
static int nfp_flower_vnic_init(struct nfp_app *app, struct nfp_net *nn)
{
	struct nfp_flower_priv *priv = app->priv;
	int err;

	priv->nn = nn;

	err = nfp_flower_spawn_phy_reprs(app, app->priv);
	if (err)
		goto err_clear_nn;

	err = nfp_flower_spawn_vnic_reprs(app,
					  NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF,
					  NFP_REPR_TYPE_PF, 1);
	if (err)
		goto err_destroy_reprs_phy;

	if (app->pf->num_vfs) {
		err = nfp_flower_spawn_vnic_reprs(app,
						  NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
						  NFP_REPR_TYPE_VF,
						  app->pf->num_vfs);
		if (err)
			goto err_destroy_reprs_pf;
	}

	return 0;

err_destroy_reprs_pf:
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
err_destroy_reprs_phy:
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
err_clear_nn:
	priv->nn = NULL;
	return err;
}

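/* Poll the firmware's combined features symbol until the HOST_ACK bit
 * becomes visible, giving up after 100ms.
 */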
static void nfp_flower_wait_host_bit(struct nfp_app *app)
{
	unsigned long err_at;
	u64 feat;
	int err;

	/* Wait for HOST_ACK flag bit to propagate */
	err_at = jiffies + msecs_to_jiffies(100);
	do {
		feat = nfp_rtsym_read_le(app->pf->rtbl,
					 "_abi_flower_combined_features_global",
					 &err);
		if (time_is_before_eq_jiffies(err_at)) {
			nfp_warn(app->cpp,
				 "HOST_ACK bit not propagated in FW.\n");
			break;
		}
		usleep_range(1000, 2000);
	} while (!err && !(feat & NFP_FL_FEATS_HOST_ACK));

	if (err)
		nfp_warn(app->cpp,
			 "Could not read global features entry from FW\n");
}

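/* Negotiate optional features with the firmware: advertise the host
 * supported feature mask, then enable LAG and flow merging where the
 * corresponding firmware symbols exist.  A missing symbol (-ENOENT)
 * simply means the feature is unsupported and is not fatal.
 */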
static int nfp_flower_sync_feature_bits(struct nfp_app *app)
{
	struct nfp_flower_priv *app_priv = app->priv;
	int err;

	/* Tell the firmware of the host supported features. */
	err = nfp_rtsym_write_le(app->pf->rtbl, "_abi_flower_host_mask",
				 app_priv->flower_ext_feats |
				 NFP_FL_FEATS_HOST_ACK);
	if (!err)
		nfp_flower_wait_host_bit(app);
	else if (err != -ENOENT)
		return err;

	/* Tell the firmware that the driver supports lag. */
	err = nfp_rtsym_write_le(app->pf->rtbl,
				 "_abi_flower_balance_sync_enable", 1);
	if (!err) {
		app_priv->flower_en_feats |= NFP_FL_ENABLE_LAG;
		nfp_flower_lag_init(&app_priv->nfp_lag);
	} else if (err == -ENOENT) {
		nfp_warn(app->cpp, "LAG not supported by FW.\n");
	} else {
		return err;
	}

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MOD) {
		/* Tell the firmware that the driver supports flow merging. */
		err = nfp_rtsym_write_le(app->pf->rtbl,
					 "_abi_flower_merge_hint_enable", 1);
		if (!err) {
			app_priv->flower_en_feats |= NFP_FL_ENABLE_FLOW_MERGE;
			nfp_flower_internal_port_init(app_priv);
		} else if (err == -ENOENT) {
			nfp_warn(app->cpp,
				 "Flow merge not supported by FW.\n");
		} else {
			return err;
		}
	} else {
		nfp_warn(app->cpp, "Flow mod/merge not supported by FW.\n");
	}

	return 0;
}

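/* App init: validate the firmware environment (eth table, BARs,
 * version and host context symbols), allocate the flower app private
 * data and set up metadata handling, feature negotiation and indirect
 * block offload.
 */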
static int nfp_flower_init(struct nfp_app *app)
{
	u64 version, features, ctx_count, num_mems;
	const struct nfp_pf *pf = app->pf;
	struct nfp_flower_priv *app_priv;
	int err;

	if (!pf->eth_tbl) {
		nfp_warn(app->cpp, "FlowerNIC requires eth table\n");
		return -EINVAL;
	}

	if (!pf->mac_stats_bar) {
		nfp_warn(app->cpp, "FlowerNIC requires mac_stats BAR\n");
		return -EINVAL;
	}

	if (!pf->vf_cfg_bar) {
		nfp_warn(app->cpp, "FlowerNIC requires vf_cfg BAR\n");
		return -EINVAL;
	}

	version = nfp_rtsym_read_le(app->pf->rtbl, "hw_flower_version", &err);
	if (err) {
		nfp_warn(app->cpp, "FlowerNIC requires hw_flower_version memory symbol\n");
		return err;
	}

	num_mems = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_SPLIT",
				     &err);
	if (err) {
		nfp_warn(app->cpp,
			 "FlowerNIC: unsupported host context memory: %d\n",
			 err);
		err = 0;
		num_mems = 1;
	}

	if (!FIELD_FIT(NFP_FL_STAT_ID_MU_NUM, num_mems) || !num_mems) {
		nfp_warn(app->cpp,
			 "FlowerNIC: invalid host context memory: %llu\n",
			 num_mems);
		return -EINVAL;
	}

	ctx_count = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_COUNT",
				      &err);
	if (err) {
		nfp_warn(app->cpp,
			 "FlowerNIC: unsupported host context count: %d\n",
			 err);
		err = 0;
		ctx_count = BIT(17);
	}

	/* We need to ensure hardware has enough flower capabilities. */
	if (version != NFP_FLOWER_ALLOWED_VER) {
		nfp_warn(app->cpp, "FlowerNIC: unsupported firmware version\n");
		return -EINVAL;
	}

	app_priv = vzalloc(sizeof(struct nfp_flower_priv));
	if (!app_priv)
		return -ENOMEM;

	app_priv->total_mem_units = num_mems;
	app_priv->active_mem_unit = 0;
	app_priv->stats_ring_size = roundup_pow_of_two(ctx_count);
	app->priv = app_priv;
	app_priv->app = app;
	skb_queue_head_init(&app_priv->cmsg_skbs_high);
	skb_queue_head_init(&app_priv->cmsg_skbs_low);
	INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx);
	init_waitqueue_head(&app_priv->reify_wait_queue);

	init_waitqueue_head(&app_priv->mtu_conf.wait_q);
	spin_lock_init(&app_priv->mtu_conf.lock);

	err = nfp_flower_metadata_init(app, ctx_count, num_mems);
	if (err)
		goto err_free_app_priv;

	/* Extract the extra features supported by the firmware. */
	features = nfp_rtsym_read_le(app->pf->rtbl,
				     "_abi_flower_extra_features", &err);
	if (err)
		app_priv->flower_ext_feats = 0;
	else
		app_priv->flower_ext_feats = features & NFP_FL_FEATS_HOST;

	err = nfp_flower_sync_feature_bits(app);
	if (err)
		goto err_cleanup;

	err = flow_indr_dev_register(nfp_flower_indr_setup_tc_cb, app);
	if (err)
		goto err_cleanup;

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
		nfp_flower_qos_init(app);

	INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);
	INIT_LIST_HEAD(&app_priv->non_repr_priv);
	app_priv->pre_tun_rule_cnt = 0;

	return 0;

err_cleanup:
	if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG)
		nfp_flower_lag_cleanup(&app_priv->nfp_lag);
	nfp_flower_metadata_cleanup(app);
err_free_app_priv:
	vfree(app->priv);
	return err;
}

static void nfp_flower_clean(struct nfp_app *app)
{
	struct nfp_flower_priv *app_priv = app->priv;

	skb_queue_purge(&app_priv->cmsg_skbs_high);
	skb_queue_purge(&app_priv->cmsg_skbs_low);
	flush_work(&app_priv->cmsg_work);

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
		nfp_flower_qos_cleanup(app);

	if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG)
		nfp_flower_lag_cleanup(&app_priv->nfp_lag);

	if (app_priv->flower_en_feats & NFP_FL_ENABLE_FLOW_MERGE)
		nfp_flower_internal_port_cleanup(app_priv);

	nfp_flower_metadata_cleanup(app);
	vfree(app->priv);
	app->priv = NULL;
}

static bool nfp_flower_check_ack(struct nfp_flower_priv *app_priv)
{
	bool ret;

	spin_lock_bh(&app_priv->mtu_conf.lock);
	ret = app_priv->mtu_conf.ack;
	spin_unlock_bh(&app_priv->mtu_conf.lock);

	return ret;
}

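/* Physical port MTU changes must be mirrored to the firmware: send a
 * PORTMOD message carrying the new MTU and wait for the firmware to
 * ack it before the change is allowed to proceed.
 */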
static int
nfp_flower_repr_change_mtu(struct nfp_app *app, struct net_device *netdev,
			   int new_mtu)
{
	struct nfp_flower_priv *app_priv = app->priv;
	struct nfp_repr *repr = netdev_priv(netdev);
	int err;

	/* Only need to config FW for physical port MTU change. */
	if (repr->port->type != NFP_PORT_PHYS_PORT)
		return 0;

	if (!(app_priv->flower_ext_feats & NFP_FL_NBI_MTU_SETTING)) {
		nfp_err(app->cpp, "Physical port MTU setting not supported\n");
		return -EINVAL;
	}

	spin_lock_bh(&app_priv->mtu_conf.lock);
	app_priv->mtu_conf.ack = false;
	app_priv->mtu_conf.requested_val = new_mtu;
	app_priv->mtu_conf.portnum = repr->dst->u.port_info.port_id;
	spin_unlock_bh(&app_priv->mtu_conf.lock);

	err = nfp_flower_cmsg_portmod(repr, netif_carrier_ok(netdev), new_mtu,
				      true);
	if (err) {
		spin_lock_bh(&app_priv->mtu_conf.lock);
		app_priv->mtu_conf.requested_val = 0;
		spin_unlock_bh(&app_priv->mtu_conf.lock);
		return err;
	}

	/* Wait for fw to ack the change. */
	if (!wait_event_timeout(app_priv->mtu_conf.wait_q,
				nfp_flower_check_ack(app_priv),
				NFP_FL_REPLY_TIMEOUT)) {
		spin_lock_bh(&app_priv->mtu_conf.lock);
		app_priv->mtu_conf.requested_val = 0;
		spin_unlock_bh(&app_priv->mtu_conf.lock);
		nfp_warn(app->cpp, "MTU change not verified with fw\n");
		return -EIO;
	}

	return 0;
}

static int nfp_flower_start(struct nfp_app *app)
{
	struct nfp_flower_priv *app_priv = app->priv;
	int err;

	if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
		err = nfp_flower_lag_reset(&app_priv->nfp_lag);
		if (err)
			return err;
	}

	return nfp_tunnel_config_start(app);
}

static void nfp_flower_stop(struct nfp_app *app)
{
	nfp_tunnel_config_stop(app);

	flow_indr_dev_unregister(nfp_flower_indr_setup_tc_cb, app,
				 nfp_flower_setup_indr_tc_release);
}

static int
nfp_flower_netdev_event(struct nfp_app *app, struct net_device *netdev,
			unsigned long event, void *ptr)
{
	struct nfp_flower_priv *app_priv = app->priv;
	int ret;

	if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
		ret = nfp_flower_lag_netdev_event(app_priv, netdev, event, ptr);
		if (ret & NOTIFY_STOP_MASK)
			return ret;
	}

	ret = nfp_flower_internal_port_event_handler(app, netdev, event);
	if (ret & NOTIFY_STOP_MASK)
		return ret;

	return nfp_tunnel_mac_event_handler(app, netdev, event, ptr);
}

const struct nfp_app_type app_flower = {
	.id		= NFP_APP_FLOWER_NIC,
	.name		= "flower",

	.ctrl_cap_mask	= ~0U,
	.ctrl_has_meta	= true,

	.extra_cap	= nfp_flower_extra_cap,

	.init		= nfp_flower_init,
	.clean		= nfp_flower_clean,

	.repr_change_mtu  = nfp_flower_repr_change_mtu,

	.vnic_alloc	= nfp_flower_vnic_alloc,
	.vnic_init	= nfp_flower_vnic_init,
	.vnic_clean	= nfp_flower_vnic_clean,

	.repr_preclean	= nfp_flower_repr_netdev_preclean,
	.repr_clean	= nfp_flower_repr_netdev_clean,

	.repr_open	= nfp_flower_repr_netdev_open,
	.repr_stop	= nfp_flower_repr_netdev_stop,

	.start		= nfp_flower_start,
	.stop		= nfp_flower_stop,

	.netdev_event	= nfp_flower_netdev_event,

	.ctrl_msg_rx	= nfp_flower_cmsg_rx,

	.sriov_enable	= nfp_flower_sriov_enable,
	.sriov_disable	= nfp_flower_sriov_disable,

	.eswitch_mode_get  = eswitch_mode_get,
	.dev_get	= nfp_flower_dev_get,

	.setup_tc	= nfp_flower_setup_tc,
};