// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2020 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/if_ether.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"
#include "mtk_wed.h"

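/* Match and mangle data collected from a flower rule while it is being
 * translated into a hardware FOE entry.
 */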
struct mtk_flow_data {
	struct ethhdr eth;

	union {
		struct {
			__be32 src_addr;
			__be32 dst_addr;
		} v4;

		struct {
			struct in6_addr src_addr;
			struct in6_addr dst_addr;
		} v6;
	};

	__be16 src_port;
	__be16 dst_port;

	u16 vlan_in;

	struct {
		u16 id;
		__be16 proto;
		u8 num;
	} vlan;
	struct {
		u16 sid;
		u8 num;
	} pppoe;
};

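/* Offloaded flows are tracked in a hash table keyed by the rule cookie. */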
static const struct rhashtable_params mtk_flow_ht_params = {
	.head_offset = offsetof(struct mtk_flow_entry, node),
	.key_offset = offsetof(struct mtk_flow_entry, cookie),
	.key_len = sizeof(unsigned long),
	.automatic_shrinking = true,
};

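/* Write the IPv4 address/port tuple into the FOE entry; @egress selects the
 * translated (post-NAT) tuple rather than the original one.
 */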
static int
mtk_flow_set_ipv4_addr(struct mtk_eth *eth, struct mtk_foe_entry *foe,
		       struct mtk_flow_data *data, bool egress)
{
	return mtk_foe_entry_set_ipv4_tuple(eth, foe, egress,
					    data->v4.src_addr, data->src_port,
					    data->v4.dst_addr, data->dst_port);
}

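/* Write the IPv6 address/port tuple into the FOE entry. */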
static int
mtk_flow_set_ipv6_addr(struct mtk_eth *eth, struct mtk_foe_entry *foe,
		       struct mtk_flow_data *data)
{
	return mtk_foe_entry_set_ipv6_tuple(eth, foe,
					    data->v6.src_addr.s6_addr32, data->src_port,
					    data->v6.dst_addr.s6_addr32, data->dst_port);
}

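/* Apply an Ethernet header mangle to the cached header. Offsets above 8 would
 * reach past the destination/source MAC addresses and are ignored. A zero
 * mask rewrites the full 32-bit word; a non-zero mask rewrites 16 bits, at
 * offset + 2 when the mask is 0xffff.
 */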
static void
mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
{
	void *dest = eth + act->mangle.offset;
	const void *src = &act->mangle.val;

	if (act->mangle.offset > 8)
		return;

	if (act->mangle.mask == 0xffff) {
		src += 2;
		dest += 2;
	}

	memcpy(dest, src, act->mangle.mask ? 2 : 4);
}

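/* Walk the forwarding path from @dev towards @addr. If it ends in a MediaTek
 * WDMA device (WLAN offload via WED), fill @info with the WDMA index, queue,
 * BSS, WCID and A-MSDU capability of that path.
 */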
static int
mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_info *info)
{
	struct net_device_path_stack stack;
	struct net_device_path *path;
	int err;

	if (!dev)
		return -ENODEV;

	if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
		return -1;

	err = dev_fill_forward_path(dev, addr, &stack);
	if (err)
		return err;

	path = &stack.path[stack.num_paths - 1];
	if (path->type != DEV_PATH_MTK_WDMA)
		return -1;

	info->wdma_idx = path->mtk_wdma.wdma_idx;
	info->queue = path->mtk_wdma.queue;
	info->bss = path->mtk_wdma.bss;
	info->wcid = path->mtk_wdma.wcid;
	info->amsdu = path->mtk_wdma.amsdu;

	return 0;
}

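/* Translate a TCP/UDP port mangle. Offset 0 addresses the 32-bit word holding
 * both ports: the mask selects the destination (low half) or source (high
 * half) port. Offset 2 addresses the destination port directly.
 */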
static int
mtk_flow_mangle_ports(const struct flow_action_entry *act,
		      struct mtk_flow_data *data)
{
	u32 val = ntohl(act->mangle.val);

	switch (act->mangle.offset) {
	case 0:
		if (act->mangle.mask == ~htonl(0xffff))
			data->dst_port = cpu_to_be16(val);
		else
			data->src_port = cpu_to_be16(val >> 16);
		break;
	case 2:
		data->dst_port = cpu_to_be16(val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

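/* Translate an IPv4 header mangle into a source or destination address
 * update.
 */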
static int
mtk_flow_mangle_ipv4(const struct flow_action_entry *act,
		     struct mtk_flow_data *data)
{
	__be32 *dest;

	switch (act->mangle.offset) {
	case offsetof(struct iphdr, saddr):
		dest = &data->v4.src_addr;
		break;
	case offsetof(struct iphdr, daddr):
		dest = &data->v4.dst_addr;
		break;
	default:
		return -EINVAL;
	}

	memcpy(dest, &act->mangle.val, sizeof(u32));

	return 0;
}

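/* If *dev is a DSA user port behind a switch using MediaTek tags, replace
 * *dev with the conduit device and return the switch port index; -ENODEV
 * otherwise.
 */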
static int
mtk_flow_get_dsa_port(struct net_device **dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_port *dp;

	dp = dsa_port_from_netdev(*dev);
	if (IS_ERR(dp))
		return -ENODEV;

	if (dp->cpu_dp->tag_ops->proto != DSA_TAG_PROTO_MTK)
		return -ENODEV;

	*dev = dsa_port_to_conduit(dp);

	return dp->index;
#else
	return -ENODEV;
#endif
}

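/* Resolve the egress device into a PSE destination port. WLAN destinations
 * reachable through WED go out via a WDMA port; everything else must map to
 * one of the GDM ports, with DSA tagging and a TX queue set up as needed.
 */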
static int
mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
			   struct net_device *dev, const u8 *dest_mac,
			   int *wed_index)
{
	struct mtk_wdma_info info = {};
	int pse_port, dsa_port, queue;

	if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
		mtk_foe_entry_set_wdma(eth, foe, info.wdma_idx, info.queue,
				       info.bss, info.wcid, info.amsdu);
		if (mtk_is_netsys_v2_or_greater(eth)) {
			switch (info.wdma_idx) {
			case 0:
				pse_port = PSE_WDMA0_PORT;
				break;
			case 1:
				pse_port = PSE_WDMA1_PORT;
				break;
			case 2:
				pse_port = PSE_WDMA2_PORT;
				break;
			default:
				return -EINVAL;
			}
		} else {
			pse_port = 3;
		}
		*wed_index = info.wdma_idx;
		goto out;
	}

	dsa_port = mtk_flow_get_dsa_port(&dev);

	if (dev == eth->netdev[0])
		pse_port = PSE_GDM1_PORT;
	else if (dev == eth->netdev[1])
		pse_port = PSE_GDM2_PORT;
	else if (dev == eth->netdev[2])
		pse_port = PSE_GDM3_PORT;
	else
		return -EOPNOTSUPP;

	if (dsa_port >= 0) {
		mtk_foe_entry_set_dsa(eth, foe, dsa_port);
		queue = 3 + dsa_port;
	} else {
		queue = pse_port - 1;
	}
	mtk_foe_entry_set_queue(eth, foe, queue);

out:
	mtk_foe_entry_set_pse_port(eth, foe, pse_port);

	return 0;
}

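/* Translate a flower rule into a FOE entry and commit it to the PPE. Rules
 * must carry meta, control and basic matches; IPv4/IPv6 5-tuple routes and
 * L2 bridge flows are supported, with redirect, eth/IPv4/port mangle,
 * checksum, VLAN push/pop and PPPoE push actions. An illustrative rule this
 * path can offload (device names are examples only):
 *
 *   tc filter add dev eth0 ingress protocol ip flower ip_proto tcp \
 *	action mirred egress redirect dev wan0
 */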
static int
mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
			 int ppe_index)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_action_entry *act;
	struct mtk_flow_data data = {};
	struct mtk_foe_entry foe;
	struct net_device *odev = NULL;
	struct mtk_flow_entry *entry;
	int offload_type = 0;
	int wed_index = -1;
	u16 addr_type = 0;
	u8 l4proto = 0;
	int err = 0;
	int i;

	if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
		return -EEXIST;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
		struct flow_match_meta match;

		flow_rule_match_meta(rule, &match);
	} else {
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;

		if (flow_rule_has_control_flags(match.mask->flags,
						f->common.extack))
			return -EOPNOTSUPP;
	} else {
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		l4proto = match.key->ip_proto;
	} else {
		return -EOPNOTSUPP;
	}

	switch (addr_type) {
	case 0:
		offload_type = MTK_PPE_PKT_TYPE_BRIDGE;
		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
			struct flow_match_eth_addrs match;

			flow_rule_match_eth_addrs(rule, &match);
			memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
			memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
		} else {
			return -EOPNOTSUPP;
		}

		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
			struct flow_match_vlan match;

			flow_rule_match_vlan(rule, &match);

			if (match.key->vlan_tpid != cpu_to_be16(ETH_P_8021Q))
				return -EOPNOTSUPP;

			data.vlan_in = match.key->vlan_id;
		}
		break;
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T;
		break;
	default:
		return -EOPNOTSUPP;
	}

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_MANGLE:
			if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
				return -EOPNOTSUPP;
			if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
				mtk_flow_offload_mangle_eth(act, &data.eth);
			break;
		case FLOW_ACTION_REDIRECT:
			odev = act->dev;
			break;
		case FLOW_ACTION_CSUM:
			break;
		case FLOW_ACTION_VLAN_PUSH:
			if (data.vlan.num == 1 ||
			    act->vlan.proto != htons(ETH_P_8021Q))
				return -EOPNOTSUPP;

			data.vlan.id = act->vlan.vid;
			data.vlan.proto = act->vlan.proto;
			data.vlan.num++;
			break;
		case FLOW_ACTION_VLAN_POP:
			break;
		case FLOW_ACTION_PPPOE_PUSH:
			if (data.pppoe.num == 1)
				return -EOPNOTSUPP;

			data.pppoe.sid = act->pppoe.sid;
			data.pppoe.num++;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	if (!is_valid_ether_addr(data.eth.h_source) ||
	    !is_valid_ether_addr(data.eth.h_dest))
		return -EINVAL;

	err = mtk_foe_entry_prepare(eth, &foe, offload_type, l4proto, 0,
				    data.eth.h_source, data.eth.h_dest);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports ports;

		if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
			return -EOPNOTSUPP;

		flow_rule_match_ports(rule, &ports);
		data.src_port = ports.key->src;
		data.dst_port = ports.key->dst;
	} else if (offload_type != MTK_PPE_PKT_TYPE_BRIDGE) {
		return -EOPNOTSUPP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs addrs;

		flow_rule_match_ipv4_addrs(rule, &addrs);

		data.v4.src_addr = addrs.key->src;
		data.v4.dst_addr = addrs.key->dst;

		mtk_flow_set_ipv4_addr(eth, &foe, &data, false);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs addrs;

		flow_rule_match_ipv6_addrs(rule, &addrs);

		data.v6.src_addr = addrs.key->src;
		data.v6.dst_addr = addrs.key->dst;

		mtk_flow_set_ipv6_addr(eth, &foe, &data);
	}

	flow_action_for_each(i, act, &rule->action) {
		if (act->id != FLOW_ACTION_MANGLE)
			continue;

		if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
			return -EOPNOTSUPP;

		switch (act->mangle.htype) {
		case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
			err = mtk_flow_mangle_ports(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
			err = mtk_flow_mangle_ipv4(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
			/* handled earlier */
			break;
		default:
			return -EOPNOTSUPP;
		}

		if (err)
			return err;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		err = mtk_flow_set_ipv4_addr(eth, &foe, &data, true);
		if (err)
			return err;
	}

	if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
		foe.bridge.vlan = data.vlan_in;

	if (data.vlan.num == 1) {
		if (data.vlan.proto != htons(ETH_P_8021Q))
			return -EOPNOTSUPP;

		mtk_foe_entry_set_vlan(eth, &foe, data.vlan.id);
	}
	if (data.pppoe.num == 1)
		mtk_foe_entry_set_pppoe(eth, &foe, data.pppoe.sid);

	err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest,
					 &wed_index);
	if (err)
		return err;

	if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
		return err;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->cookie = f->cookie;
	memcpy(&entry->data, &foe, sizeof(entry->data));
	entry->wed_index = wed_index;
	entry->ppe_index = ppe_index;

	err = mtk_foe_entry_commit(eth->ppe[entry->ppe_index], entry);
	if (err < 0)
		goto free;

	err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
				     mtk_flow_ht_params);
	if (err < 0)
		goto clear;

	return 0;

clear:
	mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry);
free:
	kfree(entry);
	if (wed_index >= 0)
		mtk_wed_flow_remove(wed_index);
	return err;
}

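/* Remove the flow matching the rule cookie from the PPE and the flow table. */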
static int
mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
{
	struct mtk_flow_entry *entry;

	entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
				  mtk_flow_ht_params);
	if (!entry)
		return -ENOENT;

	mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry);
	rhashtable_remove_fast(&eth->flow_table, &entry->node,
			       mtk_flow_ht_params);
	if (entry->wed_index >= 0)
		mtk_wed_flow_remove(entry->wed_index);
	kfree(entry);

	return 0;
}

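/* Report stats for an offloaded flow: the last-used time is derived from the
 * PPE idle counter, and packet/byte deltas come from the MIB counters when
 * the entry is bound to a hardware hash slot.
 */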
static int
mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
{
	struct mtk_flow_entry *entry;
	struct mtk_foe_accounting diff;
	u32 idle;

	entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
				  mtk_flow_ht_params);
	if (!entry)
		return -ENOENT;

	idle = mtk_foe_entry_idle_time(eth->ppe[entry->ppe_index], entry);
	f->stats.lastused = jiffies - idle * HZ;

	if (entry->hash != 0xFFFF &&
	    mtk_foe_entry_get_mib(eth->ppe[entry->ppe_index], entry->hash,
				  &diff)) {
		f->stats.pkts += diff.packets;
		f->stats.bytes += diff.bytes;
	}

	return 0;
}

static DEFINE_MUTEX(mtk_flow_offload_mutex);

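/* Dispatch a flower command against the given PPE instance. Callers outside
 * this file (the WED code, for example) pass their own PPE index, so all
 * commands are serialized behind a single mutex.
 */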
int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls,
			 int ppe_index)
{
	int err;

	mutex_lock(&mtk_flow_offload_mutex);
	switch (cls->command) {
	case FLOW_CLS_REPLACE:
		err = mtk_flow_offload_replace(eth, cls, ppe_index);
		break;
	case FLOW_CLS_DESTROY:
		err = mtk_flow_offload_destroy(eth, cls);
		break;
	case FLOW_CLS_STATS:
		err = mtk_flow_offload_stats(eth, cls);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	mutex_unlock(&mtk_flow_offload_mutex);

	return err;
}

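/* Flow block callback: only flower classifier rules are accepted, and rules
 * arriving through this path always target PPE instance 0.
 */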
static int
mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	struct flow_cls_offload *cls = type_data;
	struct net_device *dev = cb_priv;
	struct mtk_mac *mac;
	struct mtk_eth *eth;

	mac = netdev_priv(dev);
	eth = mac->hw;

	if (!tc_can_offload(dev))
		return -EOPNOTSUPP;

	if (type != TC_SETUP_CLSFLOWER)
		return -EOPNOTSUPP;

	return mtk_flow_offload_cmd(eth, cls, 0);
}

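/* Bind or unbind a flower offload block on the ingress qdisc. Block callbacks
 * are refcounted, so a block shared between devices is only added once.
 */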
static int
mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	static LIST_HEAD(block_cb_list);
	struct flow_block_cb *block_cb;
	flow_setup_cb_t *cb;

	if (!eth->soc->offload_version)
		return -EOPNOTSUPP;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	cb = mtk_eth_setup_tc_block_cb;
	f->driver_block_list = &block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (block_cb) {
			flow_block_cb_incref(block_cb);
			return 0;
		}
		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_incref(block_cb);
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (!block_cb)
			return -ENOENT;

		if (!flow_block_cb_decref(block_cb)) {
			flow_block_cb_remove(block_cb, f);
			list_del(&block_cb->driver_list);
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

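/* ndo_setup_tc: TC_SETUP_BLOCK and TC_SETUP_FT (flowtable offload) both
 * attach the same flower block.
 */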
int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
		     void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
	case TC_SETUP_FT:
		return mtk_eth_setup_tc_block(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

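/* Set up the hash table used to track offloaded flows. */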
int mtk_eth_offload_init(struct mtk_eth *eth)
{
	return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
}