// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/skbuff.h>
#include <net/devlink.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"

#define NFP_FLOWER_SUPPORTED_TCPFLAGS \
        (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
         TCPHDR_PSH | TCPHDR_URG)

#define NFP_FLOWER_SUPPORTED_CTLFLAGS \
        (FLOW_DIS_IS_FRAGMENT | \
         FLOW_DIS_FIRST_FRAG)

#define NFP_FLOWER_WHITELIST_DISSECTOR \
        (BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
         BIT(FLOW_DISSECTOR_KEY_BASIC) | \
         BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_TCP) | \
         BIT(FLOW_DISSECTOR_KEY_PORTS) | \
         BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_VLAN) | \
         BIT(FLOW_DISSECTOR_KEY_CVLAN) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \
         BIT(FLOW_DISSECTOR_KEY_MPLS) | \
         BIT(FLOW_DISSECTOR_KEY_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
        (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
        (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R \
        (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS))

#define NFP_FLOWER_MERGE_FIELDS \
        (NFP_FLOWER_LAYER_PORT | \
         NFP_FLOWER_LAYER_MAC | \
         NFP_FLOWER_LAYER_TP | \
         NFP_FLOWER_LAYER_IPV4 | \
         NFP_FLOWER_LAYER_IPV6)

#define NFP_FLOWER_PRE_TUN_RULE_FIELDS \
        (NFP_FLOWER_LAYER_EXT_META | \
         NFP_FLOWER_LAYER_PORT | \
         NFP_FLOWER_LAYER_MAC | \
         NFP_FLOWER_LAYER_IPV4 | \
         NFP_FLOWER_LAYER_IPV6)

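/* Scratch layout used when checking whether two flows can be merged.  The
 * anonymous struct shares its storage with vals[], letting
 * nfp_flower_can_merge() treat the whole set of match fields as one bitmap;
 * vals[8] longs must be at least as large as the struct they overlay.
 */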
struct nfp_flower_merge_check {
        union {
                struct {
                        __be16 tci;
                        struct nfp_flower_mac_mpls l2;
                        struct nfp_flower_tp_ports l4;
                        union {
                                struct nfp_flower_ipv4 ipv4;
                                struct nfp_flower_ipv6 ipv6;
                        };
                };
                unsigned long vals[8];
        };
};

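/* Send a flow add/mod/del control message to the firmware.  The message
 * body is laid out as metadata | unmasked key | mask | actions, and the
 * metadata lengths are temporarily converted to firmware long-word units
 * around the copy.
 */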
static int
nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
                     u8 mtype)
{
        u32 meta_len, key_len, mask_len, act_len, tot_len;
        struct sk_buff *skb;
        unsigned char *msg;

        meta_len = sizeof(struct nfp_fl_rule_metadata);
        key_len = nfp_flow->meta.key_len;
        mask_len = nfp_flow->meta.mask_len;
        act_len = nfp_flow->meta.act_len;

        tot_len = meta_len + key_len + mask_len + act_len;

        /* Convert to long words as firmware expects
         * lengths in units of NFP_FL_LW_SIZ.
         */
        nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
        nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
        nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;

        skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        msg = nfp_flower_cmsg_get_data(skb);
        memcpy(msg, &nfp_flow->meta, meta_len);
        memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
        memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
        memcpy(&msg[meta_len + key_len + mask_len],
               nfp_flow->action_data, act_len);

        /* Convert back to bytes as software expects
         * lengths in units of bytes.
         */
        nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
        nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
        nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;

        nfp_ctrl_tx(app->ctrl, skb);

        return 0;
}

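/* Helpers to detect matches on fields above L2/L3 when the prerequisite
 * lower-layer protocol has not been specified in the rule.
 */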
static bool nfp_flower_check_higher_than_mac(struct flow_cls_offload *f)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(f);

        return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
               flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
               flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
               flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}

static bool nfp_flower_check_higher_than_l3(struct flow_cls_offload *f)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(f);

        return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) ||
               flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP);
}

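/* Account for geneve TLV options in the key layout.  Option data longer
 * than the firmware limit (IPv6 tunnels have an additional, separate limit)
 * cannot be offloaded.
 */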
static int
nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
                          u32 *key_layer_two, int *key_size, bool ipv6,
                          struct netlink_ext_ack *extack)
{
        if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY ||
            (ipv6 && enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY_V6)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: geneve options exceed maximum length");
                return -EOPNOTSUPP;
        }

        if (enc_opts->len > 0) {
                *key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
                *key_size += sizeof(struct nfp_flower_geneve_options);
        }

        return 0;
}

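/* Classify a UDP tunnel match by destination port (VXLAN or geneve) and
 * grow the key layers and key size accordingly; unknown ports are rejected.
 */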
static int
nfp_flower_calc_udp_tun_layer(struct flow_dissector_key_ports *enc_ports,
                              struct flow_dissector_key_enc_opts *enc_op,
                              u32 *key_layer_two, u8 *key_layer, int *key_size,
                              struct nfp_flower_priv *priv,
                              enum nfp_flower_tun_type *tun_type, bool ipv6,
                              struct netlink_ext_ack *extack)
{
        int err;

        switch (enc_ports->dst) {
        case htons(IANA_VXLAN_UDP_PORT):
                *tun_type = NFP_FL_TUNNEL_VXLAN;
                *key_layer |= NFP_FLOWER_LAYER_VXLAN;

                if (ipv6) {
                        *key_layer |= NFP_FLOWER_LAYER_EXT_META;
                        *key_size += sizeof(struct nfp_flower_ext_meta);
                        *key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
                        *key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
                } else {
                        *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
                }

                if (enc_op) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on vxlan tunnels");
                        return -EOPNOTSUPP;
                }
                break;
        case htons(GENEVE_UDP_PORT):
                if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve offload");
                        return -EOPNOTSUPP;
                }
                *tun_type = NFP_FL_TUNNEL_GENEVE;
                *key_layer |= NFP_FLOWER_LAYER_EXT_META;
                *key_size += sizeof(struct nfp_flower_ext_meta);
                *key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;

                if (ipv6) {
                        *key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
                        *key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
                } else {
                        *key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
                }

                if (!enc_op)
                        break;
                if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support geneve option offload");
                        return -EOPNOTSUPP;
                }
                err = nfp_flower_calc_opt_layer(enc_op, key_layer_two, key_size,
                                                ipv6, extack);
                if (err)
                        return err;
                break;
        default:
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel type unknown");
                return -EOPNOTSUPP;
        }

        return 0;
}

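/* Walk the TC dissector keys used by the rule and compute which NFP flower
 * key layers it needs, rejecting anything the firmware cannot match on.
 * The result sizes the allocation made in nfp_flower_allocate_new().
 */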
static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
                                struct net_device *netdev,
                                struct nfp_fl_key_ls *ret_key_ls,
                                struct flow_cls_offload *flow,
                                enum nfp_flower_tun_type *tun_type,
                                struct netlink_ext_ack *extack)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
        struct flow_dissector *dissector = rule->match.dissector;
        struct flow_match_basic basic = { NULL, NULL };
        struct nfp_flower_priv *priv = app->priv;
        u32 key_layer_two;
        u8 key_layer;
        int key_size;
        int err;

        if (dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match not supported");
                return -EOPNOTSUPP;
        }

        /* If any tun dissector is used then the required set must be used. */
        if (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
            (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R)
            != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_V6_R &&
            (dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
            != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel match not supported");
                return -EOPNOTSUPP;
        }

        key_layer_two = 0;
        key_layer = NFP_FLOWER_LAYER_PORT;
        key_size = sizeof(struct nfp_flower_meta_tci) +
                   sizeof(struct nfp_flower_in_port);

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
            flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
                key_layer |= NFP_FLOWER_LAYER_MAC;
                key_size += sizeof(struct nfp_flower_mac_mpls);
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
                struct flow_match_vlan vlan;

                flow_rule_match_vlan(rule, &vlan);
                if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
                    vlan.key->vlan_priority) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN PCP offload");
                        return -EOPNOTSUPP;
                }
                if (priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ &&
                    !(key_layer_two & NFP_FLOWER_LAYER2_QINQ)) {
                        key_layer |= NFP_FLOWER_LAYER_EXT_META;
                        key_size += sizeof(struct nfp_flower_ext_meta);
                        key_size += sizeof(struct nfp_flower_vlan);
                        key_layer_two |= NFP_FLOWER_LAYER2_QINQ;
                }
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
                struct flow_match_vlan cvlan;

                if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN QinQ offload");
                        return -EOPNOTSUPP;
                }

                flow_rule_match_vlan(rule, &cvlan);
                if (!(key_layer_two & NFP_FLOWER_LAYER2_QINQ)) {
                        key_layer |= NFP_FLOWER_LAYER_EXT_META;
                        key_size += sizeof(struct nfp_flower_ext_meta);
                        key_size += sizeof(struct nfp_flower_vlan);
                        key_layer_two |= NFP_FLOWER_LAYER2_QINQ;
                }
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
                struct flow_match_enc_opts enc_op = { NULL, NULL };
                struct flow_match_ipv4_addrs ipv4_addrs;
                struct flow_match_ipv6_addrs ipv6_addrs;
                struct flow_match_control enc_ctl;
                struct flow_match_ports enc_ports;
                bool ipv6_tun = false;

                flow_rule_match_enc_control(rule, &enc_ctl);

                if (enc_ctl.mask->addr_type != 0xffff) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: wildcarded protocols on tunnels are not supported");
                        return -EOPNOTSUPP;
                }

                ipv6_tun = enc_ctl.key->addr_type ==
                           FLOW_DISSECTOR_KEY_IPV6_ADDRS;
                if (ipv6_tun &&
                    !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN)) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: firmware does not support IPv6 tunnels");
                        return -EOPNOTSUPP;
                }

                if (!ipv6_tun &&
                    enc_ctl.key->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: tunnel address type not IPv4 or IPv6");
                        return -EOPNOTSUPP;
                }

                if (ipv6_tun) {
                        flow_rule_match_enc_ipv6_addrs(rule, &ipv6_addrs);
                        if (memchr_inv(&ipv6_addrs.mask->dst, 0xff,
                                       sizeof(ipv6_addrs.mask->dst))) {
                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv6 destination address is supported");
                                return -EOPNOTSUPP;
                        }
                } else {
                        flow_rule_match_enc_ipv4_addrs(rule, &ipv4_addrs);
                        if (ipv4_addrs.mask->dst != cpu_to_be32(~0)) {
                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match IPv4 destination address is supported");
                                return -EOPNOTSUPP;
                        }
                }

                if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS))
                        flow_rule_match_enc_opts(rule, &enc_op);

                if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
                        /* check if GRE, which has no enc_ports */
                        if (!netif_is_gretap(netdev)) {
                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
                                return -EOPNOTSUPP;
                        }

                        *tun_type = NFP_FL_TUNNEL_GRE;
                        key_layer |= NFP_FLOWER_LAYER_EXT_META;
                        key_size += sizeof(struct nfp_flower_ext_meta);
                        key_layer_two |= NFP_FLOWER_LAYER2_GRE;

                        if (ipv6_tun) {
                                key_layer_two |= NFP_FLOWER_LAYER2_TUN_IPV6;
                                key_size +=
                                        sizeof(struct nfp_flower_ipv6_udp_tun);
                        } else {
                                key_size +=
                                        sizeof(struct nfp_flower_ipv4_udp_tun);
                        }

                        if (enc_op.key) {
                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: encap options not supported on GRE tunnels");
                                return -EOPNOTSUPP;
                        }
                } else {
                        flow_rule_match_enc_ports(rule, &enc_ports);
                        if (enc_ports.mask->dst != cpu_to_be16(~0)) {
                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: only an exact match L4 destination port is supported");
                                return -EOPNOTSUPP;
                        }

                        err = nfp_flower_calc_udp_tun_layer(enc_ports.key,
                                                            enc_op.key,
                                                            &key_layer_two,
                                                            &key_layer,
                                                            &key_size, priv,
                                                            tun_type, ipv6_tun,
                                                            extack);
                        if (err)
                                return err;

                        /* Ensure the ingress netdev matches the expected
                         * tun type.
                         */
                        if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type)) {
                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ingress netdev does not match the expected tunnel type");
                                return -EOPNOTSUPP;
                        }
                }
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
                flow_rule_match_basic(rule, &basic);

        if (basic.mask && basic.mask->n_proto) {
                /* Ethernet type is present in the key. */
                switch (basic.key->n_proto) {
                case cpu_to_be16(ETH_P_IP):
                        key_layer |= NFP_FLOWER_LAYER_IPV4;
                        key_size += sizeof(struct nfp_flower_ipv4);
                        break;

                case cpu_to_be16(ETH_P_IPV6):
                        key_layer |= NFP_FLOWER_LAYER_IPV6;
                        key_size += sizeof(struct nfp_flower_ipv6);
                        break;

                /* Currently we do not offload ARP
                 * because we rely on it to get to the host.
                 */
                case cpu_to_be16(ETH_P_ARP):
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: ARP not supported");
                        return -EOPNOTSUPP;

                case cpu_to_be16(ETH_P_MPLS_UC):
                case cpu_to_be16(ETH_P_MPLS_MC):
                        if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
                                key_layer |= NFP_FLOWER_LAYER_MAC;
                                key_size += sizeof(struct nfp_flower_mac_mpls);
                        }
                        break;

                /* Will be included in layer 2. */
                case cpu_to_be16(ETH_P_8021Q):
                        break;

                default:
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on given EtherType is not supported");
                        return -EOPNOTSUPP;
                }
        } else if (nfp_flower_check_higher_than_mac(flow)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match above L2 without specified EtherType");
                return -EOPNOTSUPP;
        }

        if (basic.mask && basic.mask->ip_proto) {
                switch (basic.key->ip_proto) {
                case IPPROTO_TCP:
                case IPPROTO_UDP:
                case IPPROTO_SCTP:
                case IPPROTO_ICMP:
                case IPPROTO_ICMPV6:
                        key_layer |= NFP_FLOWER_LAYER_TP;
                        key_size += sizeof(struct nfp_flower_tp_ports);
                        break;
                }
        }

        if (!(key_layer & NFP_FLOWER_LAYER_TP) &&
            nfp_flower_check_higher_than_l3(flow)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match on L4 information without specified IP protocol type");
                return -EOPNOTSUPP;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
                struct flow_match_tcp tcp;
                u32 tcp_flags;

                flow_rule_match_tcp(rule, &tcp);
                tcp_flags = be16_to_cpu(tcp.key->flags);

                if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: no match support for selected TCP flags");
                        return -EOPNOTSUPP;
                }

                /* We only support PSH and URG flags when either
                 * FIN, SYN or RST is present as well.
                 */
                if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
                    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST))) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: PSH and URG is only supported when used with FIN, SYN or RST");
                        return -EOPNOTSUPP;
                }

                /* We need to store TCP flags in either the IPv4 or IPv6 key
                 * space, thus we need to ensure we include an IPv4/IPv6 key
                 * layer if we have not done so already.
                 */
                if (!basic.key) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on L3 protocol");
                        return -EOPNOTSUPP;
                }

                if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
                    !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
                        switch (basic.key->n_proto) {
                        case cpu_to_be16(ETH_P_IP):
                                key_layer |= NFP_FLOWER_LAYER_IPV4;
                                key_size += sizeof(struct nfp_flower_ipv4);
                                break;

                        case cpu_to_be16(ETH_P_IPV6):
                                key_layer |= NFP_FLOWER_LAYER_IPV6;
                                key_size += sizeof(struct nfp_flower_ipv6);
                                break;

                        default:
                                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on TCP flags requires a match on IPv4/IPv6");
                                return -EOPNOTSUPP;
                        }
                }
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_match_control ctl;

                flow_rule_match_control(rule, &ctl);
                if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on unknown control flag");
                        return -EOPNOTSUPP;
                }
        }

        ret_key_ls->key_layer = key_layer;
        ret_key_ls->key_layer_two = key_layer_two;
        ret_key_ls->key_size = key_size;

        return 0;
}

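/* Allocate a flow payload sized from the computed key layers.  Key and
 * mask buffers share the same length; the action buffer is sized for the
 * worst case (NFP_FL_MAX_A_SIZ).
 */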
static struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
{
        struct nfp_fl_payload *flow_pay;

        flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
        if (!flow_pay)
                return NULL;

        flow_pay->meta.key_len = key_layer->key_size;
        flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
        if (!flow_pay->unmasked_data)
                goto err_free_flow;

        flow_pay->meta.mask_len = key_layer->key_size;
        flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
        if (!flow_pay->mask_data)
                goto err_free_unmasked;

        flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
        if (!flow_pay->action_data)
                goto err_free_mask;

        flow_pay->nfp_tun_ipv4_addr = 0;
        flow_pay->nfp_tun_ipv6 = NULL;
        flow_pay->meta.flags = 0;
        INIT_LIST_HEAD(&flow_pay->linked_flows);
        flow_pay->in_hw = false;
        flow_pay->pre_tun_rule.dev = NULL;

        return flow_pay;

err_free_mask:
        kfree(flow_pay->mask_data);
err_free_unmasked:
        kfree(flow_pay->unmasked_data);
err_free_flow:
        kfree(flow_pay);
        return NULL;
}

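/* Widen a merge-check mask with the bits that each action of @flow can
 * rewrite, so that a subsequent flow matching on those fields is still
 * considered mergeable.
 */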
static int
nfp_flower_update_merge_with_actions(struct nfp_fl_payload *flow,
                                     struct nfp_flower_merge_check *merge,
                                     u8 *last_act_id, int *act_out)
{
        struct nfp_fl_set_ipv6_tc_hl_fl *ipv6_tc_hl_fl;
        struct nfp_fl_set_ip4_ttl_tos *ipv4_ttl_tos;
        struct nfp_fl_set_ip4_addrs *ipv4_add;
        struct nfp_fl_set_ipv6_addr *ipv6_add;
        struct nfp_fl_push_vlan *push_vlan;
        struct nfp_fl_pre_tunnel *pre_tun;
        struct nfp_fl_set_tport *tport;
        struct nfp_fl_set_eth *eth;
        struct nfp_fl_act_head *a;
        unsigned int act_off = 0;
        bool ipv6_tun = false;
        u8 act_id = 0;
        u8 *ports;
        int i;

        while (act_off < flow->meta.act_len) {
                a = (struct nfp_fl_act_head *)&flow->action_data[act_off];
                act_id = a->jump_id;

                switch (act_id) {
                case NFP_FL_ACTION_OPCODE_OUTPUT:
                        if (act_out)
                                (*act_out)++;
                        break;
                case NFP_FL_ACTION_OPCODE_PUSH_VLAN:
                        push_vlan = (struct nfp_fl_push_vlan *)a;
                        if (push_vlan->vlan_tci)
                                merge->tci = cpu_to_be16(0xffff);
                        break;
                case NFP_FL_ACTION_OPCODE_POP_VLAN:
                        merge->tci = cpu_to_be16(0);
                        break;
                case NFP_FL_ACTION_OPCODE_SET_TUNNEL:
                        /* New tunnel header means l2 to l4 can be matched. */
                        eth_broadcast_addr(&merge->l2.mac_dst[0]);
                        eth_broadcast_addr(&merge->l2.mac_src[0]);
                        memset(&merge->l4, 0xff,
                               sizeof(struct nfp_flower_tp_ports));
                        if (ipv6_tun)
                                memset(&merge->ipv6, 0xff,
                                       sizeof(struct nfp_flower_ipv6));
                        else
                                memset(&merge->ipv4, 0xff,
                                       sizeof(struct nfp_flower_ipv4));
                        break;
                case NFP_FL_ACTION_OPCODE_SET_ETHERNET:
                        eth = (struct nfp_fl_set_eth *)a;
                        for (i = 0; i < ETH_ALEN; i++)
                                merge->l2.mac_dst[i] |= eth->eth_addr_mask[i];
                        for (i = 0; i < ETH_ALEN; i++)
                                merge->l2.mac_src[i] |=
                                        eth->eth_addr_mask[ETH_ALEN + i];
                        break;
                case NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS:
                        ipv4_add = (struct nfp_fl_set_ip4_addrs *)a;
                        merge->ipv4.ipv4_src |= ipv4_add->ipv4_src_mask;
                        merge->ipv4.ipv4_dst |= ipv4_add->ipv4_dst_mask;
                        break;
                case NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS:
                        ipv4_ttl_tos = (struct nfp_fl_set_ip4_ttl_tos *)a;
                        merge->ipv4.ip_ext.ttl |= ipv4_ttl_tos->ipv4_ttl_mask;
                        merge->ipv4.ip_ext.tos |= ipv4_ttl_tos->ipv4_tos_mask;
                        break;
                case NFP_FL_ACTION_OPCODE_SET_IPV6_SRC:
                        ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
                        for (i = 0; i < 4; i++)
                                merge->ipv6.ipv6_src.in6_u.u6_addr32[i] |=
                                        ipv6_add->ipv6[i].mask;
                        break;
                case NFP_FL_ACTION_OPCODE_SET_IPV6_DST:
                        ipv6_add = (struct nfp_fl_set_ipv6_addr *)a;
                        for (i = 0; i < 4; i++)
                                merge->ipv6.ipv6_dst.in6_u.u6_addr32[i] |=
                                        ipv6_add->ipv6[i].mask;
                        break;
                case NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL:
                        ipv6_tc_hl_fl = (struct nfp_fl_set_ipv6_tc_hl_fl *)a;
                        merge->ipv6.ip_ext.ttl |=
                                ipv6_tc_hl_fl->ipv6_hop_limit_mask;
                        merge->ipv6.ip_ext.tos |= ipv6_tc_hl_fl->ipv6_tc_mask;
                        merge->ipv6.ipv6_flow_label_exthdr |=
                                ipv6_tc_hl_fl->ipv6_label_mask;
                        break;
                case NFP_FL_ACTION_OPCODE_SET_UDP:
                case NFP_FL_ACTION_OPCODE_SET_TCP:
                        tport = (struct nfp_fl_set_tport *)a;
                        ports = (u8 *)&merge->l4.port_src;
                        for (i = 0; i < 4; i++)
                                ports[i] |= tport->tp_port_mask[i];
                        break;
                case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
                        pre_tun = (struct nfp_fl_pre_tunnel *)a;
                        ipv6_tun = be16_to_cpu(pre_tun->flags) &
                                   NFP_FL_PRE_TUN_IPV6;
                        break;
                case NFP_FL_ACTION_OPCODE_PRE_LAG:
                case NFP_FL_ACTION_OPCODE_PUSH_GENEVE:
                        break;
                default:
                        return -EOPNOTSUPP;
                }

                act_off += a->len_lw << NFP_FL_LW_SIZ;
        }

        if (last_act_id)
                *last_act_id = act_id;

        return 0;
}

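/* Build a merge-check mask from the flow's mask data, walking the key
 * layers in the same order they are packed in the match buffer.
 */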
static int
nfp_flower_populate_merge_match(struct nfp_fl_payload *flow,
                                struct nfp_flower_merge_check *merge,
                                bool extra_fields)
{
        struct nfp_flower_meta_tci *meta_tci;
        u8 *mask = flow->mask_data;
        u8 key_layer, match_size;

        memset(merge, 0, sizeof(struct nfp_flower_merge_check));

        meta_tci = (struct nfp_flower_meta_tci *)mask;
        key_layer = meta_tci->nfp_flow_key_layer;

        if (key_layer & ~NFP_FLOWER_MERGE_FIELDS && !extra_fields)
                return -EOPNOTSUPP;

        merge->tci = meta_tci->tci;
        mask += sizeof(struct nfp_flower_meta_tci);

        if (key_layer & NFP_FLOWER_LAYER_EXT_META)
                mask += sizeof(struct nfp_flower_ext_meta);

        mask += sizeof(struct nfp_flower_in_port);

        if (key_layer & NFP_FLOWER_LAYER_MAC) {
                match_size = sizeof(struct nfp_flower_mac_mpls);
                memcpy(&merge->l2, mask, match_size);
                mask += match_size;
        }

        if (key_layer & NFP_FLOWER_LAYER_TP) {
                match_size = sizeof(struct nfp_flower_tp_ports);
                memcpy(&merge->l4, mask, match_size);
                mask += match_size;
        }

        if (key_layer & NFP_FLOWER_LAYER_IPV4) {
                match_size = sizeof(struct nfp_flower_ipv4);
                memcpy(&merge->ipv4, mask, match_size);
        }

        if (key_layer & NFP_FLOWER_LAYER_IPV6) {
                match_size = sizeof(struct nfp_flower_ipv6);
                memcpy(&merge->ipv6, mask, match_size);
        }

        return 0;
}

static int
nfp_flower_can_merge(struct nfp_fl_payload *sub_flow1,
                     struct nfp_fl_payload *sub_flow2)
{
        /* Two flows can be merged if sub_flow2 only matches on bits that are
         * either matched by sub_flow1 or set by a sub_flow1 action. This
         * ensures that every packet that hits sub_flow1 and recirculates is
         * guaranteed to hit sub_flow2.
         */
        struct nfp_flower_merge_check sub_flow1_merge, sub_flow2_merge;
        int err, act_out = 0;
        u8 last_act_id = 0;

        err = nfp_flower_populate_merge_match(sub_flow1, &sub_flow1_merge,
                                              true);
        if (err)
                return err;

        err = nfp_flower_populate_merge_match(sub_flow2, &sub_flow2_merge,
                                              false);
        if (err)
                return err;

        err = nfp_flower_update_merge_with_actions(sub_flow1, &sub_flow1_merge,
                                                   &last_act_id, &act_out);
        if (err)
                return err;

        /* Must only be 1 output action and it must be the last in sequence. */
        if (act_out != 1 || last_act_id != NFP_FL_ACTION_OPCODE_OUTPUT)
                return -EOPNOTSUPP;

        /* Reject merge if sub_flow2 matches on something that is not matched
         * on or set in an action by sub_flow1.
         */
        err = bitmap_andnot(sub_flow2_merge.vals, sub_flow2_merge.vals,
                            sub_flow1_merge.vals,
                            sizeof(struct nfp_flower_merge_check) * 8);
        if (err)
                return -EINVAL;

        return 0;
}

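/* Copy any leading pre-actions (pre-tunnel/pre-lag) from @act_src to
 * @act_dst and return how many bytes were consumed so the caller can skip
 * past them.
 */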
static unsigned int
nfp_flower_copy_pre_actions(char *act_dst, char *act_src, int len,
                            bool *tunnel_act)
{
        unsigned int act_off = 0, act_len;
        struct nfp_fl_act_head *a;
        u8 act_id = 0;

        while (act_off < len) {
                a = (struct nfp_fl_act_head *)&act_src[act_off];
                act_len = a->len_lw << NFP_FL_LW_SIZ;
                act_id = a->jump_id;

                switch (act_id) {
                case NFP_FL_ACTION_OPCODE_PRE_TUNNEL:
                        if (tunnel_act)
                                *tunnel_act = true;
                        fallthrough;
                case NFP_FL_ACTION_OPCODE_PRE_LAG:
                        memcpy(act_dst + act_off, act_src + act_off, act_len);
                        break;
                default:
                        return act_off;
                }

                act_off += act_len;
        }

        return act_off;
}

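/* After a tunnel push, only an optional leading VLAN push followed by
 * output actions form a valid merge; anything else is rejected.
 */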
static int
nfp_fl_verify_post_tun_acts(char *acts, int len, struct nfp_fl_push_vlan **vlan)
{
        struct nfp_fl_act_head *a;
        unsigned int act_off = 0;

        while (act_off < len) {
                a = (struct nfp_fl_act_head *)&acts[act_off];

                if (a->jump_id == NFP_FL_ACTION_OPCODE_PUSH_VLAN && !act_off)
                        *vlan = (struct nfp_fl_push_vlan *)a;
                else if (a->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT)
                        return -EOPNOTSUPP;

                act_off += a->len_lw << NFP_FL_LW_SIZ;
        }

        /* Ensure any VLAN push also has an egress action. */
        if (*vlan && act_off <= sizeof(struct nfp_fl_push_vlan))
                return -EOPNOTSUPP;

        return 0;
}

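/* Fold a post-tunnel VLAN push into the set-tunnel action so that the
 * firmware tags the outer header when it pushes the tunnel.
 */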
static int
nfp_fl_push_vlan_after_tun(char *acts, int len, struct nfp_fl_push_vlan *vlan)
{
        struct nfp_fl_set_tun *tun;
        struct nfp_fl_act_head *a;
        unsigned int act_off = 0;

        while (act_off < len) {
                a = (struct nfp_fl_act_head *)&acts[act_off];

                if (a->jump_id == NFP_FL_ACTION_OPCODE_SET_TUNNEL) {
                        tun = (struct nfp_fl_set_tun *)a;
                        tun->outer_vlan_tpid = vlan->vlan_tpid;
                        tun->outer_vlan_tci = vlan->vlan_tci;

                        return 0;
                }

                act_off += a->len_lw << NFP_FL_LW_SIZ;
        }

        /* Return error if no tunnel action is found. */
        return -EOPNOTSUPP;
}

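/* Combine the action lists of two sub_flows into @merge_flow: pre-actions
 * first, then sub_flow1's actions minus its final output, then sub_flow2's
 * actions.
 */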
static int
nfp_flower_merge_action(struct nfp_fl_payload *sub_flow1,
                        struct nfp_fl_payload *sub_flow2,
                        struct nfp_fl_payload *merge_flow)
{
        unsigned int sub1_act_len, sub2_act_len, pre_off1, pre_off2;
        struct nfp_fl_push_vlan *post_tun_push_vlan = NULL;
        bool tunnel_act = false;
        char *merge_act;
        int err;

        /* The last action of sub_flow1 must be output - do not merge this. */
        sub1_act_len = sub_flow1->meta.act_len - sizeof(struct nfp_fl_output);
        sub2_act_len = sub_flow2->meta.act_len;

        if (!sub2_act_len)
                return -EINVAL;

        if (sub1_act_len + sub2_act_len > NFP_FL_MAX_A_SIZ)
                return -EINVAL;

        /* A shortcut can only be applied if there is a single action. */
        if (sub1_act_len)
                merge_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
        else
                merge_flow->meta.shortcut = sub_flow2->meta.shortcut;

        merge_flow->meta.act_len = sub1_act_len + sub2_act_len;
        merge_act = merge_flow->action_data;

        /* Copy any pre-actions to the start of merge flow action list. */
        pre_off1 = nfp_flower_copy_pre_actions(merge_act,
                                               sub_flow1->action_data,
                                               sub1_act_len, &tunnel_act);
        merge_act += pre_off1;
        sub1_act_len -= pre_off1;
        pre_off2 = nfp_flower_copy_pre_actions(merge_act,
                                               sub_flow2->action_data,
                                               sub2_act_len, NULL);
        merge_act += pre_off2;
        sub2_act_len -= pre_off2;

        /* FW does a tunnel push when egressing, therefore, if sub_flow 1
         * pushes a tunnel, there are restrictions on what sub_flow 2 actions
         * lead to a valid merge.
         */
        if (tunnel_act) {
                char *post_tun_acts = &sub_flow2->action_data[pre_off2];

                err = nfp_fl_verify_post_tun_acts(post_tun_acts, sub2_act_len,
                                                  &post_tun_push_vlan);
                if (err)
                        return err;

                if (post_tun_push_vlan) {
                        pre_off2 += sizeof(*post_tun_push_vlan);
                        sub2_act_len -= sizeof(*post_tun_push_vlan);
                }
        }

        /* Copy remaining actions from sub_flows 1 and 2. */
        memcpy(merge_act, sub_flow1->action_data + pre_off1, sub1_act_len);

        if (post_tun_push_vlan) {
                /* Update tunnel action in merge to include VLAN push. */
                err = nfp_fl_push_vlan_after_tun(merge_act, sub1_act_len,
                                                 post_tun_push_vlan);
                if (err)
                        return err;

                merge_flow->meta.act_len -= sizeof(*post_tun_push_vlan);
        }

        merge_act += sub1_act_len;
        memcpy(merge_act, sub_flow2->action_data + pre_off2, sub2_act_len);

        return 0;
}

/* Flow link code should only be accessed under RTNL. */
static void nfp_flower_unlink_flow(struct nfp_fl_payload_link *link)
{
        list_del(&link->merge_flow.list);
        list_del(&link->sub_flow.list);
        kfree(link);
}

static void nfp_flower_unlink_flows(struct nfp_fl_payload *merge_flow,
                                    struct nfp_fl_payload *sub_flow)
{
        struct nfp_fl_payload_link *link;

        list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list)
                if (link->sub_flow.flow == sub_flow) {
                        nfp_flower_unlink_flow(link);
                        return;
                }
}

static int nfp_flower_link_flows(struct nfp_fl_payload *merge_flow,
                                 struct nfp_fl_payload *sub_flow)
{
        struct nfp_fl_payload_link *link;

        link = kmalloc(sizeof(*link), GFP_KERNEL);
        if (!link)
                return -ENOMEM;

        link->merge_flow.flow = merge_flow;
        list_add_tail(&link->merge_flow.list, &merge_flow->linked_flows);
        link->sub_flow.flow = sub_flow;
        list_add_tail(&link->sub_flow.list, &sub_flow->linked_flows);

        return 0;
}

/**
 * nfp_flower_merge_offloaded_flows() - Merge two existing flows into one.
 * @app:       Pointer to the APP handle
 * @sub_flow1: Initial flow matched to produce merge hint
 * @sub_flow2: Post recirculation flow matched in merge hint
 *
 * Combines two flows (if valid) into a single flow, removing the initial
 * flow from hw and offloading the new, merged flow.
 *
 * Return: negative value on error, 0 on success.
 */
int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
                                     struct nfp_fl_payload *sub_flow1,
                                     struct nfp_fl_payload *sub_flow2)
{
        struct flow_cls_offload merge_tc_off;
        struct nfp_flower_priv *priv = app->priv;
        struct netlink_ext_ack *extack = NULL;
        struct nfp_fl_payload *merge_flow;
        struct nfp_fl_key_ls merge_key_ls;
        struct nfp_merge_info *merge_info;
        u64 parent_ctx = 0;
        int err;

        ASSERT_RTNL();

        extack = merge_tc_off.common.extack;
        if (sub_flow1 == sub_flow2 ||
            nfp_flower_is_merge_flow(sub_flow1) ||
            nfp_flower_is_merge_flow(sub_flow2))
                return -EINVAL;

        /* check if the two flows are already merged */
        parent_ctx = (u64)(be32_to_cpu(sub_flow1->meta.host_ctx_id)) << 32;
        parent_ctx |= (u64)(be32_to_cpu(sub_flow2->meta.host_ctx_id));
        if (rhashtable_lookup_fast(&priv->merge_table,
                                   &parent_ctx, merge_table_params)) {
                nfp_flower_cmsg_warn(app, "The two flows are already merged.\n");
                return 0;
        }

        err = nfp_flower_can_merge(sub_flow1, sub_flow2);
        if (err)
                return err;

        merge_key_ls.key_size = sub_flow1->meta.key_len;

        merge_flow = nfp_flower_allocate_new(&merge_key_ls);
        if (!merge_flow)
                return -ENOMEM;

        merge_flow->tc_flower_cookie = (unsigned long)merge_flow;
        merge_flow->ingress_dev = sub_flow1->ingress_dev;

        memcpy(merge_flow->unmasked_data, sub_flow1->unmasked_data,
               sub_flow1->meta.key_len);
        memcpy(merge_flow->mask_data, sub_flow1->mask_data,
               sub_flow1->meta.mask_len);

        err = nfp_flower_merge_action(sub_flow1, sub_flow2, merge_flow);
        if (err)
                goto err_destroy_merge_flow;

        err = nfp_flower_link_flows(merge_flow, sub_flow1);
        if (err)
                goto err_destroy_merge_flow;

        err = nfp_flower_link_flows(merge_flow, sub_flow2);
        if (err)
                goto err_unlink_sub_flow1;

        merge_tc_off.cookie = merge_flow->tc_flower_cookie;
        err = nfp_compile_flow_metadata(app, &merge_tc_off, merge_flow,
                                        merge_flow->ingress_dev, extack);
        if (err)
                goto err_unlink_sub_flow2;

        err = rhashtable_insert_fast(&priv->flow_table, &merge_flow->fl_node,
                                     nfp_flower_table_params);
        if (err)
                goto err_release_metadata;

        merge_info = kmalloc(sizeof(*merge_info), GFP_KERNEL);
        if (!merge_info) {
                err = -ENOMEM;
                goto err_remove_rhash;
        }
        merge_info->parent_ctx = parent_ctx;
        err = rhashtable_insert_fast(&priv->merge_table, &merge_info->ht_node,
                                     merge_table_params);
        if (err)
                goto err_destroy_merge_info;

        err = nfp_flower_xmit_flow(app, merge_flow,
                                   NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
        if (err)
                goto err_remove_merge_info;

        merge_flow->in_hw = true;
        sub_flow1->in_hw = false;

        return 0;

err_remove_merge_info:
        WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table,
                                            &merge_info->ht_node,
                                            merge_table_params));
err_destroy_merge_info:
        kfree(merge_info);
err_remove_rhash:
        WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
                                            &merge_flow->fl_node,
                                            nfp_flower_table_params));
err_release_metadata:
        nfp_modify_flow_metadata(app, merge_flow);
err_unlink_sub_flow2:
        nfp_flower_unlink_flows(merge_flow, sub_flow2);
err_unlink_sub_flow1:
        nfp_flower_unlink_flows(merge_flow, sub_flow1);
err_destroy_merge_flow:
        kfree(merge_flow->action_data);
        kfree(merge_flow->mask_data);
        kfree(merge_flow->unmasked_data);
        kfree(merge_flow);
        return err;
}

/**
 * nfp_flower_validate_pre_tun_rule() - Verify a flow as a pre-tunnel rule.
 * @app:    Pointer to the APP handle
 * @flow:   Pointer to NFP flow representation of rule
 * @key_ls: Pointer to NFP key layers structure
 * @extack: Netlink extended ACK report
 *
 * Verifies the flow as a pre-tunnel rule.
 *
 * Return: negative value on error, 0 if verified.
 */
static int
nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
                                 struct nfp_fl_payload *flow,
                                 struct nfp_fl_key_ls *key_ls,
                                 struct netlink_ext_ack *extack)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_flower_meta_tci *meta_tci;
        struct nfp_flower_mac_mpls *mac;
        u8 *ext = flow->unmasked_data;
        struct nfp_fl_act_head *act;
        u8 *mask = flow->mask_data;
        bool vlan = false;
        int act_offset;
        u8 key_layer;

        meta_tci = (struct nfp_flower_meta_tci *)flow->unmasked_data;
        key_layer = key_ls->key_layer;
        if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
                if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) {
                        u16 vlan_tci = be16_to_cpu(meta_tci->tci);

                        vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
                        flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
                        vlan = true;
                } else {
                        flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
                }
        }

        if (key_layer & ~NFP_FLOWER_PRE_TUN_RULE_FIELDS) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: too many match fields");
                return -EOPNOTSUPP;
        } else if (key_ls->key_layer_two & ~NFP_FLOWER_LAYER2_QINQ) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non-vlan in extended match fields");
                return -EOPNOTSUPP;
        }

        if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MAC fields match required");
                return -EOPNOTSUPP;
        }

        if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
            !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on ipv4/ipv6 eth_type must be present");
                return -EOPNOTSUPP;
        }

        /* Skip fields known to exist. */
        mask += sizeof(struct nfp_flower_meta_tci);
        ext += sizeof(struct nfp_flower_meta_tci);
        if (key_ls->key_layer_two) {
                mask += sizeof(struct nfp_flower_ext_meta);
                ext += sizeof(struct nfp_flower_ext_meta);
        }
        mask += sizeof(struct nfp_flower_in_port);
        ext += sizeof(struct nfp_flower_in_port);

        /* Ensure destination MAC address matches pre_tun_dev. */
        mac = (struct nfp_flower_mac_mpls *)ext;
        if (memcmp(&mac->mac_dst[0], flow->pre_tun_rule.dev->dev_addr,
                   ETH_ALEN)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC must match output dev MAC");
                return -EOPNOTSUPP;
        }

        /* Ensure destination MAC address is fully matched. */
        mac = (struct nfp_flower_mac_mpls *)mask;
        if (!is_broadcast_ether_addr(&mac->mac_dst[0])) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC field must not be masked");
                return -EOPNOTSUPP;
        }

        if (mac->mpls_lse) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MPLS not supported");
                return -EOPNOTSUPP;
        }

        mask += sizeof(struct nfp_flower_mac_mpls);
        ext += sizeof(struct nfp_flower_mac_mpls);
        if (key_layer & NFP_FLOWER_LAYER_IPV4 ||
            key_layer & NFP_FLOWER_LAYER_IPV6) {
                /* Flags and proto fields have same offset in IPv4 and IPv6. */
                int ip_flags = offsetof(struct nfp_flower_ipv4, ip_ext.flags);
                int ip_proto = offsetof(struct nfp_flower_ipv4, ip_ext.proto);
                int size;
                int i;

                size = key_layer & NFP_FLOWER_LAYER_IPV4 ?
                       sizeof(struct nfp_flower_ipv4) :
                       sizeof(struct nfp_flower_ipv6);

                /* Ensure proto and flags are the only IP layer fields. */
                for (i = 0; i < size; i++)
                        if (mask[i] && i != ip_flags && i != ip_proto) {
                                NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header");
                                return -EOPNOTSUPP;
                        }
                ext += size;
                mask += size;
        }

        if (priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ) {
                if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
                        struct nfp_flower_vlan *vlan_tags;
                        u16 vlan_tci;

                        vlan_tags = (struct nfp_flower_vlan *)ext;

                        vlan_tci = be16_to_cpu(vlan_tags->outer_tci);

                        vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
                        flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
                        vlan = true;
                } else {
                        flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
                }
        }

        /* Action must be a single egress or pop_vlan and egress. */
        act_offset = 0;
        act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
        if (vlan) {
                if (act->jump_id != NFP_FL_ACTION_OPCODE_POP_VLAN) {
                        NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on VLAN must have VLAN pop as first action");
                        return -EOPNOTSUPP;
                }

                act_offset += act->len_lw << NFP_FL_LW_SIZ;
                act = (struct nfp_fl_act_head *)&flow->action_data[act_offset];
        }

        if (act->jump_id != NFP_FL_ACTION_OPCODE_OUTPUT) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non egress action detected where egress was expected");
                return -EOPNOTSUPP;
        }

        act_offset += act->len_lw << NFP_FL_LW_SIZ;

        /* Ensure there are no more actions after egress. */
        if (act_offset != flow->meta.act_len) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: egress is not the last action");
                return -EOPNOTSUPP;
        }

        return 0;
}

/**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app:    Pointer to the APP handle
 * @netdev: netdev structure.
 * @flow:   TC flower classifier offload structure.
 *
 * Adds a new flow to the repeated hash structure and action payload.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
                       struct flow_cls_offload *flow)
{
        enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
        struct nfp_flower_priv *priv = app->priv;
        struct netlink_ext_ack *extack = NULL;
        struct nfp_fl_payload *flow_pay;
        struct nfp_fl_key_ls *key_layer;
        struct nfp_port *port = NULL;
        int err;

        extack = flow->common.extack;
        if (nfp_netdev_is_nfp_repr(netdev))
                port = nfp_port_from_netdev(netdev);

        key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
        if (!key_layer)
                return -ENOMEM;

        err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow,
                                              &tun_type, extack);
        if (err)
                goto err_free_key_ls;

        flow_pay = nfp_flower_allocate_new(key_layer);
        if (!flow_pay) {
                err = -ENOMEM;
                goto err_free_key_ls;
        }

        err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev,
                                            flow_pay, tun_type, extack);
        if (err)
                goto err_destroy_flow;

        err = nfp_flower_compile_action(app, flow, netdev, flow_pay, extack);
        if (err)
                goto err_destroy_flow;

        if (flow_pay->pre_tun_rule.dev) {
                err = nfp_flower_validate_pre_tun_rule(app, flow_pay, key_layer, extack);
                if (err)
                        goto err_destroy_flow;
        }

        err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev, extack);
        if (err)
                goto err_destroy_flow;

        flow_pay->tc_flower_cookie = flow->cookie;
        err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
                                     nfp_flower_table_params);
        if (err) {
                NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot insert flow into tables for offloads");
                goto err_release_metadata;
        }

        if (flow_pay->pre_tun_rule.dev)
                err = nfp_flower_xmit_pre_tun_flow(app, flow_pay);
        else
                err = nfp_flower_xmit_flow(app, flow_pay,
                                           NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
        if (err)
                goto err_remove_rhash;

        if (port)
                port->tc_offload_cnt++;

        flow_pay->in_hw = true;

        /* Deallocate flow payload when flower rule has been destroyed. */
        kfree(key_layer);

        return 0;

err_remove_rhash:
        WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
                                            &flow_pay->fl_node,
                                            nfp_flower_table_params));
err_release_metadata:
        nfp_modify_flow_metadata(app, flow_pay);
err_destroy_flow:
        if (flow_pay->nfp_tun_ipv6)
                nfp_tunnel_put_ipv6_off(app, flow_pay->nfp_tun_ipv6);
        kfree(flow_pay->action_data);
        kfree(flow_pay->mask_data);
        kfree(flow_pay->unmasked_data);
        kfree(flow_pay);
err_free_key_ls:
        kfree(key_layer);
        return err;
}

static void
nfp_flower_remove_merge_flow(struct nfp_app *app,
                             struct nfp_fl_payload *del_sub_flow,
                             struct nfp_fl_payload *merge_flow)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_payload_link *link, *temp;
        struct nfp_merge_info *merge_info;
        struct nfp_fl_payload *origin;
        u64 parent_ctx = 0;
        bool mod = false;
        int err;

        link = list_first_entry(&merge_flow->linked_flows,
                                struct nfp_fl_payload_link, merge_flow.list);
        origin = link->sub_flow.flow;

        /* Re-add the rule the merge had overwritten if it has not been
         * deleted.
         */
        if (origin != del_sub_flow)
                mod = true;

        err = nfp_modify_flow_metadata(app, merge_flow);
        if (err) {
                nfp_flower_cmsg_warn(app, "Metadata fail for merge flow delete.\n");
                goto err_free_links;
        }

        if (!mod) {
                err = nfp_flower_xmit_flow(app, merge_flow,
                                           NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
                if (err) {
                        nfp_flower_cmsg_warn(app, "Failed to delete merged flow.\n");
                        goto err_free_links;
                }
        } else {
                __nfp_modify_flow_metadata(priv, origin);
                err = nfp_flower_xmit_flow(app, origin,
                                           NFP_FLOWER_CMSG_TYPE_FLOW_MOD);
                if (err)
                        nfp_flower_cmsg_warn(app, "Failed to revert merge flow.\n");
                origin->in_hw = true;
        }

err_free_links:
        /* Clean any links connected with the merged flow. */
        list_for_each_entry_safe(link, temp, &merge_flow->linked_flows,
                                 merge_flow.list) {
                u32 ctx_id = be32_to_cpu(link->sub_flow.flow->meta.host_ctx_id);

                parent_ctx = (parent_ctx << 32) | (u64)(ctx_id);
                nfp_flower_unlink_flow(link);
        }

        merge_info = rhashtable_lookup_fast(&priv->merge_table,
                                            &parent_ctx,
                                            merge_table_params);
        if (merge_info) {
                WARN_ON_ONCE(rhashtable_remove_fast(&priv->merge_table,
                                                    &merge_info->ht_node,
                                                    merge_table_params));
                kfree(merge_info);
        }

        kfree(merge_flow->action_data);
        kfree(merge_flow->mask_data);
        kfree(merge_flow->unmasked_data);
        WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
                                            &merge_flow->fl_node,
                                            nfp_flower_table_params));
        kfree_rcu(merge_flow, rcu);
}

static void
nfp_flower_del_linked_merge_flows(struct nfp_app *app,
                                  struct nfp_fl_payload *sub_flow)
{
        struct nfp_fl_payload_link *link, *temp;

        /* Remove any merge flow formed from the deleted sub_flow. */
        list_for_each_entry_safe(link, temp, &sub_flow->linked_flows,
                                 sub_flow.list)
                nfp_flower_remove_merge_flow(app, sub_flow,
                                             link->merge_flow.flow);
}

/**
 * nfp_flower_del_offload() - Removes a flow from hardware.
 * @app:    Pointer to the APP handle
 * @netdev: netdev structure.
 * @flow:   TC flower classifier offload structure
 *
 * Removes a flow from the repeated hash structure and clears the
 * action payload. Any flows merged from this are also deleted.
 *
 * Return: negative value on error, 0 if removed successfully.
 */
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
                       struct flow_cls_offload *flow)
{
        struct nfp_flower_priv *priv = app->priv;
        struct netlink_ext_ack *extack = NULL;
        struct nfp_fl_payload *nfp_flow;
        struct nfp_port *port = NULL;
        int err;

        extack = flow->common.extack;
        if (nfp_netdev_is_nfp_repr(netdev))
                port = nfp_port_from_netdev(netdev);

        nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
        if (!nfp_flow) {
                NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot remove flow that does not exist");
                return -ENOENT;
        }

        err = nfp_modify_flow_metadata(app, nfp_flow);
        if (err)
                goto err_free_merge_flow;

        if (nfp_flow->nfp_tun_ipv4_addr)
                nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);

        if (nfp_flow->nfp_tun_ipv6)
                nfp_tunnel_put_ipv6_off(app, nfp_flow->nfp_tun_ipv6);

        if (!nfp_flow->in_hw) {
                err = 0;
                goto err_free_merge_flow;
        }

        if (nfp_flow->pre_tun_rule.dev)
                err = nfp_flower_xmit_pre_tun_del_flow(app, nfp_flow);
        else
                err = nfp_flower_xmit_flow(app, nfp_flow,
                                           NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
        /* Fall through on error. */

err_free_merge_flow:
        nfp_flower_del_linked_merge_flows(app, nfp_flow);
        if (port)
                port->tc_offload_cnt--;
        kfree(nfp_flow->action_data);
        kfree(nfp_flow->mask_data);
        kfree(nfp_flow->unmasked_data);
        WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
                                            &nfp_flow->fl_node,
                                            nfp_flower_table_params));
        kfree_rcu(nfp_flow, rcu);
        return err;
}

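/* Fold stats accumulated against a merge flow into each constituent
 * subflow, so TC sees the counts against the rules it actually installed.
 */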
static void
__nfp_flower_update_merge_stats(struct nfp_app *app,
                                struct nfp_fl_payload *merge_flow)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_payload_link *link;
        struct nfp_fl_payload *sub_flow;
        u64 pkts, bytes, used;
        u32 ctx_id;

        ctx_id = be32_to_cpu(merge_flow->meta.host_ctx_id);
        pkts = priv->stats[ctx_id].pkts;
        /* Do not cycle subflows if no stats to distribute. */
        if (!pkts)
                return;
        bytes = priv->stats[ctx_id].bytes;
        used = priv->stats[ctx_id].used;

        /* Reset stats for the merge flow. */
        priv->stats[ctx_id].pkts = 0;
        priv->stats[ctx_id].bytes = 0;

        /* The merge flow has received stats updates from firmware.
         * Distribute these stats to all subflows that form the merge.
         * The stats will then be collected by TC via the subflows.
         */
        list_for_each_entry(link, &merge_flow->linked_flows, merge_flow.list) {
                sub_flow = link->sub_flow.flow;
                ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id);
                priv->stats[ctx_id].pkts += pkts;
                priv->stats[ctx_id].bytes += bytes;
                priv->stats[ctx_id].used = max_t(u64, used,
                                                 priv->stats[ctx_id].used);
        }
}

static void
nfp_flower_update_merge_stats(struct nfp_app *app,
                              struct nfp_fl_payload *sub_flow)
{
        struct nfp_fl_payload_link *link;

        /* Get merge flows that the subflow forms to distribute their stats. */
        list_for_each_entry(link, &sub_flow->linked_flows, sub_flow.list)
                __nfp_flower_update_merge_stats(app, link->merge_flow.flow);
}

/**
 * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
 * @app:    Pointer to the APP handle
 * @netdev: Netdev structure.
 * @flow:   TC flower classifier offload structure
 *
 * Populates a flow statistics structure which corresponds to a
 * specific flow.
 *
 * Return: negative value on error, 0 if stats populated successfully.
 */
static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
                     struct flow_cls_offload *flow)
{
        struct nfp_flower_priv *priv = app->priv;
        struct netlink_ext_ack *extack = NULL;
        struct nfp_fl_payload *nfp_flow;
        u32 ctx_id;

        extack = flow->common.extack;
        nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
        if (!nfp_flow) {
                NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot dump stats for flow that does not exist");
                return -EINVAL;
        }

        ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);

        spin_lock_bh(&priv->stats_lock);
        /* If request is for a sub_flow, update stats from merged flows. */
        if (!list_empty(&nfp_flow->linked_flows))
                nfp_flower_update_merge_stats(app, nfp_flow);

        flow_stats_update(&flow->stats, priv->stats[ctx_id].bytes,
                          priv->stats[ctx_id].pkts, 0, priv->stats[ctx_id].used,
                          FLOW_ACTION_HW_STATS_DELAYED);

        priv->stats[ctx_id].pkts = 0;
        priv->stats[ctx_id].bytes = 0;
        spin_unlock_bh(&priv->stats_lock);

        return 0;
}

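/* Dispatch a flower classifier command for a repr netdev.  Only 802.3
 * protocols are considered for offload.
 */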
static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
                        struct flow_cls_offload *flower)
{
        if (!eth_proto_is_802_3(flower->common.protocol))
                return -EOPNOTSUPP;

        switch (flower->command) {
        case FLOW_CLS_REPLACE:
                return nfp_flower_add_offload(app, netdev, flower);
        case FLOW_CLS_DESTROY:
                return nfp_flower_del_offload(app, netdev, flower);
        case FLOW_CLS_STATS:
                return nfp_flower_get_stats(app, netdev, flower);
        default:
                return -EOPNOTSUPP;
        }
}

static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
                                        void *type_data, void *cb_priv)
{
        struct nfp_repr *repr = cb_priv;

        if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
                return -EOPNOTSUPP;

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                return nfp_flower_repr_offload(repr->app, repr->netdev,
                                               type_data);
        case TC_SETUP_CLSMATCHALL:
                return nfp_flower_setup_qos_offload(repr->app, repr->netdev,
                                                    type_data);
        default:
                return -EOPNOTSUPP;
        }
}

static LIST_HEAD(nfp_block_cb_list);

static int nfp_flower_setup_tc_block(struct net_device *netdev,
                                     struct flow_block_offload *f)
{
        struct nfp_repr *repr = netdev_priv(netdev);
        struct nfp_flower_repr_priv *repr_priv;
        struct flow_block_cb *block_cb;

        if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                return -EOPNOTSUPP;

        repr_priv = repr->app_priv;
        repr_priv->block_shared = f->block_shared;
        f->driver_block_list = &nfp_block_cb_list;

        switch (f->command) {
        case FLOW_BLOCK_BIND:
                if (flow_block_cb_is_busy(nfp_flower_setup_tc_block_cb, repr,
                                          &nfp_block_cb_list))
                        return -EBUSY;

                block_cb = flow_block_cb_alloc(nfp_flower_setup_tc_block_cb,
                                               repr, repr, NULL);
                if (IS_ERR(block_cb))
                        return PTR_ERR(block_cb);

                flow_block_cb_add(block_cb, f);
                list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
                return 0;
        case FLOW_BLOCK_UNBIND:
                block_cb = flow_block_cb_lookup(f->block,
                                                nfp_flower_setup_tc_block_cb,
                                                repr);
                if (!block_cb)
                        return -ENOENT;

                flow_block_cb_remove(block_cb, f);
                list_del(&block_cb->driver_list);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
                        enum tc_setup_type type, void *type_data)
{
        switch (type) {
        case TC_SETUP_BLOCK:
                return nfp_flower_setup_tc_block(netdev, type_data);
        default:
                return -EOPNOTSUPP;
        }
}

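/* Per-netdev state for indirectly offloaded blocks (e.g. tunnel devices).
 * Entries live on the app's indr_block_cb_priv list, accessed under RTNL.
 */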
struct nfp_flower_indr_block_cb_priv {
        struct net_device *netdev;
        struct nfp_app *app;
        struct list_head list;
};

static struct nfp_flower_indr_block_cb_priv *
nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
                                     struct net_device *netdev)
{
        struct nfp_flower_indr_block_cb_priv *cb_priv;
        struct nfp_flower_priv *priv = app->priv;

        /* All callback list access should be protected by RTNL. */
        ASSERT_RTNL();

        list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
                if (cb_priv->netdev == netdev)
                        return cb_priv;

        return NULL;
}

static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
                                          void *type_data, void *cb_priv)
{
        struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
        struct flow_cls_offload *flower = type_data;

        if (flower->common.chain_index)
                return -EOPNOTSUPP;

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                return nfp_flower_repr_offload(priv->app, priv->netdev,
                                               type_data);
        default:
                return -EOPNOTSUPP;
        }
}

void nfp_flower_setup_indr_tc_release(void *cb_priv)
{
        struct nfp_flower_indr_block_cb_priv *priv = cb_priv;

        list_del(&priv->list);
        kfree(priv);
}

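/* Bind or unbind an indirectly offloaded TC block.  Only ingress blocks on
 * plain tunnel netdevs and egress blocks on internal ports can be offloaded.
 */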
static int
nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch,
                               struct nfp_app *app,
                               struct flow_block_offload *f, void *data,
                               void (*cleanup)(struct flow_block_cb *block_cb))
{
        struct nfp_flower_indr_block_cb_priv *cb_priv;
        struct nfp_flower_priv *priv = app->priv;
        struct flow_block_cb *block_cb;

        if ((f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
             !nfp_flower_internal_port_can_offload(app, netdev)) ||
            (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS &&
             nfp_flower_internal_port_can_offload(app, netdev)))
                return -EOPNOTSUPP;

        switch (f->command) {
        case FLOW_BLOCK_BIND:
                cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
                if (cb_priv &&
                    flow_block_cb_is_busy(nfp_flower_setup_indr_block_cb,
                                          cb_priv,
                                          &nfp_block_cb_list))
                        return -EBUSY;

                cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
                if (!cb_priv)
                        return -ENOMEM;

                cb_priv->netdev = netdev;
                cb_priv->app = app;
                list_add(&cb_priv->list, &priv->indr_block_cb_priv);

                block_cb = flow_indr_block_cb_alloc(nfp_flower_setup_indr_block_cb,
                                                    cb_priv, cb_priv,
                                                    nfp_flower_setup_indr_tc_release,
                                                    f, netdev, sch, data, app,
                                                    cleanup);
                if (IS_ERR(block_cb)) {
                        list_del(&cb_priv->list);
                        kfree(cb_priv);
                        return PTR_ERR(block_cb);
                }

                flow_block_cb_add(block_cb, f);
                list_add_tail(&block_cb->driver_list, &nfp_block_cb_list);
                return 0;
        case FLOW_BLOCK_UNBIND:
                cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
                if (!cb_priv)
                        return -ENOENT;

                block_cb = flow_block_cb_lookup(f->block,
                                                nfp_flower_setup_indr_block_cb,
                                                cb_priv);
                if (!block_cb)
                        return -ENOENT;

                flow_indr_block_cb_remove(block_cb, f);
                list_del(&block_cb->driver_list);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

int
nfp_flower_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
                            void *cb_priv, enum tc_setup_type type,
                            void *type_data, void *data,
                            void (*cleanup)(struct flow_block_cb *block_cb))
{
        if (!nfp_fl_is_netdev_to_offload(netdev))
                return -EOPNOTSUPP;

        switch (type) {
        case TC_SETUP_BLOCK:
                return nfp_flower_setup_indr_tc_block(netdev, sch, cb_priv,
                                                      type_data, data, cleanup);
        default:
                return -EOPNOTSUPP;
        }
}