xref: /linux/net/core/flow_offload.c (revision c6fbb759)
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/flow_offload.h>
#include <linux/rtnetlink.h>
#include <linux/mutex.h>
#include <linux/rhashtable.h>

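/**
 * flow_rule_alloc - allocate a flow_rule with room for @num_actions actions
 * @num_actions: number of action entries to reserve
 *
 * Every action's hw_stats field starts out as FLOW_ACTION_HW_STATS_DONT_CARE;
 * callers that want hardware statistics for an action override it.
 *
 * Return: the new rule, or NULL if the allocation fails.
 */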
struct flow_rule *flow_rule_alloc(unsigned int num_actions)
{
	struct flow_rule *rule;
	int i;

	rule = kzalloc(struct_size(rule, action.entries, num_actions),
		       GFP_KERNEL);
	if (!rule)
		return NULL;

	rule->action.num_entries = num_actions;
	/* Pre-fill each action hw_stats with DONT_CARE.
	 * Caller can override this if it wants stats for a given action.
	 */
	for (i = 0; i < num_actions; i++)
		rule->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;

	return rule;
}
EXPORT_SYMBOL(flow_rule_alloc);

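/**
 * offload_action_alloc - allocate a flow_offload_action for @num_actions actions
 * @num_actions: number of action entries to reserve
 *
 * As with flow_rule_alloc(), each action's hw_stats is pre-filled with
 * FLOW_ACTION_HW_STATS_DONT_CARE.
 *
 * Return: the new offload action container, or NULL if the allocation fails.
 */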
struct flow_offload_action *offload_action_alloc(unsigned int num_actions)
{
	struct flow_offload_action *fl_action;
	int i;

	fl_action = kzalloc(struct_size(fl_action, action.entries, num_actions),
			    GFP_KERNEL);
	if (!fl_action)
		return NULL;

	fl_action->action.num_entries = num_actions;
	/* Pre-fill each action hw_stats with DONT_CARE.
	 * Caller can override this if it wants stats for a given action.
	 */
	for (i = 0; i < num_actions; i++)
		fl_action->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE;

	return fl_action;
}

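/* Resolve the key/mask pointers for dissector key @__type out of @__rule's
 * match and store them in @__out.  This is the body shared by all of the
 * flow_rule_match_*() helpers below.
 */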
#define FLOW_DISSECTOR_MATCH(__rule, __type, __out)				\
	const struct flow_match *__m = &(__rule)->match;			\
	struct flow_dissector *__d = (__m)->dissector;				\
										\
	(__out)->key = skb_flow_dissector_target(__d, __type, (__m)->key);	\
	(__out)->mask = skb_flow_dissector_target(__d, __type, (__m)->mask);

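/* Per-key accessors for drivers parsing a struct flow_rule.  Callers are
 * expected to check that the rule actually carries a key before reading it,
 * for example (illustrative sketch):
 *
 *	struct flow_match_basic match;
 *
 *	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
 *		flow_rule_match_basic(rule, &match);
 *		(match.key and match.mask now point into the rule's match)
 *	}
 */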
void flow_rule_match_meta(const struct flow_rule *rule,
			  struct flow_match_meta *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_META, out);
}
EXPORT_SYMBOL(flow_rule_match_meta);

void flow_rule_match_basic(const struct flow_rule *rule,
			   struct flow_match_basic *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out);
}
EXPORT_SYMBOL(flow_rule_match_basic);

void flow_rule_match_control(const struct flow_rule *rule,
			     struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_control);

void flow_rule_match_eth_addrs(const struct flow_rule *rule,
			       struct flow_match_eth_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_eth_addrs);

void flow_rule_match_vlan(const struct flow_rule *rule,
			  struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_VLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_vlan);

void flow_rule_match_cvlan(const struct flow_rule *rule,
			   struct flow_match_vlan *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CVLAN, out);
}
EXPORT_SYMBOL(flow_rule_match_cvlan);

void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
				struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv4_addrs);

void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
				struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_ipv6_addrs);

void flow_rule_match_ip(const struct flow_rule *rule,
			struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_ip);

void flow_rule_match_ports(const struct flow_rule *rule,
			   struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_ports);

void flow_rule_match_ports_range(const struct flow_rule *rule,
				 struct flow_match_ports_range *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PORTS_RANGE, out);
}
EXPORT_SYMBOL(flow_rule_match_ports_range);

void flow_rule_match_tcp(const struct flow_rule *rule,
			 struct flow_match_tcp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_TCP, out);
}
EXPORT_SYMBOL(flow_rule_match_tcp);

void flow_rule_match_icmp(const struct flow_rule *rule,
			  struct flow_match_icmp *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ICMP, out);
}
EXPORT_SYMBOL(flow_rule_match_icmp);

void flow_rule_match_mpls(const struct flow_rule *rule,
			  struct flow_match_mpls *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_MPLS, out);
}
EXPORT_SYMBOL(flow_rule_match_mpls);

void flow_rule_match_enc_control(const struct flow_rule *rule,
				 struct flow_match_control *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_control);

void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv4_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv4_addrs);

void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv6_addrs *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ipv6_addrs);

void flow_rule_match_enc_ip(const struct flow_rule *rule,
			    struct flow_match_ip *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_IP, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ip);

void flow_rule_match_enc_ports(const struct flow_rule *rule,
			       struct flow_match_ports *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_PORTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_ports);

void flow_rule_match_enc_keyid(const struct flow_rule *rule,
			       struct flow_match_enc_keyid *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_KEYID, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_keyid);

void flow_rule_match_enc_opts(const struct flow_rule *rule,
			      struct flow_match_enc_opts *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_ENC_OPTS, out);
}
EXPORT_SYMBOL(flow_rule_match_enc_opts);

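/**
 * flow_action_cookie_create - allocate a cookie carrying @len bytes of @data
 * @data: opaque user data to copy into the cookie
 * @len: length of @data in bytes
 * @gfp: allocation flags
 *
 * Return: the new cookie, or NULL if the allocation fails.  Release with
 * flow_action_cookie_destroy().
 */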
struct flow_action_cookie *flow_action_cookie_create(void *data,
						     unsigned int len,
						     gfp_t gfp)
{
	struct flow_action_cookie *cookie;

	cookie = kmalloc(sizeof(*cookie) + len, gfp);
	if (!cookie)
		return NULL;
	cookie->cookie_len = len;
	memcpy(cookie->cookie, data, len);
	return cookie;
}
EXPORT_SYMBOL(flow_action_cookie_create);

void flow_action_cookie_destroy(struct flow_action_cookie *cookie)
{
	kfree(cookie);
}
EXPORT_SYMBOL(flow_action_cookie_destroy);

void flow_rule_match_ct(const struct flow_rule *rule,
			struct flow_match_ct *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CT, out);
}
EXPORT_SYMBOL(flow_rule_match_ct);

void flow_rule_match_pppoe(const struct flow_rule *rule,
			   struct flow_match_pppoe *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_PPPOE, out);
}
EXPORT_SYMBOL(flow_rule_match_pppoe);

void flow_rule_match_l2tpv3(const struct flow_rule *rule,
			    struct flow_match_l2tpv3 *out)
{
	FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_L2TPV3, out);
}
EXPORT_SYMBOL(flow_rule_match_l2tpv3);

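/**
 * flow_block_cb_alloc - allocate a flow block callback
 * @cb: callback to run for the block
 * @cb_ident: identifier used to look the callback up again
 * @cb_priv: driver private data passed to @cb
 * @release: optional destructor for @cb_priv, run from flow_block_cb_free()
 *
 * Return: the new block callback, or ERR_PTR(-ENOMEM) if the allocation fails.
 */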
struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
					  void *cb_ident, void *cb_priv,
					  void (*release)(void *cb_priv))
{
	struct flow_block_cb *block_cb;

	block_cb = kzalloc(sizeof(*block_cb), GFP_KERNEL);
	if (!block_cb)
		return ERR_PTR(-ENOMEM);

	block_cb->cb = cb;
	block_cb->cb_ident = cb_ident;
	block_cb->cb_priv = cb_priv;
	block_cb->release = release;

	return block_cb;
}
EXPORT_SYMBOL(flow_block_cb_alloc);

void flow_block_cb_free(struct flow_block_cb *block_cb)
{
	if (block_cb->release)
		block_cb->release(block_cb->cb_priv);

	kfree(block_cb);
}
EXPORT_SYMBOL(flow_block_cb_free);

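/**
 * flow_block_cb_lookup - find a block callback by @cb and @cb_ident
 * @block: flow block to search
 * @cb: callback used at registration time
 * @cb_ident: identifier used at registration time
 *
 * Return: the matching block callback, or NULL if none is installed.
 */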
struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
					   flow_setup_cb_t *cb, void *cb_ident)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, &block->cb_list, list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return block_cb;
	}

	return NULL;
}
EXPORT_SYMBOL(flow_block_cb_lookup);

void *flow_block_cb_priv(struct flow_block_cb *block_cb)
{
	return block_cb->cb_priv;
}
EXPORT_SYMBOL(flow_block_cb_priv);

void flow_block_cb_incref(struct flow_block_cb *block_cb)
{
	block_cb->refcnt++;
}
EXPORT_SYMBOL(flow_block_cb_incref);

unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb)
{
	return --block_cb->refcnt;
}
EXPORT_SYMBOL(flow_block_cb_decref);

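/* Check whether @cb/@cb_ident is already bound on @driver_block_list, so a
 * driver does not bind the same callback to two blocks at once.
 */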
bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
			   struct list_head *driver_block_list)
{
	struct flow_block_cb *block_cb;

	list_for_each_entry(block_cb, driver_block_list, driver_list) {
		if (block_cb->cb == cb &&
		    block_cb->cb_ident == cb_ident)
			return true;
	}

	return false;
}
EXPORT_SYMBOL(flow_block_cb_is_busy);

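/**
 * flow_block_cb_setup_simple - bind/unbind helper for the common one-callback case
 * @f: block offload command from the stack
 * @driver_block_list: driver's list of installed block callbacks
 * @cb: callback to bind
 * @cb_ident: identifier for later lookup
 * @cb_priv: driver private data passed to @cb
 * @ingress_only: reject anything but clsact ingress bindings
 *
 * Typical use from a driver's ndo_setup_tc() TC_SETUP_BLOCK handler
 * (illustrative sketch, names are placeholders):
 *
 *	static LIST_HEAD(foo_block_cb_list);
 *
 *	case TC_SETUP_BLOCK:
 *		return flow_block_cb_setup_simple(type_data,
 *						  &foo_block_cb_list,
 *						  foo_setup_tc_block_cb,
 *						  priv, priv, true);
 */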
int flow_block_cb_setup_simple(struct flow_block_offload *f,
			       struct list_head *driver_block_list,
			       flow_setup_cb_t *cb,
			       void *cb_ident, void *cb_priv,
			       bool ingress_only)
{
	struct flow_block_cb *block_cb;

	if (ingress_only &&
	    f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->driver_block_list = driver_block_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		if (flow_block_cb_is_busy(cb, cb_ident, driver_block_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, driver_block_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, cb_ident);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(flow_block_cb_setup_simple);

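/* Indirect block offload: lets a driver receive block bind/unbind events for
 * net devices it does not register itself, e.g. tunnel devices whose traffic
 * it can nevertheless offload.  Drivers register a callback with
 * flow_indr_dev_register(); subsystems installing a block on a device call
 * flow_indr_dev_setup_offload().
 */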
static DEFINE_MUTEX(flow_indr_block_lock);
static LIST_HEAD(flow_block_indr_list);
static LIST_HEAD(flow_block_indr_dev_list);
static LIST_HEAD(flow_indir_dev_list);

struct flow_indr_dev {
	struct list_head		list;
	flow_indr_block_bind_cb_t	*cb;
	void				*cb_priv;
	refcount_t			refcnt;
};

static struct flow_indr_dev *flow_indr_dev_alloc(flow_indr_block_bind_cb_t *cb,
						 void *cb_priv)
{
	struct flow_indr_dev *indr_dev;

	indr_dev = kmalloc(sizeof(*indr_dev), GFP_KERNEL);
	if (!indr_dev)
		return NULL;

	indr_dev->cb		= cb;
	indr_dev->cb_priv	= cb_priv;
	refcount_set(&indr_dev->refcnt, 1);

	return indr_dev;
}

struct flow_indir_dev_info {
	void *data;
	struct net_device *dev;
	struct Qdisc *sch;
	enum tc_setup_type type;
	void (*cleanup)(struct flow_block_cb *block_cb);
	struct list_head list;
	enum flow_block_command command;
	enum flow_block_binder_type binder_type;
	struct list_head *cb_list;
};

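/* Replay bind events for all blocks already installed (tracked on
 * flow_indir_dev_list) to a newly registered callback, so that a driver
 * loaded after the blocks were set up still gets to offload them.
 */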
static void existing_qdiscs_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
	struct flow_block_offload bo;
	struct flow_indir_dev_info *cur;

	list_for_each_entry(cur, &flow_indir_dev_list, list) {
		memset(&bo, 0, sizeof(bo));
		bo.command = cur->command;
		bo.binder_type = cur->binder_type;
		INIT_LIST_HEAD(&bo.cb_list);
		cb(cur->dev, cur->sch, cb_priv, cur->type, &bo, cur->data, cur->cleanup);
		list_splice(&bo.cb_list, cur->cb_list);
	}
}

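/**
 * flow_indr_dev_register - register an indirect block callback
 * @cb: driver callback invoked on block bind/unbind
 * @cb_priv: driver private data passed back to @cb
 *
 * If the same @cb/@cb_priv pair is already registered, only its refcount is
 * bumped.  Otherwise the callback is added and replayed against all blocks
 * and tc actions installed so far.
 *
 * Return: 0 on success, -ENOMEM if the registration cannot be allocated.
 */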
int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
{
	struct flow_indr_dev *indr_dev;

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry(indr_dev, &flow_block_indr_dev_list, list) {
		if (indr_dev->cb == cb &&
		    indr_dev->cb_priv == cb_priv) {
			refcount_inc(&indr_dev->refcnt);
			mutex_unlock(&flow_indr_block_lock);
			return 0;
		}
	}

	indr_dev = flow_indr_dev_alloc(cb, cb_priv);
	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return -ENOMEM;
	}

	list_add(&indr_dev->list, &flow_block_indr_dev_list);
	existing_qdiscs_register(cb, cb_priv);
	mutex_unlock(&flow_indr_block_lock);

	tcf_action_reoffload_cb(cb, cb_priv, true);

	return 0;
}
EXPORT_SYMBOL(flow_indr_dev_register);

static void __flow_block_indr_cleanup(void (*release)(void *cb_priv),
				      void *cb_priv,
				      struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) {
		if (this->release == release &&
		    this->indr.cb_priv == cb_priv)
			list_move(&this->indr.list, cleanup_list);
	}
}

static void flow_block_indr_notify(struct list_head *cleanup_list)
{
	struct flow_block_cb *this, *next;

	list_for_each_entry_safe(this, next, cleanup_list, indr.list) {
		list_del(&this->indr.list);
		this->indr.cleanup(this);
	}
}

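/**
 * flow_indr_dev_unregister - unregister an indirect block callback
 * @cb: callback passed to flow_indr_dev_register()
 * @cb_priv: private data passed to flow_indr_dev_register()
 * @release: release function identifying the blocks to clean up
 *
 * Drops the registration's refcount; on the last put, all block callbacks
 * installed through this registration are removed and their cleanup
 * handlers are run.
 */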
void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
			      void (*release)(void *cb_priv))
{
	struct flow_indr_dev *this, *next, *indr_dev = NULL;
	LIST_HEAD(cleanup_list);

	mutex_lock(&flow_indr_block_lock);
	list_for_each_entry_safe(this, next, &flow_block_indr_dev_list, list) {
		if (this->cb == cb &&
		    this->cb_priv == cb_priv &&
		    refcount_dec_and_test(&this->refcnt)) {
			indr_dev = this;
			list_del(&indr_dev->list);
			break;
		}
	}

	if (!indr_dev) {
		mutex_unlock(&flow_indr_block_lock);
		return;
	}

	__flow_block_indr_cleanup(release, cb_priv, &cleanup_list);
	mutex_unlock(&flow_indr_block_lock);

	tcf_action_reoffload_cb(cb, cb_priv, false);
	flow_block_indr_notify(&cleanup_list);
	kfree(indr_dev);
}
EXPORT_SYMBOL(flow_indr_dev_unregister);

static void flow_block_indr_init(struct flow_block_cb *flow_block,
				 struct flow_block_offload *bo,
				 struct net_device *dev, struct Qdisc *sch, void *data,
				 void *cb_priv,
				 void (*cleanup)(struct flow_block_cb *block_cb))
{
	flow_block->indr.binder_type = bo->binder_type;
	flow_block->indr.data = data;
	flow_block->indr.cb_priv = cb_priv;
	flow_block->indr.dev = dev;
	flow_block->indr.sch = sch;
	flow_block->indr.cleanup = cleanup;
}

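/* Allocate a block callback as flow_block_cb_alloc() does, additionally
 * linking it into flow_block_indr_list so that it can be cleaned up when
 * the indirect registration goes away.
 */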
struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
					       void *cb_ident, void *cb_priv,
					       void (*release)(void *cb_priv),
					       struct flow_block_offload *bo,
					       struct net_device *dev,
					       struct Qdisc *sch, void *data,
					       void *indr_cb_priv,
					       void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_block_cb *block_cb;

	block_cb = flow_block_cb_alloc(cb, cb_ident, cb_priv, release);
	if (IS_ERR(block_cb))
		goto out;

	flow_block_indr_init(block_cb, bo, dev, sch, data, indr_cb_priv, cleanup);
	list_add(&block_cb->indr.list, &flow_block_indr_list);

out:
	return block_cb;
}
EXPORT_SYMBOL(flow_indr_block_cb_alloc);

static struct flow_indir_dev_info *find_indir_dev(void *data)
{
	struct flow_indir_dev_info *cur;

	list_for_each_entry(cur, &flow_indir_dev_list, list) {
		if (cur->data == data)
			return cur;
	}
	return NULL;
}

static int indir_dev_add(void *data, struct net_device *dev, struct Qdisc *sch,
			 enum tc_setup_type type, void (*cleanup)(struct flow_block_cb *block_cb),
			 struct flow_block_offload *bo)
{
	struct flow_indir_dev_info *info;

	info = find_indir_dev(data);
	if (info)
		return -EEXIST;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->data = data;
	info->dev = dev;
	info->sch = sch;
	info->type = type;
	info->cleanup = cleanup;
	info->command = bo->command;
	info->binder_type = bo->binder_type;
	info->cb_list = bo->cb_list_head;

	list_add(&info->list, &flow_indir_dev_list);
	return 0;
}

static int indir_dev_remove(void *data)
{
	struct flow_indir_dev_info *info;

	info = find_indir_dev(data);
	if (!info)
		return -ENOENT;

	list_del(&info->list);

	kfree(info);
	return 0;
}

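/**
 * flow_indr_dev_setup_offload - offer a block to all indirect callbacks
 * @dev: net device the block is installed on
 * @sch: qdisc, if any
 * @type: classifier setup type (e.g. TC_SETUP_BLOCK)
 * @data: subsystem private data handed to the callbacks
 * @bo: block offload command, or NULL
 *
 * @cleanup: cleanup handler run when a callback is torn down
 *
 * Return: the number of callbacks that accepted the block, or -EOPNOTSUPP
 * if a command was given and no callback added itself to @bo->cb_list.
 */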
int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
				enum tc_setup_type type, void *data,
				struct flow_block_offload *bo,
				void (*cleanup)(struct flow_block_cb *block_cb))
{
	struct flow_indr_dev *this;
	u32 count = 0;
	int err;

	mutex_lock(&flow_indr_block_lock);
	if (bo) {
		if (bo->command == FLOW_BLOCK_BIND)
			indir_dev_add(data, dev, sch, type, cleanup, bo);
		else if (bo->command == FLOW_BLOCK_UNBIND)
			indir_dev_remove(data);
	}

	list_for_each_entry(this, &flow_block_indr_dev_list, list) {
		err = this->cb(dev, sch, this->cb_priv, type, bo, data, cleanup);
		if (!err)
			count++;
	}

	mutex_unlock(&flow_indr_block_lock);

	return (bo && list_empty(&bo->cb_list)) ? -EOPNOTSUPP : count;
}
EXPORT_SYMBOL(flow_indr_dev_setup_offload);

bool flow_indr_dev_exists(void)
{
	return !list_empty(&flow_block_indr_dev_list);
}
EXPORT_SYMBOL(flow_indr_dev_exists);
625