// SPDX-License-Identifier: GPL-2.0
/*
 * To speed up listener socket lookup, create an array to store all sockets
 * listening on the same port.  This allows a decision to be made after finding
 * the first socket.  An optional BPF program can also be configured for
 * selecting the socket index from the array of available sockets.
 */
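/*
 * Userspace usage sketch (illustrative only, not part of this file): each
 * process sets SO_REUSEPORT before bind() so its socket joins the group,
 * and may optionally attach a selection program with
 * SO_ATTACH_REUSEPORT_EBPF or SO_ATTACH_REUSEPORT_CBPF.  "addr" below is a
 * placeholder for an already initialized sockaddr:
 *
 *	int one = 1;
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	listen(fd, SOMAXCONN);
 */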

#include <net/ip.h>
#include <net/sock_reuseport.h>
#include <linux/bpf.h>
#include <linux/idr.h>
#include <linux/filter.h>
#include <linux/rcupdate.h>

#define INIT_SOCKS 128

DEFINE_SPINLOCK(reuseport_lock);

static DEFINE_IDA(reuseport_ida);
static int reuseport_resurrect(struct sock *sk, struct sock_reuseport *old_reuse,
			       struct sock_reuseport *reuse, bool bind_inany);

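/* Mark the reuseport group of @sk as containing a connected socket.  Lookup
 * paths that see has_conns set must fall back to full socket scoring instead
 * of relying on reuseport selection alone.
 */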
void reuseport_has_conns_set(struct sock *sk)
{
	struct sock_reuseport *reuse;

	if (!rcu_access_pointer(sk->sk_reuseport_cb))
		return;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	if (likely(reuse))
		reuse->has_conns = 1;
	spin_unlock_bh(&reuseport_lock);
}
EXPORT_SYMBOL(reuseport_has_conns_set);

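/* socks[] keeps listening sockets packed at the front ([0, num_socks)) and
 * closed/shutdown()ed sockets packed at the tail
 * ([max_socks - num_closed_socks, max_socks)).  Return the index of @sk in
 * the requested section, or -1 if it is not there.
 */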
static int reuseport_sock_index(struct sock *sk,
				const struct sock_reuseport *reuse,
				bool closed)
{
	int left, right;

	if (!closed) {
		left = 0;
		right = reuse->num_socks;
	} else {
		left = reuse->max_socks - reuse->num_closed_socks;
		right = reuse->max_socks;
	}

	for (; left < right; left++)
		if (reuse->socks[left] == sk)
			return left;
	return -1;
}

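/* Append @sk to the listening section of socks[].  The smp_wmb() ensures the
 * socket pointer is visible before the increased num_socks, so lockless
 * readers never dereference a slot that has not been filled yet.
 */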
static void __reuseport_add_sock(struct sock *sk,
				 struct sock_reuseport *reuse)
{
	reuse->socks[reuse->num_socks] = sk;
	/* paired with smp_rmb() in reuseport_(select|migrate)_sock() */
	smp_wmb();
	reuse->num_socks++;
}

static bool __reuseport_detach_sock(struct sock *sk,
				    struct sock_reuseport *reuse)
{
	int i = reuseport_sock_index(sk, reuse, false);

	if (i == -1)
		return false;

	reuse->socks[i] = reuse->socks[reuse->num_socks - 1];
	reuse->num_socks--;

	return true;
}

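/* Push @sk into the closed section, which grows down from the end of
 * socks[].  num_closed_socks is updated with WRITE_ONCE() because
 * inet_csk_bind_conflict() reads it locklessly.
 */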
static void __reuseport_add_closed_sock(struct sock *sk,
					struct sock_reuseport *reuse)
{
	reuse->socks[reuse->max_socks - reuse->num_closed_socks - 1] = sk;
	/* paired with READ_ONCE() in inet_csk_bind_conflict() */
	WRITE_ONCE(reuse->num_closed_socks, reuse->num_closed_socks + 1);
}

static bool __reuseport_detach_closed_sock(struct sock *sk,
					   struct sock_reuseport *reuse)
{
	int i = reuseport_sock_index(sk, reuse, true);

	if (i == -1)
		return false;

	reuse->socks[i] = reuse->socks[reuse->max_socks - reuse->num_closed_socks];
	/* paired with READ_ONCE() in inet_csk_bind_conflict() */
	WRITE_ONCE(reuse->num_closed_socks, reuse->num_closed_socks - 1);

	return true;
}

static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
{
	unsigned int size = sizeof(struct sock_reuseport) +
		      sizeof(struct sock *) * max_socks;
	struct sock_reuseport *reuse = kzalloc(size, GFP_ATOMIC);

	if (!reuse)
		return NULL;

	reuse->max_socks = max_socks;

	RCU_INIT_POINTER(reuse->prog, NULL);
	return reuse;
}

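/* Create the reuseport group for @sk and add it as the first listening
 * member.  Can be called from both the bind()/hash path and the setsockopt()
 * path; losing the race to a concurrent allocation is not an error.
 */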
int reuseport_alloc(struct sock *sk, bool bind_inany)
{
	struct sock_reuseport *reuse;
	int id, ret = 0;

	/* bh lock used since this function call may precede hlist lock in
	 * soft irq of receive path or setsockopt from process context
	 */
	spin_lock_bh(&reuseport_lock);

	/* Allocation attempts can occur concurrently via the setsockopt path
	 * and the bind/hash path.  Nothing to do when we lose the race.
	 */
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	if (reuse) {
		if (reuse->num_closed_socks) {
			/* sk was shutdown()ed before */
			ret = reuseport_resurrect(sk, reuse, NULL, bind_inany);
			goto out;
		}

		/* Only set reuse->bind_inany if the bind_inany is true.
		 * Otherwise, it will overwrite the reuse->bind_inany
		 * which was set by the bind/hash path.
		 */
		if (bind_inany)
			reuse->bind_inany = bind_inany;
		goto out;
	}

	reuse = __reuseport_alloc(INIT_SOCKS);
	if (!reuse) {
		ret = -ENOMEM;
		goto out;
	}

	id = ida_alloc(&reuseport_ida, GFP_ATOMIC);
	if (id < 0) {
		kfree(reuse);
		ret = id;
		goto out;
	}

	reuse->reuseport_id = id;
	reuse->bind_inany = bind_inany;
	reuse->socks[0] = sk;
	reuse->num_socks = 1;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

out:
	spin_unlock_bh(&reuseport_lock);

	return ret;
}
EXPORT_SYMBOL(reuseport_alloc);

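/* Double the capacity of socks[].  When the array is already at the U16_MAX
 * limit, reclaim a slot by dropping one closed socket instead.  Both the
 * listening and closed sections are copied over, every member is re-pointed
 * at the new struct, and the old one is freed after a grace period.
 */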
static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
{
	struct sock_reuseport *more_reuse;
	u32 more_socks_size, i;

	more_socks_size = reuse->max_socks * 2U;
	if (more_socks_size > U16_MAX) {
		if (reuse->num_closed_socks) {
			/* Make room by removing a closed sk.
			 * The child has already been migrated.
			 * Only reqsk left at this point.
			 */
			struct sock *sk;

			sk = reuse->socks[reuse->max_socks - reuse->num_closed_socks];
			RCU_INIT_POINTER(sk->sk_reuseport_cb, NULL);
			__reuseport_detach_closed_sock(sk, reuse);

			return reuse;
		}

		return NULL;
	}

	more_reuse = __reuseport_alloc(more_socks_size);
	if (!more_reuse)
		return NULL;

	more_reuse->num_socks = reuse->num_socks;
	more_reuse->num_closed_socks = reuse->num_closed_socks;
	more_reuse->prog = reuse->prog;
	more_reuse->reuseport_id = reuse->reuseport_id;
	more_reuse->bind_inany = reuse->bind_inany;
	more_reuse->has_conns = reuse->has_conns;

	memcpy(more_reuse->socks, reuse->socks,
	       reuse->num_socks * sizeof(struct sock *));
	memcpy(more_reuse->socks +
	       (more_reuse->max_socks - more_reuse->num_closed_socks),
	       reuse->socks + (reuse->max_socks - reuse->num_closed_socks),
	       reuse->num_closed_socks * sizeof(struct sock *));
	more_reuse->synq_overflow_ts = READ_ONCE(reuse->synq_overflow_ts);

	for (i = 0; i < reuse->max_socks; ++i)
		rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb,
				   more_reuse);

	/* Note: we use kfree_rcu here instead of reuseport_free_rcu so
	 * that reuse and more_reuse can temporarily share a reference
	 * to prog.
	 */
	kfree_rcu(reuse, rcu);
	return more_reuse;
}

static void reuseport_free_rcu(struct rcu_head *head)
{
	struct sock_reuseport *reuse;

	reuse = container_of(head, struct sock_reuseport, rcu);
	sk_reuseport_prog_free(rcu_dereference_protected(reuse->prog, 1));
	ida_free(&reuseport_ida, reuse->reuseport_id);
	kfree(reuse);
}

/**
 *  reuseport_add_sock - Add a socket to the reuseport group of another.
 *  @sk:  New socket to add to the group.
 *  @sk2: Socket belonging to the existing reuseport group.
 *  @bind_inany: Whether or not the group is bound to a local INANY address.
 *
 *  May return ENOMEM and not add socket to group under memory pressure.
 */
int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany)
{
	struct sock_reuseport *old_reuse, *reuse;

	if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
		int err = reuseport_alloc(sk2, bind_inany);

		if (err)
			return err;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					      lockdep_is_held(&reuseport_lock));
	if (old_reuse && old_reuse->num_closed_socks) {
		/* sk was shutdown()ed before */
		int err = reuseport_resurrect(sk, old_reuse, reuse, reuse->bind_inany);

		spin_unlock_bh(&reuseport_lock);
		return err;
	}

	if (old_reuse && old_reuse->num_socks != 1) {
		spin_unlock_bh(&reuseport_lock);
		return -EBUSY;
	}

	if (reuse->num_socks + reuse->num_closed_socks == reuse->max_socks) {
		reuse = reuseport_grow(reuse);
		if (!reuse) {
			spin_unlock_bh(&reuseport_lock);
			return -ENOMEM;
		}
	}

	__reuseport_add_sock(sk, reuse);
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

	spin_unlock_bh(&reuseport_lock);

	if (old_reuse)
		call_rcu(&old_reuse->rcu, reuseport_free_rcu);
	return 0;
}
EXPORT_SYMBOL(reuseport_add_sock);

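/* Re-attach a previously shutdown()ed @sk that starts listening again.  If it
 * re-joins its old group, it simply moves from the closed section back to the
 * listening one; otherwise it is detached from @old_reuse and added to @reuse
 * (allocating a fresh group when none exists yet).
 */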
static int reuseport_resurrect(struct sock *sk, struct sock_reuseport *old_reuse,
			       struct sock_reuseport *reuse, bool bind_inany)
{
	if (old_reuse == reuse) {
		/* If sk was in the same reuseport group, just pop sk out of
		 * the closed section and push sk into the listening section.
		 */
		__reuseport_detach_closed_sock(sk, old_reuse);
		__reuseport_add_sock(sk, old_reuse);
		return 0;
	}

	if (!reuse) {
		/* In bind()/listen() path, we cannot carry over the eBPF prog
		 * for the shutdown()ed socket. In setsockopt() path, we should
		 * not change the eBPF prog of listening sockets by attaching a
		 * prog to the shutdown()ed socket. Thus, we will allocate a new
		 * reuseport group and detach sk from the old group.
		 */
		int id;

		reuse = __reuseport_alloc(INIT_SOCKS);
		if (!reuse)
			return -ENOMEM;

		id = ida_alloc(&reuseport_ida, GFP_ATOMIC);
		if (id < 0) {
			kfree(reuse);
			return id;
		}

		reuse->reuseport_id = id;
		reuse->bind_inany = bind_inany;
	} else {
		/* Move sk from the old group to the new one if
		 * - all the other listeners in the old group were close()d or
		 *   shutdown()ed, and then sk2 has listen()ed on the same port
		 * OR
		 * - sk listen()ed without bind() (or with autobind), was
		 *   shutdown()ed, and then listen()s on another port which
		 *   sk2 listen()s on.
		 */
		if (reuse->num_socks + reuse->num_closed_socks == reuse->max_socks) {
			reuse = reuseport_grow(reuse);
			if (!reuse)
				return -ENOMEM;
		}
	}

	__reuseport_detach_closed_sock(sk, old_reuse);
	__reuseport_add_sock(sk, reuse);
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

	if (old_reuse->num_socks + old_reuse->num_closed_socks == 0)
		call_rcu(&old_reuse->rcu, reuseport_free_rcu);

	return 0;
}

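/* Remove @sk from its reuseport group, whichever section it sits in, and free
 * the group once the last member is gone.
 */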
void reuseport_detach_sock(struct sock *sk)
{
	struct sock_reuseport *reuse;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));

	/* reuseport_grow() has detached a closed sk */
	if (!reuse)
		goto out;

	/* Notify the bpf side. The sk may be added to a sockarray
	 * map. If so, sockarray logic will remove it from the map.
	 *
	 * Other bpf map types that work with reuseport, like sockmap,
	 * don't need an explicit callback from here. They override sk
	 * unhash/close ops to remove the sk from the map before we
	 * get to this point.
	 */
	bpf_sk_reuseport_detach(sk);

	rcu_assign_pointer(sk->sk_reuseport_cb, NULL);

	if (!__reuseport_detach_closed_sock(sk, reuse))
		__reuseport_detach_sock(sk, reuse);

	if (reuse->num_socks + reuse->num_closed_socks == 0)
		call_rcu(&reuse->rcu, reuseport_free_rcu);

out:
	spin_unlock_bh(&reuseport_lock);
}
EXPORT_SYMBOL(reuseport_detach_sock);

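/* Called when a TCP listener stops listening.  If request migration is
 * possible (the sysctl is enabled or a SELECT_OR_MIGRATE prog is attached),
 * keep @sk in the closed section so its children can be handed to the
 * remaining listeners; otherwise detach it right away.
 */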
void reuseport_stop_listen_sock(struct sock *sk)
{
	if (sk->sk_protocol == IPPROTO_TCP) {
		struct sock_reuseport *reuse;
		struct bpf_prog *prog;

		spin_lock_bh(&reuseport_lock);

		reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
						  lockdep_is_held(&reuseport_lock));
		prog = rcu_dereference_protected(reuse->prog,
						 lockdep_is_held(&reuseport_lock));

		if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_migrate_req) ||
		    (prog && prog->expected_attach_type == BPF_SK_REUSEPORT_SELECT_OR_MIGRATE)) {
			/* Migration capable, move sk from the listening section
			 * to the closed section.
			 */
			bpf_sk_reuseport_detach(sk);

			__reuseport_detach_sock(sk, reuse);
			__reuseport_add_closed_sock(sk, reuse);

			spin_unlock_bh(&reuseport_lock);
			return;
		}

		spin_unlock_bh(&reuseport_lock);
	}

	/* Not capable of migration, detach immediately */
	reuseport_detach_sock(sk);
}
EXPORT_SYMBOL(reuseport_stop_listen_sock);

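/* Run a socket-filter style program (attached with SO_ATTACH_REUSEPORT_CBPF
 * or SO_ATTACH_REUSEPORT_EBPF) over the skb with the protocol header
 * temporarily pulled, and use its return value as an index into socks[].
 * Out-of-range indices select no socket.
 */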
static struct sock *run_bpf_filter(struct sock_reuseport *reuse, u16 socks,
				   struct bpf_prog *prog, struct sk_buff *skb,
				   int hdr_len)
{
	struct sk_buff *nskb = NULL;
	u32 index;

	if (skb_shared(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return NULL;
		skb = nskb;
	}

	/* temporarily advance data past protocol header */
	if (!pskb_pull(skb, hdr_len)) {
		kfree_skb(nskb);
		return NULL;
	}
	index = bpf_prog_run_save_cb(prog, skb);
	__skb_push(skb, hdr_len);

	consume_skb(nskb);

	if (index >= socks)
		return NULL;

	return reuse->socks[index];
}

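/* Hash-based fallback: start at a hash-scaled index and walk the listening
 * section circularly, skipping sockets in the ESTABLISHED state (e.g.
 * connected UDP sockets).  Returns NULL if every candidate is connected.
 */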
static struct sock *reuseport_select_sock_by_hash(struct sock_reuseport *reuse,
						  u32 hash, u16 num_socks)
{
	int i, j;

	i = j = reciprocal_scale(hash, num_socks);
	while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) {
		i++;
		if (i >= num_socks)
			i = 0;
		if (i == j)
			return NULL;
	}

	return reuse->socks[i];
}

/**
 *  reuseport_select_sock - Select a socket from an SO_REUSEPORT group.
 *  @sk: First socket in the group.
 *  @hash: When no BPF filter is available, use this hash to select.
 *  @skb: skb to run through BPF filter.
 *  @hdr_len: BPF filter expects skb data pointer at payload data.  If
 *    the skb does not yet point at the payload, this parameter represents
 *    how far the pointer needs to advance to reach the payload.
 *  Returns a socket that should receive the packet (or NULL on error).
 */
struct sock *reuseport_select_sock(struct sock *sk,
				   u32 hash,
				   struct sk_buff *skb,
				   int hdr_len)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *prog;
	struct sock *sk2 = NULL;
	u16 socks;

	rcu_read_lock();
	reuse = rcu_dereference(sk->sk_reuseport_cb);

	/* if memory allocation failed or add call is not yet complete */
	if (!reuse)
		goto out;

	prog = rcu_dereference(reuse->prog);
	socks = READ_ONCE(reuse->num_socks);
	if (likely(socks)) {
		/* paired with smp_wmb() in __reuseport_add_sock() */
		smp_rmb();

		if (!prog || !skb)
			goto select_by_hash;

		if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
			sk2 = bpf_run_sk_reuseport(reuse, sk, prog, skb, NULL, hash);
		else
			sk2 = run_bpf_filter(reuse, socks, prog, skb, hdr_len);

select_by_hash:
		/* no bpf or invalid bpf result: fall back to hash usage */
		if (!sk2)
			sk2 = reuseport_select_sock_by_hash(reuse, hash, socks);
	}

out:
	rcu_read_unlock();
	return sk2;
}
EXPORT_SYMBOL(reuseport_select_sock);

/**
 *  reuseport_migrate_sock - Select a socket from an SO_REUSEPORT group.
 *  @sk: close()ed or shutdown()ed socket in the group.
 *  @migrating_sk: ESTABLISHED/SYN_RECV full socket in the accept queue or
 *    NEW_SYN_RECV request socket during 3WHS.
 *  @skb: skb to run through BPF filter.
 *  Returns a socket (with sk_refcnt +1) that should accept the child socket
 *  (or NULL on error).
 */
struct sock *reuseport_migrate_sock(struct sock *sk,
				    struct sock *migrating_sk,
				    struct sk_buff *skb)
{
	struct sock_reuseport *reuse;
	struct sock *nsk = NULL;
	bool allocated = false;
	struct bpf_prog *prog;
	u16 socks;
	u32 hash;

	rcu_read_lock();

	reuse = rcu_dereference(sk->sk_reuseport_cb);
	if (!reuse)
		goto out;

	socks = READ_ONCE(reuse->num_socks);
	if (unlikely(!socks))
		goto failure;

	/* paired with smp_wmb() in __reuseport_add_sock() */
	smp_rmb();

	hash = migrating_sk->sk_hash;
	prog = rcu_dereference(reuse->prog);
	if (!prog || prog->expected_attach_type != BPF_SK_REUSEPORT_SELECT_OR_MIGRATE) {
		if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_migrate_req))
			goto select_by_hash;
		goto failure;
	}

	if (!skb) {
		skb = alloc_skb(0, GFP_ATOMIC);
		if (!skb)
			goto failure;
		allocated = true;
	}

	nsk = bpf_run_sk_reuseport(reuse, sk, prog, skb, migrating_sk, hash);

	if (allocated)
		kfree_skb(skb);

select_by_hash:
	if (!nsk)
		nsk = reuseport_select_sock_by_hash(reuse, hash, socks);

	if (IS_ERR_OR_NULL(nsk) || unlikely(!refcount_inc_not_zero(&nsk->sk_refcnt))) {
		nsk = NULL;
		goto failure;
	}

out:
	rcu_read_unlock();
	return nsk;

failure:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
	goto out;
}
EXPORT_SYMBOL(reuseport_migrate_sock);

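/* Attach @prog to the reuseport group of @sk, replacing and freeing any
 * previously attached program.  An unhashed socket must at least have
 * SO_REUSEPORT set; its group is allocated on demand in that case.
 */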
int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *old_prog;

	if (sk_unhashed(sk)) {
		int err;

		if (!sk->sk_reuseport)
			return -EINVAL;

		err = reuseport_alloc(sk, false);
		if (err)
			return err;
	} else if (!rcu_access_pointer(sk->sk_reuseport_cb)) {
		/* The socket wasn't bound with SO_REUSEPORT */
		return -EINVAL;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_prog = rcu_dereference_protected(reuse->prog,
					     lockdep_is_held(&reuseport_lock));
	rcu_assign_pointer(reuse->prog, prog);
	spin_unlock_bh(&reuseport_lock);

	sk_reuseport_prog_free(old_prog);
	return 0;
}
EXPORT_SYMBOL(reuseport_attach_prog);

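/* Detach and free the BPF program attached to the reuseport group of @sk.
 * Returns -ENOENT when no program (or no group) is attached and -EINVAL when
 * the socket never had SO_REUSEPORT set.
 */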
int reuseport_detach_prog(struct sock *sk)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *old_prog;

	old_prog = NULL;
	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));

	/* reuse must be checked after acquiring the reuseport_lock
	 * because reuseport_grow() can detach a closed sk.
	 */
	if (!reuse) {
		spin_unlock_bh(&reuseport_lock);
		return sk->sk_reuseport ? -ENOENT : -EINVAL;
	}

	if (sk_unhashed(sk) && reuse->num_closed_socks) {
		spin_unlock_bh(&reuseport_lock);
		return -ENOENT;
	}

	old_prog = rcu_replace_pointer(reuse->prog, old_prog,
				       lockdep_is_held(&reuseport_lock));
	spin_unlock_bh(&reuseport_lock);

	if (!old_prog)
		return -ENOENT;

	sk_reuseport_prog_free(old_prog);
	return 0;
}
EXPORT_SYMBOL(reuseport_detach_prog);
666