// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>
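/* Check whether it is safe to coalesce new data into the element just
 * before sg.end: the ring must be non-empty and that element must not
 * precede elem_first_coalesce, taking ring wrap-around into account.
 */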
static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
{
	if (msg->sg.end > msg->sg.start &&
	    elem_first_coalesce < msg->sg.end)
		return true;

	if (msg->sg.end < msg->sg.start &&
	    (elem_first_coalesce > msg->sg.start ||
	     elem_first_coalesce < msg->sg.end))
		return true;

	return false;
}
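/* Grow @msg by @len bytes of page-frag memory charged to @sk. New data is
 * coalesced into the last element when sk_msg_try_coalesce_ok() allows it,
 * otherwise a new element is added. Returns -ENOMEM when memory cannot be
 * refilled or charged, and -ENOSPC when the scatterlist ring is full; any
 * partially allocated data is left in @msg for the caller to use or trim.
 */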
int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce)
{
	struct page_frag *pfrag = sk_page_frag(sk);
	int ret = 0;

	len -= msg->sg.size;
	while (len > 0) {
		struct scatterlist *sge;
		u32 orig_offset;
		int use, i;

		if (!sk_page_frag_refill(sk, pfrag))
			return -ENOMEM;

		orig_offset = pfrag->offset;
		use = min_t(int, len, pfrag->size - orig_offset);
		if (!sk_wmem_schedule(sk, use))
			return -ENOMEM;

		i = msg->sg.end;
		sk_msg_iter_var_prev(i);
		sge = &msg->sg.data[i];

		if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
		    sg_page(sge) == pfrag->page &&
		    sge->offset + sge->length == orig_offset) {
			sge->length += use;
		} else {
			if (sk_msg_full(msg)) {
				ret = -ENOSPC;
				break;
			}

			sge = &msg->sg.data[msg->sg.end];
			sg_unmark_end(sge);
			sg_set_page(sge, pfrag->page, use, orig_offset);
			get_page(pfrag->page);
			sk_msg_iter_next(msg, end);
		}

		sk_mem_charge(sk, use);
		msg->sg.size += use;
		pfrag->offset += use;
		len -= use;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_alloc);
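/* Append @len bytes of @src, starting at offset @off, to @dst by reference.
 * Pages are shared rather than copied: contiguous ranges are merged into the
 * last element of @dst when possible, and the cloned bytes are charged to
 * @sk. Returns -ENOSPC if @src runs out of data or @dst runs out of slots.
 */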
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len)
{
	int i = src->sg.start;
	struct scatterlist *sge = sk_msg_elem(src, i);
	struct scatterlist *sgd = NULL;
	u32 sge_len, sge_off;

	while (off) {
		if (sge->length > off)
			break;
		off -= sge->length;
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && off)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	while (len) {
		sge_len = sge->length - off;
		if (sge_len > len)
			sge_len = len;

		if (dst->sg.end)
			sgd = sk_msg_elem(dst, dst->sg.end - 1);

		if (sgd &&
		    (sg_page(sge) == sg_page(sgd)) &&
		    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
			sgd->length += sge_len;
			dst->sg.size += sge_len;
		} else if (!sk_msg_full(dst)) {
			sge_off = sge->offset + off;
			sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
		} else {
			return -ENOSPC;
		}

		off = 0;
		len -= sge_len;
		sk_mem_charge(sk, sge_len);
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && len)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_msg_clone);
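/* Uncharge @bytes from the head of @msg and zero out fully consumed
 * elements, advancing sg.start past them. The pages themselves are not
 * released here.
 */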
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = sk_msg_elem(msg, i);

		if (bytes < sge->length) {
			sge->length -= bytes;
			sge->offset += bytes;
			sk_mem_uncharge(sk, bytes);
			break;
		}

		sk_mem_uncharge(sk, sge->length);
		bytes -= sge->length;
		sge->length = 0;
		sge->offset = 0;
		sk_msg_iter_var_next(i);
	} while (bytes && i != msg->sg.end);
	msg->sg.start = i;
}
EXPORT_SYMBOL_GPL(sk_msg_return_zero);
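/* Uncharge up to @bytes of memory from @sk for this msg. Unlike
 * sk_msg_return_zero() the scatterlist entries themselves are left intact.
 */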
void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = &msg->sg.data[i];
		int uncharge = (bytes < sge->length) ? bytes : sge->length;

		sk_mem_uncharge(sk, uncharge);
		bytes -= uncharge;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
}
EXPORT_SYMBOL_GPL(sk_msg_return);

static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
			    bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	u32 len = sge->length;

	/* When the skb owns the memory we free it from consume_skb path. */
	if (!msg->skb) {
		if (charge)
			sk_mem_uncharge(sk, len);
		put_page(sg_page(sge));
	}
	memset(sge, 0, sizeof(*sge));
	return len;
}

static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
			 bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	int freed = 0;

	while (msg->sg.size) {
		msg->sg.size -= sge->length;
		freed += sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, msg->sg.size);
		sge = sk_msg_elem(msg, i);
	}
	consume_skb(msg->skb);
	sk_msg_init(msg);
	return freed;
}

int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, false);
}
EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);

int sk_msg_free(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free);

static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
				  u32 bytes, bool charge)
{
	struct scatterlist *sge;
	u32 i = msg->sg.start;

	while (bytes) {
		sge = sk_msg_elem(msg, i);
		if (!sge->length)
			break;
		if (bytes < sge->length) {
			if (charge)
				sk_mem_uncharge(sk, bytes);
			sge->length -= bytes;
			sge->offset += bytes;
			msg->sg.size -= bytes;
			break;
		}

		msg->sg.size -= sge->length;
		bytes -= sge->length;
		sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, bytes);
	}
	msg->sg.start = i;
}

void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free_partial);

void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, false);
}
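/* Trim @msg down to @len bytes from the tail: whole trailing elements are
 * freed, the final element is shortened, and the trimmed memory is
 * uncharged from @sk. sg.curr and sg.copybreak are pulled back if they
 * pointed into the trimmed region.
 */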
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
{
	int trim = msg->sg.size - len;
	u32 i = msg->sg.end;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	sk_msg_iter_var_prev(i);
	msg->sg.size = len;
	while (msg->sg.data[i].length &&
	       trim >= msg->sg.data[i].length) {
		trim -= msg->sg.data[i].length;
		sk_msg_free_elem(sk, msg, i, true);
		sk_msg_iter_var_prev(i);
		if (!trim)
			goto out;
	}

	msg->sg.data[i].length -= trim;
	sk_mem_uncharge(sk, trim);
	/* Adjust copybreak if it falls into the trimmed part of last buf */
	if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
		msg->sg.copybreak = msg->sg.data[i].length;
out:
	sk_msg_iter_var_next(i);
	msg->sg.end = i;
	/* If we trim data a full sg elem before the curr pointer, update
	 * copybreak and curr so that any future copy operations start at
	 * the new copy location.
	 * However, trimmed data that has not yet been used in a copy op
	 * does not require an update.
	 */
	if (!msg->sg.size) {
		msg->sg.curr = msg->sg.start;
		msg->sg.copybreak = 0;
	} else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
		   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
		sk_msg_iter_var_prev(i);
		msg->sg.curr = i;
		msg->sg.copybreak = msg->sg.data[i].length;
	}
}
EXPORT_SYMBOL_GPL(sk_msg_trim);
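/* Map user pages from @from directly into the scatterlist of @msg so the
 * data is referenced rather than copied. On failure the iov_iter is
 * reverted; the caller must trim @msg if the partially built scatterlist
 * also needs to be undone.
 */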
int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes)
{
	int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
	const int to_max_pages = MAX_MSG_FRAGS;
	struct page *pages[MAX_MSG_FRAGS];
	ssize_t orig, copied, use, offset;

	orig = msg->sg.size;
	while (bytes > 0) {
		i = 0;
		maxpages = to_max_pages - num_elems;
		if (maxpages == 0) {
			ret = -EFAULT;
			goto out;
		}

		copied = iov_iter_get_pages(from, pages, bytes, maxpages,
					    &offset);
		if (copied <= 0) {
			ret = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);
		bytes -= copied;
		msg->sg.size += copied;

		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);
			sg_set_page(&msg->sg.data[msg->sg.end],
				    pages[i], use, offset);
			sg_unmark_end(&msg->sg.data[msg->sg.end]);
			sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;
			sk_msg_iter_next(msg, end);
			num_elems++;
			i++;
		}
		/* When zerocopy is mixed with sk_msg_*copy* operations we
		 * may have a copybreak set; in that case clear it and prefer
		 * the zerocopy remainder when possible.
		 */
		msg->sg.copybreak = 0;
		msg->sg.curr = msg->sg.end;
	}
out:
	/* Revert iov_iter updates, msg will need to use 'trim' later if it
	 * also needs to be cleared.
	 */
	if (ret)
		iov_iter_revert(from, msg->sg.size - orig);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);
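/* Copy up to @bytes from @from into the already-allocated scatterlist of
 * @msg, starting at the sg.curr element and sg.copybreak offset. A negative
 * error is returned if the msg runs out of room (-ENOSPC) or the copy
 * itself fails (-EFAULT).
 */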
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes)
{
	int ret = -ENOSPC, i = msg->sg.curr;
	struct scatterlist *sge;
	u32 copy, buf_size;
	void *to;

	do {
		sge = sk_msg_elem(msg, i);
		/* This is possible if a trim operation shrunk the buffer */
		if (msg->sg.copybreak >= sge->length) {
			msg->sg.copybreak = 0;
			sk_msg_iter_var_next(i);
			if (i == msg->sg.end)
				break;
			sge = sk_msg_elem(msg, i);
		}

		buf_size = sge->length - msg->sg.copybreak;
		copy = (buf_size > bytes) ? bytes : buf_size;
		to = sg_virt(sge) + msg->sg.copybreak;
		msg->sg.copybreak += copy;
		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
			ret = copy_from_iter_nocache(to, copy, from);
		else
			ret = copy_from_iter(to, copy, from);
		if (ret != copy) {
			ret = -EFAULT;
			goto out;
		}
		bytes -= copy;
		if (!bytes)
			break;
		msg->sg.copybreak = 0;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
out:
	msg->sg.curr = i;
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);

int sk_msg_wait_data(struct sock *sk, struct sk_psock *psock, int flags,
		     long timeo, int *err)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int ret = 0;

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		return 1;

	if (!timeo)
		return ret;

	add_wait_queue(sk_sleep(sk), &wait);
	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	ret = sk_wait_event(sk, &timeo,
			    !list_empty(&psock->ingress_msg) ||
			    !skb_queue_empty(&sk->sk_receive_queue), &wait);
	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	remove_wait_queue(sk_sleep(sk), &wait);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_wait_data);
/* Receive sk_msg from psock->ingress_msg to @msg. */
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
		   int len, int flags)
{
	struct iov_iter *iter = &msg->msg_iter;
	int peek = flags & MSG_PEEK;
	struct sk_msg *msg_rx;
	int i, copied = 0;

	msg_rx = sk_psock_peek_msg(psock);
	while (copied != len) {
		struct scatterlist *sge;

		if (unlikely(!msg_rx))
			break;

		i = msg_rx->sg.start;
		do {
			struct page *page;
			int copy;

			sge = sk_msg_elem(msg_rx, i);
			copy = sge->length;
			page = sg_page(sge);
			if (copied + copy > len)
				copy = len - copied;
			copy = copy_page_to_iter(page, sge->offset, copy, iter);
			if (!copy)
				return copied ? copied : -EFAULT;

			copied += copy;
			if (likely(!peek)) {
				sge->offset += copy;
				sge->length -= copy;
				if (!msg_rx->skb)
					sk_mem_uncharge(sk, copy);
				msg_rx->sg.size -= copy;

				if (!sge->length) {
					sk_msg_iter_var_next(i);
					if (!msg_rx->skb)
						put_page(page);
				}
			} else {
				/* Let's not optimize the peek case: if
				 * copy_page_to_iter() didn't copy the entire
				 * length, just break out.
				 */
				if (copy != sge->length)
					return copied;
				sk_msg_iter_var_next(i);
			}

			if (copied == len)
				break;
		} while (i != msg_rx->sg.end);

		if (unlikely(peek)) {
			msg_rx = sk_psock_next_msg(psock, msg_rx);
			if (!msg_rx)
				break;
			continue;
		}

		msg_rx->sg.start = i;
		if (!sge->length && msg_rx->sg.start == msg_rx->sg.end) {
			msg_rx = sk_psock_dequeue_msg(psock);
			kfree_sk_msg(msg_rx);
		}
		msg_rx = sk_psock_peek_msg(psock);
	}

	return copied;
}
EXPORT_SYMBOL_GPL(sk_msg_recvmsg);

static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
						  struct sk_buff *skb)
{
	struct sk_msg *msg;

	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
		return NULL;

	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return NULL;

	msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_KERNEL);
	if (unlikely(!msg))
		return NULL;

	sk_msg_init(msg);
	return msg;
}
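/* Linearize the skb, map it into the msg scatterlist, attach the skb so its
 * memory is released from the consume_skb() path, queue the msg on the
 * psock ingress list and wake up the receiver. Returns the number of bytes
 * queued or a negative error.
 */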
static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
					struct sk_psock *psock,
					struct sock *sk,
					struct sk_msg *msg)
{
	int num_sge, copied;

	/* skb_linearize() may fail with ENOMEM, but let's simply try again
	 * later if this happens. Under memory pressure we don't want to
	 * drop the skb. We need to linearize the skb so that the mapping
	 * in skb_to_sgvec() cannot error.
	 */
	if (skb_linearize(skb))
		return -EAGAIN;
	num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len);
	if (unlikely(num_sge < 0)) {
		kfree(msg);
		return num_sge;
	}

	copied = skb->len;
	msg->sg.start = 0;
	msg->sg.size = copied;
	msg->sg.end = num_sge;
	msg->skb = skb;

	sk_psock_queue_msg(psock, msg);
	sk_psock_data_ready(sk, psock);
	return copied;
}
static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb);

static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
{
	struct sock *sk = psock->sk;
	struct sk_msg *msg;

	/* If we are receiving on the same sock, skb->sk is already assigned;
	 * skip memory accounting and owner transition since they are already
	 * set correctly.
	 */
	if (unlikely(skb->sk == sk))
		return sk_psock_skb_ingress_self(psock, skb);
	msg = sk_psock_create_ingress_msg(sk, skb);
	if (!msg)
		return -EAGAIN;

	/* This will transition ownership of the data from the socket where
	 * the BPF program was run initiating the redirect to the socket
	 * we will eventually receive this data on. The data will be released
	 * from consume_skb() found in __tcp_bpf_recvmsg() after it has been
	 * copied into user buffers.
	 */
	skb_set_owner_r(skb, sk);
	return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
}

/* Puts an skb on the ingress queue of the socket already assigned to the
 * skb. In this case we do not need to check memory limits or skb_set_owner_r
 * because the skb is already accounted for here.
 */
static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb)
{
	struct sk_msg *msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
	struct sock *sk = psock->sk;

	if (unlikely(!msg))
		return -EAGAIN;
	sk_msg_init(msg);
	skb_set_owner_r(skb, sk);
	return sk_psock_skb_ingress_enqueue(skb, psock, sk, msg);
}

static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
			       u32 off, u32 len, bool ingress)
{
	if (!ingress) {
		if (!sock_writeable(psock->sk))
			return -EAGAIN;
		return skb_send_sock(psock->sk, skb, off, len);
	}
	return sk_psock_skb_ingress(psock, skb);
}
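/* Workqueue handler that drains psock->ingress_skb. Each skb is either sent
 * out (egress redirect) or queued as ingress data; on -EAGAIN the current
 * skb and the remaining length/offset are stashed in work_state so the next
 * run can resume where this one left off.
 */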
static void sk_psock_backlog(struct work_struct *work)
{
	struct sk_psock *psock = container_of(work, struct sk_psock, work);
	struct sk_psock_work_state *state = &psock->work_state;
	struct sk_buff *skb;
	bool ingress;
	u32 len, off;
	int ret;

	mutex_lock(&psock->work_mutex);
	if (state->skb) {
		skb = state->skb;
		len = state->len;
		off = state->off;
		state->skb = NULL;
		goto start;
	}

	while ((skb = skb_dequeue(&psock->ingress_skb))) {
		len = skb->len;
		off = 0;
start:
		ingress = skb_bpf_ingress(skb);
		skb_bpf_redirect_clear(skb);
		do {
			ret = -EIO;
			if (!sock_flag(psock->sk, SOCK_DEAD))
				ret = sk_psock_handle_skb(psock, skb, off,
							  len, ingress);
			if (ret <= 0) {
				if (ret == -EAGAIN) {
					state->skb = skb;
					state->len = len;
					state->off = off;
					goto end;
				}
				/* Hard errors break pipe and stop xmit. */
				sk_psock_report_error(psock, ret ? -ret : EPIPE);
				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
				kfree_skb(skb);
				goto end;
			}
			off += ret;
			len -= ret;
		} while (len);

		if (!ingress)
			kfree_skb(skb);
	}
end:
	mutex_unlock(&psock->work_mutex);
}
struct sk_psock *sk_psock_init(struct sock *sk, int node)
{
	struct sk_psock *psock;
	struct proto *prot;

	write_lock_bh(&sk->sk_callback_lock);

	if (sk->sk_user_data) {
		psock = ERR_PTR(-EBUSY);
		goto out;
	}

	psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
	if (!psock) {
		psock = ERR_PTR(-ENOMEM);
		goto out;
	}

	prot = READ_ONCE(sk->sk_prot);
	psock->sk = sk;
	psock->eval = __SK_NONE;
	psock->sk_proto = prot;
	psock->saved_unhash = prot->unhash;
	psock->saved_close = prot->close;
	psock->saved_write_space = sk->sk_write_space;

	INIT_LIST_HEAD(&psock->link);
	spin_lock_init(&psock->link_lock);

	INIT_WORK(&psock->work, sk_psock_backlog);
	mutex_init(&psock->work_mutex);
	INIT_LIST_HEAD(&psock->ingress_msg);
	spin_lock_init(&psock->ingress_lock);
	skb_queue_head_init(&psock->ingress_skb);

	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
	refcount_set(&psock->refcnt, 1);

	rcu_assign_sk_user_data_nocopy(sk, psock);
	sock_hold(sk);

out:
	write_unlock_bh(&sk->sk_callback_lock);
	return psock;
}
EXPORT_SYMBOL_GPL(sk_psock_init);

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
{
	struct sk_psock_link *link;

	spin_lock_bh(&psock->link_lock);
	link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
					list);
	if (link)
		list_del(&link->list);
	spin_unlock_bh(&psock->link_lock);
	return link;
}

static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
{
	struct sk_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
		list_del(&msg->list);
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
}

static void __sk_psock_zap_ingress(struct sk_psock *psock)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
		skb_bpf_redirect_clear(skb);
		kfree_skb(skb);
	}
	__sk_psock_purge_ingress_msg(psock);
}

static void sk_psock_link_destroy(struct sk_psock *psock)
{
	struct sk_psock_link *link, *tmp;

	list_for_each_entry_safe(link, tmp, &psock->link, list) {
		list_del(&link->list);
		sk_psock_free_link(link);
	}
}
void sk_psock_stop(struct sk_psock *psock, bool wait)
{
	spin_lock_bh(&psock->ingress_lock);
	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
	sk_psock_cork_free(psock);
	__sk_psock_zap_ingress(psock);
	spin_unlock_bh(&psock->ingress_lock);

	if (wait)
		cancel_work_sync(&psock->work);
}

static void sk_psock_done_strp(struct sk_psock *psock);

static void sk_psock_destroy(struct work_struct *work)
{
	struct sk_psock *psock = container_of(to_rcu_work(work),
					      struct sk_psock, rwork);
	/* No sk_callback_lock since already detached. */

	sk_psock_done_strp(psock);

	cancel_work_sync(&psock->work);
	mutex_destroy(&psock->work_mutex);

	psock_progs_drop(&psock->progs);

	sk_psock_link_destroy(psock);
	sk_psock_cork_free(psock);

	if (psock->sk_redir)
		sock_put(psock->sk_redir);
	sock_put(psock->sk);
	kfree(psock);
}

void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
{
	sk_psock_stop(psock, false);

	write_lock_bh(&sk->sk_callback_lock);
	sk_psock_restore_proto(sk, psock);
	rcu_assign_sk_user_data(sk, NULL);
	if (psock->progs.stream_parser)
		sk_psock_stop_strp(sk, psock);
	else if (psock->progs.stream_verdict || psock->progs.skb_verdict)
		sk_psock_stop_verdict(sk, psock);
	write_unlock_bh(&sk->sk_callback_lock);

	INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
	queue_rcu_work(system_wq, &psock->rwork);
}
EXPORT_SYMBOL_GPL(sk_psock_drop);
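/* Map a BPF program verdict (SK_PASS/SK_DROP) plus the redirect flag onto
 * the internal __SK_PASS/__SK_REDIRECT/__SK_DROP action codes.
 */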
static int sk_psock_map_verd(int verdict, bool redir)
{
	switch (verdict) {
	case SK_PASS:
		return redir ? __SK_REDIRECT : __SK_PASS;
	case SK_DROP:
	default:
		break;
	}

	return __SK_DROP;
}

int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg)
{
	struct bpf_prog *prog;
	int ret;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.msg_parser);
	if (unlikely(!prog)) {
		ret = __SK_PASS;
		goto out;
	}

	sk_msg_compute_data_pointers(msg);
	msg->sk = sk;
	ret = bpf_prog_run_pin_on_cpu(prog, msg);
	ret = sk_psock_map_verd(ret, msg->sk_redir);
	psock->apply_bytes = msg->apply_bytes;
	if (ret == __SK_REDIRECT) {
		if (psock->sk_redir)
			sock_put(psock->sk_redir);
		psock->sk_redir = msg->sk_redir;
		if (!psock->sk_redir) {
			ret = __SK_DROP;
			goto out;
		}
		sock_hold(psock->sk_redir);
	}
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);
static void sk_psock_skb_redirect(struct sk_buff *skb)
{
	struct sk_psock *psock_other;
	struct sock *sk_other;

	sk_other = skb_bpf_redirect_fetch(skb);
	/* This error indicates a buggy BPF program: it returned a redirect
	 * verdict but then didn't set a redirect destination.
	 */
	if (unlikely(!sk_other)) {
		kfree_skb(skb);
		return;
	}
	psock_other = sk_psock(sk_other);
	/* This error indicates the socket is being torn down or had another
	 * error that caused the pipe to break. We can't send a packet on
	 * a socket that is in this state so we drop the skb.
	 */
	if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
		kfree_skb(skb);
		return;
	}
	spin_lock_bh(&psock_other->ingress_lock);
	if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
		spin_unlock_bh(&psock_other->ingress_lock);
		kfree_skb(skb);
		return;
	}

	skb_queue_tail(&psock_other->ingress_skb, skb);
	schedule_work(&psock_other->work);
	spin_unlock_bh(&psock_other->ingress_lock);
}
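/* On the TLS path only __SK_REDIRECT requires action here; pass and drop
 * are left to the TLS caller, which acts on the verdict returned from
 * sk_psock_tls_strp_read().
 */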
static void sk_psock_tls_verdict_apply(struct sk_buff *skb, struct sock *sk, int verdict)
{
	switch (verdict) {
	case __SK_REDIRECT:
		sk_psock_skb_redirect(skb);
		break;
	case __SK_PASS:
	case __SK_DROP:
	default:
		break;
	}
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
{
	struct bpf_prog *prog;
	int ret = __SK_PASS;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (likely(prog)) {
		skb->sk = psock->sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_tls_verdict_apply(skb, psock->sk, ret);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);
static void sk_psock_verdict_apply(struct sk_psock *psock,
				   struct sk_buff *skb, int verdict)
{
	struct sock *sk_other;
	int err = -EIO;

	switch (verdict) {
	case __SK_PASS:
		sk_other = psock->sk;
		if (sock_flag(sk_other, SOCK_DEAD) ||
		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
			goto out_free;
		}

		skb_bpf_set_ingress(skb);

		/* If the queue is empty then we can submit directly
		 * into the msg queue. If it's not empty we have to
		 * queue work, otherwise we may get OOO data. Any
		 * errors from sk_psock_skb_ingress will be handled by
		 * retrying later from the workqueue.
		 */
		if (skb_queue_empty(&psock->ingress_skb)) {
			err = sk_psock_skb_ingress_self(psock, skb);
		}
		if (err < 0) {
			spin_lock_bh(&psock->ingress_lock);
			if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
				skb_queue_tail(&psock->ingress_skb, skb);
				schedule_work(&psock->work);
			}
			spin_unlock_bh(&psock->ingress_lock);
		}
		break;
	case __SK_REDIRECT:
		sk_psock_skb_redirect(skb);
		break;
	case __SK_DROP:
	default:
out_free:
		kfree_skb(skb);
	}
}
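/* Replacement sk_write_space callback: kick the backlog worker when the
 * socket becomes writable again, then invoke the original write_space hook
 * outside the RCU read section.
 */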
static void sk_psock_write_space(struct sock *sk)
{
	struct sk_psock *psock;
	void (*write_space)(struct sock *sk) = NULL;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
			schedule_work(&psock->work);
		write_space = psock->saved_write_space;
	}
	rcu_read_unlock();
	if (write_space)
		write_space(sk);
}

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	struct sock *sk;

	rcu_read_lock();
	sk = strp->sk;
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		kfree_skb(skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (likely(prog)) {
		skb->sk = sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_verdict_apply(psock, skb, ret);
out:
	rcu_read_unlock();
}

static int sk_psock_strp_read_done(struct strparser *strp, int err)
{
	return err;
}

static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock = container_of(strp, struct sk_psock, strp);
	struct bpf_prog *prog;
	int ret = skb->len;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.stream_parser);
	if (likely(prog)) {
		skb->sk = psock->sk;
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		skb->sk = NULL;
	}
	rcu_read_unlock();
	return ret;
}

/* Called with socket lock held. */
static void sk_psock_strp_data_ready(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (tls_sw_has_ctx_rx(sk)) {
			psock->saved_data_ready(sk);
		} else {
			write_lock_bh(&sk->sk_callback_lock);
			strp_data_ready(&psock->strp);
			write_unlock_bh(&sk->sk_callback_lock);
		}
	}
	rcu_read_unlock();
}
int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	static const struct strp_callbacks cb = {
		.rcv_msg	= sk_psock_strp_read,
		.read_sock_done	= sk_psock_strp_read_done,
		.parse_msg	= sk_psock_strp_parse,
	};

	return strp_init(&psock->strp, sk, &cb);
}

void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		return;

	psock->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_strp_data_ready;
	sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
	if (!psock->saved_data_ready)
		return;

	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;
	strp_stop(&psock->strp);
}

static void sk_psock_done_strp(struct sk_psock *psock)
{
	/* Parser has been stopped */
	if (psock->progs.stream_parser)
		strp_done(&psock->strp);
}
#else
static void sk_psock_done_strp(struct sk_psock *psock)
{
}
#endif /* CONFIG_BPF_STREAM_PARSER */
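/* read_sock() callback used in verdict-only mode (no stream parser): clone
 * the skb so tcp_read_sock() can free the original, run the stream/skb
 * verdict program and apply the result.
 */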
static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
				 unsigned int offset, size_t orig_len)
{
	struct sock *sk = (struct sock *)desc->arg.data;
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	int len = skb->len;

	/* clone here so sk_eat_skb() in tcp_read_sock does not drop our data */
	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		desc->error = -ENOMEM;
		return 0;
	}

	rcu_read_lock();
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		len = 0;
		kfree_skb(skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (!prog)
		prog = READ_ONCE(psock->progs.skb_verdict);
	if (likely(prog)) {
		skb->sk = sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_verdict_apply(psock, skb, ret);
out:
	rcu_read_unlock();
	return len;
}

static void sk_psock_verdict_data_ready(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	read_descriptor_t desc;

	if (unlikely(!sock || !sock->ops || !sock->ops->read_sock))
		return;

	desc.arg.data = sk;
	desc.error = 0;
	desc.count = 1;

	sock->ops->read_sock(sk, &desc, sk_psock_verdict_recv);
}

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		return;

	psock->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_verdict_data_ready;
	sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
{
	if (!psock->saved_data_ready)
		return;

	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;
}