1 // SPDX-License-Identifier: GPL-2.0
2 /* Multipath TCP
3 *
4 * Copyright (c) 2017 - 2019, Intel Corporation.
5 */
6
7 #define pr_fmt(fmt) "MPTCP: " fmt
8
9 #include <linux/kernel.h>
10 #include <linux/module.h>
11 #include <linux/netdevice.h>
12 #include <crypto/sha2.h>
13 #include <crypto/utils.h>
14 #include <net/sock.h>
15 #include <net/inet_common.h>
16 #include <net/inet_hashtables.h>
17 #include <net/protocol.h>
18 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
19 #include <net/ip6_route.h>
20 #include <net/transp_v6.h>
21 #endif
22 #include <net/mptcp.h>
23
24 #include "protocol.h"
25 #include "mib.h"
26
27 #include <trace/events/mptcp.h>
28 #include <trace/events/sock.h>
29
30 static void mptcp_subflow_ops_undo_override(struct sock *ssk);
31
32 static void SUBFLOW_REQ_INC_STATS(struct request_sock *req,
33 enum linux_mptcp_mib_field field)
34 {
35 MPTCP_INC_STATS(sock_net(req_to_sk(req)), field);
36 }
37
38 static void subflow_req_destructor(struct request_sock *req)
39 {
40 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
41
42 pr_debug("subflow_req=%p", subflow_req);
43
44 if (subflow_req->msk)
45 sock_put((struct sock *)subflow_req->msk);
46
47 mptcp_token_destroy_request(req);
48 }
49
50 static void subflow_generate_hmac(u64 key1, u64 key2, u32 nonce1, u32 nonce2,
51 void *hmac)
52 {
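/* HMAC-SHA256 keyed with key1/key2 over the two 32-bit nonces; callers
 * consume either the leftmost 64 bits (truncated HMAC) or
 * MPTCPOPT_HMAC_LEN bytes of the resulting digest.
 */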
53 u8 msg[8];
54
55 put_unaligned_be32(nonce1, &msg[0]);
56 put_unaligned_be32(nonce2, &msg[4]);
57
58 mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac);
59 }
60
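/* additional (MP_JOIN) subflows are accepted only once the MPC handshake is
 * fully established and either an active userspace PM is in charge or the
 * in-kernel PM still allows more subflows.
 */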
61 static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk)
62 {
63 return mptcp_is_fully_established((void *)msk) &&
64 ((mptcp_pm_is_userspace(msk) &&
65 mptcp_userspace_pm_active(msk)) ||
66 READ_ONCE(msk->pm.accept_subflow));
67 }
68
69 /* validate received token and create truncated hmac and nonce for SYN-ACK */
70 static void subflow_req_create_thmac(struct mptcp_subflow_request_sock *subflow_req)
71 {
72 struct mptcp_sock *msk = subflow_req->msk;
73 u8 hmac[SHA256_DIGEST_SIZE];
74
75 get_random_bytes(&subflow_req->local_nonce, sizeof(u32));
76
77 subflow_generate_hmac(READ_ONCE(msk->local_key),
78 READ_ONCE(msk->remote_key),
79 subflow_req->local_nonce,
80 subflow_req->remote_nonce, hmac);
81
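/* the SYN-ACK carries only the leftmost 64 bits of the HMAC (RFC 8684 §3.2) */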
82 subflow_req->thmac = get_unaligned_be64(hmac);
83 }
84
85 static struct mptcp_sock *subflow_token_join_request(struct request_sock *req)
86 {
87 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
88 struct mptcp_sock *msk;
89 int local_id;
90
91 msk = mptcp_token_get_sock(sock_net(req_to_sk(req)), subflow_req->token);
92 if (!msk) {
93 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
94 return NULL;
95 }
96
97 local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
98 if (local_id < 0) {
99 sock_put((struct sock *)msk);
100 return NULL;
101 }
102 subflow_req->local_id = local_id;
103 subflow_req->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)req);
104
105 return msk;
106 }
107
108 static void subflow_init_req(struct request_sock *req, const struct sock *sk_listener)
109 {
110 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
111
112 subflow_req->mp_capable = 0;
113 subflow_req->mp_join = 0;
114 subflow_req->csum_reqd = mptcp_is_checksum_enabled(sock_net(sk_listener));
115 subflow_req->allow_join_id0 = mptcp_allow_join_id0(sock_net(sk_listener));
116 subflow_req->msk = NULL;
117 mptcp_token_init_request(req);
118 }
119
120 static bool subflow_use_different_sport(struct mptcp_sock *msk, const struct sock *sk)
121 {
122 return inet_sk(sk)->inet_sport != inet_sk((struct sock *)msk)->inet_sport;
123 }
124
125 static void subflow_add_reset_reason(struct sk_buff *skb, u8 reason)
126 {
127 struct mptcp_ext *mpext = skb_ext_add(skb, SKB_EXT_MPTCP);
128
129 if (mpext) {
130 memset(mpext, 0, sizeof(*mpext));
131 mpext->reset_reason = reason;
132 }
133 }
134
135 /* Init mptcp request socket.
136 *
137 * Returns an error code if a JOIN has failed and a TCP reset
138 * should be sent.
139 */
140 static int subflow_check_req(struct request_sock *req,
141 const struct sock *sk_listener,
142 struct sk_buff *skb)
143 {
144 struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
145 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
146 struct mptcp_options_received mp_opt;
147 bool opt_mp_capable, opt_mp_join;
148
149 pr_debug("subflow_req=%p, listener=%p", subflow_req, listener);
150
151 #ifdef CONFIG_TCP_MD5SIG
152 /* no MPTCP if MD5SIG is enabled on this socket or we may run out of
153 * TCP option space.
154 */
155 if (rcu_access_pointer(tcp_sk(sk_listener)->md5sig_info)) {
156 subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
157 return -EINVAL;
158 }
159 #endif
160
161 mptcp_get_options(skb, &mp_opt);
162
163 opt_mp_capable = !!(mp_opt.suboptions & OPTION_MPTCP_MPC_SYN);
164 opt_mp_join = !!(mp_opt.suboptions & OPTION_MPTCP_MPJ_SYN);
165 if (opt_mp_capable) {
166 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVE);
167
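/* a SYN carrying both MP_CAPABLE and MP_JOIN is invalid: leave both flags
 * cleared so that the connection falls back to plain TCP
 */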
168 if (opt_mp_join)
169 return 0;
170 } else if (opt_mp_join) {
171 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNRX);
172
173 if (mp_opt.backup)
174 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINSYNBACKUPRX);
175 }
176
177 if (opt_mp_capable && listener->request_mptcp) {
178 int err, retries = MPTCP_TOKEN_MAX_RETRIES;
179
180 subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
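/* pick a non-zero local key; the token derived from it must be unique
 * within the netns, so retry up to MPTCP_TOKEN_MAX_RETRIES times on
 * collision before falling back to TCP
 */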
181 again:
182 do {
183 get_random_bytes(&subflow_req->local_key, sizeof(subflow_req->local_key));
184 } while (subflow_req->local_key == 0);
185
186 if (unlikely(req->syncookie)) {
187 mptcp_crypto_key_sha(subflow_req->local_key,
188 &subflow_req->token,
189 &subflow_req->idsn);
190 if (mptcp_token_exists(subflow_req->token)) {
191 if (retries-- > 0)
192 goto again;
193 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT);
194 } else {
195 subflow_req->mp_capable = 1;
196 }
197 return 0;
198 }
199
200 err = mptcp_token_new_request(req);
201 if (err == 0)
202 subflow_req->mp_capable = 1;
203 else if (retries-- > 0)
204 goto again;
205 else
206 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_TOKENFALLBACKINIT);
207
208 } else if (opt_mp_join && listener->request_mptcp) {
209 subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq;
210 subflow_req->mp_join = 1;
211 subflow_req->backup = mp_opt.backup;
212 subflow_req->remote_id = mp_opt.join_id;
213 subflow_req->token = mp_opt.token;
214 subflow_req->remote_nonce = mp_opt.nonce;
215 subflow_req->msk = subflow_token_join_request(req);
216
217 /* Can't fall back to TCP in this case. */
218 if (!subflow_req->msk) {
219 subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
220 return -EPERM;
221 }
222
223 if (subflow_use_different_sport(subflow_req->msk, sk_listener)) {
224 pr_debug("syn inet_sport=%d %d",
225 ntohs(inet_sk(sk_listener)->inet_sport),
226 ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport));
227 if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) {
228 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTSYNRX);
229 subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
230 return -EPERM;
231 }
232 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTSYNRX);
233 }
234
235 subflow_req_create_thmac(subflow_req);
236
237 if (unlikely(req->syncookie)) {
238 if (!mptcp_can_accept_new_subflow(subflow_req->msk)) {
239 subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
240 return -EPERM;
241 }
242
243 subflow_init_req_cookie_join_save(subflow_req, skb);
244 }
245
246 pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
247 subflow_req->remote_nonce, subflow_req->msk);
248 }
249
250 return 0;
251 }
252
253 int mptcp_subflow_init_cookie_req(struct request_sock *req,
254 const struct sock *sk_listener,
255 struct sk_buff *skb)
256 {
257 struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk_listener);
258 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
259 struct mptcp_options_received mp_opt;
260 bool opt_mp_capable, opt_mp_join;
261 int err;
262
263 subflow_init_req(req, sk_listener);
264 mptcp_get_options(skb, &mp_opt);
265
266 opt_mp_capable = !!(mp_opt.suboptions & OPTION_MPTCP_MPC_ACK);
267 opt_mp_join = !!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK);
268 if (opt_mp_capable && opt_mp_join)
269 return -EINVAL;
270
271 if (opt_mp_capable && listener->request_mptcp) {
272 if (mp_opt.sndr_key == 0)
273 return -EINVAL;
274
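/* the request state was dropped due to syncookies: recover the key this
 * host generated from the MP_CAPABLE ACK, which echoes it back as the
 * receiver's key
 */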
275 subflow_req->local_key = mp_opt.rcvr_key;
276 err = mptcp_token_new_request(req);
277 if (err)
278 return err;
279
280 subflow_req->mp_capable = 1;
281 subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
282 } else if (opt_mp_join && listener->request_mptcp) {
283 if (!mptcp_token_join_cookie_init_state(subflow_req, skb))
284 return -EINVAL;
285
286 subflow_req->mp_join = 1;
287 subflow_req->ssn_offset = TCP_SKB_CB(skb)->seq - 1;
288 }
289
290 return 0;
291 }
292 EXPORT_SYMBOL_GPL(mptcp_subflow_init_cookie_req);
293
294 static enum sk_rst_reason mptcp_get_rst_reason(const struct sk_buff *skb)
295 {
296 const struct mptcp_ext *mpext = mptcp_get_ext(skb);
297
298 if (!mpext)
299 return SK_RST_REASON_NOT_SPECIFIED;
300
301 return sk_rst_convert_mptcp_reason(mpext->reset_reason);
302 }
303
304 static struct dst_entry *subflow_v4_route_req(const struct sock *sk,
305 struct sk_buff *skb,
306 struct flowi *fl,
307 struct request_sock *req,
308 u32 tw_isn)
309 {
310 struct dst_entry *dst;
311 int err;
312
313 tcp_rsk(req)->is_mptcp = 1;
314 subflow_init_req(req, sk);
315
316 dst = tcp_request_sock_ipv4_ops.route_req(sk, skb, fl, req, tw_isn);
317 if (!dst)
318 return NULL;
319
320 err = subflow_check_req(req, sk, skb);
321 if (err == 0)
322 return dst;
323
324 dst_release(dst);
325 if (!req->syncookie)
326 tcp_request_sock_ops.send_reset(sk, skb,
327 mptcp_get_rst_reason(skb));
328 return NULL;
329 }
330
331 static void subflow_prep_synack(const struct sock *sk, struct request_sock *req,
332 struct tcp_fastopen_cookie *foc,
333 enum tcp_synack_type synack_type)
334 {
335 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
336 struct inet_request_sock *ireq = inet_rsk(req);
337
338 /* clear tstamp_ok, as needed depending on cookie */
339 if (foc && foc->len > -1)
340 ireq->tstamp_ok = 0;
341
342 if (synack_type == TCP_SYNACK_FASTOPEN)
343 mptcp_fastopen_subflow_synack_set_params(subflow, req);
344 }
345
346 static int subflow_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
347 struct flowi *fl,
348 struct request_sock *req,
349 struct tcp_fastopen_cookie *foc,
350 enum tcp_synack_type synack_type,
351 struct sk_buff *syn_skb)
352 {
353 subflow_prep_synack(sk, req, foc, synack_type);
354
355 return tcp_request_sock_ipv4_ops.send_synack(sk, dst, fl, req, foc,
356 synack_type, syn_skb);
357 }
358
359 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
360 static int subflow_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
361 struct flowi *fl,
362 struct request_sock *req,
363 struct tcp_fastopen_cookie *foc,
364 enum tcp_synack_type synack_type,
365 struct sk_buff *syn_skb)
366 {
367 subflow_prep_synack(sk, req, foc, synack_type);
368
369 return tcp_request_sock_ipv6_ops.send_synack(sk, dst, fl, req, foc,
370 synack_type, syn_skb);
371 }
372
373 static struct dst_entry *subflow_v6_route_req(const struct sock *sk,
374 struct sk_buff *skb,
375 struct flowi *fl,
376 struct request_sock *req,
377 u32 tw_isn)
378 {
379 struct dst_entry *dst;
380 int err;
381
382 tcp_rsk(req)->is_mptcp = 1;
383 subflow_init_req(req, sk);
384
385 dst = tcp_request_sock_ipv6_ops.route_req(sk, skb, fl, req, tw_isn);
386 if (!dst)
387 return NULL;
388
389 err = subflow_check_req(req, sk, skb);
390 if (err == 0)
391 return dst;
392
393 dst_release(dst);
394 if (!req->syncookie)
395 tcp6_request_sock_ops.send_reset(sk, skb,
396 mptcp_get_rst_reason(skb));
397 return NULL;
398 }
399 #endif
400
401 /* validate received truncated hmac and create hmac for third ACK */
402 static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
403 {
404 u8 hmac[SHA256_DIGEST_SIZE];
405 u64 thmac;
406
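/* recompute the peer's truncated HMAC from the SYN-ACK: peer key and nonce
 * come first, ours second; only the leftmost 64 bits are compared
 */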
407 subflow_generate_hmac(subflow->remote_key, subflow->local_key,
408 subflow->remote_nonce, subflow->local_nonce,
409 hmac);
410
411 thmac = get_unaligned_be64(hmac);
412 pr_debug("subflow=%p, token=%u, thmac=%llu, subflow->thmac=%llu\n",
413 subflow, subflow->token, thmac, subflow->thmac);
414
415 return thmac == subflow->thmac;
416 }
417
418 void mptcp_subflow_reset(struct sock *ssk)
419 {
420 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
421 struct sock *sk = subflow->conn;
422
423 /* mptcp_mp_fail_no_response() can reach here on an already closed
424 * socket
425 */
426 if (ssk->sk_state == TCP_CLOSE)
427 return;
428
429 /* must hold: tcp_done() could drop last reference on parent */
430 sock_hold(sk);
431
432 mptcp_send_active_reset_reason(ssk);
433 tcp_done(ssk);
434 if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags))
435 mptcp_schedule_work(sk);
436
437 sock_put(sk);
438 }
439
440 static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct sock *sk)
441 {
442 return inet_sk(sk)->inet_dport != inet_sk((struct sock *)msk)->inet_dport;
443 }
444
445 void __mptcp_sync_state(struct sock *sk, int state)
446 {
447 struct mptcp_subflow_context *subflow;
448 struct mptcp_sock *msk = mptcp_sk(sk);
449 struct sock *ssk = msk->first;
450
451 subflow = mptcp_subflow_ctx(ssk);
452 __mptcp_propagate_sndbuf(sk, ssk);
453 if (!msk->rcvspace_init)
454 mptcp_rcv_space_init(msk, ssk);
455
456 if (sk->sk_state == TCP_SYN_SENT) {
457 /* subflow->idsn is always available in TCP_SYN_SENT state,
458 * even for the FASTOPEN scenarios
459 */
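/* the MP_CAPABLE handshake consumes the first unit of data-sequence space,
 * so the first data byte is sent at idsn + 1
 */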
460 WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
461 WRITE_ONCE(msk->snd_nxt, msk->write_seq);
462 mptcp_set_state(sk, state);
463 sk->sk_state_change(sk);
464 }
465 }
466
467 static void subflow_set_remote_key(struct mptcp_sock *msk,
468 struct mptcp_subflow_context *subflow,
469 const struct mptcp_options_received *mp_opt)
470 {
471 /* active MPC subflow will reach here multiple times:
472 * at subflow_finish_connect() time and at 4th ack time
473 */
474 if (subflow->remote_key_valid)
475 return;
476
477 subflow->remote_key_valid = 1;
478 subflow->remote_key = mp_opt->sndr_key;
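/* the peer's IDSN is the least significant 64 bits of SHA-256(peer key);
 * +1 accounts for the sequence space consumed by the MPC handshake
 */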
479 mptcp_crypto_key_sha(subflow->remote_key, NULL, &subflow->iasn);
480 subflow->iasn++;
481
482 WRITE_ONCE(msk->remote_key, subflow->remote_key);
483 WRITE_ONCE(msk->ack_seq, subflow->iasn);
484 WRITE_ONCE(msk->can_ack, true);
485 atomic64_set(&msk->rcv_wnd_sent, subflow->iasn);
486 }
487
488 static void mptcp_propagate_state(struct sock *sk, struct sock *ssk,
489 struct mptcp_subflow_context *subflow,
490 const struct mptcp_options_received *mp_opt)
491 {
492 struct mptcp_sock *msk = mptcp_sk(sk);
493
494 mptcp_data_lock(sk);
495 if (mp_opt) {
496 /* Options are available only in the non-fallback cases;
497 * avoid updating rx path fields otherwise
498 */
499 WRITE_ONCE(msk->snd_una, subflow->idsn + 1);
500 WRITE_ONCE(msk->wnd_end, subflow->idsn + 1 + tcp_sk(ssk)->snd_wnd);
501 subflow_set_remote_key(msk, subflow, mp_opt);
502 }
503
504 if (!sock_owned_by_user(sk)) {
505 __mptcp_sync_state(sk, ssk->sk_state);
506 } else {
507 msk->pending_state = ssk->sk_state;
508 __set_bit(MPTCP_SYNC_STATE, &msk->cb_flags);
509 }
510 mptcp_data_unlock(sk);
511 }
512
513 static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
514 {
515 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
516 struct mptcp_options_received mp_opt;
517 struct sock *parent = subflow->conn;
518 struct mptcp_sock *msk;
519
520 subflow->icsk_af_ops->sk_rx_dst_set(sk, skb);
521
522 /* be sure no special action on any packet other than syn-ack */
523 if (subflow->conn_finished)
524 return;
525
526 msk = mptcp_sk(parent);
527 subflow->rel_write_seq = 1;
528 subflow->conn_finished = 1;
529 subflow->ssn_offset = TCP_SKB_CB(skb)->seq;
530 pr_debug("subflow=%p synack seq=%x", subflow, subflow->ssn_offset);
531
532 mptcp_get_options(skb, &mp_opt);
533 if (subflow->request_mptcp) {
534 if (!(mp_opt.suboptions & OPTION_MPTCP_MPC_SYNACK)) {
535 MPTCP_INC_STATS(sock_net(sk),
536 MPTCP_MIB_MPCAPABLEACTIVEFALLBACK);
537 mptcp_do_fallback(sk);
538 pr_fallback(msk);
539 goto fallback;
540 }
541
542 if (mp_opt.suboptions & OPTION_MPTCP_CSUMREQD)
543 WRITE_ONCE(msk->csum_enabled, true);
544 if (mp_opt.deny_join_id0)
545 WRITE_ONCE(msk->pm.remote_deny_join_id0, true);
546 subflow->mp_capable = 1;
547 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK);
548 mptcp_finish_connect(sk);
549 mptcp_propagate_state(parent, sk, subflow, &mp_opt);
550 } else if (subflow->request_join) {
551 u8 hmac[SHA256_DIGEST_SIZE];
552
553 if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_SYNACK)) {
554 subflow->reset_reason = MPTCP_RST_EMPTCP;
555 goto do_reset;
556 }
557
558 subflow->backup = mp_opt.backup;
559 subflow->thmac = mp_opt.thmac;
560 subflow->remote_nonce = mp_opt.nonce;
561 WRITE_ONCE(subflow->remote_id, mp_opt.join_id);
562 pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u backup=%d",
563 subflow, subflow->thmac, subflow->remote_nonce,
564 subflow->backup);
565
566 if (!subflow_thmac_valid(subflow)) {
567 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINACKMAC);
568 subflow->reset_reason = MPTCP_RST_EMPTCP;
569 goto do_reset;
570 }
571
572 if (!mptcp_finish_join(sk))
573 goto do_reset;
574
575 subflow_generate_hmac(subflow->local_key, subflow->remote_key,
576 subflow->local_nonce,
577 subflow->remote_nonce,
578 hmac);
579 memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN);
580
581 subflow->mp_join = 1;
582 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX);
583
584 if (subflow->backup)
585 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKBACKUPRX);
586
587 if (subflow_use_different_dport(msk, sk)) {
588 pr_debug("synack inet_dport=%d %d",
589 ntohs(inet_sk(sk)->inet_dport),
590 ntohs(inet_sk(parent)->inet_dport));
591 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINPORTSYNACKRX);
592 }
593 } else if (mptcp_check_fallback(sk)) {
594 fallback:
595 mptcp_propagate_state(parent, sk, subflow, NULL);
596 }
597 return;
598
599 do_reset:
600 subflow->reset_transient = 0;
601 mptcp_subflow_reset(sk);
602 }
603
604 static void subflow_set_local_id(struct mptcp_subflow_context *subflow, int local_id)
605 {
606 WARN_ON_ONCE(local_id < 0 || local_id > 255);
607 WRITE_ONCE(subflow->local_id, local_id);
608 }
609
610 static int subflow_chk_local_id(struct sock *sk)
611 {
612 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
613 struct mptcp_sock *msk = mptcp_sk(subflow->conn);
614 int err;
615
616 if (likely(subflow->local_id >= 0))
617 return 0;
618
619 err = mptcp_pm_get_local_id(msk, (struct sock_common *)sk);
620 if (err < 0)
621 return err;
622
623 subflow_set_local_id(subflow, err);
624 subflow->request_bkup = mptcp_pm_is_backup(msk, (struct sock_common *)sk);
625
626 return 0;
627 }
628
629 static int subflow_rebuild_header(struct sock *sk)
630 {
631 int err = subflow_chk_local_id(sk);
632
633 if (unlikely(err < 0))
634 return err;
635
636 return inet_sk_rebuild_header(sk);
637 }
638
639 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
640 static int subflow_v6_rebuild_header(struct sock *sk)
641 {
642 int err = subflow_chk_local_id(sk);
643
644 if (unlikely(err < 0))
645 return err;
646
647 return inet6_sk_rebuild_header(sk);
648 }
649 #endif
650
651 static struct request_sock_ops mptcp_subflow_v4_request_sock_ops __ro_after_init;
652 static struct tcp_request_sock_ops subflow_request_sock_ipv4_ops __ro_after_init;
653
654 static int subflow_v4_conn_request(struct sock *sk, struct sk_buff *skb)
655 {
656 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
657
658 pr_debug("subflow=%p", subflow);
659
660 /* Never answer to SYNs sent to broadcast or multicast */
661 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
662 goto drop;
663
664 return tcp_conn_request(&mptcp_subflow_v4_request_sock_ops,
665 &subflow_request_sock_ipv4_ops,
666 sk, skb);
667 drop:
668 tcp_listendrop(sk);
669 return 0;
670 }
671
672 static void subflow_v4_req_destructor(struct request_sock *req)
673 {
674 subflow_req_destructor(req);
675 tcp_request_sock_ops.destructor(req);
676 }
677
678 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
679 static struct request_sock_ops mptcp_subflow_v6_request_sock_ops __ro_after_init;
680 static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops __ro_after_init;
681 static struct inet_connection_sock_af_ops subflow_v6_specific __ro_after_init;
682 static struct inet_connection_sock_af_ops subflow_v6m_specific __ro_after_init;
683 static struct proto tcpv6_prot_override __ro_after_init;
684
685 static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
686 {
687 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
688
689 pr_debug("subflow=%p", subflow);
690
691 if (skb->protocol == htons(ETH_P_IP))
692 return subflow_v4_conn_request(sk, skb);
693
694 if (!ipv6_unicast_destination(skb))
695 goto drop;
696
697 if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
698 __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
699 return 0;
700 }
701
702 return tcp_conn_request(&mptcp_subflow_v6_request_sock_ops,
703 &subflow_request_sock_ipv6_ops, sk, skb);
704
705 drop:
706 tcp_listendrop(sk);
707 return 0; /* don't send reset */
708 }
709
710 static void subflow_v6_req_destructor(struct request_sock *req)
711 {
712 subflow_req_destructor(req);
713 tcp6_request_sock_ops.destructor(req);
714 }
715 #endif
716
717 struct request_sock *mptcp_subflow_reqsk_alloc(const struct request_sock_ops *ops,
718 struct sock *sk_listener,
719 bool attach_listener)
720 {
721 if (ops->family == AF_INET)
722 ops = &mptcp_subflow_v4_request_sock_ops;
723 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
724 else if (ops->family == AF_INET6)
725 ops = &mptcp_subflow_v6_request_sock_ops;
726 #endif
727
728 return inet_reqsk_alloc(ops, sk_listener, attach_listener);
729 }
730 EXPORT_SYMBOL(mptcp_subflow_reqsk_alloc);
731
732 /* validate hmac received in third ACK */
733 static bool subflow_hmac_valid(const struct request_sock *req,
734 const struct mptcp_options_received *mp_opt)
735 {
736 const struct mptcp_subflow_request_sock *subflow_req;
737 u8 hmac[SHA256_DIGEST_SIZE];
738 struct mptcp_sock *msk;
739
740 subflow_req = mptcp_subflow_rsk(req);
741 msk = subflow_req->msk;
742 if (!msk)
743 return false;
744
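/* recompute the initiator's HMAC from the third ACK: peer key and nonce
 * come first, ours second
 */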
745 subflow_generate_hmac(READ_ONCE(msk->remote_key),
746 READ_ONCE(msk->local_key),
747 subflow_req->remote_nonce,
748 subflow_req->local_nonce, hmac);
749
750 return !crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN);
751 }
752
753 static void subflow_ulp_fallback(struct sock *sk,
754 struct mptcp_subflow_context *old_ctx)
755 {
756 struct inet_connection_sock *icsk = inet_csk(sk);
757
758 mptcp_subflow_tcp_fallback(sk, old_ctx);
759 icsk->icsk_ulp_ops = NULL;
760 rcu_assign_pointer(icsk->icsk_ulp_data, NULL);
761 tcp_sk(sk)->is_mptcp = 0;
762
763 mptcp_subflow_ops_undo_override(sk);
764 }
765
766 void mptcp_subflow_drop_ctx(struct sock *ssk)
767 {
768 struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
769
770 if (!ctx)
771 return;
772
773 list_del(&mptcp_subflow_ctx(ssk)->node);
774 if (inet_csk(ssk)->icsk_ulp_ops) {
775 subflow_ulp_fallback(ssk, ctx);
776 if (ctx->conn)
777 sock_put(ctx->conn);
778 }
779
780 kfree_rcu(ctx, rcu);
781 }
782
783 void __mptcp_subflow_fully_established(struct mptcp_sock *msk,
784 struct mptcp_subflow_context *subflow,
785 const struct mptcp_options_received *mp_opt)
786 {
787 subflow_set_remote_key(msk, subflow, mp_opt);
788 subflow->fully_established = 1;
789 WRITE_ONCE(msk->fully_established, true);
790
791 if (subflow->is_mptfo)
792 __mptcp_fastopen_gen_msk_ackseq(msk, subflow, mp_opt);
793 }
794
795 static struct sock *subflow_syn_recv_sock(const struct sock *sk,
796 struct sk_buff *skb,
797 struct request_sock *req,
798 struct dst_entry *dst,
799 struct request_sock *req_unhash,
800 bool *own_req)
801 {
802 struct mptcp_subflow_context *listener = mptcp_subflow_ctx(sk);
803 struct mptcp_subflow_request_sock *subflow_req;
804 struct mptcp_options_received mp_opt;
805 bool fallback, fallback_is_fatal;
806 enum sk_rst_reason reason;
807 struct mptcp_sock *owner;
808 struct sock *child;
809
810 pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn);
811
812 /* After child creation we must look for MPC even when options
813 * are not parsed
814 */
815 mp_opt.suboptions = 0;
816
817 /* hopefully temporary handling for MP_JOIN+syncookie */
818 subflow_req = mptcp_subflow_rsk(req);
819 fallback_is_fatal = tcp_rsk(req)->is_mptcp && subflow_req->mp_join;
820 fallback = !tcp_rsk(req)->is_mptcp;
821 if (fallback)
822 goto create_child;
823
824 /* if the sk is MP_CAPABLE, we try to fetch the client key */
825 if (subflow_req->mp_capable) {
826 /* we can receive and accept an in-window, out-of-order pkt,
827 * which may not carry the MP_CAPABLE opt even on mptcp enabled
828 * paths: always try to extract the peer key, and fallback
829 * for packets missing it.
830 * Even OoO DSS packets arriving legitimately after dropped or
831 * reordered MPC will cause fallback, but we don't have other
832 * options.
833 */
834 mptcp_get_options(skb, &mp_opt);
835 if (!(mp_opt.suboptions &
836 (OPTION_MPTCP_MPC_SYN | OPTION_MPTCP_MPC_ACK)))
837 fallback = true;
838
839 } else if (subflow_req->mp_join) {
840 mptcp_get_options(skb, &mp_opt);
841 if (!(mp_opt.suboptions & OPTION_MPTCP_MPJ_ACK) ||
842 !subflow_hmac_valid(req, &mp_opt) ||
843 !mptcp_can_accept_new_subflow(subflow_req->msk)) {
844 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
845 fallback = true;
846 }
847 }
848
849 create_child:
850 child = listener->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
851 req_unhash, own_req);
852
853 if (child && *own_req) {
854 struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(child);
855
856 tcp_rsk(req)->drop_req = false;
857
858 /* we need to fall back on ctx allocation failure and on the pre-reqs
859 * checking above. In the latter scenario we additionally need
860 * to reset the context to non-MPTCP status.
861 */
862 if (!ctx || fallback) {
863 if (fallback_is_fatal) {
864 subflow_add_reset_reason(skb, MPTCP_RST_EMPTCP);
865 goto dispose_child;
866 }
867 goto fallback;
868 }
869
870 /* ssk inherits options of listener sk */
871 ctx->setsockopt_seq = listener->setsockopt_seq;
872
873 if (ctx->mp_capable) {
874 ctx->conn = mptcp_sk_clone_init(listener->conn, &mp_opt, child, req);
875 if (!ctx->conn)
876 goto fallback;
877
878 ctx->subflow_id = 1;
879 owner = mptcp_sk(ctx->conn);
880 mptcp_pm_new_connection(owner, child, 1);
881
882 /* with OoO packets we can reach here without ingress
883 * mpc option
884 */
885 if (mp_opt.suboptions & OPTION_MPTCP_MPC_ACK) {
886 mptcp_pm_fully_established(owner, child);
887 ctx->pm_notified = 1;
888 }
889 } else if (ctx->mp_join) {
890 owner = subflow_req->msk;
891 if (!owner) {
892 subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
893 goto dispose_child;
894 }
895
896 /* move the msk reference ownership to the subflow */
897 subflow_req->msk = NULL;
898 ctx->conn = (struct sock *)owner;
899
900 if (subflow_use_different_sport(owner, sk)) {
901 pr_debug("ack inet_sport=%d %d",
902 ntohs(inet_sk(sk)->inet_sport),
903 ntohs(inet_sk((struct sock *)owner)->inet_sport));
904 if (!mptcp_pm_sport_in_anno_list(owner, sk)) {
905 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MISMATCHPORTACKRX);
906 subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT);
907 goto dispose_child;
908 }
909 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINPORTACKRX);
910 }
911
912 if (!mptcp_finish_join(child)) {
913 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(child);
914
915 subflow_add_reset_reason(skb, subflow->reset_reason);
916 goto dispose_child;
917 }
918
919 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKRX);
920 tcp_rsk(req)->drop_req = true;
921 }
922 }
923
924 /* check for expected invariant - should never trigger, just helps
925 * catching earlier subtle bugs
926 */
927 WARN_ON_ONCE(child && *own_req && tcp_sk(child)->is_mptcp &&
928 (!mptcp_subflow_ctx(child) ||
929 !mptcp_subflow_ctx(child)->conn));
930 return child;
931
932 dispose_child:
933 mptcp_subflow_drop_ctx(child);
934 tcp_rsk(req)->drop_req = true;
935 inet_csk_prepare_for_destroy_sock(child);
936 tcp_done(child);
937 reason = mptcp_get_rst_reason(skb);
938 req->rsk_ops->send_reset(sk, skb, reason);
939
940 /* The last child reference will be released by the caller */
941 return child;
942
943 fallback:
944 if (fallback)
945 SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK);
946 mptcp_subflow_drop_ctx(child);
947 return child;
948 }
949
950 static struct inet_connection_sock_af_ops subflow_specific __ro_after_init;
951 static struct proto tcp_prot_override __ro_after_init;
952
953 enum mapping_status {
954 MAPPING_OK,
955 MAPPING_INVALID,
956 MAPPING_EMPTY,
957 MAPPING_DATA_FIN,
958 MAPPING_DUMMY,
959 MAPPING_BAD_CSUM
960 };
961
962 static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
963 {
964 pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
965 ssn, subflow->map_subflow_seq, subflow->map_data_len);
966 }
967
968 static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
969 {
970 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
971 unsigned int skb_consumed;
972
973 skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
974 if (WARN_ON_ONCE(skb_consumed >= skb->len))
975 return true;
976
977 return skb->len - skb_consumed <= subflow->map_data_len -
978 mptcp_subflow_get_map_offset(subflow);
979 }
980
981 static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
982 {
983 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
984 u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
985
986 if (unlikely(before(ssn, subflow->map_subflow_seq))) {
987 /* Mapping covers data later in the subflow stream,
988 * currently unsupported.
989 */
990 dbg_bad_map(subflow, ssn);
991 return false;
992 }
993 if (unlikely(!before(ssn, subflow->map_subflow_seq +
994 subflow->map_data_len))) {
995 /* Mapping covers only past subflow data, invalid */
996 dbg_bad_map(subflow, ssn);
997 return false;
998 }
999 return true;
1000 }
1001
1002 static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *skb,
1003 bool csum_reqd)
1004 {
1005 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1006 u32 offset, seq, delta;
1007 __sum16 csum;
1008 int len;
1009
1010 if (!csum_reqd)
1011 return MAPPING_OK;
1012
1013 /* mapping already validated on previous traversal */
1014 if (subflow->map_csum_len == subflow->map_data_len)
1015 return MAPPING_OK;
1016
1017 /* traverse the receive queue, ensuring it contains a full
1018 * DSS mapping and accumulating the related csum.
1019 * Preserve the accumulated csum across multiple calls, to compute
1020 * the csum only once
1021 */
1022 delta = subflow->map_data_len - subflow->map_csum_len;
1023 for (;;) {
1024 seq = tcp_sk(ssk)->copied_seq + subflow->map_csum_len;
1025 offset = seq - TCP_SKB_CB(skb)->seq;
1026
1027 /* if the current skb has not been accounted yet, csum its contents
1028 * up to the amount covered by the current DSS
1029 */
1030 if (offset < skb->len) {
1031 __wsum csum;
1032
1033 len = min(skb->len - offset, delta);
1034 csum = skb_checksum(skb, offset, len, 0);
1035 subflow->map_data_csum = csum_block_add(subflow->map_data_csum, csum,
1036 subflow->map_csum_len);
1037
1038 delta -= len;
1039 subflow->map_csum_len += len;
1040 }
1041 if (delta == 0)
1042 break;
1043
1044 if (skb_queue_is_last(&ssk->sk_receive_queue, skb)) {
1045 /* if this subflow is closed, the partial mapping
1046 * will never be completed; flush the pending skbs, so
1047 * that subflow_sched_work_if_closed() can kick in
1048 */
1049 if (unlikely(ssk->sk_state == TCP_CLOSE))
1050 while ((skb = skb_peek(&ssk->sk_receive_queue)))
1051 sk_eat_skb(ssk, skb);
1052
1053 /* not enough data to validate the csum */
1054 return MAPPING_EMPTY;
1055 }
1056
1057 /* the DSS mapping for next skbs will be validated later,
1058 * when a get_mapping_status call processes such skbs
1059 */
1060 skb = skb->next;
1061 }
1062
1063 /* note that 'map_data_len' accounts only for the carried data and does
1064 * not include the eventual seq increment due to the data fin,
1065 * while the pseudo header requires the original DSS data len,
1066 * including that
1067 */
1068 csum = __mptcp_make_csum(subflow->map_seq,
1069 subflow->map_subflow_seq,
1070 subflow->map_data_len + subflow->map_data_fin,
1071 subflow->map_data_csum);
1072 if (unlikely(csum)) {
1073 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DATACSUMERR);
1074 return MAPPING_BAD_CSUM;
1075 }
1076
1077 subflow->valid_csum_seen = 1;
1078 return MAPPING_OK;
1079 }
1080
1081 static enum mapping_status get_mapping_status(struct sock *ssk,
1082 struct mptcp_sock *msk)
1083 {
1084 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1085 bool csum_reqd = READ_ONCE(msk->csum_enabled);
1086 struct mptcp_ext *mpext;
1087 struct sk_buff *skb;
1088 u16 data_len;
1089 u64 map_seq;
1090
1091 skb = skb_peek(&ssk->sk_receive_queue);
1092 if (!skb)
1093 return MAPPING_EMPTY;
1094
1095 if (mptcp_check_fallback(ssk))
1096 return MAPPING_DUMMY;
1097
1098 mpext = mptcp_get_ext(skb);
1099 if (!mpext || !mpext->use_map) {
1100 if (!subflow->map_valid && !skb->len) {
1101 /* the TCP stack delivers 0-len FIN pkts to the receive
1102 * queue; those are the only 0-len pkts ever expected here,
1103 * and we can admit no mapping only for 0-len pkts
1104 */
1105 if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
1106 WARN_ONCE(1, "0len seq %d:%d flags %x",
1107 TCP_SKB_CB(skb)->seq,
1108 TCP_SKB_CB(skb)->end_seq,
1109 TCP_SKB_CB(skb)->tcp_flags);
1110 sk_eat_skb(ssk, skb);
1111 return MAPPING_EMPTY;
1112 }
1113
1114 if (!subflow->map_valid)
1115 return MAPPING_INVALID;
1116
1117 goto validate_seq;
1118 }
1119
1120 trace_get_mapping_status(mpext);
1121
1122 data_len = mpext->data_len;
1123 if (data_len == 0) {
1124 pr_debug("infinite mapping received");
1125 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
1126 subflow->map_data_len = 0;
1127 return MAPPING_INVALID;
1128 }
1129
1130 if (mpext->data_fin == 1) {
1131 u64 data_fin_seq;
1132
1133 if (data_len == 1) {
1134 bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq,
1135 mpext->dsn64);
1136 pr_debug("DATA_FIN with no payload seq=%llu", mpext->data_seq);
1137 if (subflow->map_valid) {
1138 /* A DATA_FIN might arrive in a DSS
1139 * option before the previous mapping
1140 * has been fully consumed. Continue
1141 * handling the existing mapping.
1142 */
1143 skb_ext_del(skb, SKB_EXT_MPTCP);
1144 return MAPPING_OK;
1145 }
1146
1147 if (updated)
1148 mptcp_schedule_work((struct sock *)msk);
1149
1150 return MAPPING_DATA_FIN;
1151 }
1152
1153 data_fin_seq = mpext->data_seq + data_len - 1;
1154
1155 /* If mpext->data_seq is a 32-bit value, data_fin_seq must also
1156 * be limited to 32 bits.
1157 */
1158 if (!mpext->dsn64)
1159 data_fin_seq &= GENMASK_ULL(31, 0);
1160
1161 mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
1162 pr_debug("DATA_FIN with mapping seq=%llu dsn64=%d",
1163 data_fin_seq, mpext->dsn64);
1164
1165 /* Adjust for DATA_FIN using 1 byte of sequence space */
1166 data_len--;
1167 }
1168
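/* DSS sequence numbers can be sent as 32-bit values; expand them to 64 bits
 * relative to the current msk-level ack_seq
 */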
1169 map_seq = mptcp_expand_seq(READ_ONCE(msk->ack_seq), mpext->data_seq, mpext->dsn64);
1170 WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64);
1171
1172 if (subflow->map_valid) {
1173 /* Allow replacing only with an identical map */
1174 if (subflow->map_seq == map_seq &&
1175 subflow->map_subflow_seq == mpext->subflow_seq &&
1176 subflow->map_data_len == data_len &&
1177 subflow->map_csum_reqd == mpext->csum_reqd) {
1178 skb_ext_del(skb, SKB_EXT_MPTCP);
1179 goto validate_csum;
1180 }
1181
1182 /* If this skb's data is fully covered by the current mapping,
1183 * the new map would need caching, which is not supported
1184 */
1185 if (skb_is_fully_mapped(ssk, skb)) {
1186 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
1187 return MAPPING_INVALID;
1188 }
1189
1190 /* will validate the next map after consuming the current one */
1191 goto validate_csum;
1192 }
1193
1194 subflow->map_seq = map_seq;
1195 subflow->map_subflow_seq = mpext->subflow_seq;
1196 subflow->map_data_len = data_len;
1197 subflow->map_valid = 1;
1198 subflow->map_data_fin = mpext->data_fin;
1199 subflow->mpc_map = mpext->mpc_map;
1200 subflow->map_csum_reqd = mpext->csum_reqd;
1201 subflow->map_csum_len = 0;
1202 subflow->map_data_csum = csum_unfold(mpext->csum);
1203
1204 /* Cfr. RFC 8684 Section 3.3 */
1205 if (unlikely(subflow->map_csum_reqd != csum_reqd))
1206 return MAPPING_INVALID;
1207
1208 pr_debug("new map seq=%llu subflow_seq=%u data_len=%u csum=%d:%u",
1209 subflow->map_seq, subflow->map_subflow_seq,
1210 subflow->map_data_len, subflow->map_csum_reqd,
1211 subflow->map_data_csum);
1212
1213 validate_seq:
1214 /* we revalidate valid mapping on new skb, because we must ensure
1215 * the current skb is completely covered by the available mapping
1216 */
1217 if (!validate_mapping(ssk, skb)) {
1218 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSTCPMISMATCH);
1219 return MAPPING_INVALID;
1220 }
1221
1222 skb_ext_del(skb, SKB_EXT_MPTCP);
1223
1224 validate_csum:
1225 return validate_data_csum(ssk, skb, csum_reqd);
1226 }
1227
1228 static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
1229 u64 limit)
1230 {
1231 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1232 bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
1233 struct tcp_sock *tp = tcp_sk(ssk);
1234 u32 offset, incr, avail_len;
1235
1236 offset = tp->copied_seq - TCP_SKB_CB(skb)->seq;
1237 if (WARN_ON_ONCE(offset > skb->len))
1238 goto out;
1239
1240 avail_len = skb->len - offset;
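/* when the limit covers the whole remaining payload, also consume the FIN
 * sequence space so that the skb can be freed below
 */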
1241 incr = limit >= avail_len ? avail_len + fin : limit;
1242
1243 pr_debug("discarding=%d len=%d offset=%d seq=%d", incr, skb->len,
1244 offset, subflow->map_subflow_seq);
1245 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
1246 tcp_sk(ssk)->copied_seq += incr;
1247
1248 out:
1249 if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
1250 sk_eat_skb(ssk, skb);
1251 if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
1252 subflow->map_valid = 0;
1253 }
1254
1255 /* sched mptcp worker to remove the subflow if no more data is pending */
1256 static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
1257 {
1258 if (likely(ssk->sk_state != TCP_CLOSE))
1259 return;
1260
1261 if (skb_queue_empty(&ssk->sk_receive_queue) &&
1262 !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
1263 mptcp_schedule_work((struct sock *)msk);
1264 }
1265
1266 static bool subflow_can_fallback(struct mptcp_subflow_context *subflow)
1267 {
1268 struct mptcp_sock *msk = mptcp_sk(subflow->conn);
1269
1270 if (subflow->mp_join)
1271 return false;
1272 else if (READ_ONCE(msk->csum_enabled))
1273 return !subflow->valid_csum_seen;
1274 else
1275 return !subflow->fully_established;
1276 }
1277
1278 static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
1279 {
1280 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1281 unsigned long fail_tout;
1282
1283 /* graceful failure can happen only on the MPC subflow */
1284 if (WARN_ON_ONCE(ssk != READ_ONCE(msk->first)))
1285 return;
1286
1287 /* since the close timeout takes precedence over the fail one,
1288 * no need to start the latter when the first is already set
1289 */
1290 if (sock_flag((struct sock *)msk, SOCK_DEAD))
1291 return;
1292
1293 /* we don't need extreme accuracy here, use a zero fail_tout as a special
1294 * value meaning no fail timeout at all;
1295 */
1296 fail_tout = jiffies + TCP_RTO_MAX;
1297 if (!fail_tout)
1298 fail_tout = 1;
1299 WRITE_ONCE(subflow->fail_tout, fail_tout);
1300 tcp_send_ack(ssk);
1301
1302 mptcp_reset_tout_timer(msk, subflow->fail_tout);
1303 }
1304
1305 static bool subflow_check_data_avail(struct sock *ssk)
1306 {
1307 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1308 enum mapping_status status;
1309 struct mptcp_sock *msk;
1310 struct sk_buff *skb;
1311
1312 if (!skb_peek(&ssk->sk_receive_queue))
1313 WRITE_ONCE(subflow->data_avail, false);
1314 if (subflow->data_avail)
1315 return true;
1316
1317 msk = mptcp_sk(subflow->conn);
1318 for (;;) {
1319 u64 ack_seq;
1320 u64 old_ack;
1321
1322 status = get_mapping_status(ssk, msk);
1323 trace_subflow_check_data_avail(status, skb_peek(&ssk->sk_receive_queue));
1324 if (unlikely(status == MAPPING_INVALID || status == MAPPING_DUMMY ||
1325 status == MAPPING_BAD_CSUM))
1326 goto fallback;
1327
1328 if (status != MAPPING_OK)
1329 goto no_data;
1330
1331 skb = skb_peek(&ssk->sk_receive_queue);
1332 if (WARN_ON_ONCE(!skb))
1333 goto no_data;
1334
1335 if (unlikely(!READ_ONCE(msk->can_ack)))
1336 goto fallback;
1337
1338 old_ack = READ_ONCE(msk->ack_seq);
1339 ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
1340 pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
1341 ack_seq);
1342 if (unlikely(before64(ack_seq, old_ack))) {
1343 mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
1344 continue;
1345 }
1346
1347 WRITE_ONCE(subflow->data_avail, true);
1348 break;
1349 }
1350 return true;
1351
1352 no_data:
1353 subflow_sched_work_if_closed(msk, ssk);
1354 return false;
1355
1356 fallback:
1357 if (!__mptcp_check_fallback(msk)) {
1358 /* RFC 8684 section 3.7. */
1359 if (status == MAPPING_BAD_CSUM &&
1360 (subflow->mp_join || subflow->valid_csum_seen)) {
1361 subflow->send_mp_fail = 1;
1362
1363 if (!READ_ONCE(msk->allow_infinite_fallback)) {
1364 subflow->reset_transient = 0;
1365 subflow->reset_reason = MPTCP_RST_EMIDDLEBOX;
1366 goto reset;
1367 }
1368 mptcp_subflow_fail(msk, ssk);
1369 WRITE_ONCE(subflow->data_avail, true);
1370 return true;
1371 }
1372
1373 if (!subflow_can_fallback(subflow) && subflow->map_data_len) {
1374 /* fatal protocol error, close the socket.
1375 * subflow_error_report() will introduce the appropriate barriers
1376 */
1377 subflow->reset_transient = 0;
1378 subflow->reset_reason = MPTCP_RST_EMPTCP;
1379
1380 reset:
1381 WRITE_ONCE(ssk->sk_err, EBADMSG);
1382 tcp_set_state(ssk, TCP_CLOSE);
1383 while ((skb = skb_peek(&ssk->sk_receive_queue)))
1384 sk_eat_skb(ssk, skb);
1385 mptcp_send_active_reset_reason(ssk);
1386 WRITE_ONCE(subflow->data_avail, false);
1387 return false;
1388 }
1389
1390 mptcp_do_fallback(ssk);
1391 }
1392
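/* TCP fallback: synthesize a dummy mapping covering all the data currently
 * sitting in the subflow receive queue
 */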
1393 skb = skb_peek(&ssk->sk_receive_queue);
1394 subflow->map_valid = 1;
1395 subflow->map_seq = READ_ONCE(msk->ack_seq);
1396 subflow->map_data_len = skb->len;
1397 subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
1398 WRITE_ONCE(subflow->data_avail, true);
1399 return true;
1400 }
1401
1402 bool mptcp_subflow_data_available(struct sock *sk)
1403 {
1404 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
1405
1406 /* check if current mapping is still valid */
1407 if (subflow->map_valid &&
1408 mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
1409 subflow->map_valid = 0;
1410 WRITE_ONCE(subflow->data_avail, false);
1411
1412 pr_debug("Done with mapping: seq=%u data_len=%u",
1413 subflow->map_subflow_seq,
1414 subflow->map_data_len);
1415 }
1416
1417 return subflow_check_data_avail(sk);
1418 }
1419
1420 /* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy,
1421 * not the ssk one.
1422 *
1423 * In mptcp, rwin is about the mptcp-level connection data.
1424 *
1425 * Data that is still on the ssk rx queue can thus be ignored,
1426 * as far as the mptcp peer is concerned, that data is still in flight.
1427 * DSS ACK is updated when skb is moved to the mptcp rx queue.
1428 */
1429 void mptcp_space(const struct sock *ssk, int *space, int *full_space)
1430 {
1431 const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1432 const struct sock *sk = subflow->conn;
1433
1434 *space = __mptcp_space(sk);
1435 *full_space = mptcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
1436 }
1437
1438 static void subflow_error_report(struct sock *ssk)
1439 {
1440 struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
1441
1442 /* bail early if this is a no-op, so that we avoid introducing a
1443 * problematic lockdep dependency between TCP accept queue lock
1444 * and msk socket spinlock
1445 */
1446 if (!sk->sk_socket)
1447 return;
1448
1449 mptcp_data_lock(sk);
1450 if (!sock_owned_by_user(sk))
1451 __mptcp_error_report(sk);
1452 else
1453 __set_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->cb_flags);
1454 mptcp_data_unlock(sk);
1455 }
1456
1457 static void subflow_data_ready(struct sock *sk)
1458 {
1459 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
1460 u16 state = 1 << inet_sk_state_load(sk);
1461 struct sock *parent = subflow->conn;
1462 struct mptcp_sock *msk;
1463
1464 trace_sk_data_ready(sk);
1465
1466 msk = mptcp_sk(parent);
1467 if (state & TCPF_LISTEN) {
1468 /* MPJ subflows are removed from the accept queue before reaching here,
1469 * avoid stray wakeups
1470 */
1471 if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
1472 return;
1473
1474 parent->sk_data_ready(parent);
1475 return;
1476 }
1477
1478 WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
1479 !subflow->mp_join && !(state & TCPF_CLOSE));
1480
1481 if (mptcp_subflow_data_available(sk)) {
1482 mptcp_data_ready(parent, sk);
1483
1484 /* subflow-level lowat tests are not relevant;
1485 * respect the msk-level threshold, possibly mandating an immediate ack
1486 */
1487 if (mptcp_data_avail(msk) < parent->sk_rcvlowat &&
1488 (tcp_sk(sk)->rcv_nxt - tcp_sk(sk)->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss)
1489 inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
1490 } else if (unlikely(sk->sk_err)) {
1491 subflow_error_report(sk);
1492 }
1493 }
1494
1495 static void subflow_write_space(struct sock *ssk)
1496 {
1497 struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
1498
1499 mptcp_propagate_sndbuf(sk, ssk);
1500 mptcp_write_space(sk);
1501 }
1502
1503 static const struct inet_connection_sock_af_ops *
1504 subflow_default_af_ops(struct sock *sk)
1505 {
1506 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1507 if (sk->sk_family == AF_INET6)
1508 return &subflow_v6_specific;
1509 #endif
1510 return &subflow_specific;
1511 }
1512
1513 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1514 void mptcpv6_handle_mapped(struct sock *sk, bool mapped)
1515 {
1516 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
1517 struct inet_connection_sock *icsk = inet_csk(sk);
1518 const struct inet_connection_sock_af_ops *target;
1519
1520 target = mapped ? &subflow_v6m_specific : subflow_default_af_ops(sk);
1521
1522 pr_debug("subflow=%p family=%d ops=%p target=%p mapped=%d",
1523 subflow, sk->sk_family, icsk->icsk_af_ops, target, mapped);
1524
1525 if (likely(icsk->icsk_af_ops == target))
1526 return;
1527
1528 subflow->icsk_af_ops = icsk->icsk_af_ops;
1529 icsk->icsk_af_ops = target;
1530 }
1531 #endif
1532
1533 void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
1534 struct sockaddr_storage *addr,
1535 unsigned short family)
1536 {
1537 memset(addr, 0, sizeof(*addr));
1538 addr->ss_family = family;
1539 if (addr->ss_family == AF_INET) {
1540 struct sockaddr_in *in_addr = (struct sockaddr_in *)addr;
1541
1542 if (info->family == AF_INET)
1543 in_addr->sin_addr = info->addr;
1544 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1545 else if (ipv6_addr_v4mapped(&info->addr6))
1546 in_addr->sin_addr.s_addr = info->addr6.s6_addr32[3];
1547 #endif
1548 in_addr->sin_port = info->port;
1549 }
1550 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1551 else if (addr->ss_family == AF_INET6) {
1552 struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)addr;
1553
1554 if (info->family == AF_INET)
1555 ipv6_addr_set_v4mapped(info->addr.s_addr,
1556 &in6_addr->sin6_addr);
1557 else
1558 in6_addr->sin6_addr = info->addr6;
1559 in6_addr->sin6_port = info->port;
1560 }
1561 #endif
1562 }
1563
1564 int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
1565 const struct mptcp_addr_info *remote)
1566 {
1567 struct mptcp_sock *msk = mptcp_sk(sk);
1568 struct mptcp_subflow_context *subflow;
1569 struct sockaddr_storage addr;
1570 int remote_id = remote->id;
1571 int local_id = loc->id;
1572 int err = -ENOTCONN;
1573 struct socket *sf;
1574 struct sock *ssk;
1575 u32 remote_token;
1576 int addrlen;
1577 int ifindex;
1578 u8 flags;
1579
1580 if (!mptcp_is_fully_established(sk))
1581 goto err_out;
1582
1583 err = mptcp_subflow_create_socket(sk, loc->family, &sf);
1584 if (err)
1585 goto err_out;
1586
1587 ssk = sf->sk;
1588 subflow = mptcp_subflow_ctx(ssk);
1589 do {
1590 get_random_bytes(&subflow->local_nonce, sizeof(u32));
1591 } while (!subflow->local_nonce);
1592
1593 if (local_id)
1594 subflow_set_local_id(subflow, local_id);
1595
1596 mptcp_pm_get_flags_and_ifindex_by_id(msk, local_id,
1597 &flags, &ifindex);
1598 subflow->remote_key_valid = 1;
1599 subflow->remote_key = READ_ONCE(msk->remote_key);
1600 subflow->local_key = READ_ONCE(msk->local_key);
1601 subflow->token = msk->token;
1602 mptcp_info2sockaddr(loc, &addr, ssk->sk_family);
1603
1604 addrlen = sizeof(struct sockaddr_in);
1605 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1606 if (addr.ss_family == AF_INET6)
1607 addrlen = sizeof(struct sockaddr_in6);
1608 #endif
1609 ssk->sk_bound_dev_if = ifindex;
1610 err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
1611 if (err)
1612 goto failed;
1613
1614 mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL);
1615 pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
1616 remote_token, local_id, remote_id);
1617 subflow->remote_token = remote_token;
1618 WRITE_ONCE(subflow->remote_id, remote_id);
1619 subflow->request_join = 1;
1620 subflow->request_bkup = !!(flags & MPTCP_PM_ADDR_FLAG_BACKUP);
1621 subflow->subflow_id = msk->subflow_id++;
1622 mptcp_info2sockaddr(remote, &addr, ssk->sk_family);
1623
1624 sock_hold(ssk);
1625 list_add_tail(&subflow->node, &msk->conn_list);
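/* non-blocking connect: the MP_JOIN handshake completes asynchronously via
 * subflow_finish_connect()
 */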
1626 err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
1627 if (err && err != -EINPROGRESS)
1628 goto failed_unlink;
1629
1630 /* discard the subflow socket */
1631 mptcp_sock_graft(ssk, sk->sk_socket);
1632 iput(SOCK_INODE(sf));
1633 WRITE_ONCE(msk->allow_infinite_fallback, false);
1634 mptcp_stop_tout_timer(sk);
1635 return 0;
1636
1637 failed_unlink:
1638 list_del(&subflow->node);
1639 sock_put(mptcp_subflow_tcp_sock(subflow));
1640
1641 failed:
1642 subflow->disposable = 1;
1643 sock_release(sf);
1644
1645 err_out:
1646 /* we account subflows before the creation, and these failures will not
1647 * be caught by sk_state_change()
1648 */
1649 mptcp_pm_close_subflow(msk);
1650 return err;
1651 }
1652
1653 static void mptcp_attach_cgroup(struct sock *parent, struct sock *child)
1654 {
1655 #ifdef CONFIG_SOCK_CGROUP_DATA
1656 struct sock_cgroup_data *parent_skcd = &parent->sk_cgrp_data,
1657 *child_skcd = &child->sk_cgrp_data;
1658
1659 /* only the additional subflows created by kworkers have to be modified */
1660 if (cgroup_id(sock_cgroup_ptr(parent_skcd)) !=
1661 cgroup_id(sock_cgroup_ptr(child_skcd))) {
1662 #ifdef CONFIG_MEMCG
1663 struct mem_cgroup *memcg = parent->sk_memcg;
1664
1665 mem_cgroup_sk_free(child);
1666 if (memcg && css_tryget(&memcg->css))
1667 child->sk_memcg = memcg;
1668 #endif /* CONFIG_MEMCG */
1669
1670 cgroup_sk_free(child_skcd);
1671 *child_skcd = *parent_skcd;
1672 cgroup_sk_clone(child_skcd);
1673 }
1674 #endif /* CONFIG_SOCK_CGROUP_DATA */
1675 }
1676
1677 static void mptcp_subflow_ops_override(struct sock *ssk)
1678 {
1679 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1680 if (ssk->sk_prot == &tcpv6_prot)
1681 ssk->sk_prot = &tcpv6_prot_override;
1682 else
1683 #endif
1684 ssk->sk_prot = &tcp_prot_override;
1685 }
1686
1687 static void mptcp_subflow_ops_undo_override(struct sock *ssk)
1688 {
1689 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
1690 if (ssk->sk_prot == &tcpv6_prot_override)
1691 ssk->sk_prot = &tcpv6_prot;
1692 else
1693 #endif
1694 ssk->sk_prot = &tcp_prot;
1695 }
1696
1697 int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
1698 struct socket **new_sock)
1699 {
1700 struct mptcp_subflow_context *subflow;
1701 struct net *net = sock_net(sk);
1702 struct socket *sf;
1703 int err;
1704
1705 /* un-accepted server sockets can reach here - on bad configuration
1706 * bail early to avoid greater trouble later
1707 */
1708 if (unlikely(!sk->sk_socket))
1709 return -EINVAL;
1710
1711 err = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP, &sf);
1712 if (err)
1713 return err;
1714
1715 lock_sock_nested(sf->sk, SINGLE_DEPTH_NESTING);
1716
1717 err = security_mptcp_add_subflow(sk, sf->sk);
1718 if (err)
1719 goto err_free;
1720
1721 /* the newly created socket has to be in the same cgroup as its parent */
1722 mptcp_attach_cgroup(sk, sf->sk);
1723
1724 /* kernel sockets do not by default acquire net ref, but TCP timer
1725 * needs it.
1726 * Update ns_tracker to current stack trace and refcounted tracker.
1727 */
1728 __netns_tracker_free(net, &sf->sk->ns_tracker, false);
1729 sf->sk->sk_net_refcnt = 1;
1730 get_net_track(net, &sf->sk->ns_tracker, GFP_KERNEL);
1731 sock_inuse_add(net, 1);
1732 err = tcp_set_ulp(sf->sk, "mptcp");
1733 if (err)
1734 goto err_free;
1735
1736 mptcp_sockopt_sync_locked(mptcp_sk(sk), sf->sk);
1737 release_sock(sf->sk);
1738
1739 /* the newly created socket really belongs to the owning MPTCP
1740 * socket, even if for additional subflows the allocation is performed
1741 * by a kernel workqueue. Adjust inode references, so that the
1742 * procfs/diag interfaces really show this one belonging to the correct
1743 * user.
1744 */
1745 SOCK_INODE(sf)->i_ino = SOCK_INODE(sk->sk_socket)->i_ino;
1746 SOCK_INODE(sf)->i_uid = SOCK_INODE(sk->sk_socket)->i_uid;
1747 SOCK_INODE(sf)->i_gid = SOCK_INODE(sk->sk_socket)->i_gid;
1748
1749 subflow = mptcp_subflow_ctx(sf->sk);
1750 pr_debug("subflow=%p", subflow);
1751
1752 *new_sock = sf;
1753 sock_hold(sk);
1754 subflow->conn = sk;
1755 mptcp_subflow_ops_override(sf->sk);
1756
1757 return 0;
1758
1759 err_free:
1760 release_sock(sf->sk);
1761 sock_release(sf);
1762 return err;
1763 }
1764
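/* allocate the subflow ULP context and link it to the TCP socket via
 * icsk_ulp_data
 */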
static struct mptcp_subflow_context *subflow_create_ctx(struct sock *sk,
							 gfp_t priority)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;

	ctx = kzalloc(sizeof(*ctx), priority);
	if (!ctx)
		return NULL;

	rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
	INIT_LIST_HEAD(&ctx->node);
	INIT_LIST_HEAD(&ctx->delegated_node);

	pr_debug("subflow=%p", ctx);

	ctx->tcp_sock = sk;
	WRITE_ONCE(ctx->local_id, -1);

	return ctx;
}

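/* wake up any process sleeping on the subflow socket wait queue */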
static void __subflow_state_change(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_all(&wq->wait);
	rcu_read_unlock();
}

static bool subflow_is_done(const struct sock *sk)
{
	return sk->sk_shutdown & RCV_SHUTDOWN || sk->sk_state == TCP_CLOSE;
}

static void subflow_state_change(struct sock *sk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
	struct sock *parent = subflow->conn;
	struct mptcp_sock *msk;

	__subflow_state_change(sk);

	msk = mptcp_sk(parent);
	if (subflow_simultaneous_connect(sk)) {
		mptcp_do_fallback(sk);
		pr_fallback(msk);
		subflow->conn_finished = 1;
		mptcp_propagate_state(parent, sk, subflow, NULL);
	}

	/* as recvmsg() does not acquire the subflow socket for ssk selection,
	 * a FIN packet carrying a DSS can go unnoticed if we don't trigger
	 * the data available machinery here.
	 */
	if (mptcp_subflow_data_available(sk))
		mptcp_data_ready(parent, sk);
	else if (unlikely(sk->sk_err))
		subflow_error_report(sk);

	subflow_sched_work_if_closed(mptcp_sk(parent), sk);

	/* when the fallback subflow closes the rx side, trigger a 'dummy'
	 * ingress data fin, so that the msk state will follow along
	 */
	if (__mptcp_check_fallback(msk) && subflow_is_done(sk) && msk->first == sk &&
	    mptcp_update_rcv_data_fin(msk, READ_ONCE(msk->ack_seq), true))
		mptcp_schedule_work(parent);
}

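/* walk the still unaccepted subflows queued on the given listener ssk and
 * force-close their MPTCP parent sockets, then restore the accept queue so
 * the TCP code can dispose of the pending requests
 */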
void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk)
{
	struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;
	struct request_sock *req, *head, *tail;
	struct mptcp_subflow_context *subflow;
	struct sock *sk, *ssk;

	/* Due to lock dependencies no relevant lock can be acquired under rskq_lock.
	 * Splice the req list, so that accept() cannot reach the pending ssk after
	 * the listener socket is released below.
	 */
	spin_lock_bh(&queue->rskq_lock);
	head = queue->rskq_accept_head;
	tail = queue->rskq_accept_tail;
	queue->rskq_accept_head = NULL;
	queue->rskq_accept_tail = NULL;
	spin_unlock_bh(&queue->rskq_lock);

	if (!head)
		return;

	/* can't acquire the msk socket lock under the subflow one,
	 * or will cause ABBA deadlock
	 */
	release_sock(listener_ssk);

	for (req = head; req; req = req->dl_next) {
		ssk = req->sk;
		if (!sk_is_mptcp(ssk))
			continue;

		subflow = mptcp_subflow_ctx(ssk);
		if (!subflow || !subflow->conn)
			continue;

		sk = subflow->conn;
		sock_hold(sk);

		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
		__mptcp_unaccepted_force_close(sk);
		release_sock(sk);

		/* lockdep will report a false positive ABBA deadlock
		 * between cancel_work_sync and the listener socket.
		 * The involved locks belong to different sockets WRT
		 * the existing AB chain.
		 * Using a per socket key is problematic as key
		 * deregistration requires process context, while it
		 * must be performed at socket disposal time, in
		 * atomic context.
		 * Just tell lockdep to consider the listener socket
		 * released here.
		 */
		mutex_release(&listener_sk->sk_lock.dep_map, _RET_IP_);
		mptcp_cancel_work(sk);
		mutex_acquire(&listener_sk->sk_lock.dep_map, 0, 0, _RET_IP_);

		sock_put(sk);
	}

	/* we are still under the listener msk socket lock */
	lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING);

	/* restore the listener queue, to let the TCP code clean it up */
	spin_lock_bh(&queue->rskq_lock);
	WARN_ON_ONCE(queue->rskq_accept_head);
	queue->rskq_accept_head = head;
	queue->rskq_accept_tail = tail;
	spin_unlock_bh(&queue->rskq_lock);
}

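/* ULP init hook: attach the subflow context to a kernel TCP socket and
 * redirect the socket callbacks to the MPTCP-specific handlers
 */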
static int subflow_ulp_init(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct mptcp_subflow_context *ctx;
	struct tcp_sock *tp = tcp_sk(sk);
	int err = 0;

	/* disallow attaching ULP to a socket unless it has been
	 * created with sock_create_kern()
	 */
	if (!sk->sk_kern_sock) {
		err = -EOPNOTSUPP;
		goto out;
	}

	ctx = subflow_create_ctx(sk, GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out;
	}

	pr_debug("subflow=%p, family=%d", ctx, sk->sk_family);

	tp->is_mptcp = 1;
	ctx->icsk_af_ops = icsk->icsk_af_ops;
	icsk->icsk_af_ops = subflow_default_af_ops(sk);
	ctx->tcp_state_change = sk->sk_state_change;
	ctx->tcp_error_report = sk->sk_error_report;

	WARN_ON_ONCE(sk->sk_data_ready != sock_def_readable);
	WARN_ON_ONCE(sk->sk_write_space != sk_stream_write_space);

	sk->sk_data_ready = subflow_data_ready;
	sk->sk_write_space = subflow_write_space;
	sk->sk_state_change = subflow_state_change;
	sk->sk_error_report = subflow_error_report;
out:
	return err;
}

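/* ULP release hook: undo the proto ops override and free the subflow context,
 * unless the msk still needs it; for unaccepted subflows the context is freed
 * later by __mptcp_close_ssk()
 */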
static void subflow_ulp_release(struct sock *ssk)
{
	struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
	bool release = true;
	struct sock *sk;

	if (!ctx)
		return;

	sk = ctx->conn;
	if (sk) {
		/* if the msk has been orphaned, keep the ctx
		 * alive; it will be freed by __mptcp_close_ssk(),
		 * when the subflow is still unaccepted
		 */
		release = ctx->disposable || list_empty(&ctx->node);

		/* inet_child_forget() does not call sk_state_change(),
		 * explicitly trigger the socket close machinery
		 */
		if (!release && !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW,
						  &mptcp_sk(sk)->flags))
			mptcp_schedule_work(sk);
		sock_put(sk);
	}

	mptcp_subflow_ops_undo_override(ssk);
	if (release)
		kfree_rcu(ctx, rcu);
}

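/* ULP clone hook: build the subflow context for a freshly cloned child socket
 * from the request socket data, or fall back to plain TCP when MPTCP has not
 * been negotiated
 */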
static void subflow_ulp_clone(const struct request_sock *req,
			      struct sock *newsk,
			      const gfp_t priority)
{
	struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
	struct mptcp_subflow_context *old_ctx = mptcp_subflow_ctx(newsk);
	struct mptcp_subflow_context *new_ctx;

	if (!tcp_rsk(req)->is_mptcp ||
	    (!subflow_req->mp_capable && !subflow_req->mp_join)) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx = subflow_create_ctx(newsk, priority);
	if (!new_ctx) {
		subflow_ulp_fallback(newsk, old_ctx);
		return;
	}

	new_ctx->conn_finished = 1;
	new_ctx->icsk_af_ops = old_ctx->icsk_af_ops;
	new_ctx->tcp_state_change = old_ctx->tcp_state_change;
	new_ctx->tcp_error_report = old_ctx->tcp_error_report;
	new_ctx->rel_write_seq = 1;
	new_ctx->tcp_sock = newsk;

	if (subflow_req->mp_capable) {
		/* see comments in subflow_syn_recv_sock(), MPTCP connection
		 * is fully established only after we receive the remote key
		 */
		new_ctx->mp_capable = 1;
		new_ctx->local_key = subflow_req->local_key;
		new_ctx->token = subflow_req->token;
		new_ctx->ssn_offset = subflow_req->ssn_offset;
		new_ctx->idsn = subflow_req->idsn;

		/* this is the first subflow, id is always 0 */
		subflow_set_local_id(new_ctx, 0);
	} else if (subflow_req->mp_join) {
		new_ctx->ssn_offset = subflow_req->ssn_offset;
		new_ctx->mp_join = 1;
		new_ctx->fully_established = 1;
		new_ctx->remote_key_valid = 1;
		new_ctx->backup = subflow_req->backup;
		new_ctx->request_bkup = subflow_req->request_bkup;
		WRITE_ONCE(new_ctx->remote_id, subflow_req->remote_id);
		new_ctx->token = subflow_req->token;
		new_ctx->thmac = subflow_req->thmac;

		/* the subflow req id is valid, fetched via subflow_check_req()
		 * and subflow_token_join_request()
		 */
		subflow_set_local_id(new_ctx, subflow_req->local_id);
	}
}

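/* release_cb override: run the delegated MPTCP actions pending on this
 * subflow before the plain TCP release_cb
 */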
static void tcp_release_cb_override(struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	long status;

	/* process and clear all the pending actions, but leave the subflow in
	 * the napi queue. To respect locking, only the same CPU that originated
	 * the action can touch the list. mptcp_napi_poll will take care of it.
	 */
	status = set_mask_bits(&subflow->delegated_status, MPTCP_DELEGATE_ACTIONS_MASK, 0);
	if (status)
		mptcp_subflow_process_delegated(ssk, status);

	tcp_release_cb(ssk);
}

static int tcp_abort_override(struct sock *ssk, int err)
{
	/* closing a listener subflow requires a great deal of care.
	 * keep it simple and just prevent such an operation
	 */
	if (inet_sk_state_load(ssk) == TCP_LISTEN)
		return -EINVAL;

	return tcp_abort(ssk, err);
}

static struct tcp_ulp_ops subflow_ulp_ops __read_mostly = {
	.name		= "mptcp",
	.owner		= THIS_MODULE,
	.init		= subflow_ulp_init,
	.release	= subflow_ulp_release,
	.clone		= subflow_ulp_clone,
};

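/* MPTCP request socks carry extra state: give each address family its own
 * slab cache, sized for struct mptcp_subflow_request_sock
 */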
static int subflow_ops_init(struct request_sock_ops *subflow_ops)
{
	subflow_ops->obj_size = sizeof(struct mptcp_subflow_request_sock);

	subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
					      subflow_ops->obj_size, 0,
					      SLAB_ACCOUNT |
					      SLAB_TYPESAFE_BY_RCU,
					      NULL);
	if (!subflow_ops->slab)
		return -ENOMEM;

	return 0;
}

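/* set up the per-family subflow request sock ops, the MPTCP-specific af_ops
 * and proto overrides, then register the "mptcp" ULP itself
 */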
void __init mptcp_subflow_init(void)
{
	mptcp_subflow_v4_request_sock_ops = tcp_request_sock_ops;
	mptcp_subflow_v4_request_sock_ops.slab_name = "request_sock_subflow_v4";
	mptcp_subflow_v4_request_sock_ops.destructor = subflow_v4_req_destructor;

	if (subflow_ops_init(&mptcp_subflow_v4_request_sock_ops) != 0)
		panic("MPTCP: failed to init subflow v4 request sock ops\n");

	subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
	subflow_request_sock_ipv4_ops.route_req = subflow_v4_route_req;
	subflow_request_sock_ipv4_ops.send_synack = subflow_v4_send_synack;

	subflow_specific = ipv4_specific;
	subflow_specific.conn_request = subflow_v4_conn_request;
	subflow_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_specific.sk_rx_dst_set = subflow_finish_connect;
	subflow_specific.rebuild_header = subflow_rebuild_header;

	tcp_prot_override = tcp_prot;
	tcp_prot_override.release_cb = tcp_release_cb_override;
	tcp_prot_override.diag_destroy = tcp_abort_override;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	/* In struct mptcp_subflow_request_sock, we assume the TCP request sock
	 * structures for v4 and v6 have the same size. It should not change in
	 * the future, but better to make sure we are warned if it is no longer
	 * the case.
	 */
	BUILD_BUG_ON(sizeof(struct tcp_request_sock) != sizeof(struct tcp6_request_sock));

	mptcp_subflow_v6_request_sock_ops = tcp6_request_sock_ops;
	mptcp_subflow_v6_request_sock_ops.slab_name = "request_sock_subflow_v6";
	mptcp_subflow_v6_request_sock_ops.destructor = subflow_v6_req_destructor;

	if (subflow_ops_init(&mptcp_subflow_v6_request_sock_ops) != 0)
		panic("MPTCP: failed to init subflow v6 request sock ops\n");

	subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
	subflow_request_sock_ipv6_ops.route_req = subflow_v6_route_req;
	subflow_request_sock_ipv6_ops.send_synack = subflow_v6_send_synack;

	subflow_v6_specific = ipv6_specific;
	subflow_v6_specific.conn_request = subflow_v6_conn_request;
	subflow_v6_specific.syn_recv_sock = subflow_syn_recv_sock;
	subflow_v6_specific.sk_rx_dst_set = subflow_finish_connect;
	subflow_v6_specific.rebuild_header = subflow_v6_rebuild_header;

	subflow_v6m_specific = subflow_v6_specific;
	subflow_v6m_specific.queue_xmit = ipv4_specific.queue_xmit;
	subflow_v6m_specific.send_check = ipv4_specific.send_check;
	subflow_v6m_specific.net_header_len = ipv4_specific.net_header_len;
	subflow_v6m_specific.mtu_reduced = ipv4_specific.mtu_reduced;
	subflow_v6m_specific.rebuild_header = subflow_rebuild_header;

	tcpv6_prot_override = tcpv6_prot;
	tcpv6_prot_override.release_cb = tcp_release_cb_override;
	tcpv6_prot_override.diag_destroy = tcp_abort_override;
#endif

	mptcp_diag_subflow_init(&subflow_ulp_ops);

	if (tcp_register_ulp(&subflow_ulp_ops) != 0)
		panic("MPTCP: failed to register subflows to ULP\n");
}