// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	NET3:	Implementation of the ICMP protocol layer.
 *
 *		Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 *	Some of the function names and the icmp unreach table for this
 *	module were derived from [icmp.c 1.0.11 06/02/93] by
 *	Ross Biro, Fred N. van Kempen, Mark Evans, Alan Cox, Gerhard Koerting.
 *	Other than that this module is a complete rewrite.
 *
 *	Fixes:
 *	Clemens Fruhwirth	:	introduce global icmp rate limiting
 *					with icmp type masking ability instead
 *					of broken per type icmp timeouts.
 *		Mike Shaver	:	RFC1122 checks.
 *		Alan Cox	:	Multicast ping reply as self.
 *		Alan Cox	:	Fix atomicity lockup in ip_build_xmit
 *					call.
 *		Alan Cox	:	Added 216,128 byte paths to the MTU
 *					code.
 *		Martin Mares	:	RFC1812 checks.
 *		Martin Mares	:	Can be configured to follow redirects
 *					if acting as a router _without_ a
 *					routing protocol (RFC 1812).
 *		Martin Mares	:	Echo requests may be configured to
 *					be ignored (RFC 1812).
 *		Martin Mares	:	Limitation of ICMP error message
 *					transmit rate (RFC 1812).
 *		Martin Mares	:	TOS and Precedence set correctly
 *					(RFC 1812).
 *		Martin Mares	:	Now copying as much data from the
 *					original packet as we can without
 *					exceeding 576 bytes (RFC 1812).
 *	Willy Konynenberg	:	Transparent proxying support.
 *		Keith Owens	:	RFC1191 correction for 4.2BSD based
 *					path MTU bug.
 *	Thomas Quinot		:	ICMP Dest Unreach codes up to 15 are
 *					valid (RFC 1812).
 *		Andi Kleen	:	Check all packet lengths properly
 *					and moved all kfree_skb() up to
 *					icmp_rcv.
 *		Andi Kleen	:	Move the rate limit bookkeeping
 *					into the dest entry and use a token
 *					bucket filter (thanks to ANK). Make
 *					the rates sysctl configurable.
 *		Yu Tianli	:	Fixed two ugly bugs in icmp_send
 *					- IP option length was accounted wrongly
 *					- ICMP header length was not accounted
 *					  at all.
 *	Tristan Greaves		:	Added sysctl option to ignore bogus
 *					broadcast responses from broken routers.
 *
 * To Fix:
 *
 *	- Should use skb_pull() instead of all the manual checking.
 *	  This would also greatly simplify some upper layer error handlers.
 *							--AK
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/netfilter_ipv4.h>
#include <linux/slab.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/protocol.h>
#include <net/icmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <net/ping.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <net/inet_common.h>
#include <net/ip_fib.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#define CREATE_TRACE_POINTS
#include <trace/events/icmp.h>

/*
 *	Build xmit assembly blocks
 */

struct icmp_bxm {
	struct sk_buff *skb;
	int offset;
	int data_len;

	struct {
		struct icmphdr icmph;
		__be32	       times[3];
	} data;
	int head_len;
	struct ip_options_data replyopts;
};

/* An array of errno for error messages from dest unreach. */
/* RFC 1122: 3.2.2.1 States that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errs'. */

const struct icmp_err icmp_err_convert[] = {
	{
		.errno = ENETUNREACH,	/* ICMP_NET_UNREACH */
		.fatal = 0,
	},
	{
		.errno = EHOSTUNREACH,	/* ICMP_HOST_UNREACH */
		.fatal = 0,
	},
	{
		.errno = ENOPROTOOPT	/* ICMP_PROT_UNREACH */,
		.fatal = 1,
	},
	{
		.errno = ECONNREFUSED,	/* ICMP_PORT_UNREACH */
		.fatal = 1,
	},
	{
		.errno = EMSGSIZE,	/* ICMP_FRAG_NEEDED */
		.fatal = 0,
	},
	{
		.errno = EOPNOTSUPP,	/* ICMP_SR_FAILED */
		.fatal = 0,
	},
	{
		.errno = ENETUNREACH,	/* ICMP_NET_UNKNOWN */
		.fatal = 1,
	},
	{
		.errno = EHOSTDOWN,	/* ICMP_HOST_UNKNOWN */
		.fatal = 1,
	},
	{
		.errno = ENONET,	/* ICMP_HOST_ISOLATED */
		.fatal = 1,
	},
	{
		.errno = ENETUNREACH,	/* ICMP_NET_ANO */
		.fatal = 1,
	},
	{
		.errno = EHOSTUNREACH,	/* ICMP_HOST_ANO */
		.fatal = 1,
	},
	{
		.errno = ENETUNREACH,	/* ICMP_NET_UNR_TOS */
		.fatal = 0,
	},
	{
		.errno = EHOSTUNREACH,	/* ICMP_HOST_UNR_TOS */
		.fatal = 0,
	},
	{
		.errno = EHOSTUNREACH,	/* ICMP_PKT_FILTERED */
		.fatal = 1,
	},
	{
		.errno = EHOSTUNREACH,	/* ICMP_PREC_VIOLATION */
		.fatal = 1,
	},
	{
		.errno = EHOSTUNREACH,	/* ICMP_PREC_CUTOFF */
		.fatal = 1,
	},
};
EXPORT_SYMBOL(icmp_err_convert);

/*
 *	ICMP control array. This specifies what to do with each ICMP.
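 *	Each entry pairs a handler with an 'error' flag marking the type as an
 *	error message; the table itself (icmp_pointers) is defined near the
 *	bottom of this file.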
 */

struct icmp_control {
	enum skb_drop_reason (*handler)(struct sk_buff *skb);
	short   error;		/* This ICMP is classed as an error message */
};

static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1];

static DEFINE_PER_CPU(struct sock *, ipv4_icmp_sk);

/* Called with BH disabled */
static inline struct sock *icmp_xmit_lock(struct net *net)
{
	struct sock *sk;

	sk = this_cpu_read(ipv4_icmp_sk);

	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
		/* This can happen if the output path signals a
		 * dst_link_failure() for an outgoing ICMP packet.
		 */
		return NULL;
	}
	sock_net_set(sk, net);
	return sk;
}

static inline void icmp_xmit_unlock(struct sock *sk)
{
	sock_net_set(sk, &init_net);
	spin_unlock(&sk->sk_lock.slock);
}

int sysctl_icmp_msgs_per_sec __read_mostly = 1000;
int sysctl_icmp_msgs_burst __read_mostly = 50;

/**
 * icmp_global_allow - Are we allowed to send one more ICMP message ?
 * @net: network namespace
 *
 * Uses a token bucket to limit our ICMP messages to ~sysctl_icmp_msgs_per_sec.
 * Returns false if we reached the limit and can not send another packet.
 * Works in tandem with icmp_global_consume().
 */
bool icmp_global_allow(struct net *net)
{
	u32 delta, now, oldstamp;
	int incr, new, old;

	/* Note: many cpus could find this condition true.
	 * Then later icmp_global_consume() could consume more credits,
	 * this is an acceptable race.
	 */
	if (atomic_read(&net->ipv4.icmp_global_credit) > 0)
		return true;

	now = jiffies;
	oldstamp = READ_ONCE(net->ipv4.icmp_global_stamp);
	delta = min_t(u32, now - oldstamp, HZ);
	if (delta < HZ / 50)
		return false;

	incr = READ_ONCE(sysctl_icmp_msgs_per_sec) * delta / HZ;
	if (!incr)
		return false;

	if (cmpxchg(&net->ipv4.icmp_global_stamp, oldstamp, now) == oldstamp) {
		old = atomic_read(&net->ipv4.icmp_global_credit);
		do {
			new = min(old + incr, READ_ONCE(sysctl_icmp_msgs_burst));
		} while (!atomic_try_cmpxchg(&net->ipv4.icmp_global_credit, &old, new));
	}
	return true;
}
EXPORT_SYMBOL(icmp_global_allow);

void icmp_global_consume(struct net *net)
{
	int credits = get_random_u32_below(3);

	/* Note: this might make icmp_global.credit negative. */
	if (credits)
		atomic_sub(credits, &net->ipv4.icmp_global_credit);
}
EXPORT_SYMBOL(icmp_global_consume);

static bool icmpv4_mask_allow(struct net *net, int type, int code)
{
	if (type > NR_ICMP_TYPES)
		return true;

	/* Don't limit PMTU discovery. */
	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
		return true;

	/* Limit if icmp type is enabled in ratemask. */
	if (!((1 << type) & READ_ONCE(net->ipv4.sysctl_icmp_ratemask)))
		return true;

	return false;
}

static bool icmpv4_global_allow(struct net *net, int type, int code,
				bool *apply_ratelimit)
{
	if (icmpv4_mask_allow(net, type, code))
		return true;

	if (icmp_global_allow(net)) {
		*apply_ratelimit = true;
		return true;
	}
	__ICMP_INC_STATS(net, ICMP_MIB_RATELIMITGLOBAL);
	return false;
}

/*
 *	Send an ICMP frame.
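 *	icmpv4_xrlim_allow() below applies the per-destination inet_peer token
 *	bucket (sysctl_icmp_ratelimit) on top of the global icmp_msgs_per_sec
 *	limit checked earlier.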
 */

static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
			       struct flowi4 *fl4, int type, int code,
			       bool apply_ratelimit)
{
	struct dst_entry *dst = &rt->dst;
	struct inet_peer *peer;
	bool rc = true;
	int vif;

	if (!apply_ratelimit)
		return true;

	/* No rate limit on loopback */
	if (dst->dev && (dst->dev->flags&IFF_LOOPBACK))
		goto out;

	vif = l3mdev_master_ifindex(dst->dev);
	peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, vif, 1);
	rc = inet_peer_xrlim_allow(peer,
				   READ_ONCE(net->ipv4.sysctl_icmp_ratelimit));
	if (peer)
		inet_putpeer(peer);
out:
	if (!rc)
		__ICMP_INC_STATS(net, ICMP_MIB_RATELIMITHOST);
	else
		icmp_global_consume(net);
	return rc;
}

/*
 *	Maintain the counters used in the SNMP statistics for outgoing ICMP
 */
void icmp_out_count(struct net *net, unsigned char type)
{
	ICMPMSGOUT_INC_STATS(net, type);
	ICMP_INC_STATS(net, ICMP_MIB_OUTMSGS);
}

/*
 *	Checksum each fragment, and on the first include the headers and final
 *	checksum.
 */
static int icmp_glue_bits(void *from, char *to, int offset, int len, int odd,
			  struct sk_buff *skb)
{
	struct icmp_bxm *icmp_param = from;
	__wsum csum;

	csum = skb_copy_and_csum_bits(icmp_param->skb,
				      icmp_param->offset + offset,
				      to, len);

	skb->csum = csum_block_add(skb->csum, csum, odd);
	if (icmp_pointers[icmp_param->data.icmph.type].error)
		nf_ct_attach(skb, icmp_param->skb);
	return 0;
}

static void icmp_push_reply(struct sock *sk,
			    struct icmp_bxm *icmp_param,
			    struct flowi4 *fl4,
			    struct ipcm_cookie *ipc, struct rtable **rt)
{
	struct sk_buff *skb;

	if (ip_append_data(sk, fl4, icmp_glue_bits, icmp_param,
			   icmp_param->data_len+icmp_param->head_len,
			   icmp_param->head_len,
			   ipc, rt, MSG_DONTWAIT) < 0) {
		__ICMP_INC_STATS(sock_net(sk), ICMP_MIB_OUTERRORS);
		ip_flush_pending_frames(sk);
	} else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
		struct icmphdr *icmph = icmp_hdr(skb);
		__wsum csum;
		struct sk_buff *skb1;

		csum = csum_partial_copy_nocheck((void *)&icmp_param->data,
						 (char *)icmph,
						 icmp_param->head_len);
		skb_queue_walk(&sk->sk_write_queue, skb1) {
			csum = csum_add(csum, skb1->csum);
		}
		icmph->checksum = csum_fold(csum);
		skb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk, fl4);
	}
}

/*
 *	Driving logic for building and sending ICMP messages.
 */

static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
{
	struct ipcm_cookie ipc;
	struct rtable *rt = skb_rtable(skb);
	struct net *net = dev_net(rt->dst.dev);
	bool apply_ratelimit = false;
	struct flowi4 fl4;
	struct sock *sk;
	struct inet_sock *inet;
	__be32 daddr, saddr;
	u32 mark = IP4_REPLY_MARK(net, skb->mark);
	int type = icmp_param->data.icmph.type;
	int code = icmp_param->data.icmph.code;

	if (ip_options_echo(net, &icmp_param->replyopts.opt.opt, skb))
		return;

	/* Needed by both icmpv4_global_allow and icmp_xmit_lock */
	local_bh_disable();

	/* is global icmp_msgs_per_sec exhausted ? */
	if (!icmpv4_global_allow(net, type, code, &apply_ratelimit))
		goto out_bh_enable;

	sk = icmp_xmit_lock(net);
	if (!sk)
		goto out_bh_enable;
	inet = inet_sk(sk);

	icmp_param->data.icmph.checksum = 0;

	ipcm_init(&ipc);
	inet->tos = ip_hdr(skb)->tos;
	ipc.sockc.mark = mark;
	daddr = ipc.addr = ip_hdr(skb)->saddr;
	saddr = fib_compute_spec_dst(skb);

	if (icmp_param->replyopts.opt.opt.optlen) {
		ipc.opt = &icmp_param->replyopts.opt;
		if (ipc.opt->opt.srr)
			daddr = icmp_param->replyopts.opt.opt.faddr;
	}
	memset(&fl4, 0, sizeof(fl4));
	fl4.daddr = daddr;
	fl4.saddr = saddr;
	fl4.flowi4_mark = mark;
	fl4.flowi4_uid = sock_net_uid(net, NULL);
	fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
	fl4.flowi4_proto = IPPROTO_ICMP;
	fl4.flowi4_oif = l3mdev_master_ifindex(skb->dev);
	security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4));
	rt = ip_route_output_key(net, &fl4);
	if (IS_ERR(rt))
		goto out_unlock;
	if (icmpv4_xrlim_allow(net, rt, &fl4, type, code, apply_ratelimit))
		icmp_push_reply(sk, icmp_param, &fl4, &ipc, &rt);
	ip_rt_put(rt);
out_unlock:
	icmp_xmit_unlock(sk);
out_bh_enable:
	local_bh_enable();
}

/*
 * The device used for looking up which routing table to use for sending an ICMP
 * error is preferably the source whenever it is set, which should ensure the
 * icmp error can be sent to the source host, else lookup using the routing
 * table of the destination device, else use the main routing table (index 0).
 */
static struct net_device *icmp_get_route_lookup_dev(struct sk_buff *skb)
{
	struct net_device *route_lookup_dev = NULL;

	if (skb->dev)
		route_lookup_dev = skb->dev;
	else if (skb_dst(skb))
		route_lookup_dev = skb_dst(skb)->dev;
	return route_lookup_dev;
}

static struct rtable *icmp_route_lookup(struct net *net,
					struct flowi4 *fl4,
					struct sk_buff *skb_in,
					const struct iphdr *iph,
					__be32 saddr, u8 tos, u32 mark,
					int type, int code,
					struct icmp_bxm *param)
{
	struct net_device *route_lookup_dev;
	struct dst_entry *dst, *dst2;
	struct rtable *rt, *rt2;
	struct flowi4 fl4_dec;
	int err;

	memset(fl4, 0, sizeof(*fl4));
	fl4->daddr = (param->replyopts.opt.opt.srr ?
		      param->replyopts.opt.opt.faddr : iph->saddr);
	fl4->saddr = saddr;
	fl4->flowi4_mark = mark;
	fl4->flowi4_uid = sock_net_uid(net, NULL);
	fl4->flowi4_tos = RT_TOS(tos);
	fl4->flowi4_proto = IPPROTO_ICMP;
	fl4->fl4_icmp_type = type;
	fl4->fl4_icmp_code = code;
	route_lookup_dev = icmp_get_route_lookup_dev(skb_in);
	fl4->flowi4_oif = l3mdev_master_ifindex(route_lookup_dev);

	security_skb_classify_flow(skb_in, flowi4_to_flowi_common(fl4));
	rt = ip_route_output_key_hash(net, fl4, skb_in);
	if (IS_ERR(rt))
		return rt;

	/* No need to clone since we're just using its address. */
	rt2 = rt;

	dst = xfrm_lookup(net, &rt->dst,
			  flowi4_to_flowi(fl4), NULL, 0);
	rt = dst_rtable(dst);
	if (!IS_ERR(dst)) {
		if (rt != rt2)
			return rt;
	} else if (PTR_ERR(dst) == -EPERM) {
		rt = NULL;
	} else {
		return rt;
	}
	err = xfrm_decode_session_reverse(net, skb_in, flowi4_to_flowi(&fl4_dec), AF_INET);
	if (err)
		goto relookup_failed;

	if (inet_addr_type_dev_table(net, route_lookup_dev,
				     fl4_dec.saddr) == RTN_LOCAL) {
		rt2 = __ip_route_output_key(net, &fl4_dec);
		if (IS_ERR(rt2))
			err = PTR_ERR(rt2);
	} else {
		struct flowi4 fl4_2 = {};
		unsigned long orefdst;

		fl4_2.daddr = fl4_dec.saddr;
		rt2 = ip_route_output_key(net, &fl4_2);
		if (IS_ERR(rt2)) {
			err = PTR_ERR(rt2);
			goto relookup_failed;
		}
		/* Ugh! */
		orefdst = skb_in->_skb_refdst; /* save old refdst */
		skb_dst_set(skb_in, NULL);
		err = ip_route_input(skb_in, fl4_dec.daddr, fl4_dec.saddr,
				     tos, rt2->dst.dev);

		dst_release(&rt2->dst);
		rt2 = skb_rtable(skb_in);
		skb_in->_skb_refdst = orefdst; /* restore old refdst */
	}

	if (err)
		goto relookup_failed;

	dst2 = xfrm_lookup(net, &rt2->dst, flowi4_to_flowi(&fl4_dec), NULL,
			   XFRM_LOOKUP_ICMP);
	rt2 = dst_rtable(dst2);
	if (!IS_ERR(dst2)) {
		dst_release(&rt->dst);
		memcpy(fl4, &fl4_dec, sizeof(*fl4));
		rt = rt2;
	} else if (PTR_ERR(dst2) == -EPERM) {
		if (rt)
			dst_release(&rt->dst);
		return rt2;
	} else {
		err = PTR_ERR(dst2);
		goto relookup_failed;
	}
	return rt;

relookup_failed:
	if (rt)
		return rt;
	return ERR_PTR(err);
}

/*
 *	Send an ICMP message in response to a situation
 *
 *	RFC 1122: 3.2.2	MUST send at least the IP header and 8 bytes of header.
 *		  MAY send more (we do).
 *		  MUST NOT change this header information.
 *		  MUST NOT reply to a multicast/broadcast IP address.
 *		  MUST NOT reply to a multicast/broadcast MAC address.
 *		  MUST reply to only the first fragment.
 */

void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
		 const struct ip_options *opt)
{
	struct iphdr *iph;
	int room;
	struct icmp_bxm icmp_param;
	struct rtable *rt = skb_rtable(skb_in);
	bool apply_ratelimit = false;
	struct ipcm_cookie ipc;
	struct flowi4 fl4;
	__be32 saddr;
	u8  tos;
	u32 mark;
	struct net *net;
	struct sock *sk;

	if (!rt)
		goto out;

	if (rt->dst.dev)
		net = dev_net(rt->dst.dev);
	else if (skb_in->dev)
		net = dev_net(skb_in->dev);
	else
		goto out;

	/*
	 *	Find the original header. It is expected to be valid, of course.
	 *	Check this, icmp_send is called from the most obscure devices
	 *	sometimes.
	 */
	iph = ip_hdr(skb_in);

	if ((u8 *)iph < skb_in->head ||
	    (skb_network_header(skb_in) + sizeof(*iph)) >
	    skb_tail_pointer(skb_in))
		goto out;

	/*
	 *	No replies to physical multicast/broadcast
	 */
	if (skb_in->pkt_type != PACKET_HOST)
		goto out;

	/*
	 *	Now check at the protocol level
	 */
	if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto out;

	/*
	 *	Only reply to fragment 0. We byte re-order the constant
	 *	mask for efficiency.
	 */
	if (iph->frag_off & htons(IP_OFFSET))
		goto out;

	/*
	 *	If we send an ICMP error to an ICMP error a mess would result..
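	 *	So only generate errors in response to ICMP messages that are
	 *	not themselves errors; unknown inner types are conservatively
	 *	treated as errors below.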
	 */
	if (icmp_pointers[type].error) {
		/*
		 *	We are an error, check if we are replying to an
		 *	ICMP error
		 */
		if (iph->protocol == IPPROTO_ICMP) {
			u8 _inner_type, *itp;

			itp = skb_header_pointer(skb_in,
						 skb_network_header(skb_in) +
						 (iph->ihl << 2) +
						 offsetof(struct icmphdr,
							  type) -
						 skb_in->data,
						 sizeof(_inner_type),
						 &_inner_type);
			if (!itp)
				goto out;

			/*
			 *	Assume any unknown ICMP type is an error. This
			 *	isn't specified by the RFC, but think about it..
			 */
			if (*itp > NR_ICMP_TYPES ||
			    icmp_pointers[*itp].error)
				goto out;
		}
	}

	/* Needed by both icmpv4_global_allow and icmp_xmit_lock */
	local_bh_disable();

	/* Check global sysctl_icmp_msgs_per_sec ratelimit, unless the
	 * incoming dev is loopback.  If the outgoing dev is not loopback,
	 * the peer ratelimit still applies (in icmpv4_xrlim_allow).
	 */
	if (!(skb_in->dev && (skb_in->dev->flags&IFF_LOOPBACK)) &&
	      !icmpv4_global_allow(net, type, code, &apply_ratelimit))
		goto out_bh_enable;

	sk = icmp_xmit_lock(net);
	if (!sk)
		goto out_bh_enable;

	/*
	 *	Construct source address and options.
	 */

	saddr = iph->daddr;
	if (!(rt->rt_flags & RTCF_LOCAL)) {
		struct net_device *dev = NULL;

		rcu_read_lock();
		if (rt_is_input_route(rt) &&
		    READ_ONCE(net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr))
			dev = dev_get_by_index_rcu(net, inet_iif(skb_in));

		if (dev)
			saddr = inet_select_addr(dev, iph->saddr,
						 RT_SCOPE_LINK);
		else
			saddr = 0;
		rcu_read_unlock();
	}

	tos = icmp_pointers[type].error ? (RT_TOS(iph->tos) |
					   IPTOS_PREC_INTERNETCONTROL) :
					   iph->tos;
	mark = IP4_REPLY_MARK(net, skb_in->mark);

	if (__ip_options_echo(net, &icmp_param.replyopts.opt.opt, skb_in, opt))
		goto out_unlock;


	/*
	 *	Prepare data for ICMP header.
	 */

	icmp_param.data.icmph.type	 = type;
	icmp_param.data.icmph.code	 = code;
	icmp_param.data.icmph.un.gateway = info;
	icmp_param.data.icmph.checksum	 = 0;
	icmp_param.skb	  = skb_in;
	icmp_param.offset = skb_network_offset(skb_in);
	inet_sk(sk)->tos = tos;
	ipcm_init(&ipc);
	ipc.addr = iph->saddr;
	ipc.opt = &icmp_param.replyopts.opt;
	ipc.sockc.mark = mark;

	rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos, mark,
			       type, code, &icmp_param);
	if (IS_ERR(rt))
		goto out_unlock;

	/* peer icmp_ratelimit */
	if (!icmpv4_xrlim_allow(net, rt, &fl4, type, code, apply_ratelimit))
		goto ende;

	/* RFC says return as much as we can without exceeding 576 bytes. */

	room = dst_mtu(&rt->dst);
	if (room > 576)
		room = 576;
	room -= sizeof(struct iphdr) + icmp_param.replyopts.opt.opt.optlen;
	room -= sizeof(struct icmphdr);
	/* Guard against tiny mtu. We need to include at least one
	 * IP network header for this message to make any sense.
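	 * (room already has the outgoing IP header, IP options and ICMP
	 * header sizes subtracted at this point.)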
	 */
	if (room <= (int)sizeof(struct iphdr))
		goto ende;

	icmp_param.data_len = skb_in->len - icmp_param.offset;
	if (icmp_param.data_len > room)
		icmp_param.data_len = room;
	icmp_param.head_len = sizeof(struct icmphdr);

	/* if we don't have a source address at this point, fall back to the
	 * dummy address instead of sending out a packet with a source address
	 * of 0.0.0.0
	 */
	if (!fl4.saddr)
		fl4.saddr = htonl(INADDR_DUMMY);

	trace_icmp_send(skb_in, type, code);

	icmp_push_reply(sk, &icmp_param, &fl4, &ipc, &rt);
ende:
	ip_rt_put(rt);
out_unlock:
	icmp_xmit_unlock(sk);
out_bh_enable:
	local_bh_enable();
out:;
}
EXPORT_SYMBOL(__icmp_send);

#if IS_ENABLED(CONFIG_NF_NAT)
#include <net/netfilter/nf_conntrack.h>
void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info)
{
	struct sk_buff *cloned_skb = NULL;
	struct ip_options opts = { 0 };
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;
	__be32 orig_ip;

	ct = nf_ct_get(skb_in, &ctinfo);
	if (!ct || !(ct->status & IPS_SRC_NAT)) {
		__icmp_send(skb_in, type, code, info, &opts);
		return;
	}

	if (skb_shared(skb_in))
		skb_in = cloned_skb = skb_clone(skb_in, GFP_ATOMIC);

	if (unlikely(!skb_in || skb_network_header(skb_in) < skb_in->head ||
	    (skb_network_header(skb_in) + sizeof(struct iphdr)) >
	    skb_tail_pointer(skb_in) || skb_ensure_writable(skb_in,
	    skb_network_offset(skb_in) + sizeof(struct iphdr))))
		goto out;

	orig_ip = ip_hdr(skb_in)->saddr;
	ip_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.ip;
	__icmp_send(skb_in, type, code, info, &opts);
	ip_hdr(skb_in)->saddr = orig_ip;
out:
	consume_skb(cloned_skb);
}
EXPORT_SYMBOL(icmp_ndo_send);
#endif

static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	const struct net_protocol *ipprot;
	int protocol = iph->protocol;

	/* Check in full IP header plus 8 bytes of protocol to
	 * avoid additional coding at protocol handlers.
	 */
	if (!pskb_may_pull(skb, iph->ihl * 4 + 8)) {
		__ICMP_INC_STATS(dev_net(skb->dev), ICMP_MIB_INERRORS);
		return;
	}

	raw_icmp_error(skb, protocol, info);

	ipprot = rcu_dereference(inet_protos[protocol]);
	if (ipprot && ipprot->err_handler)
		ipprot->err_handler(skb, info);
}

static bool icmp_tag_validation(int proto)
{
	bool ok;

	rcu_read_lock();
	ok = rcu_dereference(inet_protos[proto])->icmp_strict_tag_validation;
	rcu_read_unlock();
	return ok;
}

/*
 *	Handle ICMP_DEST_UNREACH, ICMP_TIME_EXCEEDED, ICMP_QUENCH, and
 *	ICMP_PARAMETERPROB.
 */

static enum skb_drop_reason icmp_unreach(struct sk_buff *skb)
{
	enum skb_drop_reason reason = SKB_NOT_DROPPED_YET;
	const struct iphdr *iph;
	struct icmphdr *icmph;
	struct net *net;
	u32 info = 0;

	net = dev_net(skb_dst(skb)->dev);

	/*
	 *	Incomplete header ?
	 * 	Only checks for the IP header, there should be an
	 * 	additional check for longer headers in upper levels.
	 */

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto out_err;

	icmph = icmp_hdr(skb);
	iph   = (const struct iphdr *)skb->data;

	if (iph->ihl < 5) { /* Mangled header, drop. */
		reason = SKB_DROP_REASON_IP_INHDR;
		goto out_err;
	}

	switch (icmph->type) {
	case ICMP_DEST_UNREACH:
		switch (icmph->code & 15) {
		case ICMP_NET_UNREACH:
		case ICMP_HOST_UNREACH:
		case ICMP_PROT_UNREACH:
		case ICMP_PORT_UNREACH:
			break;
		case ICMP_FRAG_NEEDED:
			/* for documentation of the ip_no_pmtu_disc
			 * values please see
			 * Documentation/networking/ip-sysctl.rst
			 */
			switch (READ_ONCE(net->ipv4.sysctl_ip_no_pmtu_disc)) {
			default:
				net_dbg_ratelimited("%pI4: fragmentation needed and DF set\n",
						    &iph->daddr);
				break;
			case 2:
				goto out;
			case 3:
				if (!icmp_tag_validation(iph->protocol))
					goto out;
				fallthrough;
			case 0:
				info = ntohs(icmph->un.frag.mtu);
			}
			break;
		case ICMP_SR_FAILED:
			net_dbg_ratelimited("%pI4: Source Route Failed\n",
					    &iph->daddr);
			break;
		default:
			break;
		}
		if (icmph->code > NR_ICMP_UNREACH)
			goto out;
		break;
	case ICMP_PARAMETERPROB:
		info = ntohl(icmph->un.gateway) >> 24;
		break;
	case ICMP_TIME_EXCEEDED:
		__ICMP_INC_STATS(net, ICMP_MIB_INTIMEEXCDS);
		if (icmph->code == ICMP_EXC_FRAGTIME)
			goto out;
		break;
	}

	/*
	 *	Throw it at our lower layers
	 *
	 *	RFC 1122: 3.2.2 MUST extract the protocol ID from the passed
	 *		  header.
	 *	RFC 1122: 3.2.2.1 MUST pass ICMP unreach messages to the
	 *		  transport layer.
	 *	RFC 1122: 3.2.2.2 MUST pass ICMP time expired messages to
	 *		  transport layer.
	 */

	/*
	 *	Check the other end isn't violating RFC 1122. Some routers send
	 *	bogus responses to broadcast frames. If you see this message
	 *	first check your netmask matches at both ends, if it does then
	 *	get the other vendor to fix their kit.
	 */

	if (!READ_ONCE(net->ipv4.sysctl_icmp_ignore_bogus_error_responses) &&
	    inet_addr_type_dev_table(net, skb->dev, iph->daddr) == RTN_BROADCAST) {
		net_warn_ratelimited("%pI4 sent an invalid ICMP type %u, code %u error to a broadcast: %pI4 on %s\n",
				     &ip_hdr(skb)->saddr,
				     icmph->type, icmph->code,
				     &iph->daddr, skb->dev->name);
		goto out;
	}

	icmp_socket_deliver(skb, info);

out:
	return reason;
out_err:
	__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
	return reason ?: SKB_DROP_REASON_NOT_SPECIFIED;
}


/*
 *	Handle ICMP_REDIRECT.
 */

static enum skb_drop_reason icmp_redirect(struct sk_buff *skb)
{
	if (skb->len < sizeof(struct iphdr)) {
		__ICMP_INC_STATS(dev_net(skb->dev), ICMP_MIB_INERRORS);
		return SKB_DROP_REASON_PKT_TOO_SMALL;
	}

	if (!pskb_may_pull(skb, sizeof(struct iphdr))) {
		/* there ought to be a stat */
		return SKB_DROP_REASON_NOMEM;
	}

	icmp_socket_deliver(skb, ntohl(icmp_hdr(skb)->un.gateway));
	return SKB_NOT_DROPPED_YET;
}

/*
 *	Handle ICMP_ECHO ("ping") and ICMP_EXT_ECHO ("PROBE") requests.
 *
 *	RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo
 *		  requests.
 *	RFC 1122: 3.2.2.6 Data received in the ICMP_ECHO request MUST be
 *		  included in the reply.
 *	RFC 1812: 4.3.3.6 SHOULD have a config option for silently ignoring
 *		  echo requests, MUST have default=NOT.
 *	RFC 8335: 8 MUST have a config option to enable/disable ICMP
 *		  Extended Echo Functionality, MUST be disabled by default
 *	See also WRT handling of options once they are done and working.
 */

static enum skb_drop_reason icmp_echo(struct sk_buff *skb)
{
	struct icmp_bxm icmp_param;
	struct net *net;

	net = dev_net(skb_dst(skb)->dev);
	/* should there be an ICMP stat for ignored echos? */
	if (READ_ONCE(net->ipv4.sysctl_icmp_echo_ignore_all))
		return SKB_NOT_DROPPED_YET;

	icmp_param.data.icmph	   = *icmp_hdr(skb);
	icmp_param.skb		   = skb;
	icmp_param.offset	   = 0;
	icmp_param.data_len	   = skb->len;
	icmp_param.head_len	   = sizeof(struct icmphdr);

	if (icmp_param.data.icmph.type == ICMP_ECHO)
		icmp_param.data.icmph.type = ICMP_ECHOREPLY;
	else if (!icmp_build_probe(skb, &icmp_param.data.icmph))
		return SKB_NOT_DROPPED_YET;

	icmp_reply(&icmp_param, skb);
	return SKB_NOT_DROPPED_YET;
}

/*	Helper for icmp_echo and icmpv6_echo_reply.
 *	Searches for net_device that matches PROBE interface identifier
 *	and builds PROBE reply message in icmphdr.
 *
 *	Returns false if PROBE responses are disabled via sysctl
 */

bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr)
{
	struct icmp_ext_hdr *ext_hdr, _ext_hdr;
	struct icmp_ext_echo_iio *iio, _iio;
	struct net *net = dev_net(skb->dev);
	struct inet6_dev *in6_dev;
	struct in_device *in_dev;
	struct net_device *dev;
	char buff[IFNAMSIZ];
	u16 ident_len;
	u8 status;

	if (!READ_ONCE(net->ipv4.sysctl_icmp_echo_enable_probe))
		return false;

	/* We currently only support probing interfaces on the proxy node
	 * Check to ensure L-bit is set
	 */
	if (!(ntohs(icmphdr->un.echo.sequence) & 1))
		return false;
	/* Clear status bits in reply message */
	icmphdr->un.echo.sequence &= htons(0xFF00);
	if (icmphdr->type == ICMP_EXT_ECHO)
		icmphdr->type = ICMP_EXT_ECHOREPLY;
	else
		icmphdr->type = ICMPV6_EXT_ECHO_REPLY;
	ext_hdr = skb_header_pointer(skb, 0, sizeof(_ext_hdr), &_ext_hdr);
	/* Size of iio is class_type dependent.
	 * Only check header here and assign length based on ctype in the switch statement
	 */
	iio = skb_header_pointer(skb, sizeof(_ext_hdr), sizeof(iio->extobj_hdr), &_iio);
	if (!ext_hdr || !iio)
		goto send_mal_query;
	if (ntohs(iio->extobj_hdr.length) <= sizeof(iio->extobj_hdr) ||
	    ntohs(iio->extobj_hdr.length) > sizeof(_iio))
		goto send_mal_query;
	ident_len = ntohs(iio->extobj_hdr.length) - sizeof(iio->extobj_hdr);
	iio = skb_header_pointer(skb, sizeof(_ext_hdr),
				 sizeof(iio->extobj_hdr) + ident_len, &_iio);
	if (!iio)
		goto send_mal_query;

	status = 0;
	dev = NULL;
	switch (iio->extobj_hdr.class_type) {
	case ICMP_EXT_ECHO_CTYPE_NAME:
		if (ident_len >= IFNAMSIZ)
			goto send_mal_query;
		memset(buff, 0, sizeof(buff));
		memcpy(buff, &iio->ident.name, ident_len);
		dev = dev_get_by_name(net, buff);
		break;
	case ICMP_EXT_ECHO_CTYPE_INDEX:
		if (ident_len != sizeof(iio->ident.ifindex))
			goto send_mal_query;
		dev = dev_get_by_index(net, ntohl(iio->ident.ifindex));
		break;
	case ICMP_EXT_ECHO_CTYPE_ADDR:
		if (ident_len < sizeof(iio->ident.addr.ctype3_hdr) ||
		    ident_len != sizeof(iio->ident.addr.ctype3_hdr) +
				 iio->ident.addr.ctype3_hdr.addrlen)
			goto send_mal_query;
		switch (ntohs(iio->ident.addr.ctype3_hdr.afi)) {
		case ICMP_AFI_IP:
			if (iio->ident.addr.ctype3_hdr.addrlen != sizeof(struct in_addr))
				goto send_mal_query;
			dev = ip_dev_find(net, iio->ident.addr.ip_addr.ipv4_addr);
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case ICMP_AFI_IP6:
			if (iio->ident.addr.ctype3_hdr.addrlen != sizeof(struct in6_addr))
				goto send_mal_query;
			dev = ipv6_stub->ipv6_dev_find(net, &iio->ident.addr.ip_addr.ipv6_addr, dev);
			dev_hold(dev);
			break;
#endif
		default:
			goto send_mal_query;
		}
		break;
	default:
		goto send_mal_query;
	}
	if (!dev) {
		icmphdr->code = ICMP_EXT_CODE_NO_IF;
		return true;
	}
	/* Fill bits in reply message */
	if (dev->flags & IFF_UP)
		status |= ICMP_EXT_ECHOREPLY_ACTIVE;

	in_dev = __in_dev_get_rcu(dev);
	if (in_dev && rcu_access_pointer(in_dev->ifa_list))
		status |= ICMP_EXT_ECHOREPLY_IPV4;

	in6_dev = __in6_dev_get(dev);
	if (in6_dev && !list_empty(&in6_dev->addr_list))
		status |= ICMP_EXT_ECHOREPLY_IPV6;

	dev_put(dev);
	icmphdr->un.echo.sequence |= htons(status);
	return true;
send_mal_query:
	icmphdr->code = ICMP_EXT_CODE_MAL_QUERY;
	return true;
}
EXPORT_SYMBOL_GPL(icmp_build_probe);

/*
 *	Handle ICMP Timestamp requests.
 *	RFC 1122: 3.2.2.8 MAY implement ICMP timestamp requests.
 *		  SHOULD be in the kernel for minimum random latency.
 *		  MUST be accurate to a few minutes.
 *		  MUST be updated at least at 15Hz.
 */
static enum skb_drop_reason icmp_timestamp(struct sk_buff *skb)
{
	struct icmp_bxm icmp_param;
	/*
	 *	Too short.
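	 *	A timestamp request must carry at least the 4-byte originate
	 *	timestamp after the ICMP header (already pulled in icmp_rcv).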
1162 */ 1163 if (skb->len < 4) 1164 goto out_err; 1165 1166 /* 1167 * Fill in the current time as ms since midnight UT: 1168 */ 1169 icmp_param.data.times[1] = inet_current_timestamp(); 1170 icmp_param.data.times[2] = icmp_param.data.times[1]; 1171 1172 BUG_ON(skb_copy_bits(skb, 0, &icmp_param.data.times[0], 4)); 1173 1174 icmp_param.data.icmph = *icmp_hdr(skb); 1175 icmp_param.data.icmph.type = ICMP_TIMESTAMPREPLY; 1176 icmp_param.data.icmph.code = 0; 1177 icmp_param.skb = skb; 1178 icmp_param.offset = 0; 1179 icmp_param.data_len = 0; 1180 icmp_param.head_len = sizeof(struct icmphdr) + 12; 1181 icmp_reply(&icmp_param, skb); 1182 return SKB_NOT_DROPPED_YET; 1183 1184 out_err: 1185 __ICMP_INC_STATS(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS); 1186 return SKB_DROP_REASON_PKT_TOO_SMALL; 1187 } 1188 1189 static enum skb_drop_reason icmp_discard(struct sk_buff *skb) 1190 { 1191 /* pretend it was a success */ 1192 return SKB_NOT_DROPPED_YET; 1193 } 1194 1195 /* 1196 * Deal with incoming ICMP packets. 1197 */ 1198 int icmp_rcv(struct sk_buff *skb) 1199 { 1200 enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED; 1201 struct rtable *rt = skb_rtable(skb); 1202 struct net *net = dev_net(rt->dst.dev); 1203 struct icmphdr *icmph; 1204 1205 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { 1206 struct sec_path *sp = skb_sec_path(skb); 1207 int nh; 1208 1209 if (!(sp && sp->xvec[sp->len - 1]->props.flags & 1210 XFRM_STATE_ICMP)) { 1211 reason = SKB_DROP_REASON_XFRM_POLICY; 1212 goto drop; 1213 } 1214 1215 if (!pskb_may_pull(skb, sizeof(*icmph) + sizeof(struct iphdr))) 1216 goto drop; 1217 1218 nh = skb_network_offset(skb); 1219 skb_set_network_header(skb, sizeof(*icmph)); 1220 1221 if (!xfrm4_policy_check_reverse(NULL, XFRM_POLICY_IN, 1222 skb)) { 1223 reason = SKB_DROP_REASON_XFRM_POLICY; 1224 goto drop; 1225 } 1226 1227 skb_set_network_header(skb, nh); 1228 } 1229 1230 __ICMP_INC_STATS(net, ICMP_MIB_INMSGS); 1231 1232 if (skb_checksum_simple_validate(skb)) 1233 goto csum_error; 1234 1235 if (!pskb_pull(skb, sizeof(*icmph))) 1236 goto error; 1237 1238 icmph = icmp_hdr(skb); 1239 1240 ICMPMSGIN_INC_STATS(net, icmph->type); 1241 1242 /* Check for ICMP Extended Echo (PROBE) messages */ 1243 if (icmph->type == ICMP_EXT_ECHO) { 1244 /* We can't use icmp_pointers[].handler() because it is an array of 1245 * size NR_ICMP_TYPES + 1 (19 elements) and PROBE has code 42. 1246 */ 1247 reason = icmp_echo(skb); 1248 goto reason_check; 1249 } 1250 1251 if (icmph->type == ICMP_EXT_ECHOREPLY) { 1252 reason = ping_rcv(skb); 1253 goto reason_check; 1254 } 1255 1256 /* 1257 * 18 is the highest 'known' ICMP type. Anything else is a mystery 1258 * 1259 * RFC 1122: 3.2.2 Unknown ICMP messages types MUST be silently 1260 * discarded. 1261 */ 1262 if (icmph->type > NR_ICMP_TYPES) { 1263 reason = SKB_DROP_REASON_UNHANDLED_PROTO; 1264 goto error; 1265 } 1266 1267 /* 1268 * Parse the ICMP message 1269 */ 1270 1271 if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) { 1272 /* 1273 * RFC 1122: 3.2.2.6 An ICMP_ECHO to broadcast MAY be 1274 * silently ignored (we let user decide with a sysctl). 1275 * RFC 1122: 3.2.2.8 An ICMP_TIMESTAMP MAY be silently 1276 * discarded if to broadcast/multicast. 
1277 */ 1278 if ((icmph->type == ICMP_ECHO || 1279 icmph->type == ICMP_TIMESTAMP) && 1280 READ_ONCE(net->ipv4.sysctl_icmp_echo_ignore_broadcasts)) { 1281 reason = SKB_DROP_REASON_INVALID_PROTO; 1282 goto error; 1283 } 1284 if (icmph->type != ICMP_ECHO && 1285 icmph->type != ICMP_TIMESTAMP && 1286 icmph->type != ICMP_ADDRESS && 1287 icmph->type != ICMP_ADDRESSREPLY) { 1288 reason = SKB_DROP_REASON_INVALID_PROTO; 1289 goto error; 1290 } 1291 } 1292 1293 reason = icmp_pointers[icmph->type].handler(skb); 1294 reason_check: 1295 if (!reason) { 1296 consume_skb(skb); 1297 return NET_RX_SUCCESS; 1298 } 1299 1300 drop: 1301 kfree_skb_reason(skb, reason); 1302 return NET_RX_DROP; 1303 csum_error: 1304 reason = SKB_DROP_REASON_ICMP_CSUM; 1305 __ICMP_INC_STATS(net, ICMP_MIB_CSUMERRORS); 1306 error: 1307 __ICMP_INC_STATS(net, ICMP_MIB_INERRORS); 1308 goto drop; 1309 } 1310 1311 static bool ip_icmp_error_rfc4884_validate(const struct sk_buff *skb, int off) 1312 { 1313 struct icmp_extobj_hdr *objh, _objh; 1314 struct icmp_ext_hdr *exth, _exth; 1315 u16 olen; 1316 1317 exth = skb_header_pointer(skb, off, sizeof(_exth), &_exth); 1318 if (!exth) 1319 return false; 1320 if (exth->version != 2) 1321 return true; 1322 1323 if (exth->checksum && 1324 csum_fold(skb_checksum(skb, off, skb->len - off, 0))) 1325 return false; 1326 1327 off += sizeof(_exth); 1328 while (off < skb->len) { 1329 objh = skb_header_pointer(skb, off, sizeof(_objh), &_objh); 1330 if (!objh) 1331 return false; 1332 1333 olen = ntohs(objh->length); 1334 if (olen < sizeof(_objh)) 1335 return false; 1336 1337 off += olen; 1338 if (off > skb->len) 1339 return false; 1340 } 1341 1342 return true; 1343 } 1344 1345 void ip_icmp_error_rfc4884(const struct sk_buff *skb, 1346 struct sock_ee_data_rfc4884 *out, 1347 int thlen, int off) 1348 { 1349 int hlen; 1350 1351 /* original datagram headers: end of icmph to payload (skb->data) */ 1352 hlen = -skb_transport_offset(skb) - thlen; 1353 1354 /* per rfc 4884: minimal datagram length of 128 bytes */ 1355 if (off < 128 || off < hlen) 1356 return; 1357 1358 /* kernel has stripped headers: return payload offset in bytes */ 1359 off -= hlen; 1360 if (off + sizeof(struct icmp_ext_hdr) > skb->len) 1361 return; 1362 1363 out->len = off; 1364 1365 if (!ip_icmp_error_rfc4884_validate(skb, off)) 1366 out->flags |= SO_EE_RFC4884_FLAG_INVALID; 1367 } 1368 EXPORT_SYMBOL_GPL(ip_icmp_error_rfc4884); 1369 1370 int icmp_err(struct sk_buff *skb, u32 info) 1371 { 1372 struct iphdr *iph = (struct iphdr *)skb->data; 1373 int offset = iph->ihl<<2; 1374 struct icmphdr *icmph = (struct icmphdr *)(skb->data + offset); 1375 int type = icmp_hdr(skb)->type; 1376 int code = icmp_hdr(skb)->code; 1377 struct net *net = dev_net(skb->dev); 1378 1379 /* 1380 * Use ping_err to handle all icmp errors except those 1381 * triggered by ICMP_ECHOREPLY which sent from kernel. 1382 */ 1383 if (icmph->type != ICMP_ECHOREPLY) { 1384 ping_err(skb, offset, info); 1385 return 0; 1386 } 1387 1388 if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) 1389 ipv4_update_pmtu(skb, net, info, 0, IPPROTO_ICMP); 1390 else if (type == ICMP_REDIRECT) 1391 ipv4_redirect(skb, net, 0, IPPROTO_ICMP); 1392 1393 return 0; 1394 } 1395 1396 /* 1397 * This table is the definition of how we handle ICMP. 
 */
static const struct icmp_control icmp_pointers[NR_ICMP_TYPES + 1] = {
	[ICMP_ECHOREPLY] = {
		.handler = ping_rcv,
	},
	[1] = {
		.handler = icmp_discard,
		.error = 1,
	},
	[2] = {
		.handler = icmp_discard,
		.error = 1,
	},
	[ICMP_DEST_UNREACH] = {
		.handler = icmp_unreach,
		.error = 1,
	},
	[ICMP_SOURCE_QUENCH] = {
		.handler = icmp_unreach,
		.error = 1,
	},
	[ICMP_REDIRECT] = {
		.handler = icmp_redirect,
		.error = 1,
	},
	[6] = {
		.handler = icmp_discard,
		.error = 1,
	},
	[7] = {
		.handler = icmp_discard,
		.error = 1,
	},
	[ICMP_ECHO] = {
		.handler = icmp_echo,
	},
	[9] = {
		.handler = icmp_discard,
		.error = 1,
	},
	[10] = {
		.handler = icmp_discard,
		.error = 1,
	},
	[ICMP_TIME_EXCEEDED] = {
		.handler = icmp_unreach,
		.error = 1,
	},
	[ICMP_PARAMETERPROB] = {
		.handler = icmp_unreach,
		.error = 1,
	},
	[ICMP_TIMESTAMP] = {
		.handler = icmp_timestamp,
	},
	[ICMP_TIMESTAMPREPLY] = {
		.handler = icmp_discard,
	},
	[ICMP_INFO_REQUEST] = {
		.handler = icmp_discard,
	},
	[ICMP_INFO_REPLY] = {
		.handler = icmp_discard,
	},
	[ICMP_ADDRESS] = {
		.handler = icmp_discard,
	},
	[ICMP_ADDRESSREPLY] = {
		.handler = icmp_discard,
	},
};

static int __net_init icmp_sk_init(struct net *net)
{
	/* Control parameters for ECHO replies. */
	net->ipv4.sysctl_icmp_echo_ignore_all = 0;
	net->ipv4.sysctl_icmp_echo_enable_probe = 0;
	net->ipv4.sysctl_icmp_echo_ignore_broadcasts = 1;

	/* Control parameter - ignore bogus broadcast responses? */
	net->ipv4.sysctl_icmp_ignore_bogus_error_responses = 1;

	/*
	 *	Configurable global rate limit.
	 *
	 *	ratelimit defines tokens/packet consumed for dst->rate_token
	 *	bucket. ratemask defines which icmp types are ratelimited by
	 *	setting its bit position.
	 *
	 *	default:
	 *	dest unreachable (3), source quench (4),
	 *	time exceeded (11), parameter problem (12)
	 */

	net->ipv4.sysctl_icmp_ratelimit = 1 * HZ;
	net->ipv4.sysctl_icmp_ratemask = 0x1818;
	net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr = 0;

	return 0;
}

static struct pernet_operations __net_initdata icmp_sk_ops = {
	.init = icmp_sk_init,
};

int __init icmp_init(void)
{
	int err, i;

	for_each_possible_cpu(i) {
		struct sock *sk;

		err = inet_ctl_sock_create(&sk, PF_INET,
					   SOCK_RAW, IPPROTO_ICMP, &init_net);
		if (err < 0)
			return err;

		per_cpu(ipv4_icmp_sk, i) = sk;

		/* Enough space for 2 64K ICMP packets, including
		 * sk_buff/skb_shared_info struct overhead.
		 */
		sk->sk_sndbuf = 2 * SKB_TRUESIZE(64 * 1024);

		/*
		 * Speedup sock_wfree()
		 */
		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
		inet_sk(sk)->pmtudisc = IP_PMTUDISC_DONT;
	}
	return register_pernet_subsys(&icmp_sk_ops);
}