// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPv4/IPv6 software fast path for the netfilter flowtable: packets whose
 * connection has been offloaded are matched, NATed, TTL-decremented and
 * transmitted here, bypassing the regular forwarding path.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/neighbour.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack_acct.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>

/*
 * Kick TCP flows that are shutting down (FIN/RST seen) out of the fast
 * path: tear the offload down and return -1 so the caller hands the
 * packet back to the normal stack. Non-TCP protocols always pass.
 */
static int nf_flow_state_check(struct flow_offload *flow, int proto,
			       struct sk_buff *skb, unsigned int thoff)
{
	struct tcphdr *tcph;

	if (proto != IPPROTO_TCP)
		return 0;

	tcph = (void *)(skb_network_header(skb) + thoff);
	if (unlikely(tcph->fin || tcph->rst)) {
		flow_offload_teardown(flow);
		return -1;
	}

	return 0;
}

/* Incrementally fix the TCP checksum after an IPv4 address rewrite. */
static void nf_flow_nat_ip_tcp(struct sk_buff *skb, unsigned int thoff,
			       __be32 addr, __be32 new_addr)
{
	struct tcphdr *tcph;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, true);
}

/*
 * Incrementally fix the UDP checksum after an IPv4 address rewrite.
 * A zero UDP checksum means "no checksum" on IPv4, so it is left alone
 * unless the hardware will finalize it (CHECKSUM_PARTIAL); a result of
 * zero must be folded to CSUM_MANGLED_0 to stay distinguishable.
 */
static void nf_flow_nat_ip_udp(struct sk_buff *skb, unsigned int thoff,
			       __be32 addr, __be32 new_addr)
{
	struct udphdr *udph;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace4(&udph->check, skb, addr,
					 new_addr, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}
}

/*
 * Dispatch the layer-4 checksum fixup for an IPv4 address rewrite.
 * Other protocols (e.g. GRE, which has no pseudo-header checksum over
 * the addresses) need no adjustment.
 */
static void nf_flow_nat_ip_l4proto(struct sk_buff *skb, struct iphdr *iph,
				   unsigned int thoff, __be32 addr,
				   __be32 new_addr)
{
	switch (iph->protocol) {
	case IPPROTO_TCP:
		nf_flow_nat_ip_tcp(skb, thoff, addr, new_addr);
		break;
	case IPPROTO_UDP:
		nf_flow_nat_ip_udp(skb, thoff, addr, new_addr);
		break;
	}
}

/*
 * Apply source NAT to the IPv4 header. In the original direction the
 * source address becomes the reply tuple's destination; in the reply
 * direction the destination address becomes the original tuple's
 * source. Both the IP header checksum and the L4 checksum are updated
 * incrementally.
 */
static void nf_flow_snat_ip(const struct flow_offload *flow,
			    struct sk_buff *skb, struct iphdr *iph,
			    unsigned int thoff, enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	}
	csum_replace4(&iph->check, addr, new_addr);

	nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}

/*
 * Apply destination NAT to the IPv4 header; mirror image of
 * nf_flow_snat_ip() (daddr <-> saddr, reply src <-> original dst).
 */
static void nf_flow_dnat_ip(const struct flow_offload *flow,
			    struct sk_buff *skb, struct iphdr *iph,
			    unsigned int thoff, enum flow_offload_tuple_dir dir)
{
	__be32 addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = iph->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
		iph->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = iph->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
		iph->saddr = new_addr;
		break;
	}
	csum_replace4(&iph->check, addr, new_addr);

	nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
}

/*
 * Perform any SNAT/DNAT recorded on the flow: port mangling first (the
 * shared nf_flow_{s,d}nat_port helpers), then the address rewrite.
 * A flow may have both flags set.
 */
static void nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
			   unsigned int thoff, enum flow_offload_tuple_dir dir,
			   struct iphdr *iph)
{
	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
		nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir);
		nf_flow_snat_ip(flow, skb, iph, thoff, dir);
	}
	if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
		nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir);
		nf_flow_dnat_ip(flow, skb, iph, thoff, dir);
	}
}

/* True if the IPv4 header carries options (ihl * 4 > 20 bytes). */
static bool ip_has_options(unsigned int thoff)
{
	return thoff != sizeof(struct iphdr);
}

/*
 * Record the packet's encapsulation layers (hwaccel VLAN tag, then an
 * in-payload 802.1Q tag or a PPPoE session header) into the lookup
 * tuple, so that only flows created for the same encapsulation match.
 */
static void nf_flow_tuple_encap(struct sk_buff *skb,
				struct flow_offload_tuple *tuple)
{
	struct vlan_ethhdr *veth;
	struct pppoe_hdr *phdr;
	int i = 0;

	if (skb_vlan_tag_present(skb)) {
		tuple->encap[i].id = skb_vlan_tag_get(skb);
		tuple->encap[i].proto = skb->vlan_proto;
		i++;
	}
	switch (skb->protocol) {
	case htons(ETH_P_8021Q):
		veth = (struct vlan_ethhdr *)skb_mac_header(skb);
		tuple->encap[i].id = ntohs(veth->h_vlan_TCI);
		tuple->encap[i].proto = skb->protocol;
		break;
	case htons(ETH_P_PPP_SES):
		phdr = (struct pppoe_hdr *)skb_mac_header(skb);
		tuple->encap[i].id = ntohs(phdr->sid);
		tuple->encap[i].proto = skb->protocol;
		break;
	}
}

/*
 * Parse an IPv4 packet into a flowtable lookup tuple.
 *
 * @skb:     packet under inspection (may be non-linear; pulled as needed)
 * @dev:     ingress device, recorded as tuple->iifidx
 * @tuple:   output lookup key
 * @hdrsize: output, L4 header size the caller must be able to write
 * @offset:  bytes of encapsulation (VLAN/PPPoE) preceding the IP header
 *
 * Returns 0 on success, -1 if the packet cannot take the fast path
 * (truncated, fragmented, has IP options, unsupported L4 protocol, or
 * TTL about to expire).
 */
static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
			    struct flow_offload_tuple *tuple, u32 *hdrsize,
			    u32 offset)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;
	u8 ipproto;

	if (!pskb_may_pull(skb, sizeof(*iph) + offset))
		return -1;

	iph = (struct iphdr *)(skb_network_header(skb) + offset);
	thoff = (iph->ihl * 4);

	/* Fragments and IP options are not handled by the fast path. */
	if (ip_is_fragment(iph) ||
	    unlikely(ip_has_options(thoff)))
		return -1;

	thoff += offset;

	ipproto = iph->protocol;
	switch (ipproto) {
	case IPPROTO_TCP:
		*hdrsize = sizeof(struct tcphdr);
		break;
	case IPPROTO_UDP:
		*hdrsize = sizeof(struct udphdr);
		break;
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE:
		*hdrsize = sizeof(struct gre_base_hdr);
		break;
#endif
	default:
		return -1;
	}

	/* TTL would hit zero after our decrement; let the stack send ICMP. */
	if (iph->ttl <= 1)
		return -1;

	if (!pskb_may_pull(skb, thoff + *hdrsize))
		return -1;

	switch (ipproto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
		tuple->src_port = ports->source;
		tuple->dst_port = ports->dest;
		break;
	case IPPROTO_GRE: {
		struct gre_base_hdr *greh;

		greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff);
		/* Only GRE version 0 flows are offloaded. */
		if ((greh->flags & GRE_VERSION) != GRE_VERSION_0)
			return -1;
		break;
	}
	}

	/* pskb_may_pull() may have reallocated; re-derive the header pointer. */
	iph = (struct iphdr *)(skb_network_header(skb) + offset);

	tuple->src_v4.s_addr = iph->saddr;
	tuple->dst_v4.s_addr = iph->daddr;
	tuple->l3proto = AF_INET;
	tuple->l4proto = ipproto;
	tuple->iifidx = dev->ifindex;
	nf_flow_tuple_encap(skb, tuple);

	return 0;
}

/* Based on ip_exceeds_mtu(). */
static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
{
	if (skb->len <= mtu)
		return false;

	/* GSO packets fit if every resulting segment fits. */
	if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
		return false;

	return true;
}

/*
 * Revalidate the cached route. Only the NEIGH and XFRM transmit paths
 * keep a dst in the tuple; DIRECT transmission has none to check.
 */
static inline bool nf_flow_dst_check(struct flow_offload_tuple *tuple)
{
	if (tuple->xmit_type != FLOW_OFFLOAD_XMIT_NEIGH &&
	    tuple->xmit_type != FLOW_OFFLOAD_XMIT_XFRM)
		return true;

	return dst_check(tuple->dst_cache, tuple->dst_cookie);
}

/*
 * Hand the packet to the IPsec output path and steal it from the hook.
 * The skb is orphaned first so socket accounting does not follow it.
 */
static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb,
				      const struct nf_hook_state *state,
				      struct dst_entry *dst)
{
	skb_orphan(skb);
	skb_dst_set_noref(skb, dst);
	dst_output(state->net, state->sk, skb);
	return NF_STOLEN;
}

/*
 * Check whether an encapsulated packet (VLAN or PPPoE) carries @proto
 * inside, and if so add the encapsulation header length to *offset so
 * the caller can find the inner IP header.
 */
static bool nf_flow_skb_encap_protocol(const struct sk_buff *skb, __be16 proto,
				       u32 *offset)
{
	struct vlan_ethhdr *veth;

	switch (skb->protocol) {
	case htons(ETH_P_8021Q):
		veth = (struct vlan_ethhdr *)skb_mac_header(skb);
		if (veth->h_vlan_encapsulated_proto == proto) {
			*offset += VLAN_HLEN;
			return true;
		}
		break;
	case htons(ETH_P_PPP_SES):
		if (nf_flow_pppoe_proto(skb) == proto) {
			*offset += PPPOE_SES_HLEN;
			return true;
		}
		break;
	}

	return false;
}

/*
 * Strip the encapsulation layers recorded in the matched tuple
 * (hwaccel VLAN tag, in-payload VLAN header, or PPPoE session header)
 * so the packet can be forwarded decapsulated.
 */
static void nf_flow_encap_pop(struct sk_buff *skb,
			      struct flow_offload_tuple_rhash *tuplehash)
{
	struct vlan_hdr *vlan_hdr;
	int i;

	for (i = 0; i < tuplehash->tuple.encap_num; i++) {
		if (skb_vlan_tag_present(skb)) {
			/* Hwaccel tag costs one encap slot but no bytes. */
			__vlan_hwaccel_clear_tag(skb);
			continue;
		}
		switch (skb->protocol) {
		case htons(ETH_P_8021Q):
			vlan_hdr = (struct vlan_hdr *)skb->data;
			__skb_pull(skb, VLAN_HLEN);
			vlan_set_encap_proto(skb, vlan_hdr);
			skb_reset_network_header(skb);
			break;
		case htons(ETH_P_PPP_SES):
			skb->protocol = nf_flow_pppoe_proto(skb);
			skb_pull(skb, PPPOE_SES_HLEN);
			skb_reset_network_header(skb);
			break;
		}
	}
}

/*
 * Direct transmission: build the Ethernet header from the addresses
 * cached in the tuple and queue the packet on the cached output device.
 * Returns NF_STOLEN on success, NF_DROP if the device is gone.
 */
static unsigned int nf_flow_queue_xmit(struct net *net, struct sk_buff *skb,
				       const struct flow_offload_tuple_rhash *tuplehash,
				       unsigned short type)
{
	struct net_device *outdev;

	outdev = dev_get_by_index_rcu(net, tuplehash->tuple.out.ifidx);
	if (!outdev)
		return NF_DROP;

	skb->dev = outdev;
	dev_hard_header(skb, skb->dev, type, tuplehash->tuple.out.h_dest,
			tuplehash->tuple.out.h_source, skb->len);
	dev_queue_xmit(skb);

	return NF_STOLEN;
}

/*
 * Netfilter ingress hook implementing the IPv4 flowtable fast path:
 * parse the tuple, look up the offloaded flow, validate state/route/MTU,
 * pop encapsulation, apply NAT, decrement TTL and transmit. Returns
 * NF_ACCEPT to fall back to the regular stack, NF_STOLEN when the packet
 * was consumed, or NF_DROP on error.
 */
unsigned int
nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
			const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	struct flow_offload_tuple tuple = {};
	enum flow_offload_tuple_dir dir;
	struct flow_offload *flow;
	struct net_device *outdev;
	u32 hdrsize, offset = 0;
	unsigned int thoff, mtu;
	struct rtable *rt;
	struct iphdr *iph;
	__be32 nexthop;
	int ret;

	if (skb->protocol != htons(ETH_P_IP) &&
	    !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IP), &offset))
		return NF_ACCEPT;

	if (nf_flow_tuple_ip(skb, state->in, &tuple, &hdrsize, offset) < 0)
		return NF_ACCEPT;

	tuplehash = flow_offload_lookup(flow_table, &tuple);
	if (tuplehash == NULL)
		return NF_ACCEPT;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);

	/* skb->len still includes the encap header(s); allow for them. */
	mtu = flow->tuplehash[dir].tuple.mtu + offset;
	if (unlikely(nf_flow_exceeds_mtu(skb, mtu)))
		return NF_ACCEPT;

	iph = (struct iphdr *)(skb_network_header(skb) + offset);
	thoff = (iph->ihl * 4) + offset;
	if (nf_flow_state_check(flow, iph->protocol, skb, thoff))
		return NF_ACCEPT;

	if (!nf_flow_dst_check(&tuplehash->tuple)) {
		/* Stale route: retire the flow and use the slow path. */
		flow_offload_teardown(flow);
		return NF_ACCEPT;
	}

	/* Headers are about to be mangled; unshare up to the L4 header. */
	if (skb_try_make_writable(skb, thoff + hdrsize))
		return NF_DROP;

	flow_offload_refresh(flow_table, flow);

	nf_flow_encap_pop(skb, tuplehash);
	thoff -= offset;

	iph = ip_hdr(skb);
	nf_flow_nat_ip(flow, skb, thoff, dir, iph);

	ip_decrease_ttl(iph);
	skb_clear_tstamp(skb);

	if (flow_table->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);

	if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
		rt = (struct rtable *)tuplehash->tuple.dst_cache;
		memset(skb->cb, 0, sizeof(struct inet_skb_parm));
		IPCB(skb)->iif = skb->dev->ifindex;
		IPCB(skb)->flags = IPSKB_FORWARDED;
		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
	}

	switch (tuplehash->tuple.xmit_type) {
	case FLOW_OFFLOAD_XMIT_NEIGH:
		rt = (struct rtable *)tuplehash->tuple.dst_cache;
		outdev = rt->dst.dev;
		skb->dev = outdev;
		nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
		skb_dst_set_noref(skb, &rt->dst);
		neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
		ret = NF_STOLEN;
		break;
	case FLOW_OFFLOAD_XMIT_DIRECT:
		ret = nf_flow_queue_xmit(state->net, skb, tuplehash, ETH_P_IP);
		if (ret == NF_DROP)
			flow_offload_teardown(flow);
		break;
	default:
		WARN_ON_ONCE(1);
		ret = NF_DROP;
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ip_hook);

/*
 * Incrementally fix the TCP checksum after an IPv6 address rewrite.
 * NOTE(review): the @ip6h parameter is unused here — kept only for
 * signature symmetry with the call site.
 */
static void nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff,
				 struct in6_addr *addr,
				 struct in6_addr *new_addr,
				 struct ipv6hdr *ip6h)
{
	struct tcphdr *tcph;

	tcph = (void *)(skb_network_header(skb) + thoff);
	inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32,
				  new_addr->s6_addr32, true);
}

/*
 * Incrementally fix the UDP checksum after an IPv6 address rewrite;
 * same zero-checksum handling as the IPv4 variant.
 */
static void nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff,
				 struct in6_addr *addr,
				 struct in6_addr *new_addr)
{
	struct udphdr *udph;

	udph = (void *)(skb_network_header(skb) + thoff);
	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
		inet_proto_csum_replace16(&udph->check, skb, addr->s6_addr32,
					  new_addr->s6_addr32, true);
		if (!udph->check)
			udph->check = CSUM_MANGLED_0;
	}
}

/* Dispatch the layer-4 checksum fixup for an IPv6 address rewrite. */
static void nf_flow_nat_ipv6_l4proto(struct sk_buff *skb, struct ipv6hdr *ip6h,
				     unsigned int thoff, struct in6_addr *addr,
				     struct in6_addr *new_addr)
{
	switch (ip6h->nexthdr) {
	case IPPROTO_TCP:
		nf_flow_nat_ipv6_tcp(skb, thoff, addr, new_addr, ip6h);
		break;
	case IPPROTO_UDP:
		nf_flow_nat_ipv6_udp(skb, thoff, addr, new_addr);
		break;
	}
}

/*
 * Apply source NAT to the IPv6 header; IPv6 has no header checksum,
 * so only the L4 checksum needs updating.
 */
static void nf_flow_snat_ipv6(const struct flow_offload *flow,
			      struct sk_buff *skb, struct ipv6hdr *ip6h,
			      unsigned int thoff,
			      enum flow_offload_tuple_dir dir)
{
	struct in6_addr addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = ip6h->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6;
		ip6h->saddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = ip6h->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6;
		ip6h->daddr = new_addr;
		break;
	}

	nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}

/* Apply destination NAT to the IPv6 header; mirror of nf_flow_snat_ipv6(). */
static void nf_flow_dnat_ipv6(const struct flow_offload *flow,
			      struct sk_buff *skb, struct ipv6hdr *ip6h,
			      unsigned int thoff,
			      enum flow_offload_tuple_dir dir)
{
	struct in6_addr addr, new_addr;

	switch (dir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		addr = ip6h->daddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6;
		ip6h->daddr = new_addr;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		addr = ip6h->saddr;
		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6;
		ip6h->saddr = new_addr;
		break;
	}

	nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
}

/*
 * Perform any SNAT/DNAT recorded on the flow for IPv6. The transport
 * header is assumed to follow the fixed IPv6 header directly (no
 * extension headers — those packets never pass nf_flow_tuple_ipv6()'s
 * nexthdr switch).
 */
static void nf_flow_nat_ipv6(const struct flow_offload *flow,
			     struct sk_buff *skb,
			     enum flow_offload_tuple_dir dir,
			     struct ipv6hdr *ip6h)
{
	unsigned int thoff = sizeof(*ip6h);

	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
		nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir);
		nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir);
	}
	if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
		nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir);
		nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir);
	}
}

/*
 * Parse an IPv6 packet into a flowtable lookup tuple; IPv6 counterpart
 * of nf_flow_tuple_ip(). Returns 0 on success, -1 when the packet must
 * take the slow path (truncated, unsupported nexthdr — including any
 * extension header — or hop limit about to expire).
 */
static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
			      struct flow_offload_tuple *tuple, u32 *hdrsize,
			      u32 offset)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;
	u8 nexthdr;

	thoff = sizeof(*ip6h) + offset;
	if (!pskb_may_pull(skb, thoff))
		return -1;

	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset);

	nexthdr = ip6h->nexthdr;
	switch (nexthdr) {
	case IPPROTO_TCP:
		*hdrsize = sizeof(struct tcphdr);
		break;
	case IPPROTO_UDP:
		*hdrsize = sizeof(struct udphdr);
		break;
#ifdef CONFIG_NF_CT_PROTO_GRE
	case IPPROTO_GRE:
		*hdrsize = sizeof(struct gre_base_hdr);
		break;
#endif
	default:
		return -1;
	}

	/* Hop limit would hit zero after our decrement. */
	if (ip6h->hop_limit <= 1)
		return -1;

	if (!pskb_may_pull(skb, thoff + *hdrsize))
		return -1;

	switch (nexthdr) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
		tuple->src_port = ports->source;
		tuple->dst_port = ports->dest;
		break;
	case IPPROTO_GRE: {
		struct gre_base_hdr *greh;

		greh = (struct gre_base_hdr *)(skb_network_header(skb) + thoff);
		/* Only GRE version 0 flows are offloaded. */
		if ((greh->flags & GRE_VERSION) != GRE_VERSION_0)
			return -1;
		break;
	}
	}

	/* pskb_may_pull() may have reallocated; re-derive the header pointer. */
	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset);

	tuple->src_v6 = ip6h->saddr;
	tuple->dst_v6 = ip6h->daddr;
	tuple->l3proto = AF_INET6;
	tuple->l4proto = nexthdr;
	tuple->iifidx = dev->ifindex;
	nf_flow_tuple_encap(skb, tuple);

	return 0;
}

/*
 * Netfilter ingress hook implementing the IPv6 flowtable fast path;
 * mirrors nf_flow_offload_ip_hook() step for step (no IP checksum to
 * maintain, hop_limit decremented directly).
 */
unsigned int
nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
			  const struct nf_hook_state *state)
{
	struct flow_offload_tuple_rhash *tuplehash;
	struct nf_flowtable *flow_table = priv;
	struct flow_offload_tuple tuple = {};
	enum flow_offload_tuple_dir dir;
	const struct in6_addr *nexthop;
	struct flow_offload *flow;
	struct net_device *outdev;
	unsigned int thoff, mtu;
	u32 hdrsize, offset = 0;
	struct ipv6hdr *ip6h;
	struct rt6_info *rt;
	int ret;

	if (skb->protocol != htons(ETH_P_IPV6) &&
	    !nf_flow_skb_encap_protocol(skb, htons(ETH_P_IPV6), &offset))
		return NF_ACCEPT;

	if (nf_flow_tuple_ipv6(skb, state->in, &tuple, &hdrsize, offset) < 0)
		return NF_ACCEPT;

	tuplehash = flow_offload_lookup(flow_table, &tuple);
	if (tuplehash == NULL)
		return NF_ACCEPT;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);

	/* skb->len still includes the encap header(s); allow for them. */
	mtu = flow->tuplehash[dir].tuple.mtu + offset;
	if (unlikely(nf_flow_exceeds_mtu(skb, mtu)))
		return NF_ACCEPT;

	ip6h = (struct ipv6hdr *)(skb_network_header(skb) + offset);
	thoff = sizeof(*ip6h) + offset;
	if (nf_flow_state_check(flow, ip6h->nexthdr, skb, thoff))
		return NF_ACCEPT;

	if (!nf_flow_dst_check(&tuplehash->tuple)) {
		/* Stale route: retire the flow and use the slow path. */
		flow_offload_teardown(flow);
		return NF_ACCEPT;
	}

	/* Headers are about to be mangled; unshare up to the L4 header. */
	if (skb_try_make_writable(skb, thoff + hdrsize))
		return NF_DROP;

	flow_offload_refresh(flow_table, flow);

	nf_flow_encap_pop(skb, tuplehash);

	ip6h = ipv6_hdr(skb);
	nf_flow_nat_ipv6(flow, skb, dir, ip6h);

	ip6h->hop_limit--;
	skb_clear_tstamp(skb);

	if (flow_table->flags & NF_FLOWTABLE_COUNTER)
		nf_ct_acct_update(flow->ct, tuplehash->tuple.dir, skb->len);

	if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) {
		rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
		memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
		IP6CB(skb)->iif = skb->dev->ifindex;
		IP6CB(skb)->flags = IP6SKB_FORWARDED;
		return nf_flow_xmit_xfrm(skb, state, &rt->dst);
	}

	switch (tuplehash->tuple.xmit_type) {
	case FLOW_OFFLOAD_XMIT_NEIGH:
		rt = (struct rt6_info *)tuplehash->tuple.dst_cache;
		outdev = rt->dst.dev;
		skb->dev = outdev;
		nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
		skb_dst_set_noref(skb, &rt->dst);
		neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);
		ret = NF_STOLEN;
		break;
	case FLOW_OFFLOAD_XMIT_DIRECT:
		ret = nf_flow_queue_xmit(state->net, skb, tuplehash, ETH_P_IPV6);
		if (ret == NF_DROP)
			flow_offload_teardown(flow);
		break;
	default:
		WARN_ON_ONCE(1);
		ret = NF_DROP;
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nf_flow_offload_ipv6_hook);