/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright 2001 Niels Provos <provos@citi.umich.edu>
 * Copyright 2011-2018 Alexander Bluhm <bluhm@openbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $OpenBSD: pf_norm.c,v 1.114 2009/01/29 14:11:45 henning Exp $
 */

#include <sys/cdefs.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_pf.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/refcount.h>
#include <sys/socket.h>

#include <net/if.h>
#include <net/vnet.h>
#include <net/pfvar.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/sctp_constants.h>
#include <netinet/sctp_header.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

struct pf_frent {
	TAILQ_ENTRY(pf_frent)	fr_next;
	struct mbuf	*fe_m;
	uint16_t	fe_hdrlen;	/* ipv4 header length with ip options
					   ipv6, extension, fragment header */
	uint16_t	fe_extoff;	/* last extension header offset or 0 */
	uint16_t	fe_len;		/* fragment length */
	uint16_t	fe_off;		/* fragment offset */
	uint16_t	fe_mff;		/* more fragment flag */
};

struct pf_fragment_cmp {
	struct pf_addr	frc_src;
	struct pf_addr	frc_dst;
	uint32_t	frc_id;
	sa_family_t	frc_af;
	uint8_t		frc_proto;
};

struct pf_fragment {
	struct pf_fragment_cmp	fr_key;
#define fr_src	fr_key.frc_src
#define fr_dst	fr_key.frc_dst
#define fr_id	fr_key.frc_id
#define fr_af	fr_key.frc_af
#define fr_proto	fr_key.frc_proto

	/* pointers to queue element */
	struct pf_frent	*fr_firstoff[PF_FRAG_ENTRY_POINTS];
	/* count entries between pointers */
	uint8_t		fr_entries[PF_FRAG_ENTRY_POINTS];
	RB_ENTRY(pf_fragment) fr_entry;
	TAILQ_ENTRY(pf_fragment) frag_next;
	uint32_t	fr_timeout;
	uint16_t	fr_maxlen;	/* maximum length of single fragment */
	u_int16_t	fr_holes;	/* number of holes in the queue */
	TAILQ_HEAD(pf_fragq, pf_frent) fr_queue;
};

struct pf_fragment_tag {
	uint16_t	ft_hdrlen;	/* header length of reassembled pkt */
	uint16_t	ft_extoff;	/* last extension header offset or 0 */
	uint16_t	ft_maxlen;	/* maximum fragment payload length */
	uint32_t	ft_id;		/* fragment id */
};

VNET_DEFINE_STATIC(struct mtx, pf_frag_mtx);
#define V_pf_frag_mtx		VNET(pf_frag_mtx)
#define PF_FRAG_LOCK()		mtx_lock(&V_pf_frag_mtx)
#define PF_FRAG_UNLOCK()	mtx_unlock(&V_pf_frag_mtx)
#define PF_FRAG_ASSERT()	mtx_assert(&V_pf_frag_mtx, MA_OWNED)

VNET_DEFINE(uma_zone_t, pf_state_scrub_z);	/* XXX: shared with pfsync */

VNET_DEFINE_STATIC(uma_zone_t, pf_frent_z);
#define	V_pf_frent_z	VNET(pf_frent_z)
VNET_DEFINE_STATIC(uma_zone_t, pf_frag_z);
#define	V_pf_frag_z	VNET(pf_frag_z)

TAILQ_HEAD(pf_fragqueue, pf_fragment);
TAILQ_HEAD(pf_cachequeue, pf_fragment);
VNET_DEFINE_STATIC(struct pf_fragqueue, pf_fragqueue);
#define	V_pf_fragqueue	VNET(pf_fragqueue)
RB_HEAD(pf_frag_tree, pf_fragment);
VNET_DEFINE_STATIC(struct pf_frag_tree, pf_frag_tree);
#define	V_pf_frag_tree	VNET(pf_frag_tree)
static int	pf_frag_compare(struct pf_fragment *,
		    struct pf_fragment *);
static RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
static RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);

static void	pf_flush_fragments(void);
static void	pf_free_fragment(struct pf_fragment *);
static void	pf_remove_fragment(struct pf_fragment *);

static struct pf_frent *pf_create_fragment(u_short *);
static int	pf_frent_holes(struct pf_frent *frent);
static struct pf_fragment *pf_find_fragment(struct pf_fragment_cmp *key,
		    struct pf_frag_tree *tree);
static inline int	pf_frent_index(struct pf_frent *);
static int	pf_frent_insert(struct pf_fragment *,
		    struct pf_frent *, struct pf_frent *);
void		pf_frent_remove(struct pf_fragment *,
		    struct pf_frent *);
struct pf_frent	*pf_frent_previous(struct pf_fragment *,
		    struct pf_frent *);
static struct pf_fragment *pf_fillup_fragment(struct pf_fragment_cmp *,
		    struct pf_frent *, u_short *);
static struct mbuf *pf_join_fragment(struct pf_fragment *);
#ifdef INET
static int	pf_reassemble(struct mbuf **, struct ip *, int, u_short *);
#endif	/* INET */
#ifdef INET6
static int	pf_reassemble6(struct mbuf **, struct ip6_hdr *,
		    struct ip6_frag *, uint16_t, uint16_t, u_short *);
#endif	/* INET6 */

#define	DPFPRINTF(x) do {				\
	if (V_pf_status.debug >= PF_DEBUG_MISC) {	\
		printf("%s: ", __func__);		\
		printf x ;				\
	}						\
} while (0)
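
/*
 * Usage note (illustrative): DPFPRINTF(("bad fragment: len %d\n", len))
 * expands to printf() calls guarded by the PF_DEBUG_MISC debug level;
 * the doubled parentheses pass the whole argument list through to
 * printf, prefixed with the calling function's name.
 */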

#ifdef INET
static void
pf_ip2key(struct ip *ip, int dir, struct pf_fragment_cmp *key)
{

	key->frc_src.v4 = ip->ip_src;
	key->frc_dst.v4 = ip->ip_dst;
	key->frc_af = AF_INET;
	key->frc_proto = ip->ip_p;
	key->frc_id = ip->ip_id;
}
#endif /* INET */

void
pf_normalize_init(void)
{

	V_pf_frag_z = uma_zcreate("pf frags", sizeof(struct pf_fragment),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_frent_z = uma_zcreate("pf frag entries", sizeof(struct pf_frent),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	V_pf_state_scrub_z = uma_zcreate("pf state scrubs",
	    sizeof(struct pf_state_scrub), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);

	mtx_init(&V_pf_frag_mtx, "pf fragments", NULL, MTX_DEF);

	V_pf_limits[PF_LIMIT_FRAGS].zone = V_pf_frent_z;
	V_pf_limits[PF_LIMIT_FRAGS].limit = PFFRAG_FRENT_HIWAT;
	uma_zone_set_max(V_pf_frent_z, PFFRAG_FRENT_HIWAT);
	uma_zone_set_warning(V_pf_frent_z, "PF frag entries limit reached");

	TAILQ_INIT(&V_pf_fragqueue);
}

void
pf_normalize_cleanup(void)
{

	uma_zdestroy(V_pf_state_scrub_z);
	uma_zdestroy(V_pf_frent_z);
	uma_zdestroy(V_pf_frag_z);

	mtx_destroy(&V_pf_frag_mtx);
}

static int
pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
{
	int	diff;

	if ((diff = a->fr_id - b->fr_id) != 0)
		return (diff);
	if ((diff = a->fr_proto - b->fr_proto) != 0)
		return (diff);
	if ((diff = a->fr_af - b->fr_af) != 0)
		return (diff);
	if ((diff = pf_addr_cmp(&a->fr_src, &b->fr_src, a->fr_af)) != 0)
		return (diff);
	if ((diff = pf_addr_cmp(&a->fr_dst, &b->fr_dst, a->fr_af)) != 0)
		return (diff);
	return (0);
}

void
pf_purge_expired_fragments(void)
{
	u_int32_t	expire = time_uptime -
	    V_pf_default_rule.timeout[PFTM_FRAG];

	pf_purge_fragments(expire);
}

void
pf_purge_fragments(uint32_t expire)
{
	struct pf_fragment	*frag;

	PF_FRAG_LOCK();
	while ((frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue)) != NULL) {
		if (frag->fr_timeout > expire)
			break;

		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
		pf_free_fragment(frag);
	}

	PF_FRAG_UNLOCK();
}

/*
 * Try to flush old fragments to make space for new ones.
 */
static void
pf_flush_fragments(void)
{
	struct pf_fragment	*frag;
	int			 goal;

	PF_FRAG_ASSERT();

	goal = uma_zone_get_cur(V_pf_frent_z) * 9 / 10;
	DPFPRINTF(("trying to free %d frag entries\n", goal));
	while (goal < uma_zone_get_cur(V_pf_frent_z)) {
		frag = TAILQ_LAST(&V_pf_fragqueue, pf_fragqueue);
		if (frag)
			pf_free_fragment(frag);
		else
			break;
	}
}

/* Frees the fragments and all associated entries */
static void
pf_free_fragment(struct pf_fragment *frag)
{
	struct pf_frent	*frent;

	PF_FRAG_ASSERT();

	/* Free all fragments */
	for (frent = TAILQ_FIRST(&frag->fr_queue); frent;
	    frent = TAILQ_FIRST(&frag->fr_queue)) {
		TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

		m_freem(frent->fe_m);
		uma_zfree(V_pf_frent_z, frent);
	}

	pf_remove_fragment(frag);
}

static struct pf_fragment *
pf_find_fragment(struct pf_fragment_cmp *key, struct pf_frag_tree *tree)
{
	struct pf_fragment	*frag;

	PF_FRAG_ASSERT();

	frag = RB_FIND(pf_frag_tree, tree, (struct pf_fragment *)key);
	if (frag != NULL) {
		/* XXX Are we sure we want to update the timeout? */
		frag->fr_timeout = time_uptime;
		TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
		TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);
	}

	return (frag);
}

/* Removes a fragment from the fragment queue and frees the fragment */
static void
pf_remove_fragment(struct pf_fragment *frag)
{

	PF_FRAG_ASSERT();
	KASSERT(frag, ("frag != NULL"));

	RB_REMOVE(pf_frag_tree, &V_pf_frag_tree, frag);
	TAILQ_REMOVE(&V_pf_fragqueue, frag, frag_next);
	uma_zfree(V_pf_frag_z, frag);
}

static struct pf_frent *
pf_create_fragment(u_short *reason)
{
	struct pf_frent	*frent;

	PF_FRAG_ASSERT();

	frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
	if (frent == NULL) {
		pf_flush_fragments();
		frent = uma_zalloc(V_pf_frent_z, M_NOWAIT);
		if (frent == NULL) {
			REASON_SET(reason, PFRES_MEMORY);
			return (NULL);
		}
	}

	return (frent);
}

/*
 * Calculate the additional holes that were created in the fragment
 * queue by inserting this fragment.  A fragment in the middle
 * creates one more hole by splitting.  For each connected side,
 * it loses one hole.
 * Fragment entry must be in the queue when calling this function.
 */
static int
pf_frent_holes(struct pf_frent *frent)
{
	struct pf_frent *prev = TAILQ_PREV(frent, pf_fragq, fr_next);
	struct pf_frent *next = TAILQ_NEXT(frent, fr_next);
	int holes = 1;

	if (prev == NULL) {
		if (frent->fe_off == 0)
			holes--;
	} else {
		KASSERT(frent->fe_off != 0, ("frent->fe_off != 0"));
		if (frent->fe_off == prev->fe_off + prev->fe_len)
			holes--;
	}
	if (next == NULL) {
		if (!frent->fe_mff)
			holes--;
	} else {
		KASSERT(frent->fe_mff, ("frent->fe_mff"));
		if (next->fe_off == frent->fe_off + frent->fe_len)
			holes--;
	}
	return holes;
}
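
/*
 * Worked example (illustrative): with a single fragment at offset 0,
 * length 16 and MF set, the queue has one hole (fr_holes == 1).
 * Inserting a fragment at offset 32 connects to neither side and
 * returns 1, splitting the hole in two; inserting at offset 16
 * instead connects on the left and returns 0.
 */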

static inline int
pf_frent_index(struct pf_frent *frent)
{
	/*
	 * We have an array of 16 entry points to the queue.  A full size
	 * 65535 octet IP packet can have 8192 fragments.  So the queue
	 * traversal length is at most 512 and at most 16 entry points are
	 * checked.  We need 128 additional bytes on a 64 bit architecture.
	 */
	CTASSERT(((u_int16_t)0xffff &~ 7) / (0x10000 / PF_FRAG_ENTRY_POINTS) ==
	    16 - 1);
	CTASSERT(((u_int16_t)0xffff >> 3) / PF_FRAG_ENTRY_POINTS == 512 - 1);

	return frent->fe_off / (0x10000 / PF_FRAG_ENTRY_POINTS);
}
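
/*
 * Worked example (illustrative): with PF_FRAG_ENTRY_POINTS == 16 each
 * entry point covers 0x10000 / 16 == 4096 octets, so an offset of
 * 12288 maps to index 3 and the maximum offset 65528 maps to index 15.
 */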

static int
pf_frent_insert(struct pf_fragment *frag, struct pf_frent *frent,
    struct pf_frent *prev)
{
	int index;

	CTASSERT(PF_FRAG_ENTRY_LIMIT <= 0xff);

	/*
	 * A packet has at most 65536 octets.  With 16 entry points, each
	 * one spans 4096 octets.  We limit these to 64 fragments each,
	 * which means on average every fragment must have at least 64
	 * octets.
	 */
	index = pf_frent_index(frent);
	if (frag->fr_entries[index] >= PF_FRAG_ENTRY_LIMIT)
		return ENOBUFS;
	frag->fr_entries[index]++;

	if (prev == NULL) {
		TAILQ_INSERT_HEAD(&frag->fr_queue, frent, fr_next);
	} else {
		KASSERT(prev->fe_off + prev->fe_len <= frent->fe_off,
		    ("overlapping fragment"));
		TAILQ_INSERT_AFTER(&frag->fr_queue, prev, frent, fr_next);
	}

	if (frag->fr_firstoff[index] == NULL) {
		KASSERT(prev == NULL || pf_frent_index(prev) < index,
		    ("prev == NULL || pf_frent_index(prev) < index"));
		frag->fr_firstoff[index] = frent;
	} else {
		if (frent->fe_off < frag->fr_firstoff[index]->fe_off) {
			KASSERT(prev == NULL || pf_frent_index(prev) < index,
			    ("prev == NULL || pf_frent_index(prev) < index"));
			frag->fr_firstoff[index] = frent;
		} else {
			KASSERT(prev != NULL, ("prev != NULL"));
			KASSERT(pf_frent_index(prev) == index,
			    ("pf_frent_index(prev) == index"));
		}
	}

	frag->fr_holes += pf_frent_holes(frent);

	return 0;
}

void
pf_frent_remove(struct pf_fragment *frag, struct pf_frent *frent)
{
#ifdef INVARIANTS
	struct pf_frent *prev = TAILQ_PREV(frent, pf_fragq, fr_next);
#endif
	struct pf_frent *next = TAILQ_NEXT(frent, fr_next);
	int index;

	frag->fr_holes -= pf_frent_holes(frent);

	index = pf_frent_index(frent);
	KASSERT(frag->fr_firstoff[index] != NULL, ("frent not found"));
	if (frag->fr_firstoff[index]->fe_off == frent->fe_off) {
		if (next == NULL) {
			frag->fr_firstoff[index] = NULL;
		} else {
			KASSERT(frent->fe_off + frent->fe_len <= next->fe_off,
			    ("overlapping fragment"));
			if (pf_frent_index(next) == index) {
				frag->fr_firstoff[index] = next;
			} else {
				frag->fr_firstoff[index] = NULL;
			}
		}
	} else {
		KASSERT(frag->fr_firstoff[index]->fe_off < frent->fe_off,
		    ("frag->fr_firstoff[index]->fe_off < frent->fe_off"));
		KASSERT(prev != NULL, ("prev != NULL"));
		KASSERT(prev->fe_off + prev->fe_len <= frent->fe_off,
		    ("overlapping fragment"));
		KASSERT(pf_frent_index(prev) == index,
		    ("pf_frent_index(prev) == index"));
	}

	TAILQ_REMOVE(&frag->fr_queue, frent, fr_next);

	KASSERT(frag->fr_entries[index] > 0, ("No fragments remaining"));
	frag->fr_entries[index]--;
}

struct pf_frent *
pf_frent_previous(struct pf_fragment *frag, struct pf_frent *frent)
{
	struct pf_frent *prev, *next;
	int index;

	/*
	 * If there are no fragments after frent, take the final one.
	 * Assume that the global queue is not empty.
	 */
	prev = TAILQ_LAST(&frag->fr_queue, pf_fragq);
	KASSERT(prev != NULL, ("prev != NULL"));
	if (prev->fe_off <= frent->fe_off)
		return prev;
	/*
	 * We want to find a fragment entry that is before frent, but still
	 * close to it.  Find the first fragment entry that is in the same
	 * entry point or in the first entry point after that.  As we have
	 * already checked that there are entries behind frent, this will
	 * succeed.
	 */
	for (index = pf_frent_index(frent); index < PF_FRAG_ENTRY_POINTS;
	    index++) {
		prev = frag->fr_firstoff[index];
		if (prev != NULL)
			break;
	}
	KASSERT(prev != NULL, ("prev != NULL"));
	/*
	 * In prev we may have a fragment from the same entry point that is
	 * before frent, or one that is just one position behind frent.
	 * In the latter case, we go back one step and have the predecessor.
	 * There may be none if the new fragment will be the first one.
	 */
	if (prev->fe_off > frent->fe_off) {
		prev = TAILQ_PREV(prev, pf_fragq, fr_next);
		if (prev == NULL)
			return NULL;
		KASSERT(prev->fe_off <= frent->fe_off,
		    ("prev->fe_off <= frent->fe_off"));
		return prev;
	}
	/*
	 * In prev is the first fragment of the entry point.  The offset
	 * of frent is behind it.  Find the closest previous fragment.
	 */
	for (next = TAILQ_NEXT(prev, fr_next); next != NULL;
	    next = TAILQ_NEXT(next, fr_next)) {
		if (next->fe_off > frent->fe_off)
			break;
		prev = next;
	}
	return prev;
}
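
/*
 * Note on the lookup above: the entry point array narrows the search
 * to one 4096-octet bucket, so the final list walk only scans the
 * fragments near the insertion point instead of the whole queue.
 */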

static struct pf_fragment *
pf_fillup_fragment(struct pf_fragment_cmp *key, struct pf_frent *frent,
    u_short *reason)
{
	struct pf_frent		*after, *next, *prev;
	struct pf_fragment	*frag;
	uint16_t		 total;
	int			 old_index, new_index;

	PF_FRAG_ASSERT();

	/* No empty fragments. */
	if (frent->fe_len == 0) {
		DPFPRINTF(("bad fragment: len 0\n"));
		goto bad_fragment;
	}

	/* All fragments are 8 byte aligned. */
	if (frent->fe_mff && (frent->fe_len & 0x7)) {
		DPFPRINTF(("bad fragment: mff and len %d\n", frent->fe_len));
		goto bad_fragment;
	}

	/* Respect maximum length, IP_MAXPACKET == IPV6_MAXPACKET. */
	if (frent->fe_off + frent->fe_len > IP_MAXPACKET) {
		DPFPRINTF(("bad fragment: max packet %d\n",
		    frent->fe_off + frent->fe_len));
		goto bad_fragment;
	}

	DPFPRINTF((key->frc_af == AF_INET ?
	    "reass frag %d @ %d-%d\n" : "reass frag %#08x @ %d-%d\n",
	    key->frc_id, frent->fe_off, frent->fe_off + frent->fe_len));

	/* Fully buffer all of the fragments in this fragment queue. */
	frag = pf_find_fragment(key, &V_pf_frag_tree);

	/* Create a new reassembly queue for this packet. */
	if (frag == NULL) {
		frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
		if (frag == NULL) {
			pf_flush_fragments();
			frag = uma_zalloc(V_pf_frag_z, M_NOWAIT);
			if (frag == NULL) {
				REASON_SET(reason, PFRES_MEMORY);
				goto drop_fragment;
			}
		}

		*(struct pf_fragment_cmp *)frag = *key;
		memset(frag->fr_firstoff, 0, sizeof(frag->fr_firstoff));
		memset(frag->fr_entries, 0, sizeof(frag->fr_entries));
		frag->fr_timeout = time_uptime;
		frag->fr_maxlen = frent->fe_len;
		frag->fr_holes = 1;
		TAILQ_INIT(&frag->fr_queue);

		RB_INSERT(pf_frag_tree, &V_pf_frag_tree, frag);
		TAILQ_INSERT_HEAD(&V_pf_fragqueue, frag, frag_next);

		/* We do not have a previous fragment, cannot fail. */
		pf_frent_insert(frag, frent, NULL);

		return (frag);
	}

	KASSERT(!TAILQ_EMPTY(&frag->fr_queue), ("!TAILQ_EMPTY()->fr_queue"));

	/* Remember maximum fragment len for refragmentation. */
	if (frent->fe_len > frag->fr_maxlen)
		frag->fr_maxlen = frent->fe_len;

	/* Maximum data we have seen already. */
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;

	/* Non-terminal fragments must have the more fragments flag. */
	if (frent->fe_off + frent->fe_len < total && !frent->fe_mff)
		goto bad_fragment;

	/* Check if we saw the last fragment already. */
	if (!TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_mff) {
		if (frent->fe_off + frent->fe_len > total ||
		    (frent->fe_off + frent->fe_len == total && frent->fe_mff))
			goto bad_fragment;
	} else {
		if (frent->fe_off + frent->fe_len == total && !frent->fe_mff)
			goto bad_fragment;
	}

	/* Find neighbors for newly inserted fragment */
	prev = pf_frent_previous(frag, frent);
	if (prev == NULL) {
		after = TAILQ_FIRST(&frag->fr_queue);
		KASSERT(after != NULL, ("after != NULL"));
	} else {
		after = TAILQ_NEXT(prev, fr_next);
	}

	if (prev != NULL && prev->fe_off + prev->fe_len > frent->fe_off) {
		uint16_t precut;

		precut = prev->fe_off + prev->fe_len - frent->fe_off;
		if (precut >= frent->fe_len)
			goto bad_fragment;
		DPFPRINTF(("overlap -%d\n", precut));
		m_adj(frent->fe_m, precut);
		frent->fe_off += precut;
		frent->fe_len -= precut;
	}

	for (; after != NULL && frent->fe_off + frent->fe_len > after->fe_off;
	    after = next) {
		uint16_t aftercut;

		aftercut = frent->fe_off + frent->fe_len - after->fe_off;
		DPFPRINTF(("adjust overlap %d\n", aftercut));
		if (aftercut < after->fe_len) {
			m_adj(after->fe_m, aftercut);
			old_index = pf_frent_index(after);
			after->fe_off += aftercut;
			after->fe_len -= aftercut;
			new_index = pf_frent_index(after);
			if (old_index != new_index) {
				DPFPRINTF(("frag index %d, new %d\n",
				    old_index, new_index));
				/* Fragment switched queue as fe_off changed */
				after->fe_off -= aftercut;
				after->fe_len += aftercut;
				/* Remove restored fragment from old queue */
				pf_frent_remove(frag, after);
				after->fe_off += aftercut;
				after->fe_len -= aftercut;
				/* Insert into correct queue */
				if (pf_frent_insert(frag, after, prev)) {
					DPFPRINTF(
					    ("fragment requeue limit exceeded\n"));
					m_freem(after->fe_m);
					uma_zfree(V_pf_frent_z, after);
					/* There is no way to recover */
					goto bad_fragment;
				}
			}
			break;
		}

		/* This fragment is completely overlapped, lose it. */
		next = TAILQ_NEXT(after, fr_next);
		pf_frent_remove(frag, after);
		m_freem(after->fe_m);
		uma_zfree(V_pf_frent_z, after);
	}

	/* If part of the queue gets too long, there is no way to recover. */
	if (pf_frent_insert(frag, frent, prev)) {
		DPFPRINTF(("fragment queue limit exceeded\n"));
		goto bad_fragment;
	}

	return (frag);

bad_fragment:
	REASON_SET(reason, PFRES_FRAG);
drop_fragment:
	uma_zfree(V_pf_frent_z, frent);
	return (NULL);
}

static struct mbuf *
pf_join_fragment(struct pf_fragment *frag)
{
	struct mbuf	*m, *m2;
	struct pf_frent	*frent, *next;

	frent = TAILQ_FIRST(&frag->fr_queue);
	next = TAILQ_NEXT(frent, fr_next);

	m = frent->fe_m;
	m_adj(m, (frent->fe_hdrlen + frent->fe_len) - m->m_pkthdr.len);
	uma_zfree(V_pf_frent_z, frent);
	for (frent = next; frent != NULL; frent = next) {
		next = TAILQ_NEXT(frent, fr_next);

		m2 = frent->fe_m;
		/* Strip off ip header. */
		m_adj(m2, frent->fe_hdrlen);
		/* Strip off any trailing bytes. */
		m_adj(m2, frent->fe_len - m2->m_pkthdr.len);

		uma_zfree(V_pf_frent_z, frent);
		m_cat(m, m2);
	}

	/* Remove from fragment queue. */
	pf_remove_fragment(frag);

	return (m);
}

#ifdef INET
static int
pf_reassemble(struct mbuf **m0, struct ip *ip, int dir, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct pf_frent		*frent;
	struct pf_fragment	*frag;
	struct pf_fragment_cmp	 key;
	uint16_t		 total, hdrlen;

	/* Get an entry for the fragment queue */
	if ((frent = pf_create_fragment(reason)) == NULL)
		return (PF_DROP);

	frent->fe_m = m;
	frent->fe_hdrlen = ip->ip_hl << 2;
	frent->fe_extoff = 0;
	frent->fe_len = ntohs(ip->ip_len) - (ip->ip_hl << 2);
	frent->fe_off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
	frent->fe_mff = ntohs(ip->ip_off) & IP_MF;

	pf_ip2key(ip, dir, &key);

	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL)
		return (PF_DROP);

	/* The mbuf is part of the fragment entry, no direct free or access */
	m = *m0 = NULL;

	if (frag->fr_holes) {
		DPFPRINTF(("frag %d, holes %d\n", frag->fr_id, frag->fr_holes));
		return (PF_PASS);	/* drop because *m0 is NULL, no error */
	}

	/* We have all the data */
	frent = TAILQ_FIRST(&frag->fr_queue);
	KASSERT(frent != NULL, ("frent != NULL"));
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
	hdrlen = frent->fe_hdrlen;

	m = *m0 = pf_join_fragment(frag);
	frag = NULL;

	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m = *m0; m; m = m->m_next)
			plen += m->m_len;
		m = *m0;
		m->m_pkthdr.len = plen;
	}

	ip = mtod(m, struct ip *);
	ip->ip_sum = pf_cksum_fixup(ip->ip_sum, ip->ip_len,
	    htons(hdrlen + total), 0);
	ip->ip_len = htons(hdrlen + total);
	ip->ip_sum = pf_cksum_fixup(ip->ip_sum, ip->ip_off,
	    ip->ip_off & ~(IP_MF|IP_OFFMASK), 0);
	ip->ip_off &= ~(IP_MF|IP_OFFMASK);

	if (hdrlen + total > IP_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d\n", total));
		ip->ip_len = 0;
		REASON_SET(reason, PFRES_SHORT);
		/* PF_DROP requires a valid mbuf *m0 in pf_test() */
		return (PF_DROP);
	}

	DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip->ip_len)));
	return (PF_PASS);
}
#endif /* INET */

#ifdef INET6
static int
pf_reassemble6(struct mbuf **m0, struct ip6_hdr *ip6, struct ip6_frag *fraghdr,
    uint16_t hdrlen, uint16_t extoff, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct pf_frent		*frent;
	struct pf_fragment	*frag;
	struct pf_fragment_cmp	 key;
	struct m_tag		*mtag;
	struct pf_fragment_tag	*ftag;
	int			 off;
	uint32_t		 frag_id;
	uint16_t		 total, maxlen;
	uint8_t			 proto;

	PF_FRAG_LOCK();

	/* Get an entry for the fragment queue. */
	if ((frent = pf_create_fragment(reason)) == NULL) {
		PF_FRAG_UNLOCK();
		return (PF_DROP);
	}

	frent->fe_m = m;
	frent->fe_hdrlen = hdrlen;
	frent->fe_extoff = extoff;
	frent->fe_len = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - hdrlen;
	frent->fe_off = ntohs(fraghdr->ip6f_offlg & IP6F_OFF_MASK);
	frent->fe_mff = fraghdr->ip6f_offlg & IP6F_MORE_FRAG;

	key.frc_src.v6 = ip6->ip6_src;
	key.frc_dst.v6 = ip6->ip6_dst;
	key.frc_af = AF_INET6;
	/* Only the first fragment's protocol is relevant. */
	key.frc_proto = 0;
	key.frc_id = fraghdr->ip6f_ident;

	if ((frag = pf_fillup_fragment(&key, frent, reason)) == NULL) {
		PF_FRAG_UNLOCK();
		return (PF_DROP);
	}

	/* The mbuf is part of the fragment entry, no direct free or access. */
	m = *m0 = NULL;

	if (frag->fr_holes) {
		DPFPRINTF(("frag %d, holes %d\n", frag->fr_id,
		    frag->fr_holes));
		PF_FRAG_UNLOCK();
		return (PF_PASS);	/* Drop because *m0 is NULL, no error. */
	}

	/* We have all the data. */
	frent = TAILQ_FIRST(&frag->fr_queue);
	KASSERT(frent != NULL, ("frent != NULL"));
	extoff = frent->fe_extoff;
	maxlen = frag->fr_maxlen;
	frag_id = frag->fr_id;
	total = TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_off +
	    TAILQ_LAST(&frag->fr_queue, pf_fragq)->fe_len;
	hdrlen = frent->fe_hdrlen - sizeof(struct ip6_frag);

	m = *m0 = pf_join_fragment(frag);
	frag = NULL;

	PF_FRAG_UNLOCK();

	/* Take protocol from first fragment header. */
	m = m_getptr(m, hdrlen + offsetof(struct ip6_frag, ip6f_nxt), &off);
	KASSERT(m, ("%s: short mbuf chain", __func__));
	proto = *(mtod(m, uint8_t *) + off);
	m = *m0;

	/* Delete frag6 header */
	if (ip6_deletefraghdr(m, hdrlen, M_NOWAIT) != 0)
		goto fail;

	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m = *m0; m; m = m->m_next)
			plen += m->m_len;
		m = *m0;
		m->m_pkthdr.len = plen;
	}

	if ((mtag = m_tag_get(PACKET_TAG_PF_REASSEMBLED,
	    sizeof(struct pf_fragment_tag), M_NOWAIT)) == NULL)
		goto fail;
	ftag = (struct pf_fragment_tag *)(mtag + 1);
	ftag->ft_hdrlen = hdrlen;
	ftag->ft_extoff = extoff;
	ftag->ft_maxlen = maxlen;
	ftag->ft_id = frag_id;
	m_tag_prepend(m, mtag);

	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons(hdrlen - sizeof(struct ip6_hdr) + total);
	if (extoff) {
		/* Write protocol into next field of last extension header. */
		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
		    &off);
		KASSERT(m, ("%s: short mbuf chain", __func__));
		*(mtod(m, char *) + off) = proto;
		m = *m0;
	} else
		ip6->ip6_nxt = proto;

	if (hdrlen - sizeof(struct ip6_hdr) + total > IPV6_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d\n", total));
		ip6->ip6_plen = 0;
		REASON_SET(reason, PFRES_SHORT);
		/* PF_DROP requires a valid mbuf *m0 in pf_test6(). */
		return (PF_DROP);
	}

	DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip6->ip6_plen)));
	return (PF_PASS);

fail:
	REASON_SET(reason, PFRES_MEMORY);
	/* PF_DROP requires a valid mbuf *m0 in pf_test6(), will free later. */
	return (PF_DROP);
}
#endif /* INET6 */

#ifdef INET6
int
pf_max_frag_size(struct mbuf *m)
{
	struct m_tag *tag;
	struct pf_fragment_tag *ftag;

	tag = m_tag_find(m, PACKET_TAG_PF_REASSEMBLED, NULL);
	if (tag == NULL)
		return (m->m_pkthdr.len);

	ftag = (struct pf_fragment_tag *)(tag + 1);

	return (ftag->ft_maxlen);
}

int
pf_refragment6(struct ifnet *ifp, struct mbuf **m0, struct m_tag *mtag,
    bool forward)
{
	struct mbuf		*m = *m0, *t;
	struct ip6_hdr		*hdr;
	struct pf_fragment_tag	*ftag = (struct pf_fragment_tag *)(mtag + 1);
	struct pf_pdesc		 pd;
	uint32_t		 frag_id;
	uint16_t		 hdrlen, extoff, maxlen;
	uint8_t			 proto;
	int			 error, action;

	hdrlen = ftag->ft_hdrlen;
	extoff = ftag->ft_extoff;
	maxlen = ftag->ft_maxlen;
	frag_id = ftag->ft_id;
	m_tag_delete(m, mtag);
	mtag = NULL;
	ftag = NULL;

	if (extoff) {
		int off;

		/* Use protocol from next field of last extension header */
		m = m_getptr(m, extoff + offsetof(struct ip6_ext, ip6e_nxt),
		    &off);
		KASSERT((m != NULL), ("pf_refragment6: short mbuf chain"));
		proto = *(mtod(m, uint8_t *) + off);
		*(mtod(m, char *) + off) = IPPROTO_FRAGMENT;
		m = *m0;
	} else {
		hdr = mtod(m, struct ip6_hdr *);
		proto = hdr->ip6_nxt;
		hdr->ip6_nxt = IPPROTO_FRAGMENT;
	}

	/* In case of link-local traffic we'll need a scope set. */
	hdr = mtod(m, struct ip6_hdr *);

	in6_setscope(&hdr->ip6_src, ifp, NULL);
	in6_setscope(&hdr->ip6_dst, ifp, NULL);

	/* The MTU must be a multiple of 8 bytes, or we risk doing the
	 * fragmentation wrong. */
	maxlen = maxlen & ~7;
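
	/*
	 * E.g. a largest seen fragment of 1239 octets rounds down to
	 * 1232 (1239 & ~7), since IPv6 fragment offsets are expressed
	 * in units of 8 octets.
	 */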

	/*
	 * Maxlen may be less than 8 if there was only a single
	 * fragment.  As it was fragmented before, add a fragment
	 * header also for a single fragment.  If total or maxlen
	 * is less than 8, ip6_fragment() will return EMSGSIZE and
	 * we drop the packet.
	 */
	error = ip6_fragment(ifp, m, hdrlen, proto, maxlen, frag_id);
	m = (*m0)->m_nextpkt;
	(*m0)->m_nextpkt = NULL;
	if (error == 0) {
		/* The first mbuf contains the unfragmented packet. */
		m_freem(*m0);
		*m0 = NULL;
		action = PF_PASS;
	} else {
		/* Drop expects an mbuf to free. */
		DPFPRINTF(("refragment error %d\n", error));
		action = PF_DROP;
	}
	for (; m; m = t) {
		t = m->m_nextpkt;
		m->m_nextpkt = NULL;
		m->m_flags |= M_SKIP_FIREWALL;
		memset(&pd, 0, sizeof(pd));
		pd.pf_mtag = pf_find_mtag(m);
		if (error == 0)
			if (forward) {
				MPASS(m->m_pkthdr.rcvif != NULL);
				ip6_forward(m, 0);
			} else {
				(void)ip6_output(m, NULL, NULL, 0, NULL, NULL,
				    NULL);
			}
		else
			m_freem(m);
	}

	return (action);
}
#endif /* INET6 */

#ifdef INET
int
pf_normalize_ip(struct mbuf **m0, struct pfi_kkif *kif, u_short *reason,
    struct pf_pdesc *pd)
{
	struct mbuf	*m = *m0;
	struct pf_krule	*r;
	struct ip	*h = mtod(m, struct ip *);
	int		 mff = (ntohs(h->ip_off) & IP_MF);
	int		 hlen = h->ip_hl << 2;
	u_int16_t	 fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
	u_int16_t	 max;
	int		 ip_len;
	int		 tag = -1;
	int		 verdict;
	bool		 scrub_compat;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	/*
	 * Check if there are any scrub rules, matching or not.
	 * Lack of scrub rules means:
	 *  - enforced packet normalization operation just like in OpenBSD
	 *  - fragment reassembly depends on V_pf_status.reass
	 * With scrub rules:
	 *  - packet normalization is performed if there is a matching scrub rule
	 *  - fragment reassembly is performed if the matching rule has no
	 *    PFRULE_FRAGMENT_NOREASS flag
	 */
	scrub_compat = (r != NULL);
	while (r != NULL) {
		pf_counter_u64_add(&r->evaluations, 1);
		if (pfi_kkif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != pd->dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != h->ip_p)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip_src.s_addr, AF_INET,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip_dst.s_addr, AF_INET,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (r->match_tag && !pf_match_tag(m, r, &tag,
		    pd->pf_mtag ? pd->pf_mtag->tag : 0))
			r = TAILQ_NEXT(r, entries);
		else
			break;
	}

	if (scrub_compat) {
		/* With scrub rules present IPv4 normalization happens only
		 * if one of the rules has matched and it's not a "no scrub"
		 * rule */
		if (r == NULL || r->action == PF_NOSCRUB)
			return (PF_PASS);

		pf_counter_u64_critical_enter();
		pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
		pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
		pf_counter_u64_critical_exit();
		pf_rule_to_actions(r, &pd->act);
	}

	/* Check for illegal packets */
	if (hlen < (int)sizeof(struct ip)) {
		REASON_SET(reason, PFRES_NORM);
		goto drop;
	}

	if (hlen > ntohs(h->ip_len)) {
		REASON_SET(reason, PFRES_NORM);
		goto drop;
	}

	/* Clear IP_DF if the rule uses the no-df option or we're in no-df mode */
	if (((!scrub_compat && V_pf_status.reass & PF_REASS_NODF) ||
	    (r != NULL && r->rule_flag & PFRULE_NODF)) &&
	    (h->ip_off & htons(IP_DF))) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}
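
	/*
	 * Note: pf_cksum_fixup() applies an incremental checksum update
	 * in the style of RFC 1624, folding only the changed 16-bit word
	 * into ip_sum instead of re-checksumming the whole header.
	 */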

	/* We will need other tests here */
	if (!fragoff && !mff)
		goto no_fragment;

	/* We're dealing with a fragment now.  Don't allow fragments
	 * with IP_DF to enter the cache.  If the flag was cleared by
	 * no-df above, fine.  Otherwise drop it.
	 */
	if (h->ip_off & htons(IP_DF)) {
		DPFPRINTF(("IP_DF\n"));
		goto bad;
	}

	ip_len = ntohs(h->ip_len) - hlen;

	/* All fragments are 8 byte aligned */
	if (mff && (ip_len & 0x7)) {
		DPFPRINTF(("mff and %d\n", ip_len));
		goto bad;
	}

	/* Respect maximum length */
	if (fragoff + ip_len > IP_MAXPACKET) {
		DPFPRINTF(("max packet %d\n", fragoff + ip_len));
		goto bad;
	}

	if ((!scrub_compat && V_pf_status.reass) ||
	    (r != NULL && !(r->rule_flag & PFRULE_FRAGMENT_NOREASS))) {
		max = fragoff + ip_len;

		/* Fully buffer all of the fragments
		 * Might return a completely reassembled mbuf, or NULL */
		PF_FRAG_LOCK();
		DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
		verdict = pf_reassemble(m0, h, pd->dir, reason);
		PF_FRAG_UNLOCK();

		if (verdict != PF_PASS)
			return (PF_DROP);

		m = *m0;
		if (m == NULL)
			return (PF_DROP);

		h = mtod(m, struct ip *);

no_fragment:
		/* At this point, only IP_DF is allowed in ip_off */
		if (h->ip_off & ~htons(IP_DF)) {
			u_int16_t ip_off = h->ip_off;

			h->ip_off &= htons(IP_DF);
			h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
		}
	}

	return (PF_PASS);

bad:
	DPFPRINTF(("dropping bad fragment\n"));
	REASON_SET(reason, PFRES_FRAG);
drop:
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET, PF_DROP, *reason, r, NULL, NULL, pd, 1);

	return (PF_DROP);
}
#endif

#ifdef INET6
int
pf_normalize_ip6(struct mbuf **m0, struct pfi_kkif *kif,
    u_short *reason, struct pf_pdesc *pd)
{
	struct mbuf	*m = *m0;
	struct pf_krule	*r;
	struct ip6_hdr	*h = mtod(m, struct ip6_hdr *);
	int		 extoff;
	int		 off;
	struct ip6_ext	 ext;
	struct ip6_opt	 opt;
	struct ip6_frag	 frag;
	u_int32_t	 plen;
	int		 optend;
	int		 ooff;
	u_int8_t	 proto;
	int		 terminal;
	bool		 scrub_compat;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	/*
	 * Check if there are any scrub rules, matching or not.
	 * Lack of scrub rules means:
	 *  - enforced packet normalization operation just like in OpenBSD
	 * With scrub rules:
	 *  - packet normalization is performed if there is a matching scrub rule
	 * XXX: Fragment reassembly is always performed for IPv6!
	 */
	scrub_compat = (r != NULL);
	while (r != NULL) {
		pf_counter_u64_add(&r->evaluations, 1);
		if (pfi_kkif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != pd->dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET6)
			r = r->skip[PF_SKIP_AF].ptr;
#if 0 /* header chain! */
		else if (r->proto && r->proto != h->ip6_nxt)
			r = r->skip[PF_SKIP_PROTO].ptr;
#endif
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip6_src, AF_INET6,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip6_dst, AF_INET6,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else
			break;
	}

	if (scrub_compat) {
		/* With scrub rules present IPv6 normalization happens only
		 * if one of the rules has matched and it's not a "no scrub"
		 * rule */
		if (r == NULL || r->action == PF_NOSCRUB)
			return (PF_PASS);

		pf_counter_u64_critical_enter();
		pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
		pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
		pf_counter_u64_critical_exit();
		pf_rule_to_actions(r, &pd->act);
	}

	/* Check for illegal packets */
	if (sizeof(struct ip6_hdr) + IPV6_MAXPACKET < m->m_pkthdr.len)
		goto drop;

again:
	h = mtod(m, struct ip6_hdr *);
	plen = ntohs(h->ip6_plen);
	/* jumbo payload option not supported */
	if (plen == 0)
		goto drop;

	extoff = 0;
	off = sizeof(struct ip6_hdr);
	proto = h->ip6_nxt;
	terminal = 0;
	do {
		switch (proto) {
		case IPPROTO_FRAGMENT:
			goto fragment;
			break;
		case IPPROTO_AH:
		case IPPROTO_ROUTING:
		case IPPROTO_DSTOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			extoff = off;
			if (proto == IPPROTO_AH)
				off += (ext.ip6e_len + 2) * 4;
			else
				off += (ext.ip6e_len + 1) * 8;
			proto = ext.ip6e_nxt;
			break;
		case IPPROTO_HOPOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			extoff = off;
			optend = off + (ext.ip6e_len + 1) * 8;
			ooff = off + sizeof(ext);
			do {
				if (!pf_pull_hdr(m, ooff, &opt.ip6o_type,
				    sizeof(opt.ip6o_type), NULL, NULL,
				    AF_INET6))
					goto shortpkt;
				if (opt.ip6o_type == IP6OPT_PAD1) {
					ooff++;
					continue;
				}
				if (!pf_pull_hdr(m, ooff, &opt, sizeof(opt),
				    NULL, NULL, AF_INET6))
					goto shortpkt;
				if (ooff + sizeof(opt) + opt.ip6o_len > optend)
					goto drop;
				if (opt.ip6o_type == IP6OPT_JUMBO)
					goto drop;
				ooff += sizeof(opt) + opt.ip6o_len;
			} while (ooff < optend);

			off = optend;
			proto = ext.ip6e_nxt;
			break;
		default:
			terminal = 1;
			break;
		}
	} while (!terminal);

	if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
		goto shortpkt;

	return (PF_PASS);

fragment:
	if (pd->flags & PFDESC_IP_REAS)
		return (PF_DROP);
	if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
		goto shortpkt;

	if (!pf_pull_hdr(m, off, &frag, sizeof(frag), NULL, NULL, AF_INET6))
		goto shortpkt;

	/* Offset now points to data portion. */
	off += sizeof(frag);

	/* Returns PF_DROP or *m0 is NULL or completely reassembled mbuf. */
	if (pf_reassemble6(m0, h, &frag, off, extoff, reason) != PF_PASS)
		return (PF_DROP);
	m = *m0;
	if (m == NULL)
		return (PF_DROP);

	pd->flags |= PFDESC_IP_REAS;
	goto again;

shortpkt:
	REASON_SET(reason, PFRES_SHORT);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET6, PF_DROP, *reason, r, NULL, NULL, pd, 1);
	return (PF_DROP);

drop:
	REASON_SET(reason, PFRES_NORM);
	if (r != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET6, PF_DROP, *reason, r, NULL, NULL, pd, 1);
	return (PF_DROP);
}
#endif /* INET6 */

int
pf_normalize_tcp(struct pfi_kkif *kif, struct mbuf *m, int ipoff,
    int off, void *h, struct pf_pdesc *pd)
{
	struct pf_krule	*r, *rm = NULL;
	struct tcphdr	*th = &pd->hdr.tcp;
	int		 rewrite = 0;
	u_short		 reason;
	u_int16_t	 flags;
	sa_family_t	 af = pd->af;
	int		 srs;

	PF_RULES_RASSERT();

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	/* Check if there are any scrub rules.  Lack of scrub rules means
	 * enforced packet normalization operation just like in OpenBSD. */
	srs = (r != NULL);
	while (r != NULL) {
		pf_counter_u64_add(&r->evaluations, 1);
		if (pfi_kkif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != pd->dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (r->src.port_op && !pf_match_port(r->src.port_op,
		    r->src.port[0], r->src.port[1], th->th_sport))
			r = r->skip[PF_SKIP_SRC_PORT].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
		    r->dst.port[0], r->dst.port[1], th->th_dport))
			r = r->skip[PF_SKIP_DST_PORT].ptr;
		else if (r->os_fingerprint != PF_OSFP_ANY && !pf_osfp_match(
		    pf_osfp_fingerprint(pd, m, off, th),
		    r->os_fingerprint))
			r = TAILQ_NEXT(r, entries);
		else {
			rm = r;
			break;
		}
	}

	if (srs) {
		/* With scrub rules present TCP normalization happens only
		 * if one of the rules has matched and it's not a "no scrub"
		 * rule */
		if (rm == NULL || rm->action == PF_NOSCRUB)
			return (PF_PASS);

		pf_counter_u64_critical_enter();
		pf_counter_u64_add_protected(&r->packets[pd->dir == PF_OUT], 1);
		pf_counter_u64_add_protected(&r->bytes[pd->dir == PF_OUT], pd->tot_len);
		pf_counter_u64_critical_exit();
		pf_rule_to_actions(rm, &pd->act);
	}

	if (rm && rm->rule_flag & PFRULE_REASSEMBLE_TCP)
		pd->flags |= PFDESC_TCP_NORM;

	flags = tcp_get_flags(th);
	if (flags & TH_SYN) {
		/* Illegal packet */
		if (flags & TH_RST)
			goto tcp_drop;

		if (flags & TH_FIN)
			goto tcp_drop;
	} else {
		/* Illegal packet */
		if (!(flags & (TH_ACK|TH_RST)))
			goto tcp_drop;
	}

	if (!(flags & TH_ACK)) {
		/* These flags are only valid if ACK is set */
		if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
			goto tcp_drop;
	}

	/* Check for illegal header length */
	if (th->th_off < (sizeof(struct tcphdr) >> 2))
		goto tcp_drop;

	/* If flags changed, or reserved data set, then adjust */
	if (flags != tcp_get_flags(th) ||
	    (tcp_get_flags(th) & (TH_RES1|TH_RES2|TH_RES3)) != 0) {
		u_int16_t	ov, nv;

		ov = *(u_int16_t *)(&th->th_ack + 1);
		flags &= ~(TH_RES1 | TH_RES2 | TH_RES3);
		tcp_set_flags(th, flags);
		nv = *(u_int16_t *)(&th->th_ack + 1);

		th->th_sum = pf_proto_cksum_fixup(m, th->th_sum, ov, nv, 0);
		rewrite = 1;
	}

	/* Remove urgent pointer, if TH_URG is not set */
	if (!(flags & TH_URG) && th->th_urp) {
		th->th_sum = pf_proto_cksum_fixup(m, th->th_sum, th->th_urp,
		    0, 0);
		th->th_urp = 0;
		rewrite = 1;
	}

	/* copy back packet headers if we sanitized */
	if (rewrite)
		m_copyback(m, off, sizeof(*th), (caddr_t)th);

	return (PF_PASS);

tcp_drop:
	REASON_SET(&reason, PFRES_NORM);
	if (rm != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET, PF_DROP, reason, r, NULL, NULL, pd, 1);
	return (PF_DROP);
}

int
pf_normalize_tcp_init(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *src, struct pf_state_peer *dst)
{
	u_int32_t tsval, tsecr;
	u_int8_t hdr[60];
	u_int8_t *opt;

	KASSERT((src->scrub == NULL),
	    ("pf_normalize_tcp_init: src->scrub != NULL"));

	src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
	if (src->scrub == NULL)
		return (1);

	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		struct ip *h = mtod(m, struct ip *);
		src->scrub->pfss_ttl = h->ip_ttl;
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
		src->scrub->pfss_ttl = h->ip6_hlim;
		break;
	}
#endif /* INET6 */
	}

	/*
	 * All normalizations below are only begun if we see the start of
	 * the connection.  They must all set an enabled bit in pfss_flags.
	 */
	if ((th->th_flags & TH_SYN) == 0)
		return (0);

	if (th->th_off > (sizeof(struct tcphdr) >> 2) && src->scrub &&
	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					src->scrub->pfss_flags |=
					    PFSS_TIMESTAMP;
					src->scrub->pfss_ts_mod =
					    htonl(arc4random());

					/* note PFSS_PAWS not set yet */
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					src->scrub->pfss_tsval0 = ntohl(tsval);
					src->scrub->pfss_tsval = ntohl(tsval);
					src->scrub->pfss_tsecr = ntohl(tsecr);
					getmicrouptime(&src->scrub->pfss_last);
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
	}

	return (0);
}

void
pf_normalize_tcp_cleanup(struct pf_kstate *state)
{
	/* XXX Note: this also cleans up SCTP. */
	uma_zfree(V_pf_state_scrub_z, state->src.scrub);
	uma_zfree(V_pf_state_scrub_z, state->dst.scrub);

	/* Someday... flush the TCP segment reassembly descriptors. */
}

int
pf_normalize_sctp_init(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct pf_state_peer *src, struct pf_state_peer *dst)
{
	src->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
	if (src->scrub == NULL)
		return (1);

	dst->scrub = uma_zalloc(V_pf_state_scrub_z, M_ZERO | M_NOWAIT);
	if (dst->scrub == NULL) {
		/* Free the scrub allocation, not the embedded peer itself. */
		uma_zfree(V_pf_state_scrub_z, src->scrub);
		src->scrub = NULL;
		return (1);
	}

	dst->scrub->pfss_v_tag = pd->sctp_initiate_tag;

	return (0);
}

int
pf_normalize_tcp_stateful(struct mbuf *m, int off, struct pf_pdesc *pd,
    u_short *reason, struct tcphdr *th, struct pf_kstate *state,
    struct pf_state_peer *src, struct pf_state_peer *dst, int *writeback)
{
	struct timeval uptime;
	u_int32_t tsval, tsecr;
	u_int tsval_from_last;
	u_int8_t hdr[60];
	u_int8_t *opt;
	int copyback = 0;
	int got_ts = 0;
	size_t startoff;

	KASSERT((src->scrub || dst->scrub),
	    ("%s: src->scrub && dst->scrub!", __func__));

	/*
	 * Enforce the minimum TTL seen for this connection.  Negate a common
	 * technique to evade an intrusion detection system and confuse
	 * firewall state code.
	 */
	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		if (src->scrub) {
			struct ip *h = mtod(m, struct ip *);
			if (h->ip_ttl > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip_ttl;
			h->ip_ttl = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		if (src->scrub) {
			struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
			if (h->ip6_hlim > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip6_hlim;
			h->ip6_hlim = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET6 */
	}

	if (th->th_off > (sizeof(struct tcphdr) >> 2) &&
	    ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
	    (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			startoff = opt - (hdr + sizeof(struct tcphdr));
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				/* Modulate the timestamps.  Can be used for
				 * NAT detection, OS uptime determination or
				 * reboot detection.
				 */

				if (got_ts) {
					/* Huh?  Multiple timestamps!? */
					if (V_pf_status.debug >= PF_DEBUG_MISC) {
						DPFPRINTF(("multiple TS??\n"));
						pf_print_state(state);
						printf("\n");
					}
					REASON_SET(reason, PFRES_TS);
					return (PF_DROP);
				}
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					memcpy(&tsval, &opt[2],
					    sizeof(u_int32_t));
					if (tsval && src->scrub &&
					    (src->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						tsval = ntohl(tsval);
						pf_patch_32_unaligned(m,
						    &th->th_sum,
						    &opt[2],
						    htonl(tsval +
						    src->scrub->pfss_ts_mod),
						    PF_ALGNMNT(startoff),
						    0);
						copyback = 1;
					}

					/* Modulate TS reply iff valid (!0) */
					memcpy(&tsecr, &opt[6],
					    sizeof(u_int32_t));
					if (tsecr && dst->scrub &&
					    (dst->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						tsecr = ntohl(tsecr)
						    - dst->scrub->pfss_ts_mod;
						pf_patch_32_unaligned(m,
						    &th->th_sum,
						    &opt[6],
						    htonl(tsecr),
						    PF_ALGNMNT(startoff),
						    0);
						copyback = 1;
					}
					got_ts = 1;
				}
				/* FALLTHROUGH */
			default:
				hlen -= MAX(opt[1], 2);
				opt += MAX(opt[1], 2);
				break;
			}
		}
		if (copyback) {
			/* Copyback the options, caller copies back header */
			*writeback = 1;
			m_copyback(m, off + sizeof(struct tcphdr),
			    (th->th_off << 2) - sizeof(struct tcphdr), hdr +
			    sizeof(struct tcphdr));
		}
	}

	/*
	 * Must invalidate PAWS checks on connections idle for too long.
	 * The fastest allowed timestamp clock is 1ms.  That turns out to
	 * be about 24 days before it wraps.  XXX Right now our lowerbound
	 * TS echo check only works for the first 12 days of a connection
	 * when the TS has exhausted half its 32bit space
	 */
#define TS_MAX_IDLE	(24*24*60*60)
#define TS_MAX_CONN	(12*24*60*60)	/* XXX remove when better tsecr check */
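
/*
 * Sanity check on the figures above: a 1 kHz timestamp clock advances
 * 86400000 ticks per day, so the 32 bit space of 4294967296 ticks wraps
 * after about 49.7 days; half the space, the usable PAWS window, is
 * consumed after roughly 24.8 days, hence the 24 day idle cutoff.
 */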
1759
1760 getmicrouptime(&uptime);
1761 if (src->scrub && (src->scrub->pfss_flags & PFSS_PAWS) &&
1762 (uptime.tv_sec - src->scrub->pfss_last.tv_sec > TS_MAX_IDLE ||
1763 time_uptime - (state->creation / 1000) > TS_MAX_CONN)) {
1764 if (V_pf_status.debug >= PF_DEBUG_MISC) {
1765 DPFPRINTF(("src idled out of PAWS\n"));
1766 pf_print_state(state);
1767 printf("\n");
1768 }
1769 src->scrub->pfss_flags = (src->scrub->pfss_flags & ~PFSS_PAWS)
1770 | PFSS_PAWS_IDLED;
1771 }
1772 if (dst->scrub && (dst->scrub->pfss_flags & PFSS_PAWS) &&
1773 uptime.tv_sec - dst->scrub->pfss_last.tv_sec > TS_MAX_IDLE) {
1774 if (V_pf_status.debug >= PF_DEBUG_MISC) {
1775 DPFPRINTF(("dst idled out of PAWS\n"));
1776 pf_print_state(state);
1777 printf("\n");
1778 }
1779 dst->scrub->pfss_flags = (dst->scrub->pfss_flags & ~PFSS_PAWS)
1780 | PFSS_PAWS_IDLED;
1781 }

	if (got_ts && src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/* Validate that the timestamps are "in-window".
		 * RFC1323 describes TCP Timestamp options that allow
		 * measurement of RTT (round trip time) and PAWS
		 * (protection against wrapped sequence numbers).  PAWS
		 * gives us a set of rules for rejecting packets on
		 * long fat pipes (packets that were somehow delayed
		 * in transit longer than the time it took to send the
		 * full TCP sequence space of 4GB).  We can use these
		 * rules and infer a few others that will let us treat
		 * the 32bit timestamp and the 32bit echoed timestamp
		 * as sequence numbers to prevent a blind attacker from
		 * inserting packets into a connection.
		 *
		 * RFC1323 tells us:
		 *  - The timestamp on this packet must be greater than
		 *    or equal to the last value echoed by the other
		 *    endpoint.  The RFC says such packets will be
		 *    discarded since they are dups that have already
		 *    been acked.  This gives us a lowerbound on the
		 *    timestamp.
		 *        timestamp >= other last echoed timestamp
		 *  - The timestamp will be less than or equal to
		 *    the last timestamp plus the time between the
		 *    last packet and now.  The RFC caps the clock
		 *    rate at one tick per millisecond.  We will allow
		 *    clocks to be up to 10% fast and will allow a
		 *    total difference of 30 seconds due to a route
		 *    change.  And this gives us an upperbound on the
		 *    timestamp.
		 *        timestamp <= last timestamp + max ticks
		 *    We have to be careful here.  Windows will send an
		 *    initial timestamp of zero and then initialize it
		 *    to a random value after the 3whs; presumably to
		 *    avoid a DoS by having to call an expensive RNG
		 *    during a SYN flood.  Proof MS has at least one
		 *    good security geek.
		 *
		 *  - The TCP timestamp option must also echo the other
		 *    endpoint's timestamp.  The timestamp echoed is the
		 *    one carried on the earliest unacknowledged segment
		 *    on the left edge of the sequence window.  The RFC
		 *    states that the host will reject any echoed
		 *    timestamps that were larger than any ever sent.
		 *    This gives us an upperbound on the TS echo.
		 *        tsecr <= largest_tsval
		 *  - The lowerbound on the TS echo is a little more
		 *    tricky to determine.  The other endpoint's echoed
		 *    values will not decrease.  But there may be
		 *    network conditions that re-order packets and
		 *    cause our view of them to decrease.  For now the
		 *    only lowerbound we can safely determine is that
		 *    the TS echo will never be less than the original
		 *    TS.  XXX There is probably a better lowerbound.
		 *    Remove TS_MAX_CONN with better lowerbound check.
		 *        tsecr >= other original TS
		 *
		 * It is also important to note that the fastest
		 * timestamp clock of 1ms will consume half its 32bit
		 * space, the PAWS comparison horizon, in about 24
		 * days.  So we just disable TS checking after 24 days
		 * of idle time.  We actually must use a 12d connection
		 * limit until we can come up with a better lowerbound
		 * to the TS echo check.
		 */
		struct timeval delta_ts;
		int ts_fudge;

		/*
		 * PFTM_TS_DIFF is how many seconds of leeway to allow
		 * a host's timestamp, which may lag if the previous
		 * packet got delayed in transit for much longer than
		 * this packet.
		 */
		if ((ts_fudge = state->rule.ptr->timeout[PFTM_TS_DIFF]) == 0)
			ts_fudge = V_pf_default_rule.timeout[PFTM_TS_DIFF];

		/* Calculate max ticks since the last timestamp */
#define TS_MAXFREQ	1100		/* RFC max TS freq of 1kHz + 10% skew */
#define TS_MICROSECS	1000000		/* microseconds per second */
		delta_ts = uptime;
		timevalsub(&delta_ts, &src->scrub->pfss_last);
		tsval_from_last = (delta_ts.tv_sec + ts_fudge) * TS_MAXFREQ;
		tsval_from_last += delta_ts.tv_usec / (TS_MICROSECS/TS_MAXFREQ);

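		/*
		 * Worked example for the bound (assuming the common 30 s
		 * tcp.tsdiff default): after 2.5 s of idle time tsval may
		 * legitimately advance by at most (2 + 30) * 1100 +
		 * 500000 / 909 = 35,750 ticks.  The checks below apply
		 * the four PAWS inequalities from the comment above with
		 * wrap-safe sequence-space comparisons; any failure marks
		 * a stale or forged segment.
		 */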
		if ((src->state >= TCPS_ESTABLISHED &&
		    dst->state >= TCPS_ESTABLISHED) &&
		    (SEQ_LT(tsval, dst->scrub->pfss_tsecr) ||
		    SEQ_GT(tsval, src->scrub->pfss_tsval + tsval_from_last) ||
		    (tsecr && (SEQ_GT(tsecr, dst->scrub->pfss_tsval) ||
		    SEQ_LT(tsecr, dst->scrub->pfss_tsval0))))) {
			/* Bad RFC1323 implementation or an insertion attack.
			 *
			 * - Solaris 2.6 and 2.7 are known to send another ACK
			 *   after the FIN,FIN|ACK,ACK closing that carries
			 *   an old timestamp.
			 */

			DPFPRINTF(("Timestamp failed %c%c%c%c\n",
			    SEQ_LT(tsval, dst->scrub->pfss_tsecr) ? '0' : ' ',
			    SEQ_GT(tsval, src->scrub->pfss_tsval +
			    tsval_from_last) ? '1' : ' ',
			    SEQ_GT(tsecr, dst->scrub->pfss_tsval) ? '2' : ' ',
			    SEQ_LT(tsecr, dst->scrub->pfss_tsval0) ? '3' : ' '));
			DPFPRINTF((" tsval: %u tsecr: %u +ticks: %u "
			    "idle: %jus %lums\n",
			    tsval, tsecr, tsval_from_last,
			    (uintmax_t)delta_ts.tv_sec,
			    delta_ts.tv_usec / 1000));
			DPFPRINTF((" src->tsval: %u tsecr: %u\n",
			    src->scrub->pfss_tsval, src->scrub->pfss_tsecr));
			DPFPRINTF((" dst->tsval: %u tsecr: %u tsval0: %u"
			    "\n", dst->scrub->pfss_tsval,
			    dst->scrub->pfss_tsecr, dst->scrub->pfss_tsval0));
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}

		/* XXX I'd really like to require tsecr but it's optional */

	} else if (!got_ts && (th->th_flags & TH_RST) == 0 &&
	    ((src->state == TCPS_ESTABLISHED && dst->state == TCPS_ESTABLISHED)
	    || pd->p_len > 0 || (th->th_flags & TH_SYN)) &&
	    src->scrub && dst->scrub &&
	    (src->scrub->pfss_flags & PFSS_PAWS) &&
	    (dst->scrub->pfss_flags & PFSS_PAWS)) {
		/* Didn't send a timestamp.  Timestamps aren't really useful
		 * when:
		 *  - connection opening or closing (often not even sent).
		 *    But we must not let an attacker put a FIN on a
		 *    data packet to sneak it through our ESTABLISHED check.
		 *  - on a TCP reset.  The RFC suggests not even looking at TS.
		 *  - on an empty ACK.  The TS will not be echoed so it will
		 *    probably not help keep the RTT calculation in sync and
		 *    there isn't as much danger when the sequence numbers
		 *    got wrapped.  So some stacks don't include TS on empty
		 *    ACKs :-(
		 *
		 * To minimize the disruption to mostly RFC1323 conformant
		 * stacks, we will only require timestamps on data packets.
		 *
		 * And what do ya know, we cannot require timestamps on data
		 * packets.  There appear to be devices that do legitimate
		 * TCP connection hijacking.  There are HTTP devices that allow
		 * a 3whs (with timestamps) and then buffer the HTTP request.
		 * If the intermediate device has the HTTP response cached, it
		 * will spoof the response but not bother timestamping its
		 * packets.  So we can look for the presence of a timestamp in
		 * the first data packet and if there, require it in all future
		 * packets.
		 */

		if (pd->p_len > 0 && (src->scrub->pfss_flags & PFSS_DATA_TS)) {
			/*
			 * Hey!  Someone tried to sneak a packet in.  Or the
			 * stack changed its RFC1323 behavior?!?!
			 */
			if (V_pf_status.debug >= PF_DEBUG_MISC) {
				DPFPRINTF(("Did not receive expected RFC1323 "
				    "timestamp\n"));
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
			REASON_SET(reason, PFRES_TS);
			return (PF_DROP);
		}
	}

	/*
	 * We will note if a host sends its data packets with or without
	 * timestamps, and require all data packets to contain a timestamp
	 * if the first one does.  PAWS implicitly requires that all data
	 * packets be timestamped.  But I think there are middle-man devices
	 * that hijack TCP streams immediately after the 3whs and don't
	 * timestamp their packets (seen in a WWW accelerator or cache).
	 */
	if (pd->p_len > 0 && src->scrub && (src->scrub->pfss_flags &
	    (PFSS_TIMESTAMP|PFSS_DATA_TS|PFSS_DATA_NOTS)) == PFSS_TIMESTAMP) {
		if (got_ts)
			src->scrub->pfss_flags |= PFSS_DATA_TS;
		else {
			src->scrub->pfss_flags |= PFSS_DATA_NOTS;
			if (V_pf_status.debug >= PF_DEBUG_MISC && dst->scrub &&
			    (dst->scrub->pfss_flags & PFSS_TIMESTAMP)) {
				/* Don't warn if other host rejected RFC1323 */
				DPFPRINTF(("Broken RFC1323 stack did not "
				    "timestamp data packet. Disabled PAWS "
				    "security.\n"));
				pf_print_state(state);
				pf_print_flags(th->th_flags);
				printf("\n");
			}
		}
	}

	/*
	 * Update PAWS values
	 */
	if (got_ts && src->scrub && PFSS_TIMESTAMP == (src->scrub->pfss_flags &
	    (PFSS_PAWS_IDLED|PFSS_TIMESTAMP))) {
		getmicrouptime(&src->scrub->pfss_last);
		if (SEQ_GEQ(tsval, src->scrub->pfss_tsval) ||
		    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
			src->scrub->pfss_tsval = tsval;

		if (tsecr) {
			if (SEQ_GEQ(tsecr, src->scrub->pfss_tsecr) ||
			    (src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_tsecr = tsecr;

			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0 &&
			    (SEQ_LT(tsval, src->scrub->pfss_tsval0) ||
			    src->scrub->pfss_tsval0 == 0)) {
				/* tsval0 MUST be the lowest timestamp */
				src->scrub->pfss_tsval0 = tsval;
			}

			/* Only fully initialized after a TS gets echoed */
			if ((src->scrub->pfss_flags & PFSS_PAWS) == 0)
				src->scrub->pfss_flags |= PFSS_PAWS;
		}
	}

	/* I have a dream....  TCP segment reassembly.... */
	return (0);
}
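
/*
 * Aside: the PAWS checks above lean on the wrap-safe sequence comparisons
 * from <netinet/tcp_seq.h>.  A minimal sketch of the idea (illustrative
 * only, not compiled here):
 */
#if 0
#define SEQ_LT(a, b)	((int)((a) - (b)) < 0)	/* as in tcp_seq.h */
/*
 * SEQ_LT(10, 20) is true as expected, but so is
 * SEQ_LT(0xfffffff0, 0x10): 0xfffffff0 - 0x10 = 0xffffffe0, which is
 * negative as a signed 32-bit value, so a timestamp just before the wrap
 * still compares "less than" one just after it.
 */
#endif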
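/*
 * Clamp the MSS option in a TCP header to the configured max-mss value.
 * E.g. with a "max-mss 1440" scrub option, a SYN advertising an MSS of
 * 1460 is rewritten to 1440 and the checksum patched up in place.
 */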
int
pf_normalize_mss(struct mbuf *m, int off, struct pf_pdesc *pd)
{
	struct tcphdr *th = &pd->hdr.tcp;
	u_int16_t *mss;
	int thoff;
	int opt, cnt, optlen = 0;
	u_char opts[TCP_MAXOLEN];
	u_char *optp = opts;
	size_t startoff;

	thoff = th->th_off << 2;
	cnt = thoff - sizeof(struct tcphdr);

	if (cnt > 0 && !pf_pull_hdr(m, off + sizeof(*th), opts, cnt,
	    NULL, NULL, pd->af))
		return (0);

	for (; cnt > 0; cnt -= optlen, optp += optlen) {
		startoff = optp - opts;
		opt = optp[0];
		if (opt == TCPOPT_EOL)
			break;
		if (opt == TCPOPT_NOP)
			optlen = 1;
		else {
			if (cnt < 2)
				break;
			optlen = optp[1];
			if (optlen < 2 || optlen > cnt)
				break;
		}
		switch (opt) {
		case TCPOPT_MAXSEG:
			mss = (u_int16_t *)(optp + 2);
			if ((ntohs(*mss)) > pd->act.max_mss) {
				pf_patch_16_unaligned(m,
				    &th->th_sum,
				    mss, htons(pd->act.max_mss),
				    PF_ALGNMNT(startoff),
				    0);
				m_copyback(m, off + sizeof(*th),
				    thoff - sizeof(*th), opts);
				m_copyback(m, off, sizeof(*th), (caddr_t)th);
			}
			break;
		default:
			break;
		}
	}

	return (0);
}
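/*
 * Walk all chunks in an SCTP packet, validating their lengths and
 * recording the chunk types seen in pd->sctp_flags.  INIT/INIT ACK and
 * ASCONF chunks are handed to the multihome scanners.  Returns PF_PASS
 * or PF_DROP.
 */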
static int
pf_scan_sctp(struct mbuf *m, int ipoff, int off, struct pf_pdesc *pd,
    struct pfi_kkif *kif)
{
	struct sctp_chunkhdr ch = { };
	int chunk_off = sizeof(struct sctphdr);
	int chunk_start;
	int ret;

	while (off + chunk_off < pd->tot_len) {
		if (!pf_pull_hdr(m, off + chunk_off, &ch, sizeof(ch), NULL,
		    NULL, pd->af))
			return (PF_DROP);

		/* The length includes the header, so it must be at least 4. */
		if (ntohs(ch.chunk_length) < 4)
			return (PF_DROP);

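		/*
		 * chunk_length counts from the chunk header but excludes
		 * the inter-chunk padding; chunks are padded out to 4-byte
		 * boundaries (RFC 9260, Section 3.2), so step by the
		 * rounded-up length.
		 */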
		chunk_start = chunk_off;
		chunk_off += roundup(ntohs(ch.chunk_length), 4);

		switch (ch.chunk_type) {
		case SCTP_INITIATION:
		case SCTP_INITIATION_ACK: {
			struct sctp_init_chunk init;

			if (!pf_pull_hdr(m, off + chunk_start, &init,
			    sizeof(init), NULL, NULL, pd->af))
				return (PF_DROP);

			/*
			 * RFC 9260, Section 3.3.2, "The Initiate Tag is
			 * allowed to have any value except 0."
			 */
			if (init.init.initiate_tag == 0)
				return (PF_DROP);
			if (init.init.num_inbound_streams == 0)
				return (PF_DROP);
			if (init.init.num_outbound_streams == 0)
				return (PF_DROP);
			if (ntohl(init.init.a_rwnd) < SCTP_MIN_RWND)
				return (PF_DROP);

			/*
			 * RFC 9260, Section 3.1, INIT chunks MUST have zero
			 * verification tag.
			 */
			if (ch.chunk_type == SCTP_INITIATION &&
			    pd->hdr.sctp.v_tag != 0)
				return (PF_DROP);

			pd->sctp_initiate_tag = init.init.initiate_tag;

			if (ch.chunk_type == SCTP_INITIATION)
				pd->sctp_flags |= PFDESC_SCTP_INIT;
			else
				pd->sctp_flags |= PFDESC_SCTP_INIT_ACK;

			ret = pf_multihome_scan_init(m, off + chunk_start,
			    ntohs(init.ch.chunk_length), pd, kif);
			if (ret != PF_PASS)
				return (ret);

			break;
		}
		case SCTP_ABORT_ASSOCIATION:
			pd->sctp_flags |= PFDESC_SCTP_ABORT;
			break;
		case SCTP_SHUTDOWN:
		case SCTP_SHUTDOWN_ACK:
			pd->sctp_flags |= PFDESC_SCTP_SHUTDOWN;
			break;
		case SCTP_SHUTDOWN_COMPLETE:
			pd->sctp_flags |= PFDESC_SCTP_SHUTDOWN_COMPLETE;
			break;
		case SCTP_COOKIE_ECHO:
			pd->sctp_flags |= PFDESC_SCTP_COOKIE;
			break;
		case SCTP_COOKIE_ACK:
			pd->sctp_flags |= PFDESC_SCTP_COOKIE_ACK;
			break;
		case SCTP_DATA:
			pd->sctp_flags |= PFDESC_SCTP_DATA;
			break;
		case SCTP_HEARTBEAT_REQUEST:
			pd->sctp_flags |= PFDESC_SCTP_HEARTBEAT;
			break;
		case SCTP_HEARTBEAT_ACK:
			pd->sctp_flags |= PFDESC_SCTP_HEARTBEAT_ACK;
			break;
		case SCTP_ASCONF:
			pd->sctp_flags |= PFDESC_SCTP_ASCONF;

			ret = pf_multihome_scan_asconf(m, off + chunk_start,
			    ntohs(ch.chunk_length), pd, kif);
			if (ret != PF_PASS)
				return (ret);
			break;
		default:
			pd->sctp_flags |= PFDESC_SCTP_OTHER;
			break;
		}
	}

	/* Validate chunk lengths vs. packet length. */
	if (off + chunk_off != pd->tot_len)
		return (PF_DROP);

	/*
	 * INIT, INIT_ACK and SHUTDOWN_COMPLETE chunks must always be the
	 * only chunk in a packet.
	 */
	if ((pd->sctp_flags & PFDESC_SCTP_INIT) &&
	    (pd->sctp_flags & ~PFDESC_SCTP_INIT))
		return (PF_DROP);
	if ((pd->sctp_flags & PFDESC_SCTP_INIT_ACK) &&
	    (pd->sctp_flags & ~PFDESC_SCTP_INIT_ACK))
		return (PF_DROP);
	if ((pd->sctp_flags & PFDESC_SCTP_SHUTDOWN_COMPLETE) &&
	    (pd->sctp_flags & ~PFDESC_SCTP_SHUTDOWN_COMPLETE))
		return (PF_DROP);

	return (PF_PASS);
}
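/*
 * Normalize an SCTP packet: scan its chunks, run it past the scrub
 * ruleset, and enforce basic sanity (chunk area a multiple of 4 bytes,
 * INIT standing alone).  Returns PF_PASS or PF_DROP.
 */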
int
pf_normalize_sctp(int dir, struct pfi_kkif *kif, struct mbuf *m, int ipoff,
    int off, void *h, struct pf_pdesc *pd)
{
	struct pf_krule *r, *rm = NULL;
	struct sctphdr *sh = &pd->hdr.sctp;
	u_short reason;
	sa_family_t af = pd->af;
	int srs;

	PF_RULES_RASSERT();

	/* Unconditionally scan the SCTP packet, because we need to look for
	 * things like shutdown and asconf chunks. */
	if (pf_scan_sctp(m, ipoff, off, pd, kif) != PF_PASS)
		goto sctp_drop;

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	/* Check if there are any scrub rules.  Lack of scrub rules means
	 * enforced packet normalization, as in OpenBSD. */
	srs = (r != NULL);
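	/*
	 * Classic pf rule walk: each skip[] pointer jumps over the run of
	 * subsequent rules that share the same non-matching criterion, so
	 * a miss on e.g. the interface skips every rule on that interface.
	 */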
	while (r != NULL) {
		pf_counter_u64_add(&r->evaluations, 1);
		if (pfi_kkif_match(r->kif, kif) == r->ifnot)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
		    r->src.neg, kif, M_GETFIB(m)))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (r->src.port_op && !pf_match_port(r->src.port_op,
		    r->src.port[0], r->src.port[1], sh->src_port))
			r = r->skip[PF_SKIP_SRC_PORT].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
		    r->dst.neg, NULL, M_GETFIB(m)))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
		    r->dst.port[0], r->dst.port[1], sh->dest_port))
			r = r->skip[PF_SKIP_DST_PORT].ptr;
		else {
			rm = r;
			break;
		}
	}

	if (srs) {
		/* With scrub rules present SCTP normalization happens only
		 * if one of the rules has matched and it is not a "no scrub"
		 * rule. */
		if (rm == NULL || rm->action == PF_NOSCRUB)
			return (PF_PASS);

		pf_counter_u64_critical_enter();
		pf_counter_u64_add_protected(&r->packets[dir == PF_OUT], 1);
		pf_counter_u64_add_protected(&r->bytes[dir == PF_OUT], pd->tot_len);
		pf_counter_u64_critical_exit();
	}

	/* Verify we're a multiple of 4 bytes long */
	if ((pd->tot_len - off - sizeof(struct sctphdr)) % 4)
		goto sctp_drop;

	/* INIT chunk needs to be the only chunk */
	if (pd->sctp_flags & PFDESC_SCTP_INIT)
		if (pd->sctp_flags & ~PFDESC_SCTP_INIT)
			goto sctp_drop;

	return (PF_PASS);

sctp_drop:
	REASON_SET(&reason, PFRES_NORM);
	if (rm != NULL && r->log)
		PFLOG_PACKET(kif, m, AF_INET, PF_DROP, reason, r, NULL, NULL, pd,
		    1);

	return (PF_DROP);
}

#ifdef INET
void
pf_scrub_ip(struct mbuf **m0, struct pf_pdesc *pd)
{
	struct mbuf *m = *m0;
	struct ip *h = mtod(m, struct ip *);

	/* Clear IP_DF if no-df was requested */
	if (pd->act.flags & PFSTATE_NODF && h->ip_off & htons(IP_DF)) {
		u_int16_t ip_off = h->ip_off;

		h->ip_off &= htons(~IP_DF);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_off, h->ip_off, 0);
	}

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (pd->act.min_ttl && h->ip_ttl < pd->act.min_ttl) {
		u_int16_t ip_ttl = h->ip_ttl;

		h->ip_ttl = pd->act.min_ttl;
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_ttl, h->ip_ttl, 0);
	}

	/* Enforce tos */
	if (pd->act.flags & PFSTATE_SETTOS) {
		u_int16_t ov, nv;

		ov = *(u_int16_t *)h;
		h->ip_tos = pd->act.set_tos | (h->ip_tos & IPTOS_ECN_MASK);
		nv = *(u_int16_t *)h;

		h->ip_sum = pf_cksum_fixup(h->ip_sum, ov, nv, 0);
	}

	/* random-id, but not for fragments */
	if (pd->act.flags & PFSTATE_RANDOMID && !(h->ip_off & ~htons(IP_DF))) {
		uint16_t ip_id = h->ip_id;

		ip_fillid(h);
		h->ip_sum = pf_cksum_fixup(h->ip_sum, ip_id, h->ip_id, 0);
	}
}
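
/*
 * Aside: pf_cksum_fixup() above applies the incremental checksum update
 * from RFC 1624: with HC the old checksum, m the old 16-bit field and m'
 * the new one, HC' = ~(~HC + ~m + m').  One standalone rendering of the
 * arithmetic (illustrative only, not compiled here):
 */
#if 0
static uint16_t
cksum_fixup_sketch(uint16_t cksum, uint16_t old, uint16_t new)
{
	uint32_t l = cksum + old - new;	/* may borrow into the high word */

	l = (l >> 16) + (l & 0xffff);	/* fold the carry back in */
	return (l & 0xffff);
}
#endif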
#endif /* INET */

#ifdef INET6
void
pf_scrub_ip6(struct mbuf **m0, struct pf_pdesc *pd)
{
	struct mbuf *m = *m0;
	struct ip6_hdr *h = mtod(m, struct ip6_hdr *);

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (pd->act.min_ttl && h->ip6_hlim < pd->act.min_ttl)
		h->ip6_hlim = pd->act.min_ttl;

	/*
	 * Enforce tos.  ip6_flow is version(4) | traffic class(8) |
	 * flow label(20), so shifting the new TOS left by 20 bits places
	 * it in the traffic class field while preserving the ECN bits.
	 */
	if (pd->act.flags & PFSTATE_SETTOS) {
		h->ip6_flow &= IPV6_FLOWLABEL_MASK | IPV6_VERSION_MASK;
		h->ip6_flow |= htonl((pd->act.set_tos | IPV6_ECN(h)) << 20);
	}
}
#endif