1 /* $OpenBSD: pf.c,v 1.1197 2024/06/07 18:24:16 bluhm Exp $ */
2
3 /*
4 * Copyright (c) 2001 Daniel Hartmeier
5 * Copyright (c) 2002 - 2013 Henning Brauer <henning@openbsd.org>
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 * - Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * - Redistributions in binary form must reproduce the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer in the documentation and/or other materials provided
17 * with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
24 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
25 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
29 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 *
32 * Effort sponsored in part by the Defense Advanced Research Projects
33 * Agency (DARPA) and Air Force Research Laboratory, Air Force
34 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
35 *
36 */
37
38 #include "bpfilter.h"
39 #include "carp.h"
40 #include "pflog.h"
41 #include "pfsync.h"
42 #include "pflow.h"
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/mbuf.h>
47 #include <sys/filio.h>
48 #include <sys/socket.h>
49 #include <sys/socketvar.h>
50 #include <sys/kernel.h>
51 #include <sys/time.h>
52 #include <sys/pool.h>
53 #include <sys/proc.h>
54 #include <sys/rwlock.h>
55 #include <sys/syslog.h>
56
57 #include <crypto/sha2.h>
58
59 #include <net/if.h>
60 #include <net/if_var.h>
61 #include <net/if_types.h>
62 #include <net/route.h>
63 #include <net/toeplitz.h>
64
65 #include <netinet/in.h>
66 #include <netinet/in_var.h>
67 #include <netinet/ip.h>
68 #include <netinet/in_pcb.h>
69 #include <netinet/ip_var.h>
70 #include <netinet/ip_icmp.h>
71 #include <netinet/icmp_var.h>
72 #include <netinet/tcp.h>
73 #include <netinet/tcp_seq.h>
74 #include <netinet/tcp_timer.h>
75 #include <netinet/tcp_var.h>
76 #include <netinet/tcp_fsm.h>
77 #include <netinet/udp.h>
78 #include <netinet/udp_var.h>
79 #include <netinet/ip_divert.h>
80
81 #ifdef INET6
82 #include <netinet6/in6_var.h>
83 #include <netinet/ip6.h>
84 #include <netinet6/ip6_var.h>
85 #include <netinet/icmp6.h>
86 #include <netinet6/nd6.h>
87 #include <netinet6/ip6_divert.h>
88 #endif /* INET6 */
89
90 #include <net/pfvar.h>
91 #include <net/pfvar_priv.h>
92
93 #if NPFLOG > 0
94 #include <net/if_pflog.h>
95 #endif /* NPFLOG > 0 */
96
97 #if NPFLOW > 0
98 #include <net/if_pflow.h>
99 #endif /* NPFLOW > 0 */
100
101 #if NPFSYNC > 0
102 #include <net/if_pfsync.h>
103 #endif /* NPFSYNC > 0 */
104
105 /*
106 * Global variables
107 */
108 struct pf_state_tree pf_statetbl;
109 struct pf_queuehead pf_queues[2];
110 struct pf_queuehead *pf_queues_active;
111 struct pf_queuehead *pf_queues_inactive;
112
113 struct pf_status pf_status;
114
115 struct mutex pf_inp_mtx = MUTEX_INITIALIZER(IPL_SOFTNET);
116
117 int pf_hdr_limit = 20; /* arbitrary limit, tune in ddb */
118
119 SHA2_CTX pf_tcp_secret_ctx;
120 u_char pf_tcp_secret[16];
121 int pf_tcp_secret_init;
122 int pf_tcp_iss_off;
123
124 enum pf_test_status {
125 PF_TEST_FAIL = -1,
126 PF_TEST_OK,
127 PF_TEST_QUICK
128 };
129
130 struct pf_test_ctx {
131 struct pf_pdesc *pd;
132 struct pf_rule_actions act;
133 u_int8_t icmpcode;
134 u_int8_t icmptype;
135 int icmp_dir;
136 int state_icmp;
137 int tag;
138 u_short reason;
139 struct pf_rule_item *ri;
140 struct pf_src_node *sns[PF_SN_MAX];
141 struct pf_rule_slist rules;
142 struct pf_rule *nr;
143 struct pf_rule **rm;
144 struct pf_rule *a;
145 struct pf_rule **am;
146 struct pf_ruleset **rsm;
147 struct pf_ruleset *arsm;
148 struct pf_ruleset *aruleset;
149 struct tcphdr *th;
150 };
151
152 struct pool pf_src_tree_pl, pf_rule_pl, pf_queue_pl;
153 struct pool pf_state_pl, pf_state_key_pl, pf_state_item_pl;
154 struct pool pf_rule_item_pl, pf_sn_item_pl, pf_pktdelay_pl;
155
156 void pf_add_threshold(struct pf_threshold *);
157 int pf_check_threshold(struct pf_threshold *);
158 int pf_check_tcp_cksum(struct mbuf *, int, int,
159 sa_family_t);
160 __inline void pf_cksum_fixup(u_int16_t *, u_int16_t, u_int16_t,
161 u_int8_t);
162 void pf_cksum_fixup_a(u_int16_t *, const struct pf_addr *,
163 const struct pf_addr *, sa_family_t, u_int8_t);
164 int pf_modulate_sack(struct pf_pdesc *,
165 struct pf_state_peer *);
166 int pf_icmp_mapping(struct pf_pdesc *, u_int8_t, int *,
167 u_int16_t *, u_int16_t *);
168 int pf_change_icmp_af(struct mbuf *, int,
169 struct pf_pdesc *, struct pf_pdesc *,
170 struct pf_addr *, struct pf_addr *, sa_family_t,
171 sa_family_t);
172 int pf_translate_a(struct pf_pdesc *, struct pf_addr *,
173 struct pf_addr *);
174 void pf_translate_icmp(struct pf_pdesc *, struct pf_addr *,
175 u_int16_t *, struct pf_addr *, struct pf_addr *,
176 u_int16_t);
177 int pf_translate_icmp_af(struct pf_pdesc*, int, void *);
178 void pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t, int,
179 sa_family_t, struct pf_rule *, u_int);
180 void pf_detach_state(struct pf_state *);
181 struct pf_state_key *pf_state_key_attach(struct pf_state_key *,
182 struct pf_state *, int);
183 void pf_state_key_detach(struct pf_state *, int);
184 u_int32_t pf_tcp_iss(struct pf_pdesc *);
185 void pf_rule_to_actions(struct pf_rule *,
186 struct pf_rule_actions *);
187 int pf_test_rule(struct pf_pdesc *, struct pf_rule **,
188 struct pf_state **, struct pf_rule **,
189 struct pf_ruleset **, u_short *);
190 static __inline int pf_create_state(struct pf_pdesc *, struct pf_rule *,
191 struct pf_rule *, struct pf_rule *,
192 struct pf_state_key **, struct pf_state_key **,
193 int *, struct pf_state **, int,
194 struct pf_rule_slist *, struct pf_rule_actions *,
195 struct pf_src_node **);
196 static __inline int pf_state_key_addr_setup(struct pf_pdesc *, void *,
197 int, struct pf_addr *, int, struct pf_addr *,
198 int, int);
199 int pf_state_key_setup(struct pf_pdesc *, struct
200 pf_state_key **, struct pf_state_key **, int);
201 int pf_tcp_track_full(struct pf_pdesc *,
202 struct pf_state **, u_short *, int *, int);
203 int pf_tcp_track_sloppy(struct pf_pdesc *,
204 struct pf_state **, u_short *);
205 static __inline int pf_synproxy(struct pf_pdesc *, struct pf_state **,
206 u_short *);
207 int pf_test_state(struct pf_pdesc *, struct pf_state **,
208 u_short *);
209 int pf_icmp_state_lookup(struct pf_pdesc *,
210 struct pf_state_key_cmp *, struct pf_state **,
211 u_int16_t, u_int16_t, int, int *, int, int);
212 int pf_test_state_icmp(struct pf_pdesc *,
213 struct pf_state **, u_short *);
214 u_int16_t pf_calc_mss(struct pf_addr *, sa_family_t, int,
215 u_int16_t);
216 static __inline int pf_set_rt_ifp(struct pf_state *, struct pf_addr *,
217 sa_family_t, struct pf_src_node **);
218 struct pf_divert *pf_get_divert(struct mbuf *);
219 int pf_walk_option(struct pf_pdesc *, struct ip *,
220 int, int, u_short *);
221 int pf_walk_header(struct pf_pdesc *, struct ip *,
222 u_short *);
223 int pf_walk_option6(struct pf_pdesc *, struct ip6_hdr *,
224 int, int, u_short *);
225 int pf_walk_header6(struct pf_pdesc *, struct ip6_hdr *,
226 u_short *);
227 void pf_print_state_parts(struct pf_state *,
228 struct pf_state_key *, struct pf_state_key *);
229 int pf_addr_wrap_neq(struct pf_addr_wrap *,
230 struct pf_addr_wrap *);
231 int pf_compare_state_keys(struct pf_state_key *,
232 struct pf_state_key *, struct pfi_kif *, u_int);
233 u_int16_t pf_pkt_hash(sa_family_t, uint8_t,
234 const struct pf_addr *, const struct pf_addr *,
235 uint16_t, uint16_t);
236 int pf_find_state(struct pf_pdesc *,
237 struct pf_state_key_cmp *, struct pf_state **);
238 int pf_src_connlimit(struct pf_state **);
239 int pf_match_rcvif(struct mbuf *, struct pf_rule *);
240 enum pf_test_status pf_match_rule(struct pf_test_ctx *,
241 struct pf_ruleset *);
242 void pf_counters_inc(int, struct pf_pdesc *,
243 struct pf_state *, struct pf_rule *,
244 struct pf_rule *);
245
246 int pf_state_insert(struct pfi_kif *,
247 struct pf_state_key **, struct pf_state_key **,
248 struct pf_state *);
249
250 int pf_state_key_isvalid(struct pf_state_key *);
251 struct pf_state_key *pf_state_key_ref(struct pf_state_key *);
252 void pf_state_key_unref(struct pf_state_key *);
253 void pf_state_key_link_reverse(struct pf_state_key *,
254 struct pf_state_key *);
255 void pf_state_key_unlink_reverse(struct pf_state_key *);
256 void pf_state_key_link_inpcb(struct pf_state_key *,
257 struct inpcb *);
258 void pf_state_key_unlink_inpcb(struct pf_state_key *);
259 void pf_pktenqueue_delayed(void *);
260 int32_t pf_state_expires(const struct pf_state *, uint8_t);
261
262 #if NPFLOG > 0
263 void pf_log_matches(struct pf_pdesc *, struct pf_rule *,
264 struct pf_rule *, struct pf_ruleset *,
265 struct pf_rule_slist *);
266 #endif /* NPFLOG > 0 */
267
268 extern struct pool pfr_ktable_pl;
269 extern struct pool pfr_kentry_pl;
270
271 struct pf_pool_limit pf_pool_limits[PF_LIMIT_MAX] = {
272 { &pf_state_pl, PFSTATE_HIWAT, PFSTATE_HIWAT },
273 { &pf_src_tree_pl, PFSNODE_HIWAT, PFSNODE_HIWAT },
274 { &pf_frent_pl, PFFRAG_FRENT_HIWAT, PFFRAG_FRENT_HIWAT },
275 { &pfr_ktable_pl, PFR_KTABLE_HIWAT, PFR_KTABLE_HIWAT },
276 { &pfr_kentry_pl, PFR_KENTRY_HIWAT, PFR_KENTRY_HIWAT },
277 { &pf_pktdelay_pl, PF_PKTDELAY_MAXPKTS, PF_PKTDELAY_MAXPKTS },
278 { &pf_anchor_pl, PF_ANCHOR_HIWAT, PF_ANCHOR_HIWAT }
279 };
280
281 #define BOUND_IFACE(r, k) \
282 ((r)->rule_flag & PFRULE_IFBOUND) ? (k) : pfi_all
283
284 #define STATE_INC_COUNTERS(s) \
285 do { \
286 struct pf_rule_item *mrm; \
287 s->rule.ptr->states_cur++; \
288 s->rule.ptr->states_tot++; \
289 if (s->anchor.ptr != NULL) { \
290 s->anchor.ptr->states_cur++; \
291 s->anchor.ptr->states_tot++; \
292 } \
293 SLIST_FOREACH(mrm, &s->match_rules, entry) \
294 mrm->r->states_cur++; \
295 } while (0)
296
297 static __inline int pf_src_compare(struct pf_src_node *, struct pf_src_node *);
298 static inline int pf_state_compare_key(const struct pf_state_key *,
299 const struct pf_state_key *);
300 static inline int pf_state_compare_id(const struct pf_state *,
301 const struct pf_state *);
302 #ifdef INET6
303 static __inline void pf_cksum_uncover(u_int16_t *, u_int16_t, u_int8_t);
304 static __inline void pf_cksum_cover(u_int16_t *, u_int16_t, u_int8_t);
305 #endif /* INET6 */
306 static __inline void pf_set_protostate(struct pf_state *, int, u_int8_t);
307
308 struct pf_src_tree tree_src_tracking;
309
310 struct pf_state_tree_id tree_id;
311 struct pf_state_list pf_state_list = PF_STATE_LIST_INITIALIZER(pf_state_list);
312
313 RB_GENERATE(pf_src_tree, pf_src_node, entry, pf_src_compare);
314 RBT_GENERATE(pf_state_tree, pf_state_key, sk_entry, pf_state_compare_key);
315 RBT_GENERATE(pf_state_tree_id, pf_state, entry_id, pf_state_compare_id);
316
317 int
318 pf_addr_compare(const struct pf_addr *a, const struct pf_addr *b,
319 sa_family_t af)
320 {
321 switch (af) {
322 case AF_INET:
323 if (a->addr32[0] > b->addr32[0])
324 return (1);
325 if (a->addr32[0] < b->addr32[0])
326 return (-1);
327 break;
328 #ifdef INET6
329 case AF_INET6:
330 if (a->addr32[3] > b->addr32[3])
331 return (1);
332 if (a->addr32[3] < b->addr32[3])
333 return (-1);
334 if (a->addr32[2] > b->addr32[2])
335 return (1);
336 if (a->addr32[2] < b->addr32[2])
337 return (-1);
338 if (a->addr32[1] > b->addr32[1])
339 return (1);
340 if (a->addr32[1] < b->addr32[1])
341 return (-1);
342 if (a->addr32[0] > b->addr32[0])
343 return (1);
344 if (a->addr32[0] < b->addr32[0])
345 return (-1);
346 break;
347 #endif /* INET6 */
348 }
349 return (0);
350 }
351
352 static __inline int
353 pf_src_compare(struct pf_src_node *a, struct pf_src_node *b)
354 {
355 int diff;
356
357 if (a->rule.ptr > b->rule.ptr)
358 return (1);
359 if (a->rule.ptr < b->rule.ptr)
360 return (-1);
361 if ((diff = a->type - b->type) != 0)
362 return (diff);
363 if ((diff = a->af - b->af) != 0)
364 return (diff);
365 if ((diff = pf_addr_compare(&a->addr, &b->addr, a->af)) != 0)
366 return (diff);
367 return (0);
368 }
369
370 static __inline void
371 pf_set_protostate(struct pf_state *st, int which, u_int8_t newstate)
372 {
373 if (which == PF_PEER_DST || which == PF_PEER_BOTH)
374 st->dst.state = newstate;
375 if (which == PF_PEER_DST)
376 return;
377
378 if (st->src.state == newstate)
379 return;
380 if (st->creatorid == pf_status.hostid &&
381 st->key[PF_SK_STACK]->proto == IPPROTO_TCP &&
382 !(TCPS_HAVEESTABLISHED(st->src.state) ||
383 st->src.state == TCPS_CLOSED) &&
384 (TCPS_HAVEESTABLISHED(newstate) || newstate == TCPS_CLOSED))
385 pf_status.states_halfopen--;
386
387 st->src.state = newstate;
388 }
389
390 void
391 pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
392 {
393 switch (af) {
394 case AF_INET:
395 dst->addr32[0] = src->addr32[0];
396 break;
397 #ifdef INET6
398 case AF_INET6:
399 dst->addr32[0] = src->addr32[0];
400 dst->addr32[1] = src->addr32[1];
401 dst->addr32[2] = src->addr32[2];
402 dst->addr32[3] = src->addr32[3];
403 break;
404 #endif /* INET6 */
405 default:
406 unhandled_af(af);
407 }
408 }
409
410 void
411 pf_init_threshold(struct pf_threshold *threshold,
412 u_int32_t limit, u_int32_t seconds)
413 {
414 threshold->limit = limit * PF_THRESHOLD_MULT;
415 threshold->seconds = seconds;
416 threshold->count = 0;
417 threshold->last = getuptime();
418 }
419
420 void
421 pf_add_threshold(struct pf_threshold *threshold)
422 {
423 u_int32_t t = getuptime(), diff = t - threshold->last;
424
425 if (diff >= threshold->seconds)
426 threshold->count = 0;
427 else
428 threshold->count -= threshold->count * diff /
429 threshold->seconds;
430 threshold->count += PF_THRESHOLD_MULT;
431 threshold->last = t;
432 }
433
434 int
435 pf_check_threshold(struct pf_threshold *threshold)
436 {
437 return (threshold->count > threshold->limit);
438 }
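
/*
 * Editor's note: the three functions above form a fixed-point rate
 * estimator. pf_init_threshold() scales the configured limit by
 * PF_THRESHOLD_MULT, pf_add_threshold() first decays the running
 * count linearly over the averaging window and then adds one scaled
 * event, and pf_check_threshold() trips once the decayed count
 * exceeds the scaled limit.
 *
 * Worked example (illustrative numbers): for "max-src-conn-rate
 * 100/10", limit is 100 * PF_THRESHOLD_MULT. A connection arriving
 * 5 seconds after the previous one decays count by count * 5 / 10,
 * i.e. by half, before adding PF_THRESHOLD_MULT, so only a sustained
 * rate above 100 connections per 10 seconds keeps count above limit.
 */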
439
440 void
441 pf_state_list_insert(struct pf_state_list *pfs, struct pf_state *st)
442 {
443 /*
444 * we can always put states on the end of the list.
445 *
446 * things reading the list should take a read lock, then
447 * the mutex, get the head and tail pointers, release the
448 * mutex, and then they can iterate between the head and tail.
449 */
450
451 pf_state_ref(st); /* get a ref for the list */
452
453 mtx_enter(&pfs->pfs_mtx);
454 TAILQ_INSERT_TAIL(&pfs->pfs_list, st, entry_list);
455 mtx_leave(&pfs->pfs_mtx);
456 }
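
/*
 * Editor's sketch (not part of the original source) of a reader
 * following the locking protocol described above; the function name
 * is hypothetical. pf_purge_expired_states() at the bottom of this
 * file uses the same pattern.
 */
#if 0 /* illustrative only */
static void
pf_state_list_reader_example(struct pf_state_list *pfs)
{
	struct pf_state *head, *tail, *st;

	rw_enter_read(&pfs->pfs_rwl);	/* excludes removals */

	mtx_enter(&pfs->pfs_mtx);
	head = TAILQ_FIRST(&pfs->pfs_list);
	tail = TAILQ_LAST(&pfs->pfs_list, pf_state_queue);
	mtx_leave(&pfs->pfs_mtx);

	/* head..tail is a stable span; new states only append after tail */
	for (st = head; st != NULL; st = TAILQ_NEXT(st, entry_list)) {
		/* take pf_state_ref(st) to keep st past the read lock */
		if (st == tail)
			break;
	}

	rw_exit_read(&pfs->pfs_rwl);
}
#endif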
457
458 void
459 pf_state_list_remove(struct pf_state_list *pfs, struct pf_state *st)
460 {
461 /* states can only be removed when the write lock is held */
462 rw_assert_wrlock(&pfs->pfs_rwl);
463
464 mtx_enter(&pfs->pfs_mtx);
465 TAILQ_REMOVE(&pfs->pfs_list, st, entry_list);
466 mtx_leave(&pfs->pfs_mtx);
467
468 pf_state_unref(st); /* list no longer references the state */
469 }
470
471 void
472 pf_update_state_timeout(struct pf_state *st, int to)
473 {
474 mtx_enter(&st->mtx);
475 if (st->timeout != PFTM_UNLINKED)
476 st->timeout = to;
477 mtx_leave(&st->mtx);
478 }
479
480 int
481 pf_src_connlimit(struct pf_state **stp)
482 {
483 int bad = 0;
484 struct pf_src_node *sn;
485
486 if ((sn = pf_get_src_node((*stp), PF_SN_NONE)) == NULL)
487 return (0);
488
489 sn->conn++;
490 (*stp)->src.tcp_est = 1;
491 pf_add_threshold(&sn->conn_rate);
492
493 if ((*stp)->rule.ptr->max_src_conn &&
494 (*stp)->rule.ptr->max_src_conn < sn->conn) {
495 pf_status.lcounters[LCNT_SRCCONN]++;
496 bad++;
497 }
498
499 if ((*stp)->rule.ptr->max_src_conn_rate.limit &&
500 pf_check_threshold(&sn->conn_rate)) {
501 pf_status.lcounters[LCNT_SRCCONNRATE]++;
502 bad++;
503 }
504
505 if (!bad)
506 return (0);
507
508 if ((*stp)->rule.ptr->overload_tbl) {
509 struct pfr_addr p;
510 u_int32_t killed = 0;
511
512 pf_status.lcounters[LCNT_OVERLOAD_TABLE]++;
513 if (pf_status.debug >= LOG_NOTICE) {
514 log(LOG_NOTICE,
515 "pf: pf_src_connlimit: blocking address ");
516 pf_print_host(&sn->addr, 0,
517 (*stp)->key[PF_SK_WIRE]->af);
518 }
519
520 memset(&p, 0, sizeof(p));
521 p.pfra_af = (*stp)->key[PF_SK_WIRE]->af;
522 switch ((*stp)->key[PF_SK_WIRE]->af) {
523 case AF_INET:
524 p.pfra_net = 32;
525 p.pfra_ip4addr = sn->addr.v4;
526 break;
527 #ifdef INET6
528 case AF_INET6:
529 p.pfra_net = 128;
530 p.pfra_ip6addr = sn->addr.v6;
531 break;
532 #endif /* INET6 */
533 }
534
535 pfr_insert_kentry((*stp)->rule.ptr->overload_tbl,
536 &p, gettime());
537
538 /* kill existing states if that's required. */
539 if ((*stp)->rule.ptr->flush) {
540 struct pf_state_key *sk;
541 struct pf_state *st;
542
543 pf_status.lcounters[LCNT_OVERLOAD_FLUSH]++;
544 RBT_FOREACH(st, pf_state_tree_id, &tree_id) {
545 sk = st->key[PF_SK_WIRE];
546 /*
547 * Kill states from this source. (Only those
548 * from the same rule if PF_FLUSH_GLOBAL is not
549 * set)
550 */
551 if (sk->af ==
552 (*stp)->key[PF_SK_WIRE]->af &&
553 (((*stp)->direction == PF_OUT &&
554 PF_AEQ(&sn->addr, &sk->addr[1], sk->af)) ||
555 ((*stp)->direction == PF_IN &&
556 PF_AEQ(&sn->addr, &sk->addr[0], sk->af))) &&
557 ((*stp)->rule.ptr->flush &
558 PF_FLUSH_GLOBAL ||
559 (*stp)->rule.ptr == st->rule.ptr)) {
560 pf_update_state_timeout(st, PFTM_PURGE);
561 pf_set_protostate(st, PF_PEER_BOTH,
562 TCPS_CLOSED);
563 killed++;
564 }
565 }
566 if (pf_status.debug >= LOG_NOTICE)
567 addlog(", %u states killed", killed);
568 }
569 if (pf_status.debug >= LOG_NOTICE)
570 addlog("\n");
571 }
572
573 /* kill this state */
574 pf_update_state_timeout(*stp, PFTM_PURGE);
575 pf_set_protostate(*stp, PF_PEER_BOTH, TCPS_CLOSED);
576 return (1);
577 }
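
/*
 * Editor's illustration: the overload handling above backs pf.conf
 * rules such as (hypothetical table name)
 *
 *	pass in proto tcp to port ssh keep state \
 *	    (max-src-conn 10, max-src-conn-rate 5/60, \
 *	    overload <bruteforce> flush global)
 *
 * where an offending source address is inserted into <bruteforce>
 * and, with "flush global", all of its states are torn down
 * regardless of which rule created them.
 */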
578
579 int
580 pf_insert_src_node(struct pf_src_node **sn, struct pf_rule *rule,
581 enum pf_sn_types type, sa_family_t af, struct pf_addr *src,
582 struct pf_addr *raddr, struct pfi_kif *kif)
583 {
584 struct pf_src_node k;
585
586 if (*sn == NULL) {
587 k.af = af;
588 k.type = type;
589 pf_addrcpy(&k.addr, src, af);
590 k.rule.ptr = rule;
591 pf_status.scounters[SCNT_SRC_NODE_SEARCH]++;
592 *sn = RB_FIND(pf_src_tree, &tree_src_tracking, &k);
593 }
594 if (*sn == NULL) {
595 if (!rule->max_src_nodes ||
596 rule->src_nodes < rule->max_src_nodes)
597 (*sn) = pool_get(&pf_src_tree_pl, PR_NOWAIT | PR_ZERO);
598 else
599 pf_status.lcounters[LCNT_SRCNODES]++;
600 if ((*sn) == NULL)
601 return (-1);
602
603 pf_init_threshold(&(*sn)->conn_rate,
604 rule->max_src_conn_rate.limit,
605 rule->max_src_conn_rate.seconds);
606
607 (*sn)->type = type;
608 (*sn)->af = af;
609 (*sn)->rule.ptr = rule;
610 pf_addrcpy(&(*sn)->addr, src, af);
611 if (raddr)
612 pf_addrcpy(&(*sn)->raddr, raddr, af);
613 if (RB_INSERT(pf_src_tree,
614 &tree_src_tracking, *sn) != NULL) {
615 if (pf_status.debug >= LOG_NOTICE) {
616 log(LOG_NOTICE,
617 "pf: src_tree insert failed: ");
618 pf_print_host(&(*sn)->addr, 0, af);
619 addlog("\n");
620 }
621 pool_put(&pf_src_tree_pl, *sn);
622 return (-1);
623 }
624 (*sn)->creation = getuptime();
625 (*sn)->rule.ptr->src_nodes++;
626 if (kif != NULL) {
627 (*sn)->kif = kif;
628 pfi_kif_ref(kif, PFI_KIF_REF_SRCNODE);
629 }
630 pf_status.scounters[SCNT_SRC_NODE_INSERT]++;
631 pf_status.src_nodes++;
632 } else {
633 if (rule->max_src_states &&
634 (*sn)->states >= rule->max_src_states) {
635 pf_status.lcounters[LCNT_SRCSTATES]++;
636 return (-1);
637 }
638 }
639 return (0);
640 }
641
642 void
643 pf_remove_src_node(struct pf_src_node *sn)
644 {
645 if (sn->states > 0 || sn->expire > getuptime())
646 return;
647
648 sn->rule.ptr->src_nodes--;
649 if (sn->rule.ptr->states_cur == 0 &&
650 sn->rule.ptr->src_nodes == 0)
651 pf_rm_rule(NULL, sn->rule.ptr);
652 RB_REMOVE(pf_src_tree, &tree_src_tracking, sn);
653 pf_status.scounters[SCNT_SRC_NODE_REMOVALS]++;
654 pf_status.src_nodes--;
655 pfi_kif_unref(sn->kif, PFI_KIF_REF_SRCNODE);
656 pool_put(&pf_src_tree_pl, sn);
657 }
658
659 struct pf_src_node *
660 pf_get_src_node(struct pf_state *st, enum pf_sn_types type)
661 {
662 struct pf_sn_item *sni;
663
664 SLIST_FOREACH(sni, &st->src_nodes, next)
665 if (sni->sn->type == type)
666 return (sni->sn);
667 return (NULL);
668 }
669
670 void
671 pf_state_rm_src_node(struct pf_state *st, struct pf_src_node *sn)
672 {
673 struct pf_sn_item *sni, *snin, *snip = NULL;
674
675 for (sni = SLIST_FIRST(&st->src_nodes); sni; sni = snin) {
676 snin = SLIST_NEXT(sni, next);
677 if (sni->sn == sn) {
678 if (snip)
679 SLIST_REMOVE_AFTER(snip, next);
680 else
681 SLIST_REMOVE_HEAD(&st->src_nodes, next);
682 pool_put(&pf_sn_item_pl, sni);
683 sni = NULL;
684 sn->states--;
685 }
686 if (sni != NULL)
687 snip = sni;
688 }
689 }
690
691 /* state table stuff */
692
693 static inline int
694 pf_state_compare_key(const struct pf_state_key *a,
695 const struct pf_state_key *b)
696 {
697 int diff;
698
699 if ((diff = a->hash - b->hash) != 0)
700 return (diff);
701 if ((diff = a->proto - b->proto) != 0)
702 return (diff);
703 if ((diff = a->af - b->af) != 0)
704 return (diff);
705 if ((diff = pf_addr_compare(&a->addr[0], &b->addr[0], a->af)) != 0)
706 return (diff);
707 if ((diff = pf_addr_compare(&a->addr[1], &b->addr[1], a->af)) != 0)
708 return (diff);
709 if ((diff = a->port[0] - b->port[0]) != 0)
710 return (diff);
711 if ((diff = a->port[1] - b->port[1]) != 0)
712 return (diff);
713 if ((diff = a->rdomain - b->rdomain) != 0)
714 return (diff);
715 return (0);
716 }
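
/*
 * Editor's note: comparing the precomputed flow hash first lets most
 * RB-tree probes be decided by a single integer comparison; the
 * remaining fields only break ties between hash collisions and keep
 * the ordering total.
 */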
717
718 static inline int
719 pf_state_compare_id(const struct pf_state *a, const struct pf_state *b)
720 {
721 if (a->id > b->id)
722 return (1);
723 if (a->id < b->id)
724 return (-1);
725 if (a->creatorid > b->creatorid)
726 return (1);
727 if (a->creatorid < b->creatorid)
728 return (-1);
729
730 return (0);
731 }
732
733 /*
734 * on failure, pf_state_key_attach() releases the pf_state_key
735 * reference and returns NULL.
736 */
737 struct pf_state_key *
738 pf_state_key_attach(struct pf_state_key *sk, struct pf_state *st, int idx)
739 {
740 struct pf_state_item *si;
741 struct pf_state_key *cur;
742 struct pf_state *oldst = NULL;
743
744 PF_ASSERT_LOCKED();
745
746 KASSERT(st->key[idx] == NULL);
747 sk->sk_removed = 0;
748 cur = RBT_INSERT(pf_state_tree, &pf_statetbl, sk);
749 if (cur != NULL) {
750 sk->sk_removed = 1;
751 /* key exists. check for same kif, if none, add to key */
752 TAILQ_FOREACH(si, &cur->sk_states, si_entry) {
753 struct pf_state *sist = si->si_st;
754 if (sist->kif == st->kif &&
755 ((sist->key[PF_SK_WIRE]->af == sk->af &&
756 sist->direction == st->direction) ||
757 (sist->key[PF_SK_WIRE]->af !=
758 sist->key[PF_SK_STACK]->af &&
759 sk->af == sist->key[PF_SK_STACK]->af &&
760 sist->direction != st->direction))) {
761 int reuse = 0;
762
763 if (sk->proto == IPPROTO_TCP &&
764 sist->src.state >= TCPS_FIN_WAIT_2 &&
765 sist->dst.state >= TCPS_FIN_WAIT_2)
766 reuse = 1;
767 if (pf_status.debug >= LOG_NOTICE) {
768 log(LOG_NOTICE,
769 "pf: %s key attach %s on %s: ",
770 (idx == PF_SK_WIRE) ?
771 "wire" : "stack",
772 reuse ? "reuse" : "failed",
773 st->kif->pfik_name);
774 pf_print_state_parts(st,
775 (idx == PF_SK_WIRE) ? sk : NULL,
776 (idx == PF_SK_STACK) ? sk : NULL);
777 addlog(", existing: ");
778 pf_print_state_parts(sist,
779 (idx == PF_SK_WIRE) ? sk : NULL,
780 (idx == PF_SK_STACK) ? sk : NULL);
781 addlog("\n");
782 }
783 if (reuse) {
784 pf_set_protostate(sist, PF_PEER_BOTH,
785 TCPS_CLOSED);
786 /* remove the old state late or its sks can go away */
787 oldst = sist;
788 } else {
789 pf_state_key_unref(sk);
790 return (NULL); /* collision! */
791 }
792 }
793 }
794
795 /* reuse the existing state key */
796 pf_state_key_unref(sk);
797 sk = cur;
798 }
799
800 if ((si = pool_get(&pf_state_item_pl, PR_NOWAIT)) == NULL) {
801 if (TAILQ_EMPTY(&sk->sk_states)) {
802 KASSERT(cur == NULL);
803 RBT_REMOVE(pf_state_tree, &pf_statetbl, sk);
804 sk->sk_removed = 1;
805 pf_state_key_unref(sk);
806 }
807
808 return (NULL);
809 }
810
811 st->key[idx] = pf_state_key_ref(sk); /* give a ref to state */
812 si->si_st = pf_state_ref(st);
813
814 /* list is sorted, if-bound states before floating */
815 if (st->kif == pfi_all)
816 TAILQ_INSERT_TAIL(&sk->sk_states, si, si_entry);
817 else
818 TAILQ_INSERT_HEAD(&sk->sk_states, si, si_entry);
819
820 if (oldst)
821 pf_remove_state(oldst);
822
823 /* caller owns the pf_state ref, which owns a pf_state_key ref now */
824 return (sk);
825 }
826
827 void
828 pf_detach_state(struct pf_state *st)
829 {
830 KASSERT(st->key[PF_SK_WIRE] != NULL);
831 pf_state_key_detach(st, PF_SK_WIRE);
832
833 KASSERT(st->key[PF_SK_STACK] != NULL);
834 if (st->key[PF_SK_STACK] != st->key[PF_SK_WIRE])
835 pf_state_key_detach(st, PF_SK_STACK);
836 }
837
838 void
839 pf_state_key_detach(struct pf_state *st, int idx)
840 {
841 struct pf_state_item *si;
842 struct pf_state_key *sk;
843
844 PF_ASSERT_LOCKED();
845
846 sk = st->key[idx];
847 if (sk == NULL)
848 return;
849
850 TAILQ_FOREACH(si, &sk->sk_states, si_entry) {
851 if (si->si_st == st)
852 break;
853 }
854 if (si == NULL)
855 return;
856
857 TAILQ_REMOVE(&sk->sk_states, si, si_entry);
858 pool_put(&pf_state_item_pl, si);
859
860 if (TAILQ_EMPTY(&sk->sk_states)) {
861 RBT_REMOVE(pf_state_tree, &pf_statetbl, sk);
862 sk->sk_removed = 1;
863 pf_state_key_unlink_reverse(sk);
864 pf_state_key_unlink_inpcb(sk);
865 pf_state_key_unref(sk);
866 }
867
868 pf_state_unref(st);
869 }
870
871 struct pf_state_key *
872 pf_alloc_state_key(int pool_flags)
873 {
874 struct pf_state_key *sk;
875
876 if ((sk = pool_get(&pf_state_key_pl, pool_flags)) == NULL)
877 return (NULL);
878
879 PF_REF_INIT(sk->sk_refcnt);
880 TAILQ_INIT(&sk->sk_states);
881 sk->sk_removed = 1;
882
883 return (sk);
884 }
885
886 static __inline int
887 pf_state_key_addr_setup(struct pf_pdesc *pd, void *arg, int sidx,
888 struct pf_addr *saddr, int didx, struct pf_addr *daddr, int af, int multi)
889 {
890 struct pf_state_key_cmp *key = arg;
891 #ifdef INET6
892 struct pf_addr *target;
893
894 if (af == AF_INET || pd->proto != IPPROTO_ICMPV6)
895 goto copy;
896
897 switch (pd->hdr.icmp6.icmp6_type) {
898 case ND_NEIGHBOR_SOLICIT:
899 if (multi)
900 return (-1);
901 target = (struct pf_addr *)&pd->hdr.nd_ns.nd_ns_target;
902 daddr = target;
903 break;
904 case ND_NEIGHBOR_ADVERT:
905 if (multi)
906 return (-1);
907 target = (struct pf_addr *)&pd->hdr.nd_ns.nd_ns_target;
908 saddr = target;
909 if (IN6_IS_ADDR_MULTICAST(&pd->dst->v6)) {
910 key->addr[didx].addr32[0] = 0;
911 key->addr[didx].addr32[1] = 0;
912 key->addr[didx].addr32[2] = 0;
913 key->addr[didx].addr32[3] = 0;
914 daddr = NULL; /* overwritten */
915 }
916 break;
917 default:
918 if (multi) {
919 key->addr[sidx].addr32[0] = __IPV6_ADDR_INT32_MLL;
920 key->addr[sidx].addr32[1] = 0;
921 key->addr[sidx].addr32[2] = 0;
922 key->addr[sidx].addr32[3] = __IPV6_ADDR_INT32_ONE;
923 saddr = NULL; /* overwritten */
924 }
925 }
926 copy:
927 #endif /* INET6 */
928 if (saddr)
929 pf_addrcpy(&key->addr[sidx], saddr, af);
930 if (daddr)
931 pf_addrcpy(&key->addr[didx], daddr, af);
932
933 return (0);
934 }
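
/*
 * Editor's note: for IPv6 neighbour discovery the key is built from
 * the ND target address instead of the packet addresses, so a
 * solicitation sent to a solicited-node multicast group and the
 * unicast advertisement answering it map to the same state key. For
 * other ICMPv6 (the default case) a multicast match sets one side of
 * the key to the link-local all-nodes address ff02::1.
 */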
935
936 int
937 pf_state_key_setup(struct pf_pdesc *pd, struct pf_state_key **skw,
938 struct pf_state_key **sks, int rtableid)
939 {
940 /* if returning error we MUST pool_put state keys ourselves */
941 struct pf_state_key *sk1, *sk2;
942 u_int wrdom = pd->rdomain;
943 int afto = pd->af != pd->naf;
944
945 if ((sk1 = pf_alloc_state_key(PR_NOWAIT | PR_ZERO)) == NULL)
946 return (ENOMEM);
947
948 pf_state_key_addr_setup(pd, sk1, pd->sidx, pd->src, pd->didx, pd->dst,
949 pd->af, 0);
950 sk1->port[pd->sidx] = pd->osport;
951 sk1->port[pd->didx] = pd->odport;
952 sk1->proto = pd->proto;
953 sk1->af = pd->af;
954 sk1->rdomain = pd->rdomain;
955 sk1->hash = pf_pkt_hash(sk1->af, sk1->proto,
956 &sk1->addr[0], &sk1->addr[1], sk1->port[0], sk1->port[1]);
957 if (rtableid >= 0)
958 wrdom = rtable_l2(rtableid);
959
960 if (PF_ANEQ(&pd->nsaddr, pd->src, pd->af) ||
961 PF_ANEQ(&pd->ndaddr, pd->dst, pd->af) ||
962 pd->nsport != pd->osport || pd->ndport != pd->odport ||
963 wrdom != pd->rdomain || afto) { /* NAT/NAT64 */
964 if ((sk2 = pf_alloc_state_key(PR_NOWAIT | PR_ZERO)) == NULL) {
965 pf_state_key_unref(sk1);
966 return (ENOMEM);
967 }
968 pf_state_key_addr_setup(pd, sk2, afto ? pd->didx : pd->sidx,
969 &pd->nsaddr, afto ? pd->sidx : pd->didx, &pd->ndaddr,
970 pd->naf, 0);
971 sk2->port[afto ? pd->didx : pd->sidx] = pd->nsport;
972 sk2->port[afto ? pd->sidx : pd->didx] = pd->ndport;
973 if (afto) {
974 switch (pd->proto) {
975 case IPPROTO_ICMP:
976 sk2->proto = IPPROTO_ICMPV6;
977 break;
978 case IPPROTO_ICMPV6:
979 sk2->proto = IPPROTO_ICMP;
980 break;
981 default:
982 sk2->proto = pd->proto;
983 }
984 } else
985 sk2->proto = pd->proto;
986 sk2->af = pd->naf;
987 sk2->rdomain = wrdom;
988 sk2->hash = pf_pkt_hash(sk2->af, sk2->proto,
989 &sk2->addr[0], &sk2->addr[1], sk2->port[0], sk2->port[1]);
990 } else
991 sk2 = pf_state_key_ref(sk1);
992
993 if (pd->dir == PF_IN) {
994 *skw = sk1;
995 *sks = sk2;
996 } else {
997 *sks = sk1;
998 *skw = sk2;
999 }
1000
1001 if (pf_status.debug >= LOG_DEBUG) {
1002 log(LOG_DEBUG, "pf: key setup: ");
1003 pf_print_state_parts(NULL, *skw, *sks);
1004 addlog("\n");
1005 }
1006
1007 return (0);
1008 }
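
/*
 * Editor's illustration (hypothetical addresses): for an outbound
 * TCP connection 10.0.0.5:3333 -> 203.0.113.7:80 matching a
 * "nat-to 192.0.2.1" rule, sk1 holds the pre-translation tuple and
 * sk2 the post-translation one. With pd->dir == PF_OUT, sk1 becomes
 * the stack-side key (*sks) and sk2 the wire-side key (*skw); when
 * nothing is translated, both pointers share a single refcounted key.
 */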
1009
1010 /*
1011 * pf_state_insert() does the following:
1012 * - links the pf_state up with pf_state_key(s).
1013 * - inserts the pf_state_keys into pf_state_tree.
1014  * - inserts the pf_state into pf_state_tree_id.
1015 * - tells pfsync about the state.
1016 *
1017 * pf_state_insert() owns the references to the pf_state_key structs
1018 * it is given. on failure to insert, these references are released.
1019 * on success, the caller owns a pf_state reference that allows it
1020 * to access the state keys.
1021 */
1022
1023 int
1024 pf_state_insert(struct pfi_kif *kif, struct pf_state_key **skwp,
1025 struct pf_state_key **sksp, struct pf_state *st)
1026 {
1027 struct pf_state_key *skw = *skwp;
1028 struct pf_state_key *sks = *sksp;
1029 int same = (skw == sks);
1030
1031 PF_ASSERT_LOCKED();
1032
1033 st->kif = kif;
1034 PF_STATE_ENTER_WRITE();
1035
1036 skw = pf_state_key_attach(skw, st, PF_SK_WIRE);
1037 if (skw == NULL) {
1038 pf_state_key_unref(sks);
1039 PF_STATE_EXIT_WRITE();
1040 return (-1);
1041 }
1042
1043 if (same) {
1044 /* pf_state_key_attach might have swapped skw */
1045 pf_state_key_unref(sks);
1046 st->key[PF_SK_STACK] = sks = pf_state_key_ref(skw);
1047 } else if (pf_state_key_attach(sks, st, PF_SK_STACK) == NULL) {
1048 pf_state_key_detach(st, PF_SK_WIRE);
1049 PF_STATE_EXIT_WRITE();
1050 return (-1);
1051 }
1052
1053 if (st->id == 0 && st->creatorid == 0) {
1054 st->id = htobe64(pf_status.stateid++);
1055 st->creatorid = pf_status.hostid;
1056 }
1057 if (RBT_INSERT(pf_state_tree_id, &tree_id, st) != NULL) {
1058 if (pf_status.debug >= LOG_NOTICE) {
1059 log(LOG_NOTICE, "pf: state insert failed: "
1060 "id: %016llx creatorid: %08x",
1061 betoh64(st->id), ntohl(st->creatorid));
1062 addlog("\n");
1063 }
1064 pf_detach_state(st);
1065 PF_STATE_EXIT_WRITE();
1066 return (-1);
1067 }
1068 pf_state_list_insert(&pf_state_list, st);
1069 pf_status.fcounters[FCNT_STATE_INSERT]++;
1070 pf_status.states++;
1071 pfi_kif_ref(kif, PFI_KIF_REF_STATE);
1072 PF_STATE_EXIT_WRITE();
1073
1074 #if NPFSYNC > 0
1075 pfsync_insert_state(st);
1076 #endif /* NPFSYNC > 0 */
1077
1078 *skwp = skw;
1079 *sksp = sks;
1080
1081 return (0);
1082 }
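
/*
 * Editor's sketch of the expected calling sequence (see
 * pf_state_import() below for a real caller):
 *
 *	if (pf_state_key_setup(pd, &skw, &sks, rtableid) != 0)
 *		return (ENOMEM);
 *	...allocate and fill in st...
 *	if (pf_state_insert(kif, &skw, &sks, st) != 0) {
 *		...the state keys were already released for us,
 *		   only st remains to be freed...
 *		pool_put(&pf_state_pl, st);
 *	}
 *
 * On success skw/sks may point at different, pre-existing keys than
 * the ones passed in, which is why they are passed by reference.
 */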
1083
1084 struct pf_state *
1085 pf_find_state_byid(struct pf_state_cmp *key)
1086 {
1087 pf_status.fcounters[FCNT_STATE_SEARCH]++;
1088
1089 return (RBT_FIND(pf_state_tree_id, &tree_id, (struct pf_state *)key));
1090 }
1091
1092 int
1093 pf_compare_state_keys(struct pf_state_key *a, struct pf_state_key *b,
1094 struct pfi_kif *kif, u_int dir)
1095 {
1096 /* a (from hdr) and b (new) must be exact opposites of each other */
1097 if (a->af == b->af && a->proto == b->proto &&
1098 PF_AEQ(&a->addr[0], &b->addr[1], a->af) &&
1099 PF_AEQ(&a->addr[1], &b->addr[0], a->af) &&
1100 a->port[0] == b->port[1] &&
1101 a->port[1] == b->port[0] && a->rdomain == b->rdomain)
1102 return (0);
1103 else {
1104 /* mismatch. must not happen. */
1105 if (pf_status.debug >= LOG_ERR) {
1106 log(LOG_ERR,
1107 "pf: state key linking mismatch! dir=%s, "
1108 "if=%s, stored af=%u, a0: ",
1109 dir == PF_OUT ? "OUT" : "IN",
1110 kif->pfik_name, a->af);
1111 pf_print_host(&a->addr[0], a->port[0], a->af);
1112 addlog(", a1: ");
1113 pf_print_host(&a->addr[1], a->port[1], a->af);
1114 addlog(", proto=%u", a->proto);
1115 addlog(", found af=%u, a0: ", b->af);
1116 pf_print_host(&b->addr[0], b->port[0], b->af);
1117 addlog(", a1: ");
1118 pf_print_host(&b->addr[1], b->port[1], b->af);
1119 addlog(", proto=%u", b->proto);
1120 addlog("\n");
1121 }
1122 return (-1);
1123 }
1124 }
1125
1126 int
1127 pf_find_state(struct pf_pdesc *pd, struct pf_state_key_cmp *key,
1128 struct pf_state **stp)
1129 {
1130 struct pf_state_key *sk, *pkt_sk;
1131 struct pf_state_item *si;
1132 struct pf_state *st = NULL;
1133
1134 pf_status.fcounters[FCNT_STATE_SEARCH]++;
1135 if (pf_status.debug >= LOG_DEBUG) {
1136 log(LOG_DEBUG, "pf: key search, %s on %s: ",
1137 pd->dir == PF_OUT ? "out" : "in", pd->kif->pfik_name);
1138 pf_print_state_parts(NULL, (struct pf_state_key *)key, NULL);
1139 addlog("\n");
1140 }
1141
1142 pkt_sk = NULL;
1143 sk = NULL;
1144 if (pd->dir == PF_OUT) {
1145 /* first if block deals with outbound forwarded packet */
1146 pkt_sk = pd->m->m_pkthdr.pf.statekey;
1147
1148 if (!pf_state_key_isvalid(pkt_sk)) {
1149 pf_mbuf_unlink_state_key(pd->m);
1150 pkt_sk = NULL;
1151 }
1152
1153 if (pkt_sk && pf_state_key_isvalid(pkt_sk->sk_reverse))
1154 sk = pkt_sk->sk_reverse;
1155
1156 if (pkt_sk == NULL) {
1157 struct inpcb *inp = pd->m->m_pkthdr.pf.inp;
1158
1159 /* here we deal with local outbound packet */
1160 if (inp != NULL) {
1161 struct pf_state_key *inp_sk;
1162
1163 mtx_enter(&pf_inp_mtx);
1164 inp_sk = inp->inp_pf_sk;
1165 if (pf_state_key_isvalid(inp_sk)) {
1166 sk = inp_sk;
1167 mtx_leave(&pf_inp_mtx);
1168 } else if (inp_sk != NULL) {
1169 KASSERT(inp_sk->sk_inp == inp);
1170 inp_sk->sk_inp = NULL;
1171 inp->inp_pf_sk = NULL;
1172 mtx_leave(&pf_inp_mtx);
1173
1174 pf_state_key_unref(inp_sk);
1175 in_pcbunref(inp);
1176 } else
1177 mtx_leave(&pf_inp_mtx);
1178 }
1179 }
1180 }
1181
1182 if (sk == NULL) {
1183 if ((sk = RBT_FIND(pf_state_tree, &pf_statetbl,
1184 (struct pf_state_key *)key)) == NULL)
1185 return (PF_DROP);
1186 if (pd->dir == PF_OUT && pkt_sk &&
1187 pf_compare_state_keys(pkt_sk, sk, pd->kif, pd->dir) == 0)
1188 pf_state_key_link_reverse(sk, pkt_sk);
1189 else if (pd->dir == PF_OUT)
1190 pf_state_key_link_inpcb(sk, pd->m->m_pkthdr.pf.inp);
1191 }
1192
1193 /* remove firewall data from outbound packet */
1194 if (pd->dir == PF_OUT)
1195 pf_pkt_addr_changed(pd->m);
1196
1197 /* list is sorted, if-bound states before floating ones */
1198 TAILQ_FOREACH(si, &sk->sk_states, si_entry) {
1199 struct pf_state *sist = si->si_st;
1200 if (sist->timeout != PFTM_PURGE &&
1201 (sist->kif == pfi_all || sist->kif == pd->kif) &&
1202 ((sist->key[PF_SK_WIRE]->af == sist->key[PF_SK_STACK]->af &&
1203 sk == (pd->dir == PF_IN ? sist->key[PF_SK_WIRE] :
1204 sist->key[PF_SK_STACK])) ||
1205 (sist->key[PF_SK_WIRE]->af != sist->key[PF_SK_STACK]->af
1206 && pd->dir == PF_IN && (sk == sist->key[PF_SK_STACK] ||
1207 sk == sist->key[PF_SK_WIRE])))) {
1208 st = sist;
1209 break;
1210 }
1211 }
1212
1213 if (st == NULL)
1214 return (PF_DROP);
1215 if (ISSET(st->state_flags, PFSTATE_INP_UNLINKED))
1216 return (PF_DROP);
1217
1218 if (st->rule.ptr->pktrate.limit && pd->dir == st->direction) {
1219 pf_add_threshold(&st->rule.ptr->pktrate);
1220 if (pf_check_threshold(&st->rule.ptr->pktrate))
1221 return (PF_DROP);
1222 }
1223
1224 *stp = st;
1225
1226 return (PF_MATCH);
1227 }
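
/*
 * Editor's summary of the lookup above: outbound packets try two
 * shortcuts before the RB-tree search, namely a state key left on
 * the mbuf by a previous pass (via its cached sk_reverse) and the
 * key cached on the originating socket's inpcb. When the tree search
 * happens anyway, its result re-primes those caches through
 * pf_state_key_link_reverse() or pf_state_key_link_inpcb().
 */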
1228
1229 struct pf_state *
1230 pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
1231 {
1232 struct pf_state_key *sk;
1233 struct pf_state_item *si, *ret = NULL;
1234
1235 pf_status.fcounters[FCNT_STATE_SEARCH]++;
1236
1237 sk = RBT_FIND(pf_state_tree, &pf_statetbl, (struct pf_state_key *)key);
1238
1239 if (sk != NULL) {
1240 TAILQ_FOREACH(si, &sk->sk_states, si_entry) {
1241 struct pf_state *sist = si->si_st;
1242 if (dir == PF_INOUT ||
1243 (sk == (dir == PF_IN ? sist->key[PF_SK_WIRE] :
1244 sist->key[PF_SK_STACK]))) {
1245 if (more == NULL)
1246 return (sist);
1247
1248 if (ret)
1249 (*more)++;
1250 else
1251 ret = si;
1252 }
1253 }
1254 }
1255 return (ret ? ret->si_st : NULL);
1256 }
1257
1258 void
1259 pf_state_peer_hton(const struct pf_state_peer *s, struct pfsync_state_peer *d)
1260 {
1261 d->seqlo = htonl(s->seqlo);
1262 d->seqhi = htonl(s->seqhi);
1263 d->seqdiff = htonl(s->seqdiff);
1264 d->max_win = htons(s->max_win);
1265 d->mss = htons(s->mss);
1266 d->state = s->state;
1267 d->wscale = s->wscale;
1268 if (s->scrub) {
1269 d->scrub.pfss_flags =
1270 htons(s->scrub->pfss_flags & PFSS_TIMESTAMP);
1271 d->scrub.pfss_ttl = (s)->scrub->pfss_ttl;
1272 d->scrub.pfss_ts_mod = htonl((s)->scrub->pfss_ts_mod);
1273 d->scrub.scrub_flag = PFSYNC_SCRUB_FLAG_VALID;
1274 }
1275 }
1276
1277 void
1278 pf_state_peer_ntoh(const struct pfsync_state_peer *s, struct pf_state_peer *d)
1279 {
1280 d->seqlo = ntohl(s->seqlo);
1281 d->seqhi = ntohl(s->seqhi);
1282 d->seqdiff = ntohl(s->seqdiff);
1283 d->max_win = ntohs(s->max_win);
1284 d->mss = ntohs(s->mss);
1285 d->state = s->state;
1286 d->wscale = s->wscale;
1287 if (s->scrub.scrub_flag == PFSYNC_SCRUB_FLAG_VALID &&
1288 d->scrub != NULL) {
1289 d->scrub->pfss_flags =
1290 ntohs(s->scrub.pfss_flags) & PFSS_TIMESTAMP;
1291 d->scrub->pfss_ttl = s->scrub.pfss_ttl;
1292 d->scrub->pfss_ts_mod = ntohl(s->scrub.pfss_ts_mod);
1293 }
1294 }
1295
1296 void
1297 pf_state_export(struct pfsync_state *sp, struct pf_state *st)
1298 {
1299 int32_t expire;
1300
1301 memset(sp, 0, sizeof(struct pfsync_state));
1302
1303 /* copy from state key */
1304 sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
1305 sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
1306 sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
1307 sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
1308 sp->key[PF_SK_WIRE].rdomain = htons(st->key[PF_SK_WIRE]->rdomain);
1309 sp->key[PF_SK_WIRE].af = st->key[PF_SK_WIRE]->af;
1310 sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
1311 sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
1312 sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
1313 sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
1314 sp->key[PF_SK_STACK].rdomain = htons(st->key[PF_SK_STACK]->rdomain);
1315 sp->key[PF_SK_STACK].af = st->key[PF_SK_STACK]->af;
1316 sp->rtableid[PF_SK_WIRE] = htonl(st->rtableid[PF_SK_WIRE]);
1317 sp->rtableid[PF_SK_STACK] = htonl(st->rtableid[PF_SK_STACK]);
1318 sp->proto = st->key[PF_SK_WIRE]->proto;
1319 sp->af = st->key[PF_SK_WIRE]->af;
1320
1321 /* copy from state */
1322 strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
1323 sp->rt = st->rt;
1324 sp->rt_addr = st->rt_addr;
1325 sp->creation = htonl(getuptime() - st->creation);
1326 expire = pf_state_expires(st, st->timeout);
1327 if (expire <= getuptime())
1328 sp->expire = htonl(0);
1329 else
1330 sp->expire = htonl(expire - getuptime());
1331
1332 sp->direction = st->direction;
1333 #if NPFLOG > 0
1334 sp->log = st->log;
1335 #endif /* NPFLOG > 0 */
1336 sp->timeout = st->timeout;
1337 sp->state_flags = htons(st->state_flags);
1338 if (READ_ONCE(st->sync_defer) != NULL)
1339 sp->state_flags |= htons(PFSTATE_ACK);
1340 if (!SLIST_EMPTY(&st->src_nodes))
1341 sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
1342
1343 sp->id = st->id;
1344 sp->creatorid = st->creatorid;
1345 pf_state_peer_hton(&st->src, &sp->src);
1346 pf_state_peer_hton(&st->dst, &sp->dst);
1347
1348 if (st->rule.ptr == NULL)
1349 sp->rule = htonl(-1);
1350 else
1351 sp->rule = htonl(st->rule.ptr->nr);
1352 if (st->anchor.ptr == NULL)
1353 sp->anchor = htonl(-1);
1354 else
1355 sp->anchor = htonl(st->anchor.ptr->nr);
1356 sp->nat_rule = htonl(-1); /* left for compat, nat_rule is gone */
1357
1358 pf_state_counter_hton(st->packets[0], sp->packets[0]);
1359 pf_state_counter_hton(st->packets[1], sp->packets[1]);
1360 pf_state_counter_hton(st->bytes[0], sp->bytes[0]);
1361 pf_state_counter_hton(st->bytes[1], sp->bytes[1]);
1362
1363 sp->max_mss = htons(st->max_mss);
1364 sp->min_ttl = st->min_ttl;
1365 sp->set_tos = st->set_tos;
1366 sp->set_prio[0] = st->set_prio[0];
1367 sp->set_prio[1] = st->set_prio[1];
1368 }
1369
1370 int
1371 pf_state_alloc_scrub_memory(const struct pfsync_state_peer *s,
1372 struct pf_state_peer *d)
1373 {
1374 if (s->scrub.scrub_flag && d->scrub == NULL)
1375 return (pf_normalize_tcp_alloc(d));
1376
1377 return (0);
1378 }
1379
1380 #if NPFSYNC > 0
1381 int
1382 pf_state_import(const struct pfsync_state *sp, int flags)
1383 {
1384 struct pf_state *st = NULL;
1385 struct pf_state_key *skw = NULL, *sks = NULL;
1386 struct pf_rule *r = NULL;
1387 struct pfi_kif *kif;
1388 int pool_flags;
1389 int error = ENOMEM;
1390 int n = 0;
1391
1392 PF_ASSERT_LOCKED();
1393
1394 if (sp->creatorid == 0) {
1395 DPFPRINTF(LOG_NOTICE, "%s: invalid creator id: %08x", __func__,
1396 ntohl(sp->creatorid));
1397 return (EINVAL);
1398 }
1399
1400 if ((kif = pfi_kif_get(sp->ifname, NULL)) == NULL) {
1401 DPFPRINTF(LOG_NOTICE, "%s: unknown interface: %s", __func__,
1402 sp->ifname);
1403 if (flags & PFSYNC_SI_IOCTL)
1404 return (EINVAL);
1405 return (0); /* skip this state */
1406 }
1407
1408 if (sp->af == 0)
1409 return (0); /* skip this state */
1410
1411 /*
1412 * If the ruleset checksums match or the state is coming from the ioctl,
1413 * it's safe to associate the state with the rule of that number.
1414 */
1415 if (sp->rule != htonl(-1) && sp->anchor == htonl(-1) &&
1416 (flags & (PFSYNC_SI_IOCTL | PFSYNC_SI_CKSUM)) &&
1417 ntohl(sp->rule) < pf_main_ruleset.rules.active.rcount) {
1418 TAILQ_FOREACH(r, pf_main_ruleset.rules.active.ptr, entries)
1419 if (ntohl(sp->rule) == n++)
1420 break;
1421 } else
1422 r = &pf_default_rule;
1423
1424 if ((r->max_states && r->states_cur >= r->max_states))
1425 goto cleanup;
1426
1427 if (flags & PFSYNC_SI_IOCTL)
1428 pool_flags = PR_WAITOK | PR_LIMITFAIL | PR_ZERO;
1429 else
1430 pool_flags = PR_NOWAIT | PR_LIMITFAIL | PR_ZERO;
1431
1432 if ((st = pool_get(&pf_state_pl, pool_flags)) == NULL)
1433 goto cleanup;
1434
1435 if ((skw = pf_alloc_state_key(pool_flags)) == NULL)
1436 goto cleanup;
1437
1438 if ((sp->key[PF_SK_WIRE].af &&
1439 (sp->key[PF_SK_WIRE].af != sp->key[PF_SK_STACK].af)) ||
1440 PF_ANEQ(&sp->key[PF_SK_WIRE].addr[0],
1441 &sp->key[PF_SK_STACK].addr[0], sp->af) ||
1442 PF_ANEQ(&sp->key[PF_SK_WIRE].addr[1],
1443 &sp->key[PF_SK_STACK].addr[1], sp->af) ||
1444 sp->key[PF_SK_WIRE].port[0] != sp->key[PF_SK_STACK].port[0] ||
1445 sp->key[PF_SK_WIRE].port[1] != sp->key[PF_SK_STACK].port[1] ||
1446 sp->key[PF_SK_WIRE].rdomain != sp->key[PF_SK_STACK].rdomain) {
1447 if ((sks = pf_alloc_state_key(pool_flags)) == NULL)
1448 goto cleanup;
1449 } else
1450 sks = pf_state_key_ref(skw);
1451
1452 /* allocate memory for scrub info */
1453 if (pf_state_alloc_scrub_memory(&sp->src, &st->src) ||
1454 pf_state_alloc_scrub_memory(&sp->dst, &st->dst))
1455 goto cleanup;
1456
1457 /* copy to state key(s) */
1458 skw->addr[0] = sp->key[PF_SK_WIRE].addr[0];
1459 skw->addr[1] = sp->key[PF_SK_WIRE].addr[1];
1460 skw->port[0] = sp->key[PF_SK_WIRE].port[0];
1461 skw->port[1] = sp->key[PF_SK_WIRE].port[1];
1462 skw->rdomain = ntohs(sp->key[PF_SK_WIRE].rdomain);
1463 skw->proto = sp->proto;
1464 if (!(skw->af = sp->key[PF_SK_WIRE].af))
1465 skw->af = sp->af;
1466 skw->hash = pf_pkt_hash(skw->af, skw->proto,
1467 &skw->addr[0], &skw->addr[1], skw->port[0], skw->port[1]);
1468
1469 if (sks != skw) {
1470 sks->addr[0] = sp->key[PF_SK_STACK].addr[0];
1471 sks->addr[1] = sp->key[PF_SK_STACK].addr[1];
1472 sks->port[0] = sp->key[PF_SK_STACK].port[0];
1473 sks->port[1] = sp->key[PF_SK_STACK].port[1];
1474 sks->rdomain = ntohs(sp->key[PF_SK_STACK].rdomain);
1475 if (!(sks->af = sp->key[PF_SK_STACK].af))
1476 sks->af = sp->af;
1477 if (sks->af != skw->af) {
1478 switch (sp->proto) {
1479 case IPPROTO_ICMP:
1480 sks->proto = IPPROTO_ICMPV6;
1481 break;
1482 case IPPROTO_ICMPV6:
1483 sks->proto = IPPROTO_ICMP;
1484 break;
1485 default:
1486 sks->proto = sp->proto;
1487 }
1488 } else
1489 sks->proto = sp->proto;
1490
1491 if (((sks->af != AF_INET) && (sks->af != AF_INET6)) ||
1492 ((skw->af != AF_INET) && (skw->af != AF_INET6))) {
1493 error = EINVAL;
1494 goto cleanup;
1495 }
1496
1497 sks->hash = pf_pkt_hash(sks->af, sks->proto,
1498 &sks->addr[0], &sks->addr[1], sks->port[0], sks->port[1]);
1499
1500 } else if ((sks->af != AF_INET) && (sks->af != AF_INET6)) {
1501 error = EINVAL;
1502 goto cleanup;
1503 }
1504 st->rtableid[PF_SK_WIRE] = ntohl(sp->rtableid[PF_SK_WIRE]);
1505 st->rtableid[PF_SK_STACK] = ntohl(sp->rtableid[PF_SK_STACK]);
1506
1507 /* copy to state */
1508 st->rt_addr = sp->rt_addr;
1509 st->rt = sp->rt;
1510 st->creation = getuptime() - ntohl(sp->creation);
1511 st->expire = getuptime();
1512 if (ntohl(sp->expire)) {
1513 u_int32_t timeout;
1514
1515 timeout = r->timeout[sp->timeout];
1516 if (!timeout)
1517 timeout = pf_default_rule.timeout[sp->timeout];
1518
1519 /* sp->expire may have been adaptively scaled by export. */
1520 st->expire -= timeout - ntohl(sp->expire);
1521 }
1522
1523 st->direction = sp->direction;
1524 st->log = sp->log;
1525 st->timeout = sp->timeout;
1526 st->state_flags = ntohs(sp->state_flags);
1527 st->max_mss = ntohs(sp->max_mss);
1528 st->min_ttl = sp->min_ttl;
1529 st->set_tos = sp->set_tos;
1530 st->set_prio[0] = sp->set_prio[0];
1531 st->set_prio[1] = sp->set_prio[1];
1532
1533 st->id = sp->id;
1534 st->creatorid = sp->creatorid;
1535 pf_state_peer_ntoh(&sp->src, &st->src);
1536 pf_state_peer_ntoh(&sp->dst, &st->dst);
1537
1538 st->rule.ptr = r;
1539 st->anchor.ptr = NULL;
1540
1541 PF_REF_INIT(st->refcnt);
1542 mtx_init(&st->mtx, IPL_NET);
1543
1544 /* XXX when we have anchors, use STATE_INC_COUNTERS */
1545 r->states_cur++;
1546 r->states_tot++;
1547
1548 st->sync_state = PFSYNC_S_NONE;
1549 st->pfsync_time = getuptime();
1550 #if NPFSYNC > 0
1551 pfsync_init_state(st, skw, sks, flags);
1552 #endif
1553
1554 if (pf_state_insert(kif, &skw, &sks, st) != 0) {
1555 /* XXX when we have anchors, use STATE_DEC_COUNTERS */
1556 r->states_cur--;
1557 error = EEXIST;
1558 goto cleanup_state;
1559 }
1560
1561 return (0);
1562
1563 cleanup:
1564 if (skw != NULL)
1565 pf_state_key_unref(skw);
1566 if (sks != NULL)
1567 pf_state_key_unref(sks);
1568
1569 cleanup_state: /* pf_state_insert frees the state keys */
1570 if (st) {
1571 if (st->dst.scrub)
1572 pool_put(&pf_state_scrub_pl, st->dst.scrub);
1573 if (st->src.scrub)
1574 pool_put(&pf_state_scrub_pl, st->src.scrub);
1575 pool_put(&pf_state_pl, st);
1576 }
1577 return (error);
1578 }
1579 #endif /* NPFSYNC > 0 */
1580
1581 /* END state table stuff */
1582
1583 void pf_purge_states(void *);
1584 struct task pf_purge_states_task =
1585 TASK_INITIALIZER(pf_purge_states, NULL);
1586
1587 void pf_purge_states_tick(void *);
1588 struct timeout pf_purge_states_to =
1589 TIMEOUT_INITIALIZER(pf_purge_states_tick, NULL);
1590
1591 unsigned int pf_purge_expired_states(unsigned int, unsigned int);
1592
1593 /*
1594 * how many states to scan this interval.
1595 *
1596 * this is set when the timeout fires, and reduced by the task. the
1597 * task will reschedule itself until the limit is reduced to zero,
1598 * and then it adds the timeout again.
1599 */
1600 unsigned int pf_purge_states_limit;
1601
1602 /*
1603 * limit how many states are processed with locks held per run of
1604 * the state purge task.
1605 */
1606 unsigned int pf_purge_states_collect = 64;
1607
1608 void
1609 pf_purge_states_tick(void *null)
1610 {
1611 unsigned int limit = pf_status.states;
1612 unsigned int interval = pf_default_rule.timeout[PFTM_INTERVAL];
1613
1614 if (limit == 0) {
1615 timeout_add_sec(&pf_purge_states_to, 1);
1616 return;
1617 }
1618
1619 /*
1620 * process a fraction of the state table every second
1621 */
1622
1623 if (interval > 1)
1624 limit /= interval;
1625
1626 pf_purge_states_limit = limit;
1627 task_add(systqmp, &pf_purge_states_task);
1628 }
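
/*
 * Editor's illustration: with 60000 states and the default
 * "set timeout interval 10", each tick sets pf_purge_states_limit to
 * 6000, so the purge task walks about a tenth of the state table per
 * second and covers the whole table once per interval, while
 * collecting at most pf_purge_states_collect states per task run
 * before rescheduling itself.
 */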
1629
1630 void
1631 pf_purge_states(void *null)
1632 {
1633 unsigned int limit;
1634 unsigned int scanned;
1635
1636 limit = pf_purge_states_limit;
1637 if (limit < pf_purge_states_collect)
1638 limit = pf_purge_states_collect;
1639
1640 scanned = pf_purge_expired_states(limit, pf_purge_states_collect);
1641 if (scanned >= pf_purge_states_limit) {
1642 /* we've run out of states to scan this "interval" */
1643 timeout_add_sec(&pf_purge_states_to, 1);
1644 return;
1645 }
1646
1647 pf_purge_states_limit -= scanned;
1648 task_add(systqmp, &pf_purge_states_task);
1649 }
1650
1651 void pf_purge_tick(void *);
1652 struct timeout pf_purge_to =
1653 TIMEOUT_INITIALIZER(pf_purge_tick, NULL);
1654
1655 void pf_purge(void *);
1656 struct task pf_purge_task =
1657 TASK_INITIALIZER(pf_purge, NULL);
1658
1659 void
1660 pf_purge_tick(void *null)
1661 {
1662 task_add(systqmp, &pf_purge_task);
1663 }
1664
1665 void
1666 pf_purge(void *null)
1667 {
1668 unsigned int interval = max(1, pf_default_rule.timeout[PFTM_INTERVAL]);
1669
1670 PF_LOCK();
1671
1672 pf_purge_expired_src_nodes();
1673
1674 PF_UNLOCK();
1675
1676 /*
1677 * Fragments don't require PF_LOCK(), they use their own lock.
1678 */
1679 pf_purge_expired_fragments();
1680
1681 /* interpret the interval as idle time between runs */
1682 timeout_add_sec(&pf_purge_to, interval);
1683 }
1684
1685 int32_t
1686 pf_state_expires(const struct pf_state *st, uint8_t stimeout)
1687 {
1688 u_int32_t timeout;
1689 u_int32_t start;
1690 u_int32_t end;
1691 u_int32_t states;
1692
1693 /*
1694 * pf_state_expires is used by the state purge task to
1695 * decide if a state is a candidate for cleanup, and by the
1696 * pfsync state export code to populate an expiry time.
1697 *
1698 * this function may be called by the state purge task while
1699 * the state is being modified. avoid inconsistent reads of
1700 * state->timeout by having the caller do the read (and any
1701 * checks it needs to do on the same variable) and then pass
1702 * their view of the timeout in here for this function to use.
1703 * the only consequence of using a stale timeout value is
1704 * that the state won't be a candidate for purging until the
1705 * next pass of the purge task.
1706 */
1707
1708 /* handle all PFTM_* >= PFTM_MAX here */
1709 if (stimeout >= PFTM_MAX)
1710 return (0);
1711
1712 KASSERT(stimeout < PFTM_MAX);
1713
1714 timeout = st->rule.ptr->timeout[stimeout];
1715 if (!timeout)
1716 timeout = pf_default_rule.timeout[stimeout];
1717
1718 start = st->rule.ptr->timeout[PFTM_ADAPTIVE_START];
1719 if (start) {
1720 end = st->rule.ptr->timeout[PFTM_ADAPTIVE_END];
1721 states = st->rule.ptr->states_cur;
1722 } else {
1723 start = pf_default_rule.timeout[PFTM_ADAPTIVE_START];
1724 end = pf_default_rule.timeout[PFTM_ADAPTIVE_END];
1725 states = pf_status.states;
1726 }
1727 if (end && states > start && start < end) {
1728 if (states >= end)
1729 return (0);
1730
1731 timeout = (u_int64_t)timeout * (end - states) / (end - start);
1732 }
1733
1734 return (st->expire + timeout);
1735 }
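
/*
 * Editor's illustration of the adaptive scaling above: with
 * "set timeout { adaptive.start 6000, adaptive.end 12000 }", 9000
 * states and a base timeout of 24 hours, the effective timeout
 * becomes 86400 * (12000 - 9000) / (12000 - 6000), i.e. 12 hours;
 * at 12000 or more states the state is immediately expirable.
 */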
1736
1737 void
1738 pf_purge_expired_src_nodes(void)
1739 {
1740 struct pf_src_node *cur, *next;
1741
1742 PF_ASSERT_LOCKED();
1743
1744 RB_FOREACH_SAFE(cur, pf_src_tree, &tree_src_tracking, next) {
1745 if (cur->states == 0 && cur->expire <= getuptime()) {
1746 pf_remove_src_node(cur);
1747 }
1748 }
1749 }
1750
1751 void
1752 pf_src_tree_remove_state(struct pf_state *st)
1753 {
1754 u_int32_t timeout;
1755 struct pf_sn_item *sni;
1756
1757 while ((sni = SLIST_FIRST(&st->src_nodes)) != NULL) {
1758 SLIST_REMOVE_HEAD(&st->src_nodes, next);
1759 if (st->src.tcp_est)
1760 --sni->sn->conn;
1761 if (--sni->sn->states == 0) {
1762 timeout = st->rule.ptr->timeout[PFTM_SRC_NODE];
1763 if (!timeout)
1764 timeout =
1765 pf_default_rule.timeout[PFTM_SRC_NODE];
1766 sni->sn->expire = getuptime() + timeout;
1767 }
1768 pool_put(&pf_sn_item_pl, sni);
1769 }
1770 }
1771
1772 void
1773 pf_remove_state(struct pf_state *st)
1774 {
1775 PF_ASSERT_LOCKED();
1776
1777 mtx_enter(&st->mtx);
1778 if (st->timeout == PFTM_UNLINKED) {
1779 mtx_leave(&st->mtx);
1780 return;
1781 }
1782 st->timeout = PFTM_UNLINKED;
1783 mtx_leave(&st->mtx);
1784
1785 /* handle load balancing related tasks */
1786 pf_postprocess_addr(st);
1787
1788 if (st->src.state == PF_TCPS_PROXY_DST) {
1789 pf_send_tcp(st->rule.ptr, st->key[PF_SK_WIRE]->af,
1790 &st->key[PF_SK_WIRE]->addr[1],
1791 &st->key[PF_SK_WIRE]->addr[0],
1792 st->key[PF_SK_WIRE]->port[1],
1793 st->key[PF_SK_WIRE]->port[0],
1794 st->src.seqhi, st->src.seqlo + 1,
1795 TH_RST|TH_ACK, 0, 0, 0, 1, st->tag,
1796 st->key[PF_SK_WIRE]->rdomain);
1797 }
1798 if (st->key[PF_SK_STACK]->proto == IPPROTO_TCP)
1799 pf_set_protostate(st, PF_PEER_BOTH, TCPS_CLOSED);
1800
1801 RBT_REMOVE(pf_state_tree_id, &tree_id, st);
1802 #if NPFLOW > 0
1803 if (st->state_flags & PFSTATE_PFLOW)
1804 export_pflow(st);
1805 #endif /* NPFLOW > 0 */
1806 #if NPFSYNC > 0
1807 pfsync_delete_state(st);
1808 #endif /* NPFSYNC > 0 */
1809 pf_src_tree_remove_state(st);
1810 pf_detach_state(st);
1811 }
1812
1813 void
1814 pf_remove_divert_state(struct inpcb *inp)
1815 {
1816 struct pf_state_key *sk;
1817 struct pf_state_item *si;
1818
1819 PF_ASSERT_UNLOCKED();
1820
1821 if (READ_ONCE(inp->inp_pf_sk) == NULL)
1822 return;
1823
1824 mtx_enter(&pf_inp_mtx);
1825 sk = pf_state_key_ref(inp->inp_pf_sk);
1826 mtx_leave(&pf_inp_mtx);
1827 if (sk == NULL)
1828 return;
1829
1830 PF_LOCK();
1831 PF_STATE_ENTER_WRITE();
1832 TAILQ_FOREACH(si, &sk->sk_states, si_entry) {
1833 struct pf_state *sist = si->si_st;
1834 if (sk == sist->key[PF_SK_STACK] && sist->rule.ptr &&
1835 (sist->rule.ptr->divert.type == PF_DIVERT_TO ||
1836 sist->rule.ptr->divert.type == PF_DIVERT_REPLY)) {
1837 if (sist->key[PF_SK_STACK]->proto == IPPROTO_TCP &&
1838 sist->key[PF_SK_WIRE] != sist->key[PF_SK_STACK]) {
1839 /*
1840 * If the local address is translated, keep
1841 * the state for "tcp.closed" seconds to
1842 * prevent its source port from being reused.
1843 */
1844 if (sist->src.state < TCPS_FIN_WAIT_2 ||
1845 sist->dst.state < TCPS_FIN_WAIT_2) {
1846 pf_set_protostate(sist, PF_PEER_BOTH,
1847 TCPS_TIME_WAIT);
1848 pf_update_state_timeout(sist,
1849 PFTM_TCP_CLOSED);
1850 sist->expire = getuptime();
1851 }
1852 sist->state_flags |= PFSTATE_INP_UNLINKED;
1853 } else
1854 pf_remove_state(sist);
1855 break;
1856 }
1857 }
1858 PF_STATE_EXIT_WRITE();
1859 PF_UNLOCK();
1860
1861 pf_state_key_unref(sk);
1862 }
1863
1864 void
1865 pf_free_state(struct pf_state *st)
1866 {
1867 struct pf_rule_item *ri;
1868
1869 PF_ASSERT_LOCKED();
1870
1871 #if NPFSYNC > 0
1872 if (pfsync_state_in_use(st))
1873 return;
1874 #endif /* NPFSYNC > 0 */
1875
1876 KASSERT(st->timeout == PFTM_UNLINKED);
1877 if (--st->rule.ptr->states_cur == 0 &&
1878 st->rule.ptr->src_nodes == 0)
1879 pf_rm_rule(NULL, st->rule.ptr);
1880 if (st->anchor.ptr != NULL)
1881 if (--st->anchor.ptr->states_cur == 0)
1882 pf_rm_rule(NULL, st->anchor.ptr);
1883 while ((ri = SLIST_FIRST(&st->match_rules))) {
1884 SLIST_REMOVE_HEAD(&st->match_rules, entry);
1885 if (--ri->r->states_cur == 0 &&
1886 ri->r->src_nodes == 0)
1887 pf_rm_rule(NULL, ri->r);
1888 pool_put(&pf_rule_item_pl, ri);
1889 }
1890 pf_normalize_tcp_cleanup(st);
1891 pfi_kif_unref(st->kif, PFI_KIF_REF_STATE);
1892 pf_state_list_remove(&pf_state_list, st);
1893 if (st->tag)
1894 pf_tag_unref(st->tag);
1895 pf_state_unref(st);
1896 pf_status.fcounters[FCNT_STATE_REMOVALS]++;
1897 pf_status.states--;
1898 }
1899
1900 unsigned int
1901 pf_purge_expired_states(const unsigned int limit, const unsigned int collect)
1902 {
1903 /*
1904 * this task/thread/context/whatever is the only thing that
1905 * removes states from the pf_state_list, so the cur reference
1906 * it holds between calls is guaranteed to still be in the
1907 * list.
1908 */
1909 static struct pf_state *cur = NULL;
1910
1911 struct pf_state *head, *tail;
1912 struct pf_state *st;
1913 SLIST_HEAD(pf_state_gcl, pf_state) gcl = SLIST_HEAD_INITIALIZER(gcl);
1914 time_t now;
1915 unsigned int scanned;
1916 unsigned int collected = 0;
1917
1918 PF_ASSERT_UNLOCKED();
1919
1920 rw_enter_read(&pf_state_list.pfs_rwl);
1921
1922 mtx_enter(&pf_state_list.pfs_mtx);
1923 head = TAILQ_FIRST(&pf_state_list.pfs_list);
1924 tail = TAILQ_LAST(&pf_state_list.pfs_list, pf_state_queue);
1925 mtx_leave(&pf_state_list.pfs_mtx);
1926
1927 if (head == NULL) {
1928 /* the list is empty */
1929 rw_exit_read(&pf_state_list.pfs_rwl);
1930 return (limit);
1931 }
1932
1933 /* (re)start at the front of the list */
1934 if (cur == NULL)
1935 cur = head;
1936
1937 now = getuptime();
1938
1939 for (scanned = 0; scanned < limit; scanned++) {
1940 uint8_t stimeout = cur->timeout;
1941 unsigned int limited = 0;
1942
1943 if ((stimeout == PFTM_UNLINKED) ||
1944 (pf_state_expires(cur, stimeout) <= now)) {
1945 st = pf_state_ref(cur);
1946 SLIST_INSERT_HEAD(&gcl, st, gc_list);
1947
1948 if (++collected >= collect)
1949 limited = 1;
1950 }
1951
1952 /* don't iterate past the end of our view of the list */
1953 if (cur == tail) {
1954 cur = NULL;
1955 break;
1956 }
1957
1958 cur = TAILQ_NEXT(cur, entry_list);
1959
1960 /* don't spend too much time here. */
1961 if (ISSET(READ_ONCE(curcpu()->ci_schedstate.spc_schedflags),
1962 SPCF_SHOULDYIELD) || limited)
1963 break;
1964 }
1965
1966 rw_exit_read(&pf_state_list.pfs_rwl);
1967
1968 if (SLIST_EMPTY(&gcl))
1969 return (scanned);
1970
1971 rw_enter_write(&pf_state_list.pfs_rwl);
1972 PF_LOCK();
1973 PF_STATE_ENTER_WRITE();
1974 SLIST_FOREACH(st, &gcl, gc_list) {
1975 if (st->timeout != PFTM_UNLINKED)
1976 pf_remove_state(st);
1977
1978 pf_free_state(st);
1979 }
1980 PF_STATE_EXIT_WRITE();
1981 PF_UNLOCK();
1982 rw_exit_write(&pf_state_list.pfs_rwl);
1983
1984 while ((st = SLIST_FIRST(&gcl)) != NULL) {
1985 SLIST_REMOVE_HEAD(&gcl, gc_list);
1986 pf_state_unref(st);
1987 }
1988
1989 return (scanned);
1990 }
1991
1992 int
1993 pf_tbladdr_setup(struct pf_ruleset *rs, struct pf_addr_wrap *aw, int wait)
1994 {
1995 if (aw->type != PF_ADDR_TABLE)
1996 return (0);
1997 if ((aw->p.tbl = pfr_attach_table(rs, aw->v.tblname, wait)) == NULL)
1998 return (1);
1999 return (0);
2000 }
2001
2002 void
2003 pf_tbladdr_remove(struct pf_addr_wrap *aw)
2004 {
2005 if (aw->type != PF_ADDR_TABLE || aw->p.tbl == NULL)
2006 return;
2007 pfr_detach_table(aw->p.tbl);
2008 aw->p.tbl = NULL;
2009 }
2010
2011 void
2012 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
2013 {
2014 struct pfr_ktable *kt = aw->p.tbl;
2015
2016 if (aw->type != PF_ADDR_TABLE || kt == NULL)
2017 return;
2018 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2019 kt = kt->pfrkt_root;
2020 aw->p.tbl = NULL;
2021 aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
2022 kt->pfrkt_cnt : -1;
2023 }
2024
2025 void
2026 pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
2027 {
2028 switch (af) {
2029 case AF_INET: {
2030 u_int32_t a = ntohl(addr->addr32[0]);
2031 addlog("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
2032 (a>>8)&255, a&255);
2033 if (p) {
2034 p = ntohs(p);
2035 addlog(":%u", p);
2036 }
2037 break;
2038 }
2039 #ifdef INET6
2040 case AF_INET6: {
2041 u_int16_t b;
2042 u_int8_t i, curstart, curend, maxstart, maxend;
2043 curstart = curend = maxstart = maxend = 255;
2044 for (i = 0; i < 8; i++) {
2045 if (!addr->addr16[i]) {
2046 if (curstart == 255)
2047 curstart = i;
2048 curend = i;
2049 } else {
2050 if ((curend - curstart) >
2051 (maxend - maxstart)) {
2052 maxstart = curstart;
2053 maxend = curend;
2054 }
2055 curstart = curend = 255;
2056 }
2057 }
2058 if ((curend - curstart) >
2059 (maxend - maxstart)) {
2060 maxstart = curstart;
2061 maxend = curend;
2062 }
2063 for (i = 0; i < 8; i++) {
2064 if (i >= maxstart && i <= maxend) {
2065 if (i == 0)
2066 addlog(":");
2067 if (i == maxend)
2068 addlog(":");
2069 } else {
2070 b = ntohs(addr->addr16[i]);
2071 addlog("%x", b);
2072 if (i < 7)
2073 addlog(":");
2074 }
2075 }
2076 if (p) {
2077 p = ntohs(p);
2078 addlog("[%u]", p);
2079 }
2080 break;
2081 }
2082 #endif /* INET6 */
2083 }
2084 }
2085
2086 void
2087 pf_print_state(struct pf_state *st)
2088 {
2089 pf_print_state_parts(st, NULL, NULL);
2090 }
2091
2092 void
2093 pf_print_state_parts(struct pf_state *st,
2094 struct pf_state_key *skwp, struct pf_state_key *sksp)
2095 {
2096 struct pf_state_key *skw, *sks;
2097 u_int8_t proto, dir;
2098
2099 /* Do our best to fill these, but they're skipped if NULL */
2100 skw = skwp ? skwp : (st ? st->key[PF_SK_WIRE] : NULL);
2101 sks = sksp ? sksp : (st ? st->key[PF_SK_STACK] : NULL);
2102 proto = skw ? skw->proto : (sks ? sks->proto : 0);
2103 dir = st ? st->direction : 0;
2104
2105 switch (proto) {
2106 case IPPROTO_IPV4:
2107 addlog("IPv4");
2108 break;
2109 case IPPROTO_IPV6:
2110 addlog("IPv6");
2111 break;
2112 case IPPROTO_TCP:
2113 addlog("TCP");
2114 break;
2115 case IPPROTO_UDP:
2116 addlog("UDP");
2117 break;
2118 case IPPROTO_ICMP:
2119 addlog("ICMP");
2120 break;
2121 case IPPROTO_ICMPV6:
2122 addlog("ICMPv6");
2123 break;
2124 default:
2125 addlog("%u", proto);
2126 break;
2127 }
2128 switch (dir) {
2129 case PF_IN:
2130 addlog(" in");
2131 break;
2132 case PF_OUT:
2133 addlog(" out");
2134 break;
2135 }
2136 if (skw) {
2137 addlog(" wire: (%d) ", skw->rdomain);
2138 pf_print_host(&skw->addr[0], skw->port[0], skw->af);
2139 addlog(" ");
2140 pf_print_host(&skw->addr[1], skw->port[1], skw->af);
2141 }
2142 if (sks) {
2143 addlog(" stack: (%d) ", sks->rdomain);
2144 if (sks != skw) {
2145 pf_print_host(&sks->addr[0], sks->port[0], sks->af);
2146 addlog(" ");
2147 pf_print_host(&sks->addr[1], sks->port[1], sks->af);
2148 } else
2149 addlog("-");
2150 }
2151 if (st) {
2152 if (proto == IPPROTO_TCP) {
2153 addlog(" [lo=%u high=%u win=%u modulator=%u",
2154 st->src.seqlo, st->src.seqhi,
2155 st->src.max_win, st->src.seqdiff);
2156 if (st->src.wscale && st->dst.wscale)
2157 addlog(" wscale=%u",
2158 st->src.wscale & PF_WSCALE_MASK);
2159 addlog("]");
2160 addlog(" [lo=%u high=%u win=%u modulator=%u",
2161 st->dst.seqlo, st->dst.seqhi,
2162 st->dst.max_win, st->dst.seqdiff);
2163 if (st->src.wscale && st->dst.wscale)
2164 addlog(" wscale=%u",
2165 st->dst.wscale & PF_WSCALE_MASK);
2166 addlog("]");
2167 }
2168 addlog(" %u:%u", st->src.state, st->dst.state);
2169 if (st->rule.ptr)
2170 addlog(" @%d", st->rule.ptr->nr);
2171 }
2172 }
2173
2174 void
2175 pf_print_flags(u_int8_t f)
2176 {
2177 if (f)
2178 addlog(" ");
2179 if (f & TH_FIN)
2180 addlog("F");
2181 if (f & TH_SYN)
2182 addlog("S");
2183 if (f & TH_RST)
2184 addlog("R");
2185 if (f & TH_PUSH)
2186 addlog("P");
2187 if (f & TH_ACK)
2188 addlog("A");
2189 if (f & TH_URG)
2190 addlog("U");
2191 if (f & TH_ECE)
2192 addlog("E");
2193 if (f & TH_CWR)
2194 addlog("W");
2195 }
2196
2197 #define PF_SET_SKIP_STEPS(i) \
2198 do { \
2199 while (head[i] != cur) { \
2200 head[i]->skip[i].ptr = cur; \
2201 head[i] = TAILQ_NEXT(head[i], entries); \
2202 } \
2203 } while (0)
2204
2205 void
2206 pf_calc_skip_steps(struct pf_rulequeue *rules)
2207 {
2208 struct pf_rule *cur, *prev, *head[PF_SKIP_COUNT];
2209 int i;
2210
2211 cur = TAILQ_FIRST(rules);
2212 prev = cur;
2213 for (i = 0; i < PF_SKIP_COUNT; ++i)
2214 head[i] = cur;
2215 while (cur != NULL) {
2216 if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
2217 PF_SET_SKIP_STEPS(PF_SKIP_IFP);
2218 if (cur->direction != prev->direction)
2219 PF_SET_SKIP_STEPS(PF_SKIP_DIR);
2220 if (cur->onrdomain != prev->onrdomain ||
2221 cur->ifnot != prev->ifnot)
2222 PF_SET_SKIP_STEPS(PF_SKIP_RDOM);
2223 if (cur->af != prev->af)
2224 PF_SET_SKIP_STEPS(PF_SKIP_AF);
2225 if (cur->proto != prev->proto)
2226 PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
2227 if (cur->src.neg != prev->src.neg ||
2228 pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
2229 PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
2230 if (cur->dst.neg != prev->dst.neg ||
2231 pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
2232 PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
2233 if (cur->src.port[0] != prev->src.port[0] ||
2234 cur->src.port[1] != prev->src.port[1] ||
2235 cur->src.port_op != prev->src.port_op)
2236 PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
2237 if (cur->dst.port[0] != prev->dst.port[0] ||
2238 cur->dst.port[1] != prev->dst.port[1] ||
2239 cur->dst.port_op != prev->dst.port_op)
2240 PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
2241
2242 prev = cur;
2243 cur = TAILQ_NEXT(cur, entries);
2244 }
2245 for (i = 0; i < PF_SKIP_COUNT; ++i)
2246 PF_SET_SKIP_STEPS(i);
2247 }
2248
2249 int
2250 pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
2251 {
2252 if (aw1->type != aw2->type)
2253 return (1);
2254 switch (aw1->type) {
2255 case PF_ADDR_ADDRMASK:
2256 case PF_ADDR_RANGE:
2257 if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, AF_INET6))
2258 return (1);
2259 if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, AF_INET6))
2260 return (1);
2261 return (0);
2262 case PF_ADDR_DYNIFTL:
2263 return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
2264 case PF_ADDR_NONE:
2265 case PF_ADDR_NOROUTE:
2266 case PF_ADDR_URPFFAILED:
2267 return (0);
2268 case PF_ADDR_TABLE:
2269 return (aw1->p.tbl != aw2->p.tbl);
2270 case PF_ADDR_RTLABEL:
2271 return (aw1->v.rtlabel != aw2->v.rtlabel);
2272 default:
2273 addlog("invalid address type: %d\n", aw1->type);
2274 return (1);
2275 }
2276 }
2277
2278 /* This algorithm computes 'a + b - c' in ones-complement using a trick to
2279 * emulate at most one ones-complement subtraction. This thereby limits net
2280 * carries/borrows to at most one, eliminating a reduction step and saving one
2281 * each of +, >>, & and ~.
2282 *
2283 * def. x mod y = x - (x//y)*y for integer x,y
2284 * def. sum = x mod 2^16
2285 * def. accumulator = (x >> 16) mod 2^16
2286 *
2287 * The trick works as follows: subtracting exactly one u_int16_t from the
2288 * u_int32_t x incurs at most one underflow, wrapping its upper 16-bits, the
2289 * accumulator, to 2^16 - 1. Adding this to the 16-bit sum preserves the
2290 * ones-complement borrow:
2291 *
2292 * (sum + accumulator) mod 2^16
2293 * = { assume underflow: accumulator := 2^16 - 1 }
2294 * (sum + 2^16 - 1) mod 2^16
2295 * = { mod }
2296 * (sum - 1) mod 2^16
2297 *
2298 * Although this breaks for sum = 0, giving 0xffff, which is ones-complement's
2299 * other zero, not -1, that cannot occur: the 16-bit sum cannot be underflown
2300 * to zero as that requires subtraction of at least 2^16, which exceeds a
2301 * single u_int16_t's range.
2302 *
2303 * We use the following theorem to derive the implementation:
2304 *
2305 * th. (x + (y mod z)) mod z = (x + y) mod z (0)
2306 * proof.
2307 * (x + (y mod z)) mod z
2308 * = { def mod }
2309 * (x + y - (y//z)*z) mod z
2310 * = { (a + b*c) mod c = a mod c }
2311 * (x + y) mod z [end of proof]
2312 *
2313 * ... and thereby obtain:
2314 *
2315 * (sum + accumulator) mod 2^16
2316 * = { def. accumulator, def. sum }
2317 * (x mod 2^16 + (x >> 16) mod 2^16) mod 2^16
2318 * = { (0), twice }
2319 * (x + (x >> 16)) mod 2^16
2320 * = { x mod 2^n = x & (2^n - 1) }
2321 * (x + (x >> 16)) & 0xffff
2322 *
2323 * Note: this serves also as a reduction step for at most one add (as the
2324 * trailing mod 2^16 prevents further reductions by destroying carries).
2325 */
2326 __inline void
2327 pf_cksum_fixup(u_int16_t *cksum, u_int16_t was, u_int16_t now,
2328 u_int8_t proto)
2329 {
2330 u_int32_t x;
2331 const int udp = proto == IPPROTO_UDP;
2332
2333 x = *cksum + was - now;
2334 x = (x + (x >> 16)) & 0xffff;
2335
2336 /* optimise: eliminate a branch when not udp */
2337 if (udp && *cksum == 0x0000)
2338 return;
2339 if (udp && x == 0x0000)
2340 x = 0xffff;
2341
2342 *cksum = (u_int16_t)(x);
2343 }
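/*
 * A worked trace of the borrow case above (illustration only): with
 * *cksum = 0x0000, was = 0x0000 and now = 0x0001, i.e. a checksummed
 * field changing from 0 to 1:
 *
 *	x = 0x0000 + 0x0000 - 0x0001		(u_int32_t wraps to 0xffffffff)
 *	x = (0xffffffff + 0xffff) & 0xffff	= 0xfffe
 *
 * which matches the ones-complement result 0x0000 +_1 ~0x0001 = 0xfffe;
 * the single reduction step folds the wrapped borrow back into the sum.
 */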
2344
2345 #ifdef INET6
2346 /* pre: coverage(cksum) is superset of coverage(covered_cksum) */
2347 static __inline void
2348 pf_cksum_uncover(u_int16_t *cksum, u_int16_t covered_cksum, u_int8_t proto)
2349 {
2350 pf_cksum_fixup(cksum, ~covered_cksum, 0x0, proto);
2351 }
2352
2353 /* pre: disjoint(coverage(cksum), coverage(uncovered_cksum)) */
2354 static __inline void
2355 pf_cksum_cover(u_int16_t *cksum, u_int16_t uncovered_cksum, u_int8_t proto)
2356 {
2357 pf_cksum_fixup(cksum, 0x0, ~uncovered_cksum, proto);
2358 }
2359 #endif /* INET6 */
2360
2361 /* pre: *a is 16-bit aligned within its packet
2362 *
2363 * This algorithm emulates 16-bit ones-complement sums on a twos-complement
2364 * machine by conserving ones-complement's otherwise discarded carries in the
2365 * upper bits of x. These accumulated carries when added to the lower 16-bits
2366 * over at least zero 'reduction' steps then complete the ones-complement sum.
2367 *
2368 * def. sum = x mod 2^16
2369 * def. accumulator = (x >> 16)
2370 *
2371 * At most two reduction steps
2372 *
2373 * x := sum + accumulator
2374 * = { def sum, def accumulator }
2375 * x := x mod 2^16 + (x >> 16)
2376 * = { x mod 2^n = x & (2^n - 1) }
2377 * x := (x & 0xffff) + (x >> 16)
2378 *
2379 * are necessary to incorporate the accumulated carries (at most one per add)
2380 * i.e. to reduce x < 2^16 from at most 16 carries in the upper 16 bits.
2381 *
2382 * The function is also invariant over the endian of the host. Why?
2383 *
2384 * Define the unary transpose operator ~ on a bitstring in python slice
2385 * notation as lambda m: m[P:] + m[:P] , for some constant pivot P.
2386 *
2387 * th. ~ distributes over ones-complement addition, denoted by +_1, i.e.
2388 *
2389 * ~m +_1 ~n = ~(m +_1 n) (for all bitstrings m,n of equal length)
2390 *
2391 * proof. Regard the bitstrings in m +_1 n as split at P, forming at most two
2392 * 'half-adds'. Under ones-complement addition, each half-add carries to the
2393 * other, so the sum of each half-add is unaffected by their relative
2394 * order. Therefore:
2395 *
2396 * ~m +_1 ~n
2397 * = { half-adds invariant under transposition }
2398 * ~s
2399 * = { substitute }
2400 * ~(m +_1 n) [end of proof]
2401 *
2402 * th. Summing two in-memory ones-complement 16-bit variables m,n on a machine
2403 * with the converse endian does not alter the result.
2404 *
2405 * proof.
2406 * { converse machine endian: load/store transposes, P := 8 }
2407 * ~(~m +_1 ~n)
2408 * = { ~ over +_1 }
2409 * ~~m +_1 ~~n
2410 * = { ~ is an involution }
2411 * m +_1 n [end of proof]
2412 *
2413 */
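/*
 * A concrete instance of the transposition theorem (illustration only),
 * with pivot P = 8, i.e. a byte swap, m = 0x1234 and n = 0xfedc:
 *
 *	m +_1 n   = 0x1234 + 0xfedc = 0x11110 -> 0x1110 + 1 = 0x1111
 *	~m +_1 ~n = 0x3412 + 0xdcfe = 0x11110 -> 0x1110 + 1 = 0x1111
 *
 * and ~0x1111 = 0x1111, so ~m +_1 ~n = ~(m +_1 n) as claimed; the sum
 * of in-memory 16-bit words is therefore the same on either endian.
 */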
2414 #define NEG(x) ((u_int16_t)~(x))
2415 void
2416 pf_cksum_fixup_a(u_int16_t *cksum, const struct pf_addr *a,
2417 const struct pf_addr *an, sa_family_t af, u_int8_t proto)
2418 {
2419 u_int32_t x;
2420 const u_int16_t *n = an->addr16;
2421 const u_int16_t *o = a->addr16;
2422 const int udp = proto == IPPROTO_UDP;
2423
2424 switch (af) {
2425 case AF_INET:
2426 x = *cksum + o[0] + NEG(n[0]) + o[1] + NEG(n[1]);
2427 break;
2428 #ifdef INET6
2429 case AF_INET6:
2430 x = *cksum + o[0] + NEG(n[0]) + o[1] + NEG(n[1]) +\
2431 o[2] + NEG(n[2]) + o[3] + NEG(n[3]) +\
2432 o[4] + NEG(n[4]) + o[5] + NEG(n[5]) +\
2433 o[6] + NEG(n[6]) + o[7] + NEG(n[7]);
2434 break;
2435 #endif /* INET6 */
2436 default:
2437 unhandled_af(af);
2438 }
2439
2440 x = (x & 0xffff) + (x >> 16);
2441 x = (x & 0xffff) + (x >> 16);
2442
2443 /* optimise: eliminate a branch when not udp */
2444 if (udp && *cksum == 0x0000)
2445 return;
2446 if (udp && x == 0x0000)
2447 x = 0xffff;
2448
2449 *cksum = (u_int16_t)(x);
2450 }
2451
2452 int
2453 pf_patch_8(struct pf_pdesc *pd, u_int8_t *f, u_int8_t v, bool hi)
2454 {
2455 int rewrite = 0;
2456
2457 if (*f != v) {
2458 u_int16_t old = htons(hi ? (*f << 8) : *f);
2459 u_int16_t new = htons(hi ? ( v << 8) : v);
2460
2461 pf_cksum_fixup(pd->pcksum, old, new, pd->proto);
2462 *f = v;
2463 rewrite = 1;
2464 }
2465
2466 return (rewrite);
2467 }
2468
2469 /* pre: *f is 16-bit aligned within its packet */
2470 int
2471 pf_patch_16(struct pf_pdesc *pd, u_int16_t *f, u_int16_t v)
2472 {
2473 int rewrite = 0;
2474
2475 if (*f != v) {
2476 pf_cksum_fixup(pd->pcksum, *f, v, pd->proto);
2477 *f = v;
2478 rewrite = 1;
2479 }
2480
2481 return (rewrite);
2482 }
2483
2484 int
2485 pf_patch_16_unaligned(struct pf_pdesc *pd, void *f, u_int16_t v, bool hi)
2486 {
2487 int rewrite = 0;
2488 u_int8_t *fb = (u_int8_t*)f;
2489 u_int8_t *vb = (u_int8_t*)&v;
2490
2491 if (hi && ALIGNED_POINTER(f, u_int16_t)) {
2492 return (pf_patch_16(pd, f, v)); /* optimise */
2493 }
2494
2495 rewrite += pf_patch_8(pd, fb++, *vb++, hi);
2496 rewrite += pf_patch_8(pd, fb++, *vb++,!hi);
2497
2498 return (rewrite);
2499 }
2500
2501 /* pre: *f is 16-bit aligned within its packet */
2502 /* pre: pd->proto != IPPROTO_UDP */
2503 int
2504 pf_patch_32(struct pf_pdesc *pd, u_int32_t *f, u_int32_t v)
2505 {
2506 int rewrite = 0;
2507 u_int16_t *pc = pd->pcksum;
2508 u_int8_t proto = pd->proto;
2509
2510 /* optimise: inline udp fixup code is unused; let compiler scrub it */
2511 if (proto == IPPROTO_UDP)
2512 panic("%s: udp", __func__);
2513
2514 /* optimise: skip *f != v guard; true for all use-cases */
2515 pf_cksum_fixup(pc, *f / (1 << 16), v / (1 << 16), proto);
2516 pf_cksum_fixup(pc, *f % (1 << 16), v % (1 << 16), proto);
2517
2518 *f = v;
2519 rewrite = 1;
2520
2521 return (rewrite);
2522 }
2523
2524 int
2525 pf_patch_32_unaligned(struct pf_pdesc *pd, void *f, u_int32_t v, bool hi)
2526 {
2527 int rewrite = 0;
2528 u_int8_t *fb = (u_int8_t*)f;
2529 u_int8_t *vb = (u_int8_t*)&v;
2530
2531 if (hi && ALIGNED_POINTER(f, u_int32_t)) {
2532 return (pf_patch_32(pd, f, v)); /* optimise */
2533 }
2534
2535 rewrite += pf_patch_8(pd, fb++, *vb++, hi);
2536 rewrite += pf_patch_8(pd, fb++, *vb++,!hi);
2537 rewrite += pf_patch_8(pd, fb++, *vb++, hi);
2538 rewrite += pf_patch_8(pd, fb++, *vb++,!hi);
2539
2540 return (rewrite);
2541 }
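/*
 * Usage sketch (illustration only): rewriting a 32-bit value that is
 * not 32-bit aligned within the packet, as pf_modulate_sack() does for
 * SACK blocks, decomposes into four single-byte patches with
 * alternating significance:
 *
 *	pf_patch_32_unaligned(pd, &sack.start,
 *	    htonl(ntohl(sack.start) - dst->seqdiff),
 *	    PF_ALGNMNT(startoff));
 *
 * Each pf_patch_8() call folds its byte into pd->pcksum as either the
 * high or the low half of a 16-bit word, so the checksum fixup stays
 * correct regardless of the field's alignment.
 */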
2542
2543 int
2544 pf_icmp_mapping(struct pf_pdesc *pd, u_int8_t type, int *icmp_dir,
2545 u_int16_t *virtual_id, u_int16_t *virtual_type)
2546 {
2547 /*
2548 * ICMP types marked with PF_OUT are typically responses to
2549 * PF_IN, and will match states in the opposite direction.
2550 * PF_IN ICMP types need to match a state with that type.
2551 */
2552 *icmp_dir = PF_OUT;
2553
2554 /* Queries (and responses) */
2555 switch (pd->af) {
2556 case AF_INET:
2557 switch (type) {
2558 case ICMP_ECHO:
2559 *icmp_dir = PF_IN;
2560 /* FALLTHROUGH */
2561 case ICMP_ECHOREPLY:
2562 *virtual_type = ICMP_ECHO;
2563 *virtual_id = pd->hdr.icmp.icmp_id;
2564 break;
2565
2566 case ICMP_TSTAMP:
2567 *icmp_dir = PF_IN;
2568 /* FALLTHROUGH */
2569 case ICMP_TSTAMPREPLY:
2570 *virtual_type = ICMP_TSTAMP;
2571 *virtual_id = pd->hdr.icmp.icmp_id;
2572 break;
2573
2574 case ICMP_IREQ:
2575 *icmp_dir = PF_IN;
2576 /* FALLTHROUGH */
2577 case ICMP_IREQREPLY:
2578 *virtual_type = ICMP_IREQ;
2579 *virtual_id = pd->hdr.icmp.icmp_id;
2580 break;
2581
2582 case ICMP_MASKREQ:
2583 *icmp_dir = PF_IN;
2584 /* FALLTHROUGH */
2585 case ICMP_MASKREPLY:
2586 *virtual_type = ICMP_MASKREQ;
2587 *virtual_id = pd->hdr.icmp.icmp_id;
2588 break;
2589
2590 case ICMP_IPV6_WHEREAREYOU:
2591 *icmp_dir = PF_IN;
2592 /* FALLTHROUGH */
2593 case ICMP_IPV6_IAMHERE:
2594 *virtual_type = ICMP_IPV6_WHEREAREYOU;
2595 *virtual_id = 0; /* Nothing sane to match on! */
2596 break;
2597
2598 case ICMP_MOBILE_REGREQUEST:
2599 *icmp_dir = PF_IN;
2600 /* FALLTHROUGH */
2601 case ICMP_MOBILE_REGREPLY:
2602 *virtual_type = ICMP_MOBILE_REGREQUEST;
2603 *virtual_id = 0; /* Nothing sane to match on! */
2604 break;
2605
2606 case ICMP_ROUTERSOLICIT:
2607 *icmp_dir = PF_IN;
2608 /* FALLTHROUGH */
2609 case ICMP_ROUTERADVERT:
2610 *virtual_type = ICMP_ROUTERSOLICIT;
2611 *virtual_id = 0; /* Nothing sane to match on! */
2612 break;
2613
2614 /* These ICMP types map to other connections */
2615 case ICMP_UNREACH:
2616 case ICMP_SOURCEQUENCH:
2617 case ICMP_REDIRECT:
2618 case ICMP_TIMXCEED:
2619 case ICMP_PARAMPROB:
2620 /* These will not be used, but set them anyway */
2621 *icmp_dir = PF_IN;
2622 *virtual_type = htons(type);
2623 *virtual_id = 0;
2624 return (1); /* These types match to another state */
2625
2626 /*
2627 * All remaining ICMP types get their own states,
2628 * and will only match in one direction.
2629 */
2630 default:
2631 *icmp_dir = PF_IN;
2632 *virtual_type = type;
2633 *virtual_id = 0;
2634 break;
2635 }
2636 break;
2637 #ifdef INET6
2638 case AF_INET6:
2639 switch (type) {
2640 case ICMP6_ECHO_REQUEST:
2641 *icmp_dir = PF_IN;
2642 /* FALLTHROUGH */
2643 case ICMP6_ECHO_REPLY:
2644 *virtual_type = ICMP6_ECHO_REQUEST;
2645 *virtual_id = pd->hdr.icmp6.icmp6_id;
2646 break;
2647
2648 case MLD_LISTENER_QUERY:
2649 case MLD_LISTENER_REPORT: {
2650 struct mld_hdr *mld = &pd->hdr.mld;
2651 u_int32_t h;
2652
2653 /*
2654 * A Listener Report can be sent by clients
2655 * without an associated Listener Query.
2656 * In addition, when a Report is sent as a
2657 * reply to a Query, its source and destination
2658 * addresses differ.
2659 */
2660 *icmp_dir = PF_IN;
2661 *virtual_type = MLD_LISTENER_QUERY;
2662 /* generate fake id for these messages */
2663 h = mld->mld_addr.s6_addr32[0] ^
2664 mld->mld_addr.s6_addr32[1] ^
2665 mld->mld_addr.s6_addr32[2] ^
2666 mld->mld_addr.s6_addr32[3];
2667 *virtual_id = (h >> 16) ^ (h & 0xffff);
2668 break;
2669 }
2670
2671 /*
2672 * ICMP6_FQDN and ICMP6_NI query/reply are the same type as
2673 * ICMP6_WRU
2674 */
2675 case ICMP6_WRUREQUEST:
2676 *icmp_dir = PF_IN;
2677 /* FALLTHROUGH */
2678 case ICMP6_WRUREPLY:
2679 *virtual_type = ICMP6_WRUREQUEST;
2680 *virtual_id = 0; /* Nothing sane to match on! */
2681 break;
2682
2683 case MLD_MTRACE:
2684 *icmp_dir = PF_IN;
2685 /* FALLTHROUGH */
2686 case MLD_MTRACE_RESP:
2687 *virtual_type = MLD_MTRACE;
2688 *virtual_id = 0; /* Nothing sane to match on! */
2689 break;
2690
2691 case ND_NEIGHBOR_SOLICIT:
2692 *icmp_dir = PF_IN;
2693 /* FALLTHROUGH */
2694 case ND_NEIGHBOR_ADVERT: {
2695 struct nd_neighbor_solicit *nd = &pd->hdr.nd_ns;
2696 u_int32_t h;
2697
2698 *virtual_type = ND_NEIGHBOR_SOLICIT;
2699 /* generate fake id for these messages */
2700 h = nd->nd_ns_target.s6_addr32[0] ^
2701 nd->nd_ns_target.s6_addr32[1] ^
2702 nd->nd_ns_target.s6_addr32[2] ^
2703 nd->nd_ns_target.s6_addr32[3];
2704 *virtual_id = (h >> 16) ^ (h & 0xffff);
2705 /*
2706 * the extra work here deals with the 'keep state' option
2707 * on a pass rule for unsolicited advertisements. By
2708 * returning 1 (state_icmp = 1) we override 'keep
2709 * state' to 'no state' so we don't create state for
2710 * unsolicited advertisements. No one expects an answer
2711 * to an unsolicited advertisement, so we should be good.
2712 */
2713 if (type == ND_NEIGHBOR_ADVERT) {
2714 *virtual_type = htons(*virtual_type);
2715 return (1);
2716 }
2717 break;
2718 }
2719
2720 /*
2721 * These ICMP types map to other connections.
2722 * ND_REDIRECT can't be in this list because the triggering
2723 * packet header is optional.
2724 */
2725 case ICMP6_DST_UNREACH:
2726 case ICMP6_PACKET_TOO_BIG:
2727 case ICMP6_TIME_EXCEEDED:
2728 case ICMP6_PARAM_PROB:
2729 /* These will not be used, but set them anyway */
2730 *icmp_dir = PF_IN;
2731 *virtual_type = htons(type);
2732 *virtual_id = 0;
2733 return (1); /* These types match to another state */
2734 /*
2735 * All remaining ICMP6 types get their own states,
2736 * and will only match in one direction.
2737 */
2738 default:
2739 *icmp_dir = PF_IN;
2740 *virtual_type = type;
2741 *virtual_id = 0;
2742 break;
2743 }
2744 break;
2745 #endif /* INET6 */
2746 }
2747 *virtual_type = htons(*virtual_type);
2748 return (0); /* These types match to their own state */
2749 }
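/*
 * Example of the mapping above (illustration only): an ICMP_ECHO query
 * with icmp_id 0x1234 yields icmp_dir = PF_IN, virtual_type =
 * htons(ICMP_ECHO) and virtual_id = 0x1234; the corresponding
 * ICMP_ECHOREPLY keeps the default icmp_dir = PF_OUT but produces the
 * same virtual key, so the reply matches the query's state in the
 * opposite direction.
 */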
2750
2751 void
2752 pf_translate_icmp(struct pf_pdesc *pd, struct pf_addr *qa, u_int16_t *qp,
2753 struct pf_addr *oa, struct pf_addr *na, u_int16_t np)
2754 {
2755 /* note: doesn't trouble to fixup quoted checksums, if any */
2756
2757 /* change quoted protocol port */
2758 if (qp != NULL)
2759 pf_patch_16(pd, qp, np);
2760
2761 /* change quoted ip address */
2762 pf_cksum_fixup_a(pd->pcksum, qa, na, pd->af, pd->proto);
2763 pf_addrcpy(qa, na, pd->af);
2764
2765 /* change network-header's ip address */
2766 if (oa)
2767 pf_translate_a(pd, oa, na);
2768 }
2769
2770 /* pre: *a is 16-bit aligned within its packet */
2771 /* *a is a network header src/dst address */
2772 int
2773 pf_translate_a(struct pf_pdesc *pd, struct pf_addr *a, struct pf_addr *an)
2774 {
2775 int rewrite = 0;
2776
2777 /* warning: !PF_ANEQ != PF_AEQ */
2778 if (!PF_ANEQ(a, an, pd->af))
2779 return (0);
2780
2781 /* fixup transport pseudo-header, if any */
2782 switch (pd->proto) {
2783 case IPPROTO_TCP: /* FALLTHROUGH */
2784 case IPPROTO_UDP: /* FALLTHROUGH */
2785 case IPPROTO_ICMPV6:
2786 pf_cksum_fixup_a(pd->pcksum, a, an, pd->af, pd->proto);
2787 break;
2788 default:
2789 break; /* assume no pseudo-header */
2790 }
2791
2792 pf_addrcpy(a, an, pd->af);
2793 rewrite = 1;
2794
2795 return (rewrite);
2796 }
2797
2798 #ifdef INET6
2799 /* pf_translate_af() may change pd->m, adjust local copies after calling */
2800 int
2801 pf_translate_af(struct pf_pdesc *pd)
2802 {
2803 static const struct pf_addr zero;
2804 struct ip *ip4;
2805 struct ip6_hdr *ip6;
2806 int copyback = 0;
2807 u_int hlen, ohlen, dlen;
2808 u_int16_t *pc;
2809 u_int8_t af_proto, naf_proto;
2810
2811 hlen = (pd->naf == AF_INET) ? sizeof(*ip4) : sizeof(*ip6);
2812 ohlen = pd->off;
2813 dlen = pd->tot_len - pd->off;
2814 pc = pd->pcksum;
2815
2816 af_proto = naf_proto = pd->proto;
2817 if (naf_proto == IPPROTO_ICMP)
2818 af_proto = IPPROTO_ICMPV6;
2819 if (naf_proto == IPPROTO_ICMPV6)
2820 af_proto = IPPROTO_ICMP;
2821
2822 /* uncover stale pseudo-header */
2823 switch (af_proto) {
2824 case IPPROTO_ICMPV6:
2825 /* optimise: unchanged for TCP/UDP */
2826 pf_cksum_fixup(pc, htons(af_proto), 0x0, af_proto);
2827 pf_cksum_fixup(pc, htons(dlen), 0x0, af_proto);
2828 /* FALLTHROUGH */
2829 case IPPROTO_UDP: /* FALLTHROUGH */
2830 case IPPROTO_TCP:
2831 pf_cksum_fixup_a(pc, pd->src, &zero, pd->af, af_proto);
2832 pf_cksum_fixup_a(pc, pd->dst, &zero, pd->af, af_proto);
2833 copyback = 1;
2834 break;
2835 default:
2836 break; /* assume no pseudo-header */
2837 }
2838
2839 /* replace the network header */
2840 m_adj(pd->m, pd->off);
2841 pd->src = NULL;
2842 pd->dst = NULL;
2843
2844 if ((M_PREPEND(pd->m, hlen, M_DONTWAIT)) == NULL) {
2845 pd->m = NULL;
2846 return (-1);
2847 }
2848
2849 pd->off = hlen;
2850 pd->tot_len += hlen - ohlen;
2851
2852 switch (pd->naf) {
2853 case AF_INET:
2854 ip4 = mtod(pd->m, struct ip *);
2855 memset(ip4, 0, hlen);
2856 ip4->ip_v = IPVERSION;
2857 ip4->ip_hl = hlen >> 2;
2858 ip4->ip_tos = pd->tos;
2859 ip4->ip_len = htons(hlen + dlen);
2860 ip4->ip_id = htons(ip_randomid());
2861 ip4->ip_off = htons(IP_DF);
2862 ip4->ip_ttl = pd->ttl;
2863 ip4->ip_p = pd->proto;
2864 ip4->ip_src = pd->nsaddr.v4;
2865 ip4->ip_dst = pd->ndaddr.v4;
2866 break;
2867 case AF_INET6:
2868 ip6 = mtod(pd->m, struct ip6_hdr *);
2869 memset(ip6, 0, hlen);
2870 ip6->ip6_vfc = IPV6_VERSION;
2871 ip6->ip6_flow |= htonl((u_int32_t)pd->tos << 20);
2872 ip6->ip6_plen = htons(dlen);
2873 ip6->ip6_nxt = pd->proto;
2874 if (!pd->ttl || pd->ttl > IPV6_DEFHLIM)
2875 ip6->ip6_hlim = IPV6_DEFHLIM;
2876 else
2877 ip6->ip6_hlim = pd->ttl;
2878 ip6->ip6_src = pd->nsaddr.v6;
2879 ip6->ip6_dst = pd->ndaddr.v6;
2880 break;
2881 default:
2882 unhandled_af(pd->naf);
2883 }
2884
2885 /* UDP over IPv6 must be checksummed per rfc2460 p27 */
2886 if (naf_proto == IPPROTO_UDP && *pc == 0x0000 &&
2887 pd->naf == AF_INET6) {
2888 pd->m->m_pkthdr.csum_flags |= M_UDP_CSUM_OUT;
2889 }
2890
2891 /* cover fresh pseudo-header */
2892 switch (naf_proto) {
2893 case IPPROTO_ICMPV6:
2894 /* optimise: unchanged for TCP/UDP */
2895 pf_cksum_fixup(pc, 0x0, htons(naf_proto), naf_proto);
2896 pf_cksum_fixup(pc, 0x0, htons(dlen), naf_proto);
2897 /* FALLTHROUGH */
2898 case IPPROTO_UDP: /* FALLTHROUGH */
2899 case IPPROTO_TCP:
2900 pf_cksum_fixup_a(pc, &zero, &pd->nsaddr, pd->naf, naf_proto);
2901 pf_cksum_fixup_a(pc, &zero, &pd->ndaddr, pd->naf, naf_proto);
2902 copyback = 1;
2903 break;
2904 default:
2905 break; /* assume no pseudo-header */
2906 }
2907
2908 /* flush pd->pcksum */
2909 if (copyback)
2910 m_copyback(pd->m, pd->off, pd->hdrlen, &pd->hdr, M_NOWAIT);
2911
2912 return (0);
2913 }
2914
2915 int
2916 pf_change_icmp_af(struct mbuf *m, int ipoff2, struct pf_pdesc *pd,
2917 struct pf_pdesc *pd2, struct pf_addr *src, struct pf_addr *dst,
2918 sa_family_t af, sa_family_t naf)
2919 {
2920 struct mbuf *n = NULL;
2921 struct ip *ip4;
2922 struct ip6_hdr *ip6;
2923 u_int hlen, ohlen, dlen;
2924 int d;
2925
2926 if (af == naf || (af != AF_INET && af != AF_INET6) ||
2927 (naf != AF_INET && naf != AF_INET6))
2928 return (-1);
2929
2930 /* split the mbuf chain on the quoted ip/ip6 header boundary */
2931 if ((n = m_split(m, ipoff2, M_DONTWAIT)) == NULL)
2932 return (-1);
2933
2934 /* new quoted header */
2935 hlen = naf == AF_INET ? sizeof(*ip4) : sizeof(*ip6);
2936 /* old quoted header */
2937 ohlen = pd2->off - ipoff2;
2938
2939 /* trim old quoted header */
2940 pf_cksum_uncover(pd->pcksum, in_cksum(n, ohlen), pd->proto);
2941 m_adj(n, ohlen);
2942
2943 /* prepend a new, translated, quoted header */
2944 if ((M_PREPEND(n, hlen, M_DONTWAIT)) == NULL)
2945 return (-1);
2946
2947 switch (naf) {
2948 case AF_INET:
2949 ip4 = mtod(n, struct ip *);
2950 memset(ip4, 0, sizeof(*ip4));
2951 ip4->ip_v = IPVERSION;
2952 ip4->ip_hl = sizeof(*ip4) >> 2;
2953 ip4->ip_len = htons(sizeof(*ip4) + pd2->tot_len - ohlen);
2954 ip4->ip_id = htons(ip_randomid());
2955 ip4->ip_off = htons(IP_DF);
2956 ip4->ip_ttl = pd2->ttl;
2957 if (pd2->proto == IPPROTO_ICMPV6)
2958 ip4->ip_p = IPPROTO_ICMP;
2959 else
2960 ip4->ip_p = pd2->proto;
2961 ip4->ip_src = src->v4;
2962 ip4->ip_dst = dst->v4;
2963 in_hdr_cksum_out(n, NULL);
2964 break;
2965 case AF_INET6:
2966 ip6 = mtod(n, struct ip6_hdr *);
2967 memset(ip6, 0, sizeof(*ip6));
2968 ip6->ip6_vfc = IPV6_VERSION;
2969 ip6->ip6_plen = htons(pd2->tot_len - ohlen);
2970 if (pd2->proto == IPPROTO_ICMP)
2971 ip6->ip6_nxt = IPPROTO_ICMPV6;
2972 else
2973 ip6->ip6_nxt = pd2->proto;
2974 if (!pd2->ttl || pd2->ttl > IPV6_DEFHLIM)
2975 ip6->ip6_hlim = IPV6_DEFHLIM;
2976 else
2977 ip6->ip6_hlim = pd2->ttl;
2978 ip6->ip6_src = src->v6;
2979 ip6->ip6_dst = dst->v6;
2980 break;
2981 }
2982
2983 /* cover new quoted header */
2984 /* optimise: any new AF_INET header of ours sums to zero */
2985 if (naf != AF_INET) {
2986 pf_cksum_cover(pd->pcksum, in_cksum(n, hlen), pd->proto);
2987 }
2988
2989 /* reattach modified quoted packet to outer header */
2990 {
2991 int nlen = n->m_pkthdr.len;
2992 m_cat(m, n);
2993 m->m_pkthdr.len += nlen;
2994 }
2995
2996 /* account for altered length */
2997 d = hlen - ohlen;
2998
2999 if (pd->proto == IPPROTO_ICMPV6) {
3000 /* fixup pseudo-header */
3001 dlen = pd->tot_len - pd->off;
3002 pf_cksum_fixup(pd->pcksum,
3003 htons(dlen), htons(dlen + d), pd->proto);
3004 }
3005
3006 pd->tot_len += d;
3007 pd2->tot_len += d;
3008 pd2->off += d;
3009
3010 /* note: not bothering to update network headers as
3011 these are due for rewrite by pf_translate_af() */
3012
3013 return (0);
3014 }
3015
3016
3017 #define PTR_IP(field) (offsetof(struct ip, field))
3018 #define PTR_IP6(field) (offsetof(struct ip6_hdr, field))
3019
3020 int
3021 pf_translate_icmp_af(struct pf_pdesc *pd, int af, void *arg)
3022 {
3023 struct icmp *icmp4;
3024 struct icmp6_hdr *icmp6;
3025 u_int32_t mtu;
3026 int32_t ptr = -1;
3027 u_int8_t type;
3028 u_int8_t code;
3029
3030 switch (af) {
3031 case AF_INET:
3032 icmp6 = arg;
3033 type = icmp6->icmp6_type;
3034 code = icmp6->icmp6_code;
3035 mtu = ntohl(icmp6->icmp6_mtu);
3036
3037 switch (type) {
3038 case ICMP6_ECHO_REQUEST:
3039 type = ICMP_ECHO;
3040 break;
3041 case ICMP6_ECHO_REPLY:
3042 type = ICMP_ECHOREPLY;
3043 break;
3044 case ICMP6_DST_UNREACH:
3045 type = ICMP_UNREACH;
3046 switch (code) {
3047 case ICMP6_DST_UNREACH_NOROUTE:
3048 case ICMP6_DST_UNREACH_BEYONDSCOPE:
3049 case ICMP6_DST_UNREACH_ADDR:
3050 code = ICMP_UNREACH_HOST;
3051 break;
3052 case ICMP6_DST_UNREACH_ADMIN:
3053 code = ICMP_UNREACH_HOST_PROHIB;
3054 break;
3055 case ICMP6_DST_UNREACH_NOPORT:
3056 code = ICMP_UNREACH_PORT;
3057 break;
3058 default:
3059 return (-1);
3060 }
3061 break;
3062 case ICMP6_PACKET_TOO_BIG:
3063 type = ICMP_UNREACH;
3064 code = ICMP_UNREACH_NEEDFRAG;
3065 mtu -= 20;
3066 break;
3067 case ICMP6_TIME_EXCEEDED:
3068 type = ICMP_TIMXCEED;
3069 break;
3070 case ICMP6_PARAM_PROB:
3071 switch (code) {
3072 case ICMP6_PARAMPROB_HEADER:
3073 type = ICMP_PARAMPROB;
3074 code = ICMP_PARAMPROB_ERRATPTR;
3075 ptr = ntohl(icmp6->icmp6_pptr);
3076
3077 if (ptr == PTR_IP6(ip6_vfc))
3078 ; /* preserve */
3079 else if (ptr == PTR_IP6(ip6_vfc) + 1)
3080 ptr = PTR_IP(ip_tos);
3081 else if (ptr == PTR_IP6(ip6_plen) ||
3082 ptr == PTR_IP6(ip6_plen) + 1)
3083 ptr = PTR_IP(ip_len);
3084 else if (ptr == PTR_IP6(ip6_nxt))
3085 ptr = PTR_IP(ip_p);
3086 else if (ptr == PTR_IP6(ip6_hlim))
3087 ptr = PTR_IP(ip_ttl);
3088 else if (ptr >= PTR_IP6(ip6_src) &&
3089 ptr < PTR_IP6(ip6_dst))
3090 ptr = PTR_IP(ip_src);
3091 else if (ptr >= PTR_IP6(ip6_dst) &&
3092 ptr < sizeof(struct ip6_hdr))
3093 ptr = PTR_IP(ip_dst);
3094 else {
3095 return (-1);
3096 }
3097 break;
3098 case ICMP6_PARAMPROB_NEXTHEADER:
3099 type = ICMP_UNREACH;
3100 code = ICMP_UNREACH_PROTOCOL;
3101 break;
3102 default:
3103 return (-1);
3104 }
3105 break;
3106 default:
3107 return (-1);
3108 }
3109
3110 pf_patch_8(pd, &icmp6->icmp6_type, type, PF_HI);
3111 pf_patch_8(pd, &icmp6->icmp6_code, code, PF_LO);
3112
3113 /* aligns well with an icmpv4 nextmtu */
3114 pf_patch_32(pd, &icmp6->icmp6_mtu, htonl(mtu));
3115
3116 /* icmpv4 pptr is one byte, the most significant one */
3117 if (ptr >= 0)
3118 pf_patch_32(pd, &icmp6->icmp6_pptr, htonl(ptr << 24));
3119 break;
3120 case AF_INET6:
3121 icmp4 = arg;
3122 type = icmp4->icmp_type;
3123 code = icmp4->icmp_code;
3124 mtu = ntohs(icmp4->icmp_nextmtu);
3125
3126 switch (type) {
3127 case ICMP_ECHO:
3128 type = ICMP6_ECHO_REQUEST;
3129 break;
3130 case ICMP_ECHOREPLY:
3131 type = ICMP6_ECHO_REPLY;
3132 break;
3133 case ICMP_UNREACH:
3134 type = ICMP6_DST_UNREACH;
3135 switch (code) {
3136 case ICMP_UNREACH_NET:
3137 case ICMP_UNREACH_HOST:
3138 case ICMP_UNREACH_NET_UNKNOWN:
3139 case ICMP_UNREACH_HOST_UNKNOWN:
3140 case ICMP_UNREACH_ISOLATED:
3141 case ICMP_UNREACH_TOSNET:
3142 case ICMP_UNREACH_TOSHOST:
3143 code = ICMP6_DST_UNREACH_NOROUTE;
3144 break;
3145 case ICMP_UNREACH_PORT:
3146 code = ICMP6_DST_UNREACH_NOPORT;
3147 break;
3148 case ICMP_UNREACH_NET_PROHIB:
3149 case ICMP_UNREACH_HOST_PROHIB:
3150 case ICMP_UNREACH_FILTER_PROHIB:
3151 case ICMP_UNREACH_PRECEDENCE_CUTOFF:
3152 code = ICMP6_DST_UNREACH_ADMIN;
3153 break;
3154 case ICMP_UNREACH_PROTOCOL:
3155 type = ICMP6_PARAM_PROB;
3156 code = ICMP6_PARAMPROB_NEXTHEADER;
3157 ptr = offsetof(struct ip6_hdr, ip6_nxt);
3158 break;
3159 case ICMP_UNREACH_NEEDFRAG:
3160 type = ICMP6_PACKET_TOO_BIG;
3161 code = 0;
3162 mtu += 20;
3163 break;
3164 default:
3165 return (-1);
3166 }
3167 break;
3168 case ICMP_TIMXCEED:
3169 type = ICMP6_TIME_EXCEEDED;
3170 break;
3171 case ICMP_PARAMPROB:
3172 type = ICMP6_PARAM_PROB;
3173 switch (code) {
3174 case ICMP_PARAMPROB_ERRATPTR:
3175 code = ICMP6_PARAMPROB_HEADER;
3176 break;
3177 case ICMP_PARAMPROB_LENGTH:
3178 code = ICMP6_PARAMPROB_HEADER;
3179 break;
3180 default:
3181 return (-1);
3182 }
3183
3184 ptr = icmp4->icmp_pptr;
3185 if (ptr == 0 || ptr == PTR_IP(ip_tos))
3186 ; /* preserve */
3187 else if (ptr == PTR_IP(ip_len) ||
3188 ptr == PTR_IP(ip_len) + 1)
3189 ptr = PTR_IP6(ip6_plen);
3190 else if (ptr == PTR_IP(ip_ttl))
3191 ptr = PTR_IP6(ip6_hlim);
3192 else if (ptr == PTR_IP(ip_p))
3193 ptr = PTR_IP6(ip6_nxt);
3194 else if (ptr >= PTR_IP(ip_src) &&
3195 ptr < PTR_IP(ip_dst))
3196 ptr = PTR_IP6(ip6_src);
3197 else if (ptr >= PTR_IP(ip_dst) &&
3198 ptr < sizeof(struct ip))
3199 ptr = PTR_IP6(ip6_dst);
3200 else {
3201 return (-1);
3202 }
3203 break;
3204 default:
3205 return (-1);
3206 }
3207
3208 pf_patch_8(pd, &icmp4->icmp_type, type, PF_HI);
3209 pf_patch_8(pd, &icmp4->icmp_code, code, PF_LO);
3210 pf_patch_16(pd, &icmp4->icmp_nextmtu, htons(mtu));
3211 if (ptr >= 0)
3212 pf_patch_32(pd, &icmp4->icmp_void, htonl(ptr));
3213 break;
3214 }
3215
3216 return (0);
3217 }
3218 #endif /* INET6 */
3219
3220 /*
3221 * Need to modulate the sequence numbers in the TCP SACK option
3222 * (credits to Krzysztof Pfaff for report and patch)
3223 */
3224 int
3225 pf_modulate_sack(struct pf_pdesc *pd, struct pf_state_peer *dst)
3226 {
3227 struct sackblk sack;
3228 int copyback = 0, i;
3229 int olen, optsoff;
3230 u_int8_t opts[MAX_TCPOPTLEN], *opt, *eoh;
3231
3232 olen = (pd->hdr.tcp.th_off << 2) - sizeof(struct tcphdr);
3233 optsoff = pd->off + sizeof(struct tcphdr);
3234 #define TCPOLEN_MINSACK (TCPOLEN_SACK + 2)
3235 if (olen < TCPOLEN_MINSACK ||
3236 !pf_pull_hdr(pd->m, optsoff, opts, olen, NULL, pd->af))
3237 return (0);
3238
3239 eoh = opts + olen;
3240 opt = opts;
3241 while ((opt = pf_find_tcpopt(opt, opts, olen,
3242 TCPOPT_SACK, TCPOLEN_MINSACK)) != NULL)
3243 {
3244 size_t safelen = MIN(opt[1], (eoh - opt));
3245 for (i = 2; i + TCPOLEN_SACK <= safelen; i += TCPOLEN_SACK) {
3246 size_t startoff = (opt + i) - opts;
3247 memcpy(&sack, &opt[i], sizeof(sack));
3248 pf_patch_32_unaligned(pd, &sack.start,
3249 htonl(ntohl(sack.start) - dst->seqdiff),
3250 PF_ALGNMNT(startoff));
3251 pf_patch_32_unaligned(pd, &sack.end,
3252 htonl(ntohl(sack.end) - dst->seqdiff),
3253 PF_ALGNMNT(startoff + sizeof(sack.start)));
3254 memcpy(&opt[i], &sack, sizeof(sack));
3255 }
3256 copyback = 1;
3257 opt += opt[1];
3258 }
3259
3260 if (copyback)
3261 m_copyback(pd->m, optsoff, olen, opts, M_NOWAIT);
3262 return (copyback);
3263 }
3264
3265 struct mbuf *
3266 pf_build_tcp(const struct pf_rule *r, sa_family_t af,
3267 const struct pf_addr *saddr, const struct pf_addr *daddr,
3268 u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
3269 u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
3270 u_int16_t rtag, u_int sack, u_int rdom)
3271 {
3272 struct mbuf *m;
3273 int len, tlen;
3274 struct ip *h;
3275 #ifdef INET6
3276 struct ip6_hdr *h6;
3277 #endif /* INET6 */
3278 struct tcphdr *th;
3279 char *opt;
3280
3281 /* maximum segment size tcp option */
3282 tlen = sizeof(struct tcphdr);
3283 if (mss)
3284 tlen += 4;
3285 if (sack)
3286 tlen += 2;
3287
3288 switch (af) {
3289 case AF_INET:
3290 len = sizeof(struct ip) + tlen;
3291 break;
3292 #ifdef INET6
3293 case AF_INET6:
3294 len = sizeof(struct ip6_hdr) + tlen;
3295 break;
3296 #endif /* INET6 */
3297 default:
3298 unhandled_af(af);
3299 }
3300
3301 /* create outgoing mbuf */
3302 m = m_gethdr(M_DONTWAIT, MT_HEADER);
3303 if (m == NULL)
3304 return (NULL);
3305 if (tag)
3306 m->m_pkthdr.pf.flags |= PF_TAG_GENERATED;
3307 m->m_pkthdr.pf.tag = rtag;
3308 m->m_pkthdr.ph_rtableid = rdom;
3309 if (r && (r->scrub_flags & PFSTATE_SETPRIO))
3310 m->m_pkthdr.pf.prio = r->set_prio[0];
3311 if (r && r->qid)
3312 m->m_pkthdr.pf.qid = r->qid;
3313 m->m_data += max_linkhdr;
3314 m->m_pkthdr.len = m->m_len = len;
3315 m->m_pkthdr.ph_ifidx = 0;
3316 m->m_pkthdr.csum_flags |= M_TCP_CSUM_OUT;
3317 memset(m->m_data, 0, len);
3318 switch (af) {
3319 case AF_INET:
3320 h = mtod(m, struct ip *);
3321 h->ip_p = IPPROTO_TCP;
3322 h->ip_len = htons(tlen);
3323 h->ip_v = 4;
3324 h->ip_hl = sizeof(*h) >> 2;
3325 h->ip_tos = IPTOS_LOWDELAY;
3326 h->ip_len = htons(len);
3327 h->ip_off = htons(ip_mtudisc ? IP_DF : 0);
3328 h->ip_ttl = ttl ? ttl : ip_defttl;
3329 h->ip_sum = 0;
3330 h->ip_src.s_addr = saddr->v4.s_addr;
3331 h->ip_dst.s_addr = daddr->v4.s_addr;
3332
3333 th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip));
3334 break;
3335 #ifdef INET6
3336 case AF_INET6:
3337 h6 = mtod(m, struct ip6_hdr *);
3338 h6->ip6_nxt = IPPROTO_TCP;
3339 h6->ip6_plen = htons(tlen);
3340 h6->ip6_vfc |= IPV6_VERSION;
3341 h6->ip6_hlim = IPV6_DEFHLIM;
3342 memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr));
3343 memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr));
3344
3345 th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr));
3346 break;
3347 #endif /* INET6 */
3348 default:
3349 unhandled_af(af);
3350 }
3351
3352 /* TCP header */
3353 th->th_sport = sport;
3354 th->th_dport = dport;
3355 th->th_seq = htonl(seq);
3356 th->th_ack = htonl(ack);
3357 th->th_off = tlen >> 2;
3358 th->th_flags = flags;
3359 th->th_win = htons(win);
3360
3361 opt = (char *)(th + 1);
3362 if (mss) {
3363 opt[0] = TCPOPT_MAXSEG;
3364 opt[1] = 4;
3365 mss = htons(mss);
3366 memcpy((opt + 2), &mss, 2);
3367 opt += 4;
3368 }
3369 if (sack) {
3370 opt[0] = TCPOPT_SACK_PERMITTED;
3371 opt[1] = 2;
3372 opt += 2;
3373 }
3374
3375 return (m);
3376 }
3377
3378 void
3379 pf_send_tcp(const struct pf_rule *r, sa_family_t af,
3380 const struct pf_addr *saddr, const struct pf_addr *daddr,
3381 u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
3382 u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
3383 u_int16_t rtag, u_int rdom)
3384 {
3385 struct mbuf *m;
3386
3387 if ((m = pf_build_tcp(r, af, saddr, daddr, sport, dport, seq, ack,
3388 flags, win, mss, ttl, tag, rtag, 0, rdom)) == NULL)
3389 return;
3390
3391 switch (af) {
3392 case AF_INET:
3393 ip_send(m);
3394 break;
3395 #ifdef INET6
3396 case AF_INET6:
3397 ip6_send(m);
3398 break;
3399 #endif /* INET6 */
3400 }
3401 }
3402
3403 static void
3404 pf_send_challenge_ack(struct pf_pdesc *pd, struct pf_state *st,
3405 struct pf_state_peer *src, struct pf_state_peer *dst)
3406 {
3407 /*
3408 * We are sending a challenge ACK as a response to a SYN packet that
3409 * matches an existing state (modulo the TCP window check). Therefore
3410 * the packet must be sent on behalf of the destination.
3411 *
3412 * We expect the sender to remain either silent or to send an RST
3413 * packet, so that both the firewall and the remote peer can purge
3414 * the dead state from memory.
3415 */
3416 pf_send_tcp(st->rule.ptr, pd->af, pd->dst, pd->src,
3417 pd->hdr.tcp.th_dport, pd->hdr.tcp.th_sport, dst->seqlo,
3418 src->seqlo, TH_ACK, 0, 0, st->rule.ptr->return_ttl, 1, 0,
3419 pd->rdomain);
3420 }
3421
3422 void
3423 pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, int param,
3424 sa_family_t af, struct pf_rule *r, u_int rdomain)
3425 {
3426 struct mbuf *m0;
3427
3428 if ((m0 = m_copym(m, 0, M_COPYALL, M_NOWAIT)) == NULL)
3429 return;
3430
3431 m0->m_pkthdr.pf.flags |= PF_TAG_GENERATED;
3432 m0->m_pkthdr.ph_rtableid = rdomain;
3433 if (r && (r->scrub_flags & PFSTATE_SETPRIO))
3434 m0->m_pkthdr.pf.prio = r->set_prio[0];
3435 if (r && r->qid)
3436 m0->m_pkthdr.pf.qid = r->qid;
3437
3438 switch (af) {
3439 case AF_INET:
3440 icmp_error(m0, type, code, 0, param);
3441 break;
3442 #ifdef INET6
3443 case AF_INET6:
3444 icmp6_error(m0, type, code, param);
3445 break;
3446 #endif /* INET6 */
3447 }
3448 }
3449
3450 /*
3451 * Return ((n = 0) == (a = b [with mask m]))
3452 * Note: n != 0 => returns (a != b [with mask m])
3453 */
3454 int
3455 pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
3456 struct pf_addr *b, sa_family_t af)
3457 {
3458 switch (af) {
3459 case AF_INET:
3460 if ((a->addr32[0] & m->addr32[0]) ==
3461 (b->addr32[0] & m->addr32[0]))
3462 return (n == 0);
3463 break;
3464 #ifdef INET6
3465 case AF_INET6:
3466 if (((a->addr32[0] & m->addr32[0]) ==
3467 (b->addr32[0] & m->addr32[0])) &&
3468 ((a->addr32[1] & m->addr32[1]) ==
3469 (b->addr32[1] & m->addr32[1])) &&
3470 ((a->addr32[2] & m->addr32[2]) ==
3471 (b->addr32[2] & m->addr32[2])) &&
3472 ((a->addr32[3] & m->addr32[3]) ==
3473 (b->addr32[3] & m->addr32[3])))
3474 return (n == 0);
3475 break;
3476 #endif /* INET6 */
3477 }
3478
3479 return (n != 0);
3480 }
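/*
 * For example (illustration only): with n = 0, a = 10.0.0.0,
 * m = 255.255.255.0 and b = 10.0.0.5, the masked compare succeeds and
 * pf_match_addr() returns 1 (b lies within 10.0.0.0/24); with n != 0
 * the same arguments return 0, implementing a negated address match.
 */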
3481
3482 /*
3483 * Return 1 if b <= a <= e, otherwise return 0.
3484 */
3485 int
3486 pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
3487 struct pf_addr *a, sa_family_t af)
3488 {
3489 switch (af) {
3490 case AF_INET:
3491 if ((ntohl(a->addr32[0]) < ntohl(b->addr32[0])) ||
3492 (ntohl(a->addr32[0]) > ntohl(e->addr32[0])))
3493 return (0);
3494 break;
3495 #ifdef INET6
3496 case AF_INET6: {
3497 int i;
3498
3499 /* check a >= b */
3500 for (i = 0; i < 4; ++i)
3501 if (ntohl(a->addr32[i]) > ntohl(b->addr32[i]))
3502 break;
3503 else if (ntohl(a->addr32[i]) < ntohl(b->addr32[i]))
3504 return (0);
3505 /* check a <= e */
3506 for (i = 0; i < 4; ++i)
3507 if (ntohl(a->addr32[i]) < ntohl(e->addr32[i]))
3508 break;
3509 else if (ntohl(a->addr32[i]) > ntohl(e->addr32[i]))
3510 return (0);
3511 break;
3512 }
3513 #endif /* INET6 */
3514 }
3515 return (1);
3516 }
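/*
 * For example (illustration only): pf_match_addr_range() with
 * b = 10.0.0.10, e = 10.0.0.20 and a = 10.0.0.15 returns 1. The
 * ntohl() conversions are required because raw network-byte-order
 * words compare in the wrong order on little-endian machines.
 */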
3517
3518 int
3519 pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
3520 {
3521 switch (op) {
3522 case PF_OP_IRG:
3523 return ((p > a1) && (p < a2));
3524 case PF_OP_XRG:
3525 return ((p < a1) || (p > a2));
3526 case PF_OP_RRG:
3527 return ((p >= a1) && (p <= a2));
3528 case PF_OP_EQ:
3529 return (p == a1);
3530 case PF_OP_NE:
3531 return (p != a1);
3532 case PF_OP_LT:
3533 return (p < a1);
3534 case PF_OP_LE:
3535 return (p <= a1);
3536 case PF_OP_GT:
3537 return (p > a1);
3538 case PF_OP_GE:
3539 return (p >= a1);
3540 }
3541 return (0); /* never reached */
3542 }
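/*
 * For example (illustration only): an inclusive range such as
 * "port 1000:1010" in pf.conf uses PF_OP_RRG, so
 * pf_match(PF_OP_RRG, 1000, 1010, 1005) returns 1 and
 * pf_match(PF_OP_RRG, 1000, 1010, 1011) returns 0. pf_match_port()
 * below converts its arguments with ntohs() first, so the comparisons
 * are made in host byte order.
 */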
3543
3544 int
3545 pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
3546 {
3547 return (pf_match(op, ntohs(a1), ntohs(a2), ntohs(p)));
3548 }
3549
3550 int
3551 pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
3552 {
3553 if (u == -1 && op != PF_OP_EQ && op != PF_OP_NE)
3554 return (0);
3555 return (pf_match(op, a1, a2, u));
3556 }
3557
3558 int
3559 pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
3560 {
3561 if (g == -1 && op != PF_OP_EQ && op != PF_OP_NE)
3562 return (0);
3563 return (pf_match(op, a1, a2, g));
3564 }
3565
3566 int
3567 pf_match_tag(struct mbuf *m, struct pf_rule *r, int *tag)
3568 {
3569 if (*tag == -1)
3570 *tag = m->m_pkthdr.pf.tag;
3571
3572 return ((!r->match_tag_not && r->match_tag == *tag) ||
3573 (r->match_tag_not && r->match_tag != *tag));
3574 }
3575
3576 int
3577 pf_match_rcvif(struct mbuf *m, struct pf_rule *r)
3578 {
3579 struct ifnet *ifp;
3580 #if NCARP > 0
3581 struct ifnet *ifp0;
3582 #endif
3583 struct pfi_kif *kif;
3584
3585 ifp = if_get(m->m_pkthdr.ph_ifidx);
3586 if (ifp == NULL)
3587 return (0);
3588
3589 #if NCARP > 0
3590 if (ifp->if_type == IFT_CARP &&
3591 (ifp0 = if_get(ifp->if_carpdevidx)) != NULL) {
3592 kif = (struct pfi_kif *)ifp0->if_pf_kif;
3593 if_put(ifp0);
3594 } else
3595 #endif /* NCARP */
3596 kif = (struct pfi_kif *)ifp->if_pf_kif;
3597
3598 if_put(ifp);
3599
3600 if (kif == NULL) {
3601 DPFPRINTF(LOG_ERR,
3602 "%s: kif == NULL, @%d via %s", __func__,
3603 r->nr, r->rcv_ifname);
3604 return (0);
3605 }
3606
3607 return (pfi_kif_match(r->rcv_kif, kif));
3608 }
3609
3610 void
3611 pf_tag_packet(struct mbuf *m, int tag, int rtableid)
3612 {
3613 if (tag > 0)
3614 m->m_pkthdr.pf.tag = tag;
3615 if (rtableid >= 0)
3616 m->m_pkthdr.ph_rtableid = (u_int)rtableid;
3617 }
3618
3619 void
3620 pf_anchor_stack_init(void)
3621 {
3622 struct pf_anchor_stackframe *stack;
3623
3624 stack = (struct pf_anchor_stackframe *)cpumem_enter(pf_anchor_stack);
3625 stack[PF_ANCHOR_STACK_MAX].sf_stack_top = &stack[0];
3626 cpumem_leave(pf_anchor_stack, stack);
3627 }
3628
3629 int
3630 pf_anchor_stack_is_full(struct pf_anchor_stackframe *sf)
3631 {
3632 struct pf_anchor_stackframe *stack;
3633 int rv;
3634
3635 stack = (struct pf_anchor_stackframe *)cpumem_enter(pf_anchor_stack);
3636 rv = (sf == &stack[PF_ANCHOR_STACK_MAX]);
3637 cpumem_leave(pf_anchor_stack, stack);
3638
3639 return (rv);
3640 }
3641
3642 int
3643 pf_anchor_stack_is_empty(struct pf_anchor_stackframe *sf)
3644 {
3645 struct pf_anchor_stackframe *stack;
3646 int rv;
3647
3648 stack = (struct pf_anchor_stackframe *)cpumem_enter(pf_anchor_stack);
3649 rv = (sf == &stack[0]);
3650 cpumem_leave(pf_anchor_stack, stack);
3651
3652 return (rv);
3653 }
3654
3655 struct pf_anchor_stackframe *
3656 pf_anchor_stack_top(void)
3657 {
3658 struct pf_anchor_stackframe *stack;
3659 struct pf_anchor_stackframe *top_sf;
3660
3661 stack = (struct pf_anchor_stackframe *)cpumem_enter(pf_anchor_stack);
3662 top_sf = stack[PF_ANCHOR_STACK_MAX].sf_stack_top;
3663 cpumem_leave(pf_anchor_stack, stack);
3664
3665 return (top_sf);
3666 }
3667
3668 int
3669 pf_anchor_stack_push(struct pf_ruleset *rs, struct pf_rule *r,
3670 struct pf_anchor *child, int jump_target)
3671 {
3672 struct pf_anchor_stackframe *stack;
3673 struct pf_anchor_stackframe *top_sf = pf_anchor_stack_top();
3674
3675 top_sf++;
3676 if (pf_anchor_stack_is_full(top_sf))
3677 return (-1);
3678
3679 top_sf->sf_rs = rs;
3680 top_sf->sf_r = r;
3681 top_sf->sf_child = child;
3682 top_sf->sf_jump_target = jump_target;
3683
3684 stack = (struct pf_anchor_stackframe *)cpumem_enter(pf_anchor_stack);
3685
3686 if ((top_sf <= &stack[0]) || (top_sf >= &stack[PF_ANCHOR_STACK_MAX]))
3687 panic("%s: top frame outside of anchor stack range", __func__);
3688
3689 stack[PF_ANCHOR_STACK_MAX].sf_stack_top = top_sf;
3690 cpumem_leave(pf_anchor_stack, stack);
3691
3692 return (0);
3693 }
3694
3695 int
3696 pf_anchor_stack_pop(struct pf_ruleset **rs, struct pf_rule **r,
3697 struct pf_anchor **child, int *jump_target)
3698 {
3699 struct pf_anchor_stackframe *top_sf = pf_anchor_stack_top();
3700 struct pf_anchor_stackframe *stack;
3701 int on_top;
3702
3703 stack = (struct pf_anchor_stackframe *)cpumem_enter(pf_anchor_stack);
3704 if (pf_anchor_stack_is_empty(top_sf)) {
3705 on_top = -1;
3706 } else {
3707 if ((top_sf <= &stack[0]) ||
3708 (top_sf >= &stack[PF_ANCHOR_STACK_MAX]))
3709 panic("%s: top frame outside of anchor stack range",
3710 __func__);
3711
3712 *rs = top_sf->sf_rs;
3713 *r = top_sf->sf_r;
3714 *child = top_sf->sf_child;
3715 *jump_target = top_sf->sf_jump_target;
3716 top_sf--;
3717 stack[PF_ANCHOR_STACK_MAX].sf_stack_top = top_sf;
3718 on_top = 0;
3719 }
3720 cpumem_leave(pf_anchor_stack, stack);
3721
3722 return (on_top);
3723 }
3724
3725 void
3726 pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
3727 struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
3728 {
3729 switch (af) {
3730 case AF_INET:
3731 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
3732 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
3733 break;
3734 #ifdef INET6
3735 case AF_INET6:
3736 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
3737 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
3738 naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
3739 ((rmask->addr32[1] ^ 0xffffffff ) & saddr->addr32[1]);
3740 naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
3741 ((rmask->addr32[2] ^ 0xffffffff ) & saddr->addr32[2]);
3742 naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
3743 ((rmask->addr32[3] ^ 0xffffffff ) & saddr->addr32[3]);
3744 break;
3745 #endif /* INET6 */
3746 default:
3747 unhandled_af(af);
3748 }
3749 }
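
/*
 * Worked example (illustrative, not part of the original source):
 * with raddr 10.0.0.0, rmask 255.255.255.0 and saddr 192.168.7.42,
 * the AF_INET case above computes
 *	naddr = (10.0.0.0 & 255.255.255.0) | (~255.255.255.0 & 192.168.7.42)
 *	      = 10.0.0.42
 * i.e. the network bits come from the pool address and the host bits
 * are preserved from the packet's address.
 */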
3750
3751 void
3752 pf_addr_inc(struct pf_addr *addr, sa_family_t af)
3753 {
3754 switch (af) {
3755 case AF_INET:
3756 addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
3757 break;
3758 #ifdef INET6
3759 case AF_INET6:
3760 if (addr->addr32[3] == 0xffffffff) {
3761 addr->addr32[3] = 0;
3762 if (addr->addr32[2] == 0xffffffff) {
3763 addr->addr32[2] = 0;
3764 if (addr->addr32[1] == 0xffffffff) {
3765 addr->addr32[1] = 0;
3766 addr->addr32[0] =
3767 htonl(ntohl(addr->addr32[0]) + 1);
3768 } else
3769 addr->addr32[1] =
3770 htonl(ntohl(addr->addr32[1]) + 1);
3771 } else
3772 addr->addr32[2] =
3773 htonl(ntohl(addr->addr32[2]) + 1);
3774 } else
3775 addr->addr32[3] =
3776 htonl(ntohl(addr->addr32[3]) + 1);
3777 break;
3778 #endif /* INET6 */
3779 default:
3780 unhandled_af(af);
3781 }
3782 }
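
/*
 * Worked example (illustrative, not part of the original source):
 * for AF_INET6 the code above behaves like a 128-bit increment with
 * ripple carry, e.g.
 *	::ffff:ffff + 1 -> ::1:0:0
 * addr32[3] wraps from 0xffffffff to 0 and the carry propagates into
 * addr32[2]; addr32[0] only changes once all three lower words wrap.
 */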
3783
3784 int
3785 pf_socket_lookup(struct pf_pdesc *pd)
3786 {
3787 struct pf_addr *saddr, *daddr;
3788 u_int16_t sport, dport;
3789 struct inpcbtable *table;
3790 struct inpcb *inp;
3791
3792 pd->lookup.uid = -1;
3793 pd->lookup.gid = -1;
3794 pd->lookup.pid = NO_PID;
3795 switch (pd->virtual_proto) {
3796 case IPPROTO_TCP:
3797 sport = pd->hdr.tcp.th_sport;
3798 dport = pd->hdr.tcp.th_dport;
3799 PF_ASSERT_LOCKED();
3800 NET_ASSERT_LOCKED();
3801 table = &tcbtable;
3802 break;
3803 case IPPROTO_UDP:
3804 sport = pd->hdr.udp.uh_sport;
3805 dport = pd->hdr.udp.uh_dport;
3806 PF_ASSERT_LOCKED();
3807 NET_ASSERT_LOCKED();
3808 table = &udbtable;
3809 break;
3810 default:
3811 return (-1);
3812 }
3813 if (pd->dir == PF_IN) {
3814 saddr = pd->src;
3815 daddr = pd->dst;
3816 } else {
3817 u_int16_t p;
3818
3819 p = sport;
3820 sport = dport;
3821 dport = p;
3822 saddr = pd->dst;
3823 daddr = pd->src;
3824 }
3825 switch (pd->af) {
3826 case AF_INET:
3827 /*
3828 * Fails when the rtable is changed while evaluating the ruleset.
3829 * The socket looked up will not match the one hit in the end.
3830 */
3831 inp = in_pcblookup(table, saddr->v4, sport, daddr->v4, dport,
3832 pd->rdomain);
3833 if (inp == NULL) {
3834 inp = in_pcblookup_listen(table, daddr->v4, dport,
3835 NULL, pd->rdomain);
3836 if (inp == NULL)
3837 return (-1);
3838 }
3839 break;
3840 #ifdef INET6
3841 case AF_INET6:
3842 if (pd->virtual_proto == IPPROTO_UDP)
3843 table = &udb6table;
3844 if (pd->virtual_proto == IPPROTO_TCP)
3845 table = &tcb6table;
3846 inp = in6_pcblookup(table, &saddr->v6, sport, &daddr->v6,
3847 dport, pd->rdomain);
3848 if (inp == NULL) {
3849 inp = in6_pcblookup_listen(table, &daddr->v6, dport,
3850 NULL, pd->rdomain);
3851 if (inp == NULL)
3852 return (-1);
3853 }
3854 break;
3855 #endif /* INET6 */
3856 default:
3857 unhandled_af(pd->af);
3858 }
3859 pd->lookup.uid = inp->inp_socket->so_euid;
3860 pd->lookup.gid = inp->inp_socket->so_egid;
3861 pd->lookup.pid = inp->inp_socket->so_cpid;
3862 in_pcbunref(inp);
3863 return (1);
3864 }
3865
3866 /* post: r => (r[0] == type /\ r[1] >= min_typelen >= 2 "validity"
3867 * /\ (eoh - r) >= min_typelen >= 2 "safety" )
3868 *
3869 * warning: r + r[1] may exceed opts bounds for r[1] > min_typelen
3870 */
3871 u_int8_t*
3872 pf_find_tcpopt(u_int8_t *opt, u_int8_t *opts, size_t hlen, u_int8_t type,
3873 u_int8_t min_typelen)
3874 {
3875 u_int8_t *eoh = opts + hlen;
3876
3877 if (min_typelen < 2)
3878 return (NULL);
3879
3880 while ((eoh - opt) >= min_typelen) {
3881 switch (*opt) {
3882 case TCPOPT_EOL:
3883 /* FALLTHROUGH - Workaround the failure of some
3884 systems to NOP-pad their bzero'd option buffers,
3885 producing spurious EOLs */
3886 case TCPOPT_NOP:
3887 opt++;
3888 continue;
3889 default:
3890 if (opt[0] == type &&
3891 opt[1] >= min_typelen)
3892 return (opt);
3893 }
3894
3895 opt += MAX(opt[1], 2); /* evade infinite loops */
3896 }
3897
3898 return (NULL);
3899 }
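
/*
 * Usage example (illustrative, not part of the original source):
 * given the option bytes
 *	01 01 03 03 07	(NOP, NOP, kind=TCPOPT_WINDOW, len=3, shift=7)
 * a search for TCPOPT_WINDOW with min_typelen TCPOLEN_WINDOW (3)
 * skips both NOPs and returns a pointer to the kind byte, so opt[2]
 * is the shift count.  Per the warning above, the caller advances by
 * opt[1] itself before looking for further occurrences.
 */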
3900
3901 u_int8_t
3902 pf_get_wscale(struct pf_pdesc *pd)
3903 {
3904 int olen;
3905 u_int8_t opts[MAX_TCPOPTLEN], *opt;
3906 u_int8_t wscale = 0;
3907
3908 olen = (pd->hdr.tcp.th_off << 2) - sizeof(struct tcphdr);
3909 if (olen < TCPOLEN_WINDOW || !pf_pull_hdr(pd->m,
3910 pd->off + sizeof(struct tcphdr), opts, olen, NULL, pd->af))
3911 return (0);
3912
3913 opt = opts;
3914 while ((opt = pf_find_tcpopt(opt, opts, olen,
3915 TCPOPT_WINDOW, TCPOLEN_WINDOW)) != NULL) {
3916 wscale = opt[2];
3917 wscale = MIN(wscale, TCP_MAX_WINSHIFT);
3918 wscale |= PF_WSCALE_FLAG;
3919
3920 opt += opt[1];
3921 }
3922
3923 return (wscale);
3924 }
3925
3926 u_int16_t
3927 pf_get_mss(struct pf_pdesc *pd)
3928 {
3929 int olen;
3930 u_int8_t opts[MAX_TCPOPTLEN], *opt;
3931 u_int16_t mss = tcp_mssdflt;
3932
3933 olen = (pd->hdr.tcp.th_off << 2) - sizeof(struct tcphdr);
3934 if (olen < TCPOLEN_MAXSEG || !pf_pull_hdr(pd->m,
3935 pd->off + sizeof(struct tcphdr), opts, olen, NULL, pd->af))
3936 return (0);
3937
3938 opt = opts;
3939 while ((opt = pf_find_tcpopt(opt, opts, olen,
3940 TCPOPT_MAXSEG, TCPOLEN_MAXSEG)) != NULL) {
3941 memcpy(&mss, (opt + 2), 2);
3942 mss = ntohs(mss);
3943
3944 opt += opt[1];
3945 }
3946 return (mss);
3947 }
3948
3949 u_int16_t
3950 pf_calc_mss(struct pf_addr *addr, sa_family_t af, int rtableid, u_int16_t offer)
3951 {
3952 struct ifnet *ifp;
3953 struct sockaddr_in *dst;
3954 #ifdef INET6
3955 struct sockaddr_in6 *dst6;
3956 #endif /* INET6 */
3957 struct rtentry *rt = NULL;
3958 struct sockaddr_storage ss;
3959 int hlen;
3960 u_int16_t mss = tcp_mssdflt;
3961
3962 memset(&ss, 0, sizeof(ss));
3963
3964 switch (af) {
3965 case AF_INET:
3966 hlen = sizeof(struct ip);
3967 dst = (struct sockaddr_in *)&ss;
3968 dst->sin_family = AF_INET;
3969 dst->sin_len = sizeof(*dst);
3970 dst->sin_addr = addr->v4;
3971 rt = rtalloc(sintosa(dst), 0, rtableid);
3972 break;
3973 #ifdef INET6
3974 case AF_INET6:
3975 hlen = sizeof(struct ip6_hdr);
3976 dst6 = (struct sockaddr_in6 *)&ss;
3977 dst6->sin6_family = AF_INET6;
3978 dst6->sin6_len = sizeof(*dst6);
3979 dst6->sin6_addr = addr->v6;
3980 rt = rtalloc(sin6tosa(dst6), 0, rtableid);
3981 break;
3982 #endif /* INET6 */
3983 }
3984
3985 if (rt != NULL && (ifp = if_get(rt->rt_ifidx)) != NULL) {
3986 mss = ifp->if_mtu - hlen - sizeof(struct tcphdr);
3987 mss = max(tcp_mssdflt, mss);
3988 if_put(ifp);
3989 }
3990 rtfree(rt);
3991 mss = min(mss, offer);
3992 mss = max(mss, 64); /* sanity - at least max opt space */
3993 return (mss);
3994 }
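
/*
 * Worked example (illustrative, not part of the original source):
 * for an AF_INET route out an interface with if_mtu 1500, the code
 * above yields
 *	mss = 1500 - sizeof(struct ip) - sizeof(struct tcphdr)
 *	    = 1500 - 20 - 20 = 1460
 * which is then bounded above by the peer's offer and below by 64.
 */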
3995
3996 static __inline int
3997 pf_set_rt_ifp(struct pf_state *st, struct pf_addr *saddr, sa_family_t af,
3998 struct pf_src_node **sns)
3999 {
4000 struct pf_rule *r = st->rule.ptr;
4001 int rv;
4002
4003 if (!r->rt)
4004 return (0);
4005
4006 rv = pf_map_addr(af, r, saddr, &st->rt_addr, NULL, sns,
4007 &r->route, PF_SN_ROUTE);
4008 if (rv == 0)
4009 st->rt = r->rt;
4010
4011 return (rv);
4012 }
4013
4014 u_int32_t
4015 pf_tcp_iss(struct pf_pdesc *pd)
4016 {
4017 SHA2_CTX ctx;
4018 union {
4019 uint8_t bytes[SHA512_DIGEST_LENGTH];
4020 uint32_t words[1];
4021 } digest;
4022
4023 if (pf_tcp_secret_init == 0) {
4024 arc4random_buf(pf_tcp_secret, sizeof(pf_tcp_secret));
4025 SHA512Init(&pf_tcp_secret_ctx);
4026 SHA512Update(&pf_tcp_secret_ctx, pf_tcp_secret,
4027 sizeof(pf_tcp_secret));
4028 pf_tcp_secret_init = 1;
4029 }
4030 ctx = pf_tcp_secret_ctx;
4031
4032 SHA512Update(&ctx, &pd->rdomain, sizeof(pd->rdomain));
4033 SHA512Update(&ctx, &pd->hdr.tcp.th_sport, sizeof(u_short));
4034 SHA512Update(&ctx, &pd->hdr.tcp.th_dport, sizeof(u_short));
4035 switch (pd->af) {
4036 case AF_INET:
4037 SHA512Update(&ctx, &pd->src->v4, sizeof(struct in_addr));
4038 SHA512Update(&ctx, &pd->dst->v4, sizeof(struct in_addr));
4039 break;
4040 #ifdef INET6
4041 case AF_INET6:
4042 SHA512Update(&ctx, &pd->src->v6, sizeof(struct in6_addr));
4043 SHA512Update(&ctx, &pd->dst->v6, sizeof(struct in6_addr));
4044 break;
4045 #endif /* INET6 */
4046 }
4047 SHA512Final(digest.bytes, &ctx);
4048 pf_tcp_iss_off += 4096;
4049 return (digest.words[0] + READ_ONCE(tcp_iss) + pf_tcp_iss_off);
4050 }
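
/*
 * Illustrative summary (not part of the original source): the ISS
 * above is effectively
 *	SHA512(secret || rdomain || ports || addresses)[0..3]
 *	    + tcp_iss + pf_tcp_iss_off
 * The keyed digest keeps initial sequence numbers unpredictable to
 * off-path hosts, while the 4096 step in pf_tcp_iss_off keeps
 * successive connections between the same endpoints advancing.
 */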
4051
4052 void
4053 pf_rule_to_actions(struct pf_rule *r, struct pf_rule_actions *a)
4054 {
4055 if (r->qid)
4056 a->qid = r->qid;
4057 if (r->pqid)
4058 a->pqid = r->pqid;
4059 if (r->rtableid >= 0)
4060 a->rtableid = r->rtableid;
4061 #if NPFLOG > 0
4062 a->log |= r->log;
4063 #endif /* NPFLOG > 0 */
4064 if (r->scrub_flags & PFSTATE_SETTOS)
4065 a->set_tos = r->set_tos;
4066 if (r->min_ttl)
4067 a->min_ttl = r->min_ttl;
4068 if (r->max_mss)
4069 a->max_mss = r->max_mss;
4070 a->flags |= (r->scrub_flags & (PFSTATE_NODF|PFSTATE_RANDOMID|
4071 PFSTATE_SETTOS|PFSTATE_SCRUB_TCP|PFSTATE_SETPRIO));
4072 if (r->scrub_flags & PFSTATE_SETPRIO) {
4073 a->set_prio[0] = r->set_prio[0];
4074 a->set_prio[1] = r->set_prio[1];
4075 }
4076 if (r->rule_flag & PFRULE_SETDELAY)
4077 a->delay = r->delay;
4078 }
4079
4080 #define PF_TEST_ATTRIB(t, a) \
4081 if (t) { \
4082 r = a; \
4083 continue; \
4084 } else do { \
4085 } while (0)
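
/*
 * Illustrative expansion (not part of the original source): inside
 * the rule loop below,
 *	PF_TEST_ATTRIB(cond, skip);
 * behaves as
 *	if (cond) { r = skip; continue; }
 * The trailing "else do { } while (0)" only swallows the caller's
 * semicolon so the macro nests safely in if/else chains.
 */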
4086
4087 enum pf_test_status
4088 pf_match_rule(struct pf_test_ctx *ctx, struct pf_ruleset *ruleset)
4089 {
4090 struct pf_rule *r;
4091 struct pf_anchor *child = NULL;
4092 int target;
4093
4094 pf_anchor_stack_init();
4095 enter_ruleset:
4096 r = TAILQ_FIRST(ruleset->rules.active.ptr);
4097 while (r != NULL) {
4098 PF_TEST_ATTRIB(r->rule_flag & PFRULE_EXPIRED,
4099 TAILQ_NEXT(r, entries));
4100 r->evaluations++;
4101 PF_TEST_ATTRIB(
4102 (pfi_kif_match(r->kif, ctx->pd->kif) == r->ifnot),
4103 r->skip[PF_SKIP_IFP].ptr);
4104 PF_TEST_ATTRIB((r->direction && r->direction != ctx->pd->dir),
4105 r->skip[PF_SKIP_DIR].ptr);
4106 PF_TEST_ATTRIB((r->onrdomain >= 0 &&
4107 (r->onrdomain == ctx->pd->rdomain) == r->ifnot),
4108 r->skip[PF_SKIP_RDOM].ptr);
4109 PF_TEST_ATTRIB((r->af && r->af != ctx->pd->af),
4110 r->skip[PF_SKIP_AF].ptr);
4111 PF_TEST_ATTRIB((r->proto && r->proto != ctx->pd->proto),
4112 r->skip[PF_SKIP_PROTO].ptr);
4113 PF_TEST_ATTRIB((PF_MISMATCHAW(&r->src.addr, &ctx->pd->nsaddr,
4114 ctx->pd->naf, r->src.neg, ctx->pd->kif,
4115 ctx->act.rtableid)),
4116 r->skip[PF_SKIP_SRC_ADDR].ptr);
4117 PF_TEST_ATTRIB((PF_MISMATCHAW(&r->dst.addr, &ctx->pd->ndaddr,
4118 ctx->pd->af, r->dst.neg, NULL, ctx->act.rtableid)),
4119 r->skip[PF_SKIP_DST_ADDR].ptr);
4120
4121 switch (ctx->pd->virtual_proto) {
4122 case PF_VPROTO_FRAGMENT:
4123 /* tcp/udp only. port_op always 0 in other cases */
4124 PF_TEST_ATTRIB((r->src.port_op || r->dst.port_op),
4125 TAILQ_NEXT(r, entries));
4126 PF_TEST_ATTRIB((ctx->pd->proto == IPPROTO_TCP &&
4127 r->flagset),
4128 TAILQ_NEXT(r, entries));
4129 /* icmp only. type/code always 0 in other cases */
4130 PF_TEST_ATTRIB((r->type || r->code),
4131 TAILQ_NEXT(r, entries));
4132 /* tcp/udp only. {uid|gid}.op always 0 in other cases */
4133 PF_TEST_ATTRIB((r->gid.op || r->uid.op),
4134 TAILQ_NEXT(r, entries));
4135 break;
4136
4137 case IPPROTO_TCP:
4138 PF_TEST_ATTRIB(((r->flagset & ctx->th->th_flags) !=
4139 r->flags),
4140 TAILQ_NEXT(r, entries));
4141 PF_TEST_ATTRIB((r->os_fingerprint != PF_OSFP_ANY &&
4142 !pf_osfp_match(pf_osfp_fingerprint(ctx->pd),
4143 r->os_fingerprint)),
4144 TAILQ_NEXT(r, entries));
4145 /* FALLTHROUGH */
4146
4147 case IPPROTO_UDP:
4148 /* tcp/udp only. port_op always 0 in other cases */
4149 PF_TEST_ATTRIB((r->src.port_op &&
4150 !pf_match_port(r->src.port_op, r->src.port[0],
4151 r->src.port[1], ctx->pd->nsport)),
4152 r->skip[PF_SKIP_SRC_PORT].ptr);
4153 PF_TEST_ATTRIB((r->dst.port_op &&
4154 !pf_match_port(r->dst.port_op, r->dst.port[0],
4155 r->dst.port[1], ctx->pd->ndport)),
4156 r->skip[PF_SKIP_DST_PORT].ptr);
4157 /* tcp/udp only. uid.op always 0 in other cases */
4158 PF_TEST_ATTRIB((r->uid.op && (ctx->pd->lookup.done ||
4159 (ctx->pd->lookup.done =
4160 pf_socket_lookup(ctx->pd), 1)) &&
4161 !pf_match_uid(r->uid.op, r->uid.uid[0],
4162 r->uid.uid[1], ctx->pd->lookup.uid)),
4163 TAILQ_NEXT(r, entries));
4164 /* tcp/udp only. gid.op always 0 in other cases */
4165 PF_TEST_ATTRIB((r->gid.op && (ctx->pd->lookup.done ||
4166 (ctx->pd->lookup.done =
4167 pf_socket_lookup(ctx->pd), 1)) &&
4168 !pf_match_gid(r->gid.op, r->gid.gid[0],
4169 r->gid.gid[1], ctx->pd->lookup.gid)),
4170 TAILQ_NEXT(r, entries));
4171 break;
4172
4173 case IPPROTO_ICMP:
4174 /* icmp only. type always 0 in other cases */
4175 PF_TEST_ATTRIB((r->type &&
4176 r->type != ctx->icmptype + 1),
4177 TAILQ_NEXT(r, entries));
4178 /* icmp only. type always 0 in other cases */
4179 PF_TEST_ATTRIB((r->code &&
4180 r->code != ctx->icmpcode + 1),
4181 TAILQ_NEXT(r, entries));
4182 /* icmp only. don't create states on replies */
4183 PF_TEST_ATTRIB((r->keep_state && !ctx->state_icmp &&
4184 (r->rule_flag & PFRULE_STATESLOPPY) == 0 &&
4185 ctx->icmp_dir != PF_IN),
4186 TAILQ_NEXT(r, entries));
4187 break;
4188
4189 case IPPROTO_ICMPV6:
4190 /* icmp only. type always 0 in other cases */
4191 PF_TEST_ATTRIB((r->type &&
4192 r->type != ctx->icmptype + 1),
4193 TAILQ_NEXT(r, entries));
4194 /* icmp only. type always 0 in other cases */
4195 PF_TEST_ATTRIB((r->code &&
4196 r->code != ctx->icmpcode + 1),
4197 TAILQ_NEXT(r, entries));
4198 /* icmp only. don't create states on replies */
4199 PF_TEST_ATTRIB((r->keep_state && !ctx->state_icmp &&
4200 (r->rule_flag & PFRULE_STATESLOPPY) == 0 &&
4201 ctx->icmp_dir != PF_IN &&
4202 ctx->icmptype != ND_NEIGHBOR_ADVERT),
4203 TAILQ_NEXT(r, entries));
4204 break;
4205
4206 default:
4207 break;
4208 }
4209
4210 PF_TEST_ATTRIB((r->rule_flag & PFRULE_FRAGMENT &&
4211 ctx->pd->virtual_proto != PF_VPROTO_FRAGMENT),
4212 TAILQ_NEXT(r, entries));
4213 PF_TEST_ATTRIB((r->tos && !(r->tos == ctx->pd->tos)),
4214 TAILQ_NEXT(r, entries));
4215 PF_TEST_ATTRIB((r->prob &&
4216 r->prob <= arc4random_uniform(UINT_MAX - 1) + 1),
4217 TAILQ_NEXT(r, entries));
4218 PF_TEST_ATTRIB((r->match_tag &&
4219 !pf_match_tag(ctx->pd->m, r, &ctx->tag)),
4220 TAILQ_NEXT(r, entries));
4221 PF_TEST_ATTRIB((r->rcv_kif && pf_match_rcvif(ctx->pd->m, r) ==
4222 r->rcvifnot),
4223 TAILQ_NEXT(r, entries));
4224 PF_TEST_ATTRIB((r->prio &&
4225 (r->prio == PF_PRIO_ZERO ? 0 : r->prio) !=
4226 ctx->pd->m->m_pkthdr.pf.prio),
4227 TAILQ_NEXT(r, entries));
4228
4229 /* must be last! */
4230 if (r->pktrate.limit) {
4231 pf_add_threshold(&r->pktrate);
4232 PF_TEST_ATTRIB((pf_check_threshold(&r->pktrate)),
4233 TAILQ_NEXT(r, entries));
4234 }
4235
4236 /* FALLTHROUGH */
4237 if (r->tag)
4238 ctx->tag = r->tag;
4239 if (r->anchor == NULL) {
4240
4241 if (r->rule_flag & PFRULE_ONCE) {
4242 u_int32_t rule_flag;
4243
4244 rule_flag = r->rule_flag;
4245 if (((rule_flag & PFRULE_EXPIRED) == 0) &&
4246 atomic_cas_uint(&r->rule_flag, rule_flag,
4247 rule_flag | PFRULE_EXPIRED) == rule_flag) {
4248 r->exptime = gettime();
4249 } else {
4250 r = TAILQ_NEXT(r, entries);
4251 continue;
4252 }
4253 }
4254
4255 if (r->action == PF_MATCH) {
4256 if ((ctx->ri = pool_get(&pf_rule_item_pl,
4257 PR_NOWAIT)) == NULL) {
4258 REASON_SET(&ctx->reason, PFRES_MEMORY);
4259 return (PF_TEST_FAIL);
4260 }
4261 ctx->ri->r = r;
4262 /* order is irrelevant */
4263 SLIST_INSERT_HEAD(&ctx->rules, ctx->ri, entry);
4264 ctx->ri = NULL;
4265 pf_rule_to_actions(r, &ctx->act);
4266 if (r->rule_flag & PFRULE_AFTO)
4267 ctx->pd->naf = r->naf;
4268 if (pf_get_transaddr(r, ctx->pd, ctx->sns,
4269 &ctx->nr) == -1) {
4270 REASON_SET(&ctx->reason,
4271 PFRES_TRANSLATE);
4272 return (PF_TEST_FAIL);
4273 }
4274 #if NPFLOG > 0
4275 if (r->log) {
4276 REASON_SET(&ctx->reason, PFRES_MATCH);
4277 pflog_packet(ctx->pd, ctx->reason, r,
4278 ctx->a, ruleset, NULL);
4279 }
4280 #endif /* NPFLOG > 0 */
4281 } else {
4282 /*
4283 * found matching r
4284 */
4285 *ctx->rm = r;
4286 /*
4287 * anchor, with ruleset, where r belongs to
4288 */
4289 *ctx->am = ctx->a;
4290 /*
4291 * ruleset where r belongs to
4292 */
4293 *ctx->rsm = ruleset;
4294 /*
4295 * ruleset, where anchor belongs to.
4296 */
4297 ctx->arsm = ctx->aruleset;
4298 }
4299
4300 #if NPFLOG > 0
4301 if (ctx->act.log & PF_LOG_MATCHES)
4302 pf_log_matches(ctx->pd, r, ctx->a, ruleset,
4303 &ctx->rules);
4304 #endif /* NPFLOG > 0 */
4305
4306 if (r->quick)
4307 return (PF_TEST_QUICK);
4308 } else {
4309 ctx->a = r;
4310 ctx->aruleset = &r->anchor->ruleset;
4311 if (r->anchor_wildcard) {
4312 RB_FOREACH(child, pf_anchor_node,
4313 &r->anchor->children) {
4314 if (pf_anchor_stack_push(ruleset, r,
4315 child, PF_NEXT_CHILD) != 0)
4316 return (PF_TEST_FAIL);
4317
4318 ruleset = &child->ruleset;
4319 goto enter_ruleset;
4320 next_child:
4321 continue; /* with RB_FOREACH() */
4322 }
4323 } else {
4324 if (pf_anchor_stack_push(ruleset, r, child,
4325 PF_NEXT_RULE) != 0)
4326 return (PF_TEST_FAIL);
4327
4328 ruleset = &r->anchor->ruleset;
4329 child = NULL;
4330 goto enter_ruleset;
4331 next_rule:
4332 ;
4333 }
4334 }
4335 r = TAILQ_NEXT(r, entries);
4336 }
4337
4338 if (pf_anchor_stack_pop(&ruleset, &r, &child, &target) == 0) {
4339 /* stop if any rule matched within quick anchors. */
4340 if (r->quick == PF_TEST_QUICK && *ctx->am == r)
4341 return (PF_TEST_QUICK);
4342
4343 switch (target) {
4344 case PF_NEXT_CHILD:
4345 goto next_child;
4346 case PF_NEXT_RULE:
4347 goto next_rule;
4348 default:
4349 panic("%s: unknown jump target", __func__);
4350 }
4351 }
4352
4353 return (PF_TEST_OK);
4354 }
4355
4356 int
4357 pf_test_rule(struct pf_pdesc *pd, struct pf_rule **rm, struct pf_state **sm,
4358 struct pf_rule **am, struct pf_ruleset **rsm, u_short *reason)
4359 {
4360 struct pf_rule *r = NULL;
4361 struct pf_rule *a = NULL;
4362 struct pf_ruleset *ruleset = NULL;
4363 struct pf_state_key *skw = NULL, *sks = NULL;
4364 int rewrite = 0;
4365 u_int16_t virtual_type, virtual_id;
4366 int action = PF_DROP;
4367 struct pf_test_ctx ctx;
4368 int rv;
4369
4370 PF_ASSERT_LOCKED();
4371
4372 memset(&ctx, 0, sizeof(ctx));
4373 ctx.pd = pd;
4374 ctx.rm = rm;
4375 ctx.am = am;
4376 ctx.rsm = rsm;
4377 ctx.th = &pd->hdr.tcp;
4378 ctx.act.rtableid = pd->rdomain;
4379 ctx.tag = -1;
4380 SLIST_INIT(&ctx.rules);
4381
4382 if (pd->dir == PF_IN && if_congested()) {
4383 REASON_SET(&ctx.reason, PFRES_CONGEST);
4384 return (PF_DROP);
4385 }
4386
4387 switch (pd->virtual_proto) {
4388 case IPPROTO_ICMP:
4389 ctx.icmptype = pd->hdr.icmp.icmp_type;
4390 ctx.icmpcode = pd->hdr.icmp.icmp_code;
4391 ctx.state_icmp = pf_icmp_mapping(pd, ctx.icmptype,
4392 &ctx.icmp_dir, &virtual_id, &virtual_type);
4393 if (ctx.icmp_dir == PF_IN) {
4394 pd->osport = pd->nsport = virtual_id;
4395 pd->odport = pd->ndport = virtual_type;
4396 } else {
4397 pd->osport = pd->nsport = virtual_type;
4398 pd->odport = pd->ndport = virtual_id;
4399 }
4400 break;
4401 #ifdef INET6
4402 case IPPROTO_ICMPV6:
4403 ctx.icmptype = pd->hdr.icmp6.icmp6_type;
4404 ctx.icmpcode = pd->hdr.icmp6.icmp6_code;
4405 ctx.state_icmp = pf_icmp_mapping(pd, ctx.icmptype,
4406 &ctx.icmp_dir, &virtual_id, &virtual_type);
4407 if (ctx.icmp_dir == PF_IN) {
4408 pd->osport = pd->nsport = virtual_id;
4409 pd->odport = pd->ndport = virtual_type;
4410 } else {
4411 pd->osport = pd->nsport = virtual_type;
4412 pd->odport = pd->ndport = virtual_id;
4413 }
4414 break;
4415 #endif /* INET6 */
4416 }
4417
4418 ruleset = &pf_main_ruleset;
4419 rv = pf_match_rule(&ctx, ruleset);
4420 if (rv == PF_TEST_FAIL) {
4421 /*
4422 * Reason has been set in pf_match_rule() already.
4423 */
4424 goto cleanup;
4425 }
4426
4427 r = *ctx.rm; /* matching rule */
4428 a = *ctx.am; /* rule that defines an anchor containing 'r' */
4429 ruleset = *ctx.rsm;/* ruleset of the anchor defined by the rule 'a' */
4430 ctx.aruleset = ctx.arsm;/* ruleset of the 'a' rule itself */
4431
4432 /* apply actions for last matching pass/block rule */
4433 pf_rule_to_actions(r, &ctx.act);
4434 if (r->rule_flag & PFRULE_AFTO)
4435 pd->naf = r->naf;
4436 if (pf_get_transaddr(r, pd, ctx.sns, &ctx.nr) == -1) {
4437 REASON_SET(&ctx.reason, PFRES_TRANSLATE);
4438 goto cleanup;
4439 }
4440 REASON_SET(&ctx.reason, PFRES_MATCH);
4441
4442 #if NPFLOG > 0
4443 if (r->log)
4444 pflog_packet(pd, ctx.reason, r, a, ruleset, NULL);
4445 if (ctx.act.log & PF_LOG_MATCHES)
4446 pf_log_matches(pd, r, a, ruleset, &ctx.rules);
4447 #endif /* NPFLOG > 0 */
4448
4449 if (pd->virtual_proto != PF_VPROTO_FRAGMENT &&
4450 (r->action == PF_DROP) &&
4451 ((r->rule_flag & PFRULE_RETURNRST) ||
4452 (r->rule_flag & PFRULE_RETURNICMP) ||
4453 (r->rule_flag & PFRULE_RETURN))) {
4454 if (pd->proto == IPPROTO_TCP &&
4455 ((r->rule_flag & PFRULE_RETURNRST) ||
4456 (r->rule_flag & PFRULE_RETURN)) &&
4457 !(ctx.th->th_flags & TH_RST)) {
4458 u_int32_t ack =
4459 ntohl(ctx.th->th_seq) + pd->p_len;
4460
4461 if (pf_check_tcp_cksum(pd->m, pd->off,
4462 pd->tot_len - pd->off, pd->af))
4463 REASON_SET(&ctx.reason, PFRES_PROTCKSUM);
4464 else {
4465 if (ctx.th->th_flags & TH_SYN)
4466 ack++;
4467 if (ctx.th->th_flags & TH_FIN)
4468 ack++;
4469 pf_send_tcp(r, pd->af, pd->dst,
4470 pd->src, ctx.th->th_dport,
4471 ctx.th->th_sport, ntohl(ctx.th->th_ack),
4472 ack, TH_RST|TH_ACK, 0, 0, r->return_ttl,
4473 1, 0, pd->rdomain);
4474 }
4475 } else if ((pd->proto != IPPROTO_ICMP ||
4476 ICMP_INFOTYPE(ctx.icmptype)) && pd->af == AF_INET &&
4477 r->return_icmp)
4478 pf_send_icmp(pd->m, r->return_icmp >> 8,
4479 r->return_icmp & 255, 0, pd->af, r, pd->rdomain);
4480 else if ((pd->proto != IPPROTO_ICMPV6 ||
4481 (ctx.icmptype >= ICMP6_ECHO_REQUEST &&
4482 ctx.icmptype != ND_REDIRECT)) && pd->af == AF_INET6 &&
4483 r->return_icmp6)
4484 pf_send_icmp(pd->m, r->return_icmp6 >> 8,
4485 r->return_icmp6 & 255, 0, pd->af, r, pd->rdomain);
4486 }
4487
4488 if (r->action == PF_DROP)
4489 goto cleanup;
4490
4491 pf_tag_packet(pd->m, ctx.tag, ctx.act.rtableid);
4492 if (ctx.act.rtableid >= 0 &&
4493 rtable_l2(ctx.act.rtableid) != pd->rdomain)
4494 pd->destchg = 1;
4495
4496 if (r->action == PF_PASS && pd->badopts != 0 && ! r->allow_opts) {
4497 REASON_SET(&ctx.reason, PFRES_IPOPTIONS);
4498 #if NPFLOG > 0
4499 pd->pflog |= PF_LOG_FORCE;
4500 #endif /* NPFLOG > 0 */
4501 DPFPRINTF(LOG_NOTICE, "dropping packet with "
4502 "ip/ipv6 options in pf_test_rule()");
4503 goto cleanup;
4504 }
4505
4506 if (pd->virtual_proto != PF_VPROTO_FRAGMENT
4507 && !ctx.state_icmp && r->keep_state) {
4508
4509 if (r->rule_flag & PFRULE_SRCTRACK &&
4510 pf_insert_src_node(&ctx.sns[PF_SN_NONE], r, PF_SN_NONE,
4511 pd->af, pd->src, NULL, NULL) != 0) {
4512 REASON_SET(&ctx.reason, PFRES_SRCLIMIT);
4513 goto cleanup;
4514 }
4515
4516 if (r->max_states && (r->states_cur >= r->max_states)) {
4517 pf_status.lcounters[LCNT_STATES]++;
4518 REASON_SET(&ctx.reason, PFRES_MAXSTATES);
4519 goto cleanup;
4520 }
4521
4522 action = pf_create_state(pd, r, a, ctx.nr, &skw, &sks,
4523 &rewrite, sm, ctx.tag, &ctx.rules, &ctx.act, ctx.sns);
4524
4525 if (action != PF_PASS)
4526 goto cleanup;
4527 if (sks != skw) {
4528 struct pf_state_key *sk;
4529
4530 if (pd->dir == PF_IN)
4531 sk = sks;
4532 else
4533 sk = skw;
4534 rewrite += pf_translate(pd,
4535 &sk->addr[pd->af == pd->naf ? pd->sidx : pd->didx],
4536 sk->port[pd->af == pd->naf ? pd->sidx : pd->didx],
4537 &sk->addr[pd->af == pd->naf ? pd->didx : pd->sidx],
4538 sk->port[pd->af == pd->naf ? pd->didx : pd->sidx],
4539 virtual_type, ctx.icmp_dir);
4540 }
4541
4542 #ifdef INET6
4543 if (rewrite && skw->af != sks->af)
4544 action = PF_AFRT;
4545 #endif /* INET6 */
4546
4547 } else {
4548 action = PF_PASS;
4549
4550 while ((ctx.ri = SLIST_FIRST(&ctx.rules))) {
4551 SLIST_REMOVE_HEAD(&ctx.rules, entry);
4552 pool_put(&pf_rule_item_pl, ctx.ri);
4553 }
4554 }
4555
4556 /* copy back packet headers if needed */
4557 if (rewrite && pd->hdrlen) {
4558 m_copyback(pd->m, pd->off, pd->hdrlen, &pd->hdr, M_NOWAIT);
4559 }
4560
4561 #if NPFSYNC > 0
4562 if (*sm != NULL && !ISSET((*sm)->state_flags, PFSTATE_NOSYNC) &&
4563 pd->dir == PF_OUT && pfsync_is_up()) {
4564 /*
4565 * We want the state created, but we don't
4566 * want to send this in case a partner
4567 * firewall has to know about it to allow
4568 * replies through it.
4569 */
4570 if (pfsync_defer(*sm, pd->m))
4571 return (PF_DEFER);
4572 }
4573 #endif /* NPFSYNC > 0 */
4574
4575 return (action);
4576
4577 cleanup:
4578 while ((ctx.ri = SLIST_FIRST(&ctx.rules))) {
4579 SLIST_REMOVE_HEAD(&ctx.rules, entry);
4580 pool_put(&pf_rule_item_pl, ctx.ri);
4581 }
4582
4583 return (action);
4584 }
4585
4586 static __inline int
4587 pf_create_state(struct pf_pdesc *pd, struct pf_rule *r, struct pf_rule *a,
4588 struct pf_rule *nr, struct pf_state_key **skw, struct pf_state_key **sks,
4589 int *rewrite, struct pf_state **sm, int tag, struct pf_rule_slist *rules,
4590 struct pf_rule_actions *act, struct pf_src_node *sns[PF_SN_MAX])
4591 {
4592 struct pf_state *st = NULL;
4593 struct tcphdr *th = &pd->hdr.tcp;
4594 u_int16_t mss = tcp_mssdflt;
4595 u_short reason;
4596 u_int i;
4597
4598 st = pool_get(&pf_state_pl, PR_NOWAIT | PR_ZERO);
4599 if (st == NULL) {
4600 REASON_SET(&reason, PFRES_MEMORY);
4601 goto csfailed;
4602 }
4603 st->rule.ptr = r;
4604 st->anchor.ptr = a;
4605 st->natrule.ptr = nr;
4606 if (r->allow_opts)
4607 st->state_flags |= PFSTATE_ALLOWOPTS;
4608 if (r->rule_flag & PFRULE_STATESLOPPY)
4609 st->state_flags |= PFSTATE_SLOPPY;
4610 if (r->rule_flag & PFRULE_PFLOW)
4611 st->state_flags |= PFSTATE_PFLOW;
4612 if (r->rule_flag & PFRULE_NOSYNC)
4613 st->state_flags |= PFSTATE_NOSYNC;
4614 #if NPFLOG > 0
4615 st->log = act->log & PF_LOG_ALL;
4616 #endif /* NPFLOG > 0 */
4617 st->qid = act->qid;
4618 st->pqid = act->pqid;
4619 st->rtableid[pd->didx] = act->rtableid;
4620 st->rtableid[pd->sidx] = -1; /* return traffic is routed normally */
4621 st->min_ttl = act->min_ttl;
4622 st->set_tos = act->set_tos;
4623 st->max_mss = act->max_mss;
4624 st->state_flags |= act->flags;
4625 #if NPFSYNC > 0
4626 st->sync_state = PFSYNC_S_NONE;
4627 #endif /* NPFSYNC > 0 */
4628 st->set_prio[0] = act->set_prio[0];
4629 st->set_prio[1] = act->set_prio[1];
4630 st->delay = act->delay;
4631 SLIST_INIT(&st->src_nodes);
4632
4633 /*
4634 * must initialize refcnt before pf_state_insert() gets called.
4635 * pf_state_insert() grabs a reference for pfsync!
4636 */
4637 PF_REF_INIT(st->refcnt);
4638 mtx_init(&st->mtx, IPL_NET);
4639
4640 switch (pd->proto) {
4641 case IPPROTO_TCP:
4642 st->src.seqlo = ntohl(th->th_seq);
4643 st->src.seqhi = st->src.seqlo + pd->p_len + 1;
4644 if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
4645 r->keep_state == PF_STATE_MODULATE) {
4646 /* Generate sequence number modulator */
4647 st->src.seqdiff = pf_tcp_iss(pd) - st->src.seqlo;
4648 if (st->src.seqdiff == 0)
4649 st->src.seqdiff = 1;
4650 pf_patch_32(pd, &th->th_seq,
4651 htonl(st->src.seqlo + st->src.seqdiff));
4652 *rewrite = 1;
4653 } else
4654 st->src.seqdiff = 0;
4655 if (th->th_flags & TH_SYN) {
4656 st->src.seqhi++;
4657 st->src.wscale = pf_get_wscale(pd);
4658 }
4659 st->src.max_win = MAX(ntohs(th->th_win), 1);
4660 if (st->src.wscale & PF_WSCALE_MASK) {
4661 /* Remove scale factor from initial window */
4662 int win = st->src.max_win;
4663 win += 1 << (st->src.wscale & PF_WSCALE_MASK);
4664 st->src.max_win = (win - 1) >>
4665 (st->src.wscale & PF_WSCALE_MASK);
4666 }
4667 if (th->th_flags & TH_FIN)
4668 st->src.seqhi++;
4669 st->dst.seqhi = 1;
4670 st->dst.max_win = 1;
4671 pf_set_protostate(st, PF_PEER_SRC, TCPS_SYN_SENT);
4672 pf_set_protostate(st, PF_PEER_DST, TCPS_CLOSED);
4673 st->timeout = PFTM_TCP_FIRST_PACKET;
4674 pf_status.states_halfopen++;
4675 break;
4676 case IPPROTO_UDP:
4677 pf_set_protostate(st, PF_PEER_SRC, PFUDPS_SINGLE);
4678 pf_set_protostate(st, PF_PEER_DST, PFUDPS_NO_TRAFFIC);
4679 st->timeout = PFTM_UDP_FIRST_PACKET;
4680 break;
4681 case IPPROTO_ICMP:
4682 #ifdef INET6
4683 case IPPROTO_ICMPV6:
4684 #endif /* INET6 */
4685 st->timeout = PFTM_ICMP_FIRST_PACKET;
4686 break;
4687 default:
4688 pf_set_protostate(st, PF_PEER_SRC, PFOTHERS_SINGLE);
4689 pf_set_protostate(st, PF_PEER_DST, PFOTHERS_NO_TRAFFIC);
4690 st->timeout = PFTM_OTHER_FIRST_PACKET;
4691 }
4692
4693 st->creation = getuptime();
4694 st->expire = getuptime();
4695
4696 if (pd->proto == IPPROTO_TCP) {
4697 if (st->state_flags & PFSTATE_SCRUB_TCP &&
4698 pf_normalize_tcp_init(pd, &st->src)) {
4699 REASON_SET(&reason, PFRES_MEMORY);
4700 goto csfailed;
4701 }
4702 if (st->state_flags & PFSTATE_SCRUB_TCP && st->src.scrub &&
4703 pf_normalize_tcp_stateful(pd, &reason, st,
4704 &st->src, &st->dst, rewrite)) {
4705 /* This really shouldn't happen!!! */
4706 DPFPRINTF(LOG_ERR,
4707 "%s: tcp normalize failed on first pkt", __func__);
4708 goto csfailed;
4709 }
4710 }
4711 st->direction = pd->dir;
4712
4713 if (pf_state_key_setup(pd, skw, sks, act->rtableid)) {
4714 REASON_SET(&reason, PFRES_MEMORY);
4715 goto csfailed;
4716 }
4717
4718 if (pf_set_rt_ifp(st, pd->src, (*skw)->af, sns) != 0) {
4719 REASON_SET(&reason, PFRES_NOROUTE);
4720 goto csfailed;
4721 }
4722
4723 for (i = 0; i < PF_SN_MAX; i++)
4724 if (sns[i] != NULL) {
4725 struct pf_sn_item *sni;
4726
4727 sni = pool_get(&pf_sn_item_pl, PR_NOWAIT);
4728 if (sni == NULL) {
4729 REASON_SET(&reason, PFRES_MEMORY);
4730 goto csfailed;
4731 }
4732 sni->sn = sns[i];
4733 SLIST_INSERT_HEAD(&st->src_nodes, sni, next);
4734 sni->sn->states++;
4735 }
4736
4737 #if NPFSYNC > 0
4738 pfsync_init_state(st, *skw, *sks, 0);
4739 #endif
4740
4741 if (pf_state_insert(BOUND_IFACE(r, pd->kif), skw, sks, st)) {
4742 *sks = *skw = NULL;
4743 REASON_SET(&reason, PFRES_STATEINS);
4744 goto csfailed;
4745 } else
4746 *sm = st;
4747
4748 /*
4749 * Make state responsible for rules it binds here.
4750 */
4751 memcpy(&st->match_rules, rules, sizeof(st->match_rules));
4752 memset(rules, 0, sizeof(*rules));
4753 STATE_INC_COUNTERS(st);
4754
4755 if (tag > 0) {
4756 pf_tag_ref(tag);
4757 st->tag = tag;
4758 }
4759 if (pd->proto == IPPROTO_TCP && (th->th_flags & (TH_SYN|TH_ACK)) ==
4760 TH_SYN && r->keep_state == PF_STATE_SYNPROXY && pd->dir == PF_IN) {
4761 int rtid = pd->rdomain;
4762 if (act->rtableid >= 0)
4763 rtid = act->rtableid;
4764 pf_set_protostate(st, PF_PEER_SRC, PF_TCPS_PROXY_SRC);
4765 st->src.seqhi = arc4random();
4766 /* Find mss option */
4767 mss = pf_get_mss(pd);
4768 mss = pf_calc_mss(pd->src, pd->af, rtid, mss);
4769 mss = pf_calc_mss(pd->dst, pd->af, rtid, mss);
4770 st->src.mss = mss;
4771 pf_send_tcp(r, pd->af, pd->dst, pd->src, th->th_dport,
4772 th->th_sport, st->src.seqhi, ntohl(th->th_seq) + 1,
4773 TH_SYN|TH_ACK, 0, st->src.mss, 0, 1, 0, pd->rdomain);
4774 REASON_SET(&reason, PFRES_SYNPROXY);
4775 return (PF_SYNPROXY_DROP);
4776 }
4777
4778 return (PF_PASS);
4779
4780 csfailed:
4781 if (st) {
4782 pf_normalize_tcp_cleanup(st); /* safe even w/o init */
4783 pf_src_tree_remove_state(st);
4784 pool_put(&pf_state_pl, st);
4785 }
4786
4787 for (i = 0; i < PF_SN_MAX; i++)
4788 if (sns[i] != NULL)
4789 pf_remove_src_node(sns[i]);
4790
4791 return (PF_DROP);
4792 }
4793
4794 int
4795 pf_translate(struct pf_pdesc *pd, struct pf_addr *saddr, u_int16_t sport,
4796 struct pf_addr *daddr, u_int16_t dport, u_int16_t virtual_type,
4797 int icmp_dir)
4798 {
4799 int rewrite = 0;
4800 int afto = pd->af != pd->naf;
4801
4802 if (afto || PF_ANEQ(daddr, pd->dst, pd->af))
4803 pd->destchg = 1;
4804
4805 switch (pd->proto) {
4806 case IPPROTO_TCP: /* FALLTHROUGH */
4807 case IPPROTO_UDP:
4808 rewrite += pf_patch_16(pd, pd->sport, sport);
4809 rewrite += pf_patch_16(pd, pd->dport, dport);
4810 break;
4811
4812 case IPPROTO_ICMP:
4813 if (pd->af != AF_INET)
4814 return (0);
4815
4816 #ifdef INET6
4817 if (afto) {
4818 if (pf_translate_icmp_af(pd, AF_INET6, &pd->hdr.icmp))
4819 return (0);
4820 pd->proto = IPPROTO_ICMPV6;
4821 rewrite = 1;
4822 }
4823 #endif /* INET6 */
4824 if (virtual_type == htons(ICMP_ECHO)) {
4825 u_int16_t icmpid = (icmp_dir == PF_IN) ? sport : dport;
4826 rewrite += pf_patch_16(pd,
4827 &pd->hdr.icmp.icmp_id, icmpid);
4828 }
4829 break;
4830
4831 #ifdef INET6
4832 case IPPROTO_ICMPV6:
4833 if (pd->af != AF_INET6)
4834 return (0);
4835
4836 if (afto) {
4837 if (pf_translate_icmp_af(pd, AF_INET, &pd->hdr.icmp6))
4838 return (0);
4839 pd->proto = IPPROTO_ICMP;
4840 rewrite = 1;
4841 }
4842 if (virtual_type == htons(ICMP6_ECHO_REQUEST)) {
4843 u_int16_t icmpid = (icmp_dir == PF_IN) ? sport : dport;
4844 rewrite += pf_patch_16(pd,
4845 &pd->hdr.icmp6.icmp6_id, icmpid);
4846 }
4847 break;
4848 #endif /* INET6 */
4849 }
4850
4851 if (!afto) {
4852 rewrite += pf_translate_a(pd, pd->src, saddr);
4853 rewrite += pf_translate_a(pd, pd->dst, daddr);
4854 }
4855
4856 return (rewrite);
4857 }
4858
4859 int
4860 pf_tcp_track_full(struct pf_pdesc *pd, struct pf_state **stp, u_short *reason,
4861 int *copyback, int reverse)
4862 {
4863 struct tcphdr *th = &pd->hdr.tcp;
4864 struct pf_state_peer *src, *dst;
4865 u_int16_t win = ntohs(th->th_win);
4866 u_int32_t ack, end, data_end, seq, orig_seq;
4867 u_int8_t sws, dws, psrc, pdst;
4868 int ackskew;
4869
4870 if ((pd->dir == (*stp)->direction && !reverse) ||
4871 (pd->dir != (*stp)->direction && reverse)) {
4872 src = &(*stp)->src;
4873 dst = &(*stp)->dst;
4874 psrc = PF_PEER_SRC;
4875 pdst = PF_PEER_DST;
4876 } else {
4877 src = &(*stp)->dst;
4878 dst = &(*stp)->src;
4879 psrc = PF_PEER_DST;
4880 pdst = PF_PEER_SRC;
4881 }
4882
4883 if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) {
4884 sws = src->wscale & PF_WSCALE_MASK;
4885 dws = dst->wscale & PF_WSCALE_MASK;
4886 } else
4887 sws = dws = 0;
4888
4889 /*
4890 * Sequence tracking algorithm from Guido van Rooij's paper:
4891 * http://www.madison-gurkha.com/publications/tcp_filtering/
4892 * tcp_filtering.ps
4893 */
4894
4895 orig_seq = seq = ntohl(th->th_seq);
4896 if (src->seqlo == 0) {
4897 /* First packet from this end. Set its state */
4898
4899 if (((*stp)->state_flags & PFSTATE_SCRUB_TCP || dst->scrub) &&
4900 src->scrub == NULL) {
4901 if (pf_normalize_tcp_init(pd, src)) {
4902 REASON_SET(reason, PFRES_MEMORY);
4903 return (PF_DROP);
4904 }
4905 }
4906
4907 /* Deferred generation of sequence number modulator */
4908 if (dst->seqdiff && !src->seqdiff) {
4909 /* use random iss for the TCP server */
4910 while ((src->seqdiff = arc4random() - seq) == 0)
4911 continue;
4912 ack = ntohl(th->th_ack) - dst->seqdiff;
4913 pf_patch_32(pd, &th->th_seq, htonl(seq + src->seqdiff));
4914 pf_patch_32(pd, &th->th_ack, htonl(ack));
4915 *copyback = 1;
4916 } else {
4917 ack = ntohl(th->th_ack);
4918 }
4919
4920 end = seq + pd->p_len;
4921 if (th->th_flags & TH_SYN) {
4922 end++;
4923 if (dst->wscale & PF_WSCALE_FLAG) {
4924 src->wscale = pf_get_wscale(pd);
4925 if (src->wscale & PF_WSCALE_FLAG) {
4926 /* Remove scale factor from initial
4927 * window */
4928 sws = src->wscale & PF_WSCALE_MASK;
4929 win = ((u_int32_t)win + (1 << sws) - 1)
4930 >> sws;
4931 dws = dst->wscale & PF_WSCALE_MASK;
4932 } else {
4933 /* fixup other window */
4934 dst->max_win = MIN(TCP_MAXWIN,
4935 (u_int32_t)dst->max_win <<
4936 (dst->wscale & PF_WSCALE_MASK));
4937 /* in case of a retrans SYN|ACK */
4938 dst->wscale = 0;
4939 }
4940 }
4941 }
4942 data_end = end;
4943 if (th->th_flags & TH_FIN)
4944 end++;
4945
4946 src->seqlo = seq;
4947 if (src->state < TCPS_SYN_SENT)
4948 pf_set_protostate(*stp, psrc, TCPS_SYN_SENT);
4949
4950 /*
4951 * May need to slide the window (seqhi may have been set by
4952 * the crappy stack check or if we picked up the connection
4953 * after establishment)
4954 */
4955 if (src->seqhi == 1 ||
4956 SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi))
4957 src->seqhi = end + MAX(1, dst->max_win << dws);
4958 if (win > src->max_win)
4959 src->max_win = win;
4960
4961 } else {
4962 ack = ntohl(th->th_ack) - dst->seqdiff;
4963 if (src->seqdiff) {
4964 /* Modulate sequence numbers */
4965 pf_patch_32(pd, &th->th_seq, htonl(seq + src->seqdiff));
4966 pf_patch_32(pd, &th->th_ack, htonl(ack));
4967 *copyback = 1;
4968 }
4969 end = seq + pd->p_len;
4970 if (th->th_flags & TH_SYN)
4971 end++;
4972 data_end = end;
4973 if (th->th_flags & TH_FIN)
4974 end++;
4975 }
4976
4977 if ((th->th_flags & TH_ACK) == 0) {
4978 /* Let it pass through the ack skew check */
4979 ack = dst->seqlo;
4980 } else if ((ack == 0 &&
4981 (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
4982 /* broken tcp stacks do not set ack */
4983 (dst->state < TCPS_SYN_SENT)) {
4984 /*
4985 * Many stacks (ours included) will set the ACK number in an
4986 * FIN|ACK if the SYN times out -- no sequence to ACK.
4987 */
4988 ack = dst->seqlo;
4989 }
4990
4991 if (seq == end) {
4992 /* Ease sequencing restrictions on no data packets */
4993 seq = src->seqlo;
4994 data_end = end = seq;
4995 }
4996
4997 ackskew = dst->seqlo - ack;
4998
4999
5000 /*
5001 * Need to demodulate the sequence numbers in any TCP SACK options
5002 * (Selective ACK). We could optionally validate the SACK values
5003 * against the current ACK window, either forwards or backwards, but
5004 * I'm not confident that SACK has been implemented properly
5005 * everywhere. It wouldn't surprise me if several stacks accidentally
5006 * SACK too far backwards of previously ACKed data. There really aren't
5007 * any security implications of bad SACKing unless the target stack
5008 * doesn't validate the option length correctly. Someone trying to
5009 * spoof into a TCP connection won't bother blindly sending SACK
5010 * options anyway.
5011 */
5012 if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) {
5013 if (pf_modulate_sack(pd, dst))
5014 *copyback = 1;
5015 }
5016
5017
5018 #define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */
5019 if (SEQ_GEQ(src->seqhi, data_end) &&
5020 /* Last octet inside other's window space */
5021 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) &&
5022 /* Retrans: not more than one window back */
5023 (ackskew >= -MAXACKWINDOW) &&
5024 /* Acking not more than one reassembled fragment backwards */
5025 (ackskew <= (MAXACKWINDOW << sws)) &&
5026 /* Acking not more than one window forward */
5027 ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo ||
5028 (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo))) {
5029 /* Require an exact/+1 sequence match on resets when possible */
5030
5031 if (dst->scrub || src->scrub) {
5032 if (pf_normalize_tcp_stateful(pd, reason, *stp, src,
5033 dst, copyback))
5034 return (PF_DROP);
5035 }
5036
5037 /* update max window */
5038 if (src->max_win < win)
5039 src->max_win = win;
5040 /* synchronize sequencing */
5041 if (SEQ_GT(end, src->seqlo))
5042 src->seqlo = end;
5043 /* slide the window of what the other end can send */
5044 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
5045 dst->seqhi = ack + MAX((win << sws), 1);
5046
5047 /* update states */
5048 if (th->th_flags & TH_SYN)
5049 if (src->state < TCPS_SYN_SENT)
5050 pf_set_protostate(*stp, psrc, TCPS_SYN_SENT);
5051 if (th->th_flags & TH_FIN)
5052 if (src->state < TCPS_CLOSING)
5053 pf_set_protostate(*stp, psrc, TCPS_CLOSING);
5054 if (th->th_flags & TH_ACK) {
5055 if (dst->state == TCPS_SYN_SENT) {
5056 pf_set_protostate(*stp, pdst,
5057 TCPS_ESTABLISHED);
5058 if (src->state == TCPS_ESTABLISHED &&
5059 !SLIST_EMPTY(&(*stp)->src_nodes) &&
5060 pf_src_connlimit(stp)) {
5061 REASON_SET(reason, PFRES_SRCLIMIT);
5062 return (PF_DROP);
5063 }
5064 } else if (dst->state == TCPS_CLOSING)
5065 pf_set_protostate(*stp, pdst,
5066 TCPS_FIN_WAIT_2);
5067 }
5068 if (th->th_flags & TH_RST)
5069 pf_set_protostate(*stp, PF_PEER_BOTH, TCPS_TIME_WAIT);
5070
5071 /* update expire time */
5072 (*stp)->expire = getuptime();
5073 if (src->state >= TCPS_FIN_WAIT_2 &&
5074 dst->state >= TCPS_FIN_WAIT_2)
5075 pf_update_state_timeout(*stp, PFTM_TCP_CLOSED);
5076 else if (src->state >= TCPS_CLOSING &&
5077 dst->state >= TCPS_CLOSING)
5078 pf_update_state_timeout(*stp, PFTM_TCP_FIN_WAIT);
5079 else if (src->state < TCPS_ESTABLISHED ||
5080 dst->state < TCPS_ESTABLISHED)
5081 pf_update_state_timeout(*stp, PFTM_TCP_OPENING);
5082 else if (src->state >= TCPS_CLOSING ||
5083 dst->state >= TCPS_CLOSING)
5084 pf_update_state_timeout(*stp, PFTM_TCP_CLOSING);
5085 else
5086 pf_update_state_timeout(*stp, PFTM_TCP_ESTABLISHED);
5087
5088 /* Fall through to PASS packet */
5089 } else if ((dst->state < TCPS_SYN_SENT ||
5090 dst->state >= TCPS_FIN_WAIT_2 ||
5091 src->state >= TCPS_FIN_WAIT_2) &&
5092 SEQ_GEQ(src->seqhi + MAXACKWINDOW, data_end) &&
5093 /* Within a window forward of the originating packet */
5094 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
5095 /* Within a window backward of the originating packet */
5096
5097 /*
5098 * This currently handles three situations:
5099 * 1) Stupid stacks will shotgun SYNs before their peer
5100 * replies.
5101 * 2) When PF catches an already established stream (the
5102 * firewall rebooted, the state table was flushed, routes
5103 * changed...)
5104 * 3) Packets get funky immediately after the connection
5105 * closes (this should catch Solaris spurious ACK|FINs
5106 * that web servers like to spew after a close)
5107 *
5108 * This must be a little more careful than the above code
5109 * since packet floods will also be caught here. We don't
5110 * update the TTL here to mitigate the damage of a packet
5111 * flood and so the same code can handle awkward establishment
5112 * and a loosened connection close.
5113 * In the establishment case, a correct peer response will
5114 * validate the connection, go through the normal state code
5115 * and keep updating the state TTL.
5116 */
5117
5118 if (pf_status.debug >= LOG_NOTICE) {
5119 log(LOG_NOTICE, "pf: loose state match: ");
5120 pf_print_state(*stp);
5121 pf_print_flags(th->th_flags);
5122 addlog(" seq=%u (%u) ack=%u len=%u ackskew=%d "
5123 "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack,
5124 pd->p_len, ackskew, (*stp)->packets[0],
5125 (*stp)->packets[1],
5126 pd->dir == PF_IN ? "in" : "out",
5127 pd->dir == (*stp)->direction ? "fwd" : "rev");
5128 }
5129
5130 if (dst->scrub || src->scrub) {
5131 if (pf_normalize_tcp_stateful(pd, reason, *stp, src,
5132 dst, copyback))
5133 return (PF_DROP);
5134 }
5135
5136 /* update max window */
5137 if (src->max_win < win)
5138 src->max_win = win;
5139 /* synchronize sequencing */
5140 if (SEQ_GT(end, src->seqlo))
5141 src->seqlo = end;
5142 /* slide the window of what the other end can send */
5143 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
5144 dst->seqhi = ack + MAX((win << sws), 1);
5145
5146 /*
5147 * Cannot set dst->seqhi here since this could be a shotgunned
5148 * SYN and not an already established connection.
5149 */
5150 if (th->th_flags & TH_FIN)
5151 if (src->state < TCPS_CLOSING)
5152 pf_set_protostate(*stp, psrc, TCPS_CLOSING);
5153 if (th->th_flags & TH_RST)
5154 pf_set_protostate(*stp, PF_PEER_BOTH, TCPS_TIME_WAIT);
5155
5156 /* Fall through to PASS packet */
5157 } else {
5158 if ((*stp)->dst.state == TCPS_SYN_SENT &&
5159 (*stp)->src.state == TCPS_SYN_SENT) {
5160 /* Send RST for state mismatches during handshake */
5161 if (!(th->th_flags & TH_RST))
5162 pf_send_tcp((*stp)->rule.ptr, pd->af,
5163 pd->dst, pd->src, th->th_dport,
5164 th->th_sport, ntohl(th->th_ack), 0,
5165 TH_RST, 0, 0,
5166 (*stp)->rule.ptr->return_ttl, 1, 0,
5167 pd->rdomain);
5168 src->seqlo = 0;
5169 src->seqhi = 1;
5170 src->max_win = 1;
5171 } else if (pf_status.debug >= LOG_NOTICE) {
5172 log(LOG_NOTICE, "pf: BAD state: ");
5173 pf_print_state(*stp);
5174 pf_print_flags(th->th_flags);
5175 addlog(" seq=%u (%u) ack=%u len=%u ackskew=%d "
5176 "pkts=%llu:%llu dir=%s,%s\n",
5177 seq, orig_seq, ack, pd->p_len, ackskew,
5178 (*stp)->packets[0], (*stp)->packets[1],
5179 pd->dir == PF_IN ? "in" : "out",
5180 pd->dir == (*stp)->direction ? "fwd" : "rev");
5181 addlog("pf: State failure on: %c %c %c %c | %c %c\n",
5182 SEQ_GEQ(src->seqhi, data_end) ? ' ' : '1',
5183 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ?
5184 ' ': '2',
5185 (ackskew >= -MAXACKWINDOW) ? ' ' : '3',
5186 (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4',
5187 SEQ_GEQ(src->seqhi + MAXACKWINDOW, data_end) ?
5188 ' ' :'5',
5189 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6');
5190 }
5191 REASON_SET(reason, PFRES_BADSTATE);
5192 return (PF_DROP);
5193 }
5194
5195 return (PF_PASS);
5196 }
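
/*
 * Worked example (illustrative, not part of the original source):
 * with src->seqlo 1000, src->seqhi 10000, dst->max_win 8192 and dws
 * 0, a 100-byte segment carrying seq 1500 satisfies the first two
 * window checks above because
 *	data_end = 1600 <= src->seqhi	and	seq >= 1000 - 8192
 * while the two ackskew tests bound how far th_ack may sit on either
 * side of dst->seqlo, with MAXACKWINDOW as the fudge factor.
 */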
5197
5198 int
5199 pf_tcp_track_sloppy(struct pf_pdesc *pd, struct pf_state **stp,
5200 u_short *reason)
5201 {
5202 struct tcphdr *th = &pd->hdr.tcp;
5203 struct pf_state_peer *src, *dst;
5204 u_int8_t psrc, pdst;
5205
5206 if (pd->dir == (*stp)->direction) {
5207 src = &(*stp)->src;
5208 dst = &(*stp)->dst;
5209 psrc = PF_PEER_SRC;
5210 pdst = PF_PEER_DST;
5211 } else {
5212 src = &(*stp)->dst;
5213 dst = &(*stp)->src;
5214 psrc = PF_PEER_DST;
5215 pdst = PF_PEER_SRC;
5216 }
5217
5218 if (th->th_flags & TH_SYN)
5219 if (src->state < TCPS_SYN_SENT)
5220 pf_set_protostate(*stp, psrc, TCPS_SYN_SENT);
5221 if (th->th_flags & TH_FIN)
5222 if (src->state < TCPS_CLOSING)
5223 pf_set_protostate(*stp, psrc, TCPS_CLOSING);
5224 if (th->th_flags & TH_ACK) {
5225 if (dst->state == TCPS_SYN_SENT) {
5226 pf_set_protostate(*stp, pdst, TCPS_ESTABLISHED);
5227 if (src->state == TCPS_ESTABLISHED &&
5228 !SLIST_EMPTY(&(*stp)->src_nodes) &&
5229 pf_src_connlimit(stp)) {
5230 REASON_SET(reason, PFRES_SRCLIMIT);
5231 return (PF_DROP);
5232 }
5233 } else if (dst->state == TCPS_CLOSING) {
5234 pf_set_protostate(*stp, pdst, TCPS_FIN_WAIT_2);
5235 } else if (src->state == TCPS_SYN_SENT &&
5236 dst->state < TCPS_SYN_SENT) {
5237 /*
5238 * Handle a special sloppy case where we only see one
5239 * half of the connection. If there is an ACK after
5240 * the initial SYN without ever seeing a packet from
5241 * the destination, set the connection to established.
5242 */
5243 pf_set_protostate(*stp, PF_PEER_BOTH,
5244 TCPS_ESTABLISHED);
5245 if (!SLIST_EMPTY(&(*stp)->src_nodes) &&
5246 pf_src_connlimit(stp)) {
5247 REASON_SET(reason, PFRES_SRCLIMIT);
5248 return (PF_DROP);
5249 }
5250 } else if (src->state == TCPS_CLOSING &&
5251 dst->state == TCPS_ESTABLISHED &&
5252 dst->seqlo == 0) {
5253 /*
5254 * Handle the closing of half connections where we
5255 * don't see the full bidirectional FIN/ACK+ACK
5256 * handshake.
5257 */
5258 pf_set_protostate(*stp, pdst, TCPS_CLOSING);
5259 }
5260 }
5261 if (th->th_flags & TH_RST)
5262 pf_set_protostate(*stp, PF_PEER_BOTH, TCPS_TIME_WAIT);
5263
5264 /* update expire time */
5265 (*stp)->expire = getuptime();
5266 if (src->state >= TCPS_FIN_WAIT_2 &&
5267 dst->state >= TCPS_FIN_WAIT_2)
5268 pf_update_state_timeout(*stp, PFTM_TCP_CLOSED);
5269 else if (src->state >= TCPS_CLOSING &&
5270 dst->state >= TCPS_CLOSING)
5271 pf_update_state_timeout(*stp, PFTM_TCP_FIN_WAIT);
5272 else if (src->state < TCPS_ESTABLISHED ||
5273 dst->state < TCPS_ESTABLISHED)
5274 pf_update_state_timeout(*stp, PFTM_TCP_OPENING);
5275 else if (src->state >= TCPS_CLOSING ||
5276 dst->state >= TCPS_CLOSING)
5277 pf_update_state_timeout(*stp, PFTM_TCP_CLOSING);
5278 else
5279 pf_update_state_timeout(*stp, PFTM_TCP_ESTABLISHED);
5280
5281 return (PF_PASS);
5282 }
5283
5284 static __inline int
5285 pf_synproxy(struct pf_pdesc *pd, struct pf_state **stp, u_short *reason)
5286 {
5287 struct pf_state_key *sk = (*stp)->key[pd->didx];
5288
5289 if ((*stp)->src.state == PF_TCPS_PROXY_SRC) {
5290 struct tcphdr *th = &pd->hdr.tcp;
5291
5292 if (pd->dir != (*stp)->direction) {
5293 REASON_SET(reason, PFRES_SYNPROXY);
5294 return (PF_SYNPROXY_DROP);
5295 }
5296 if (th->th_flags & TH_SYN) {
5297 if (ntohl(th->th_seq) != (*stp)->src.seqlo) {
5298 REASON_SET(reason, PFRES_SYNPROXY);
5299 return (PF_DROP);
5300 }
5301 pf_send_tcp((*stp)->rule.ptr, pd->af, pd->dst,
5302 pd->src, th->th_dport, th->th_sport,
5303 (*stp)->src.seqhi, ntohl(th->th_seq) + 1,
5304 TH_SYN|TH_ACK, 0, (*stp)->src.mss, 0, 1,
5305 0, pd->rdomain);
5306 REASON_SET(reason, PFRES_SYNPROXY);
5307 return (PF_SYNPROXY_DROP);
5308 } else if ((th->th_flags & (TH_ACK|TH_RST|TH_FIN)) != TH_ACK ||
5309 (ntohl(th->th_ack) != (*stp)->src.seqhi + 1) ||
5310 (ntohl(th->th_seq) != (*stp)->src.seqlo + 1)) {
5311 REASON_SET(reason, PFRES_SYNPROXY);
5312 return (PF_DROP);
5313 } else if (!SLIST_EMPTY(&(*stp)->src_nodes) &&
5314 pf_src_connlimit(stp)) {
5315 REASON_SET(reason, PFRES_SRCLIMIT);
5316 return (PF_DROP);
5317 } else
5318 pf_set_protostate(*stp, PF_PEER_SRC,
5319 PF_TCPS_PROXY_DST);
5320 }
5321 if ((*stp)->src.state == PF_TCPS_PROXY_DST) {
5322 struct tcphdr *th = &pd->hdr.tcp;
5323
5324 if (pd->dir == (*stp)->direction) {
5325 if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) ||
5326 (ntohl(th->th_ack) != (*stp)->src.seqhi + 1) ||
5327 (ntohl(th->th_seq) != (*stp)->src.seqlo + 1)) {
5328 REASON_SET(reason, PFRES_SYNPROXY);
5329 return (PF_DROP);
5330 }
5331 (*stp)->src.max_win = MAX(ntohs(th->th_win), 1);
5332 if ((*stp)->dst.seqhi == 1)
5333 (*stp)->dst.seqhi = arc4random();
5334 pf_send_tcp((*stp)->rule.ptr, pd->af,
5335 &sk->addr[pd->sidx], &sk->addr[pd->didx],
5336 sk->port[pd->sidx], sk->port[pd->didx],
5337 (*stp)->dst.seqhi, 0, TH_SYN, 0,
5338 (*stp)->src.mss, 0, 0, (*stp)->tag,
5339 sk->rdomain);
5340 REASON_SET(reason, PFRES_SYNPROXY);
5341 return (PF_SYNPROXY_DROP);
5342 } else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
5343 (TH_SYN|TH_ACK)) ||
5344 (ntohl(th->th_ack) != (*stp)->dst.seqhi + 1)) {
5345 REASON_SET(reason, PFRES_SYNPROXY);
5346 return (PF_DROP);
5347 } else {
5348 (*stp)->dst.max_win = MAX(ntohs(th->th_win), 1);
5349 (*stp)->dst.seqlo = ntohl(th->th_seq);
5350 pf_send_tcp((*stp)->rule.ptr, pd->af, pd->dst,
5351 pd->src, th->th_dport, th->th_sport,
5352 ntohl(th->th_ack), ntohl(th->th_seq) + 1,
5353 TH_ACK, (*stp)->src.max_win, 0, 0, 0,
5354 (*stp)->tag, pd->rdomain);
5355 pf_send_tcp((*stp)->rule.ptr, pd->af,
5356 &sk->addr[pd->sidx], &sk->addr[pd->didx],
5357 sk->port[pd->sidx], sk->port[pd->didx],
5358 (*stp)->src.seqhi + 1, (*stp)->src.seqlo + 1,
5359 TH_ACK, (*stp)->dst.max_win, 0, 0, 1,
5360 0, sk->rdomain);
5361 (*stp)->src.seqdiff = (*stp)->dst.seqhi -
5362 (*stp)->src.seqlo;
5363 (*stp)->dst.seqdiff = (*stp)->src.seqhi -
5364 (*stp)->dst.seqlo;
5365 (*stp)->src.seqhi = (*stp)->src.seqlo +
5366 (*stp)->dst.max_win;
5367 (*stp)->dst.seqhi = (*stp)->dst.seqlo +
5368 (*stp)->src.max_win;
5369 (*stp)->src.wscale = (*stp)->dst.wscale = 0;
5370 pf_set_protostate(*stp, PF_PEER_BOTH,
5371 TCPS_ESTABLISHED);
5372 REASON_SET(reason, PFRES_SYNPROXY);
5373 return (PF_SYNPROXY_DROP);
5374 }
5375 }
5376 return (PF_PASS);
5377 }
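
/*
 * Sketch of the proxied handshake above (illustrative, not part of
 * the original source):
 *	client SYN	-> pf answers SYN|ACK using src.seqhi as ISN
 *	client ACK	-> pf opens the server side with a SYN
 *			   (ISN dst.seqhi)
 *	server SYN|ACK	-> pf ACKs the server and completes the client
 *			   side, then stores the ISN deltas in src/dst
 *			   seqdiff so that later segments can be
 *			   modulated in both directions.
 */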
5378
5379 int
5380 pf_test_state(struct pf_pdesc *pd, struct pf_state **stp, u_short *reason)
5381 {
5382 int copyback = 0;
5383 struct pf_state_peer *src, *dst;
5384 int action;
5385 struct inpcb *inp = pd->m->m_pkthdr.pf.inp;
5386 u_int8_t psrc, pdst;
5387
5388 action = PF_PASS;
5389 if (pd->dir == (*stp)->direction) {
5390 src = &(*stp)->src;
5391 dst = &(*stp)->dst;
5392 psrc = PF_PEER_SRC;
5393 pdst = PF_PEER_DST;
5394 } else {
5395 src = &(*stp)->dst;
5396 dst = &(*stp)->src;
5397 psrc = PF_PEER_DST;
5398 pdst = PF_PEER_SRC;
5399 }
5400
5401 switch (pd->virtual_proto) {
5402 case IPPROTO_TCP:
5403 if ((action = pf_synproxy(pd, stp, reason)) != PF_PASS)
5404 return (action);
5405 if ((pd->hdr.tcp.th_flags & (TH_SYN|TH_ACK)) == TH_SYN) {
5406
5407 if (dst->state >= TCPS_FIN_WAIT_2 &&
5408 src->state >= TCPS_FIN_WAIT_2) {
5409 if (pf_status.debug >= LOG_NOTICE) {
5410 log(LOG_NOTICE, "pf: state reuse ");
5411 pf_print_state(*stp);
5412 pf_print_flags(pd->hdr.tcp.th_flags);
5413 addlog("\n");
5414 }
5415 /* XXX make sure it's the same direction ?? */
5416 pf_update_state_timeout(*stp, PFTM_PURGE);
5417 pf_state_unref(*stp);
5418 *stp = NULL;
5419 pf_mbuf_link_inpcb(pd->m, inp);
5420 return (PF_DROP);
5421 } else if (dst->state >= TCPS_ESTABLISHED &&
5422 src->state >= TCPS_ESTABLISHED) {
5423 /*
5424 * SYN matches existing state???
5425 * Typically happens when sender boots up after
5426 * sudden panic. Certain protocols (NFSv3)
5427 * always use the same port numbers. Challenge
5428 * ACK enables all parties (firewall and peers)
5429 * to get in sync again.
5430 */
5431 pf_send_challenge_ack(pd, *stp, src, dst);
5432 return (PF_DROP);
5433 }
5434 }
5435
5436 if ((*stp)->state_flags & PFSTATE_SLOPPY) {
5437 if (pf_tcp_track_sloppy(pd, stp, reason) == PF_DROP)
5438 return (PF_DROP);
5439 } else {
5440 if (pf_tcp_track_full(pd, stp, reason, &copyback,
5441 PF_REVERSED_KEY((*stp)->key, pd->af)) == PF_DROP)
5442 return (PF_DROP);
5443 }
5444 break;
5445 case IPPROTO_UDP:
5446 /* update states */
5447 if (src->state < PFUDPS_SINGLE)
5448 pf_set_protostate(*stp, psrc, PFUDPS_SINGLE);
5449 if (dst->state == PFUDPS_SINGLE)
5450 pf_set_protostate(*stp, pdst, PFUDPS_MULTIPLE);
5451
5452 /* update expire time */
5453 (*stp)->expire = getuptime();
5454 if (src->state == PFUDPS_MULTIPLE &&
5455 dst->state == PFUDPS_MULTIPLE)
5456 pf_update_state_timeout(*stp, PFTM_UDP_MULTIPLE);
5457 else
5458 pf_update_state_timeout(*stp, PFTM_UDP_SINGLE);
5459 break;
5460 default:
5461 /* update states */
5462 if (src->state < PFOTHERS_SINGLE)
5463 pf_set_protostate(*stp, psrc, PFOTHERS_SINGLE);
5464 if (dst->state == PFOTHERS_SINGLE)
5465 pf_set_protostate(*stp, pdst, PFOTHERS_MULTIPLE);
5466
5467 /* update expire time */
5468 (*stp)->expire = getuptime();
5469 if (src->state == PFOTHERS_MULTIPLE &&
5470 dst->state == PFOTHERS_MULTIPLE)
5471 pf_update_state_timeout(*stp, PFTM_OTHER_MULTIPLE);
5472 else
5473 pf_update_state_timeout(*stp, PFTM_OTHER_SINGLE);
5474 break;
5475 }
5476
5477 /* translate source/destination address, if necessary */
5478 if ((*stp)->key[PF_SK_WIRE] != (*stp)->key[PF_SK_STACK]) {
5479 struct pf_state_key *nk;
5480 int afto, sidx, didx;
5481
5482 if (PF_REVERSED_KEY((*stp)->key, pd->af))
5483 nk = (*stp)->key[pd->sidx];
5484 else
5485 nk = (*stp)->key[pd->didx];
5486
5487 afto = pd->af != nk->af;
5488 sidx = afto ? pd->didx : pd->sidx;
5489 didx = afto ? pd->sidx : pd->didx;
5490
5491 #ifdef INET6
5492 if (afto) {
5493 pf_addrcpy(&pd->nsaddr, &nk->addr[sidx], nk->af);
5494 pf_addrcpy(&pd->ndaddr, &nk->addr[didx], nk->af);
5495 pd->naf = nk->af;
5496 action = PF_AFRT;
5497 }
5498 #endif /* INET6 */
5499
5500 if (!afto)
5501 pf_translate_a(pd, pd->src, &nk->addr[sidx]);
5502
5503 if (pd->sport != NULL)
5504 pf_patch_16(pd, pd->sport, nk->port[sidx]);
5505
5506 if (afto || PF_ANEQ(pd->dst, &nk->addr[didx], pd->af) ||
5507 pd->rdomain != nk->rdomain)
5508 pd->destchg = 1;
5509
5510 if (!afto)
5511 pf_translate_a(pd, pd->dst, &nk->addr[didx]);
5512
5513 if (pd->dport != NULL)
5514 pf_patch_16(pd, pd->dport, nk->port[didx]);
5515
5516 pd->m->m_pkthdr.ph_rtableid = nk->rdomain;
5517 copyback = 1;
5518 }
5519
5520 if (copyback && pd->hdrlen > 0) {
5521 m_copyback(pd->m, pd->off, pd->hdrlen, &pd->hdr, M_NOWAIT);
5522 }
5523
5524 return (action);
5525 }
5526
5527 int
5528 pf_icmp_state_lookup(struct pf_pdesc *pd, struct pf_state_key_cmp *key,
5529 struct pf_state **stp, u_int16_t icmpid, u_int16_t type,
5530 int icmp_dir, int *iidx, int multi, int inner)
5531 {
5532 int direction, action;
5533
5534 key->af = pd->af;
5535 key->proto = pd->proto;
5536 key->rdomain = pd->rdomain;
5537 if (icmp_dir == PF_IN) {
5538 *iidx = pd->sidx;
5539 key->port[pd->sidx] = icmpid;
5540 key->port[pd->didx] = type;
5541 } else {
5542 *iidx = pd->didx;
5543 key->port[pd->sidx] = type;
5544 key->port[pd->didx] = icmpid;
5545 }
5546
5547 if (pf_state_key_addr_setup(pd, key, pd->sidx, pd->src, pd->didx,
5548 pd->dst, pd->af, multi))
5549 return (PF_DROP);
5550
5551 key->hash = pf_pkt_hash(key->af, key->proto,
5552 &key->addr[0], &key->addr[1], 0, 0);
5553
5554 action = pf_find_state(pd, key, stp);
5555 if (action != PF_MATCH)
5556 return (action);
5557
5558 if ((*stp)->state_flags & PFSTATE_SLOPPY)
5559 return (-1);
5560
5561 /* Is this ICMP message flowing in the right direction? */
5562 if ((*stp)->key[PF_SK_WIRE]->af != (*stp)->key[PF_SK_STACK]->af)
5563 direction = (pd->af == (*stp)->key[PF_SK_WIRE]->af) ?
5564 PF_IN : PF_OUT;
5565 else
5566 direction = (*stp)->direction;
5567 if ((((!inner && direction == pd->dir) ||
5568 (inner && direction != pd->dir)) ?
5569 PF_IN : PF_OUT) != icmp_dir) {
5570 if (pf_status.debug >= LOG_NOTICE) {
5571 log(LOG_NOTICE,
5572 "pf: icmp type %d in wrong direction (%d): ",
5573 ntohs(type), icmp_dir);
5574 pf_print_state(*stp);
5575 addlog("\n");
5576 }
5577 return (PF_DROP);
5578 }
5579 return (-1);
5580 }
5581
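/*
 * Match ICMP packets against states.  Plain query/reply messages are
 * matched directly; ICMP error messages are matched via the embedded
 * TCP/UDP/ICMP header of the packet that triggered the error.
 */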
5582 int
5583 pf_test_state_icmp(struct pf_pdesc *pd, struct pf_state **stp,
5584 u_short *reason)
5585 {
5586 u_int16_t virtual_id, virtual_type;
5587 u_int8_t icmptype, icmpcode;
5588 int icmp_dir, iidx, ret, copyback = 0;
5589
5590 struct pf_state_key_cmp key;
5591
5592 switch (pd->proto) {
5593 case IPPROTO_ICMP:
5594 icmptype = pd->hdr.icmp.icmp_type;
5595 icmpcode = pd->hdr.icmp.icmp_code;
5596 break;
5597 #ifdef INET6
5598 case IPPROTO_ICMPV6:
5599 icmptype = pd->hdr.icmp6.icmp6_type;
5600 icmpcode = pd->hdr.icmp6.icmp6_code;
5601 break;
5602 #endif /* INET6 */
5603 default:
5604 panic("unhandled proto %d", pd->proto);
5605 }
5606
5607 if (pf_icmp_mapping(pd, icmptype, &icmp_dir, &virtual_id,
5608 &virtual_type) == 0) {
5609 /*
5610 * ICMP query/reply message not related to a TCP/UDP packet.
5611 * Search for an ICMP state.
5612 */
5613 ret = pf_icmp_state_lookup(pd, &key, stp,
5614 virtual_id, virtual_type, icmp_dir, &iidx,
5615 0, 0);
5616 /* IPv6? try matching a multicast address */
5617 if (ret == PF_DROP && pd->af == AF_INET6 && icmp_dir == PF_OUT)
5618 ret = pf_icmp_state_lookup(pd, &key, stp, virtual_id,
5619 virtual_type, icmp_dir, &iidx, 1, 0);
5620 if (ret >= 0)
5621 return (ret);
5622
5623 (*stp)->expire = getuptime();
5624 pf_update_state_timeout(*stp, PFTM_ICMP_ERROR_REPLY);
5625
5626 /* translate source/destination address, if necessary */
5627 if ((*stp)->key[PF_SK_WIRE] != (*stp)->key[PF_SK_STACK]) {
5628 struct pf_state_key *nk;
5629 int afto, sidx, didx;
5630
5631 if (PF_REVERSED_KEY((*stp)->key, pd->af))
5632 nk = (*stp)->key[pd->sidx];
5633 else
5634 nk = (*stp)->key[pd->didx];
5635
5636 afto = pd->af != nk->af;
5637 sidx = afto ? pd->didx : pd->sidx;
5638 didx = afto ? pd->sidx : pd->didx;
5639 iidx = afto ? !iidx : iidx;
5640 #ifdef INET6
5641 if (afto) {
5642 pf_addrcpy(&pd->nsaddr, &nk->addr[sidx],
5643 nk->af);
5644 pf_addrcpy(&pd->ndaddr, &nk->addr[didx],
5645 nk->af);
5646 pd->naf = nk->af;
5647 }
5648 #endif /* INET6 */
5649 if (!afto) {
5650 pf_translate_a(pd, pd->src, &nk->addr[sidx]);
5651 pf_translate_a(pd, pd->dst, &nk->addr[didx]);
5652 }
5653
5654 if (pd->rdomain != nk->rdomain)
5655 pd->destchg = 1;
5656 if (!afto && PF_ANEQ(pd->dst,
5657 &nk->addr[didx], pd->af))
5658 pd->destchg = 1;
5659 pd->m->m_pkthdr.ph_rtableid = nk->rdomain;
5660
5661 switch (pd->af) {
5662 case AF_INET:
5663 #ifdef INET6
5664 if (afto) {
5665 if (pf_translate_icmp_af(pd, AF_INET6,
5666 &pd->hdr.icmp))
5667 return (PF_DROP);
5668 pd->proto = IPPROTO_ICMPV6;
5669 }
5670 #endif /* INET6 */
5671 pf_patch_16(pd,
5672 &pd->hdr.icmp.icmp_id, nk->port[iidx]);
5673
5674 m_copyback(pd->m, pd->off, ICMP_MINLEN,
5675 &pd->hdr.icmp, M_NOWAIT);
5676 copyback = 1;
5677 break;
5678 #ifdef INET6
5679 case AF_INET6:
5680 if (afto) {
5681 if (pf_translate_icmp_af(pd, AF_INET,
5682 &pd->hdr.icmp6))
5683 return (PF_DROP);
5684 pd->proto = IPPROTO_ICMP;
5685 }
5686
5687 pf_patch_16(pd,
5688 &pd->hdr.icmp6.icmp6_id, nk->port[iidx]);
5689
5690 m_copyback(pd->m, pd->off,
5691 sizeof(struct icmp6_hdr), &pd->hdr.icmp6,
5692 M_NOWAIT);
5693 copyback = 1;
5694 break;
5695 #endif /* INET6 */
5696 }
5697 #ifdef INET6
5698 if (afto)
5699 return (PF_AFRT);
5700 #endif /* INET6 */
5701 }
5702 } else {
5703 /*
5704 * ICMP error message in response to a TCP/UDP packet.
5705 * Extract the inner TCP/UDP header and search for that state.
5706 */
5707 struct pf_pdesc pd2;
5708 struct ip h2;
5709 #ifdef INET6
5710 struct ip6_hdr h2_6;
5711 #endif /* INET6 */
5712 int ipoff2;
5713
5714 /* Initialize the pd2 fields that are valid for both packets from pd. */
5715 memset(&pd2, 0, sizeof(pd2));
5716 pd2.af = pd->af;
5717 pd2.dir = pd->dir;
5718 pd2.kif = pd->kif;
5719 pd2.m = pd->m;
5720 pd2.rdomain = pd->rdomain;
5721 /* Payload packet is from the opposite direction. */
5722 pd2.sidx = (pd2.dir == PF_IN) ? 1 : 0;
5723 pd2.didx = (pd2.dir == PF_IN) ? 0 : 1;
5724 switch (pd->af) {
5725 case AF_INET:
5726 /* offset of h2 in mbuf chain */
5727 ipoff2 = pd->off + ICMP_MINLEN;
5728
5729 if (!pf_pull_hdr(pd2.m, ipoff2, &h2, sizeof(h2),
5730 reason, pd2.af)) {
5731 DPFPRINTF(LOG_NOTICE,
5732 "ICMP error message too short (ip)");
5733 return (PF_DROP);
5734 }
5735 /*
5736 * ICMP error messages don't refer to non-first
5737 * fragments
5738 */
5739 if (h2.ip_off & htons(IP_OFFMASK)) {
5740 REASON_SET(reason, PFRES_FRAG);
5741 return (PF_DROP);
5742 }
5743
5744 /* offset of protocol header that follows h2 */
5745 pd2.off = ipoff2;
5746 if (pf_walk_header(&pd2, &h2, reason) != PF_PASS)
5747 return (PF_DROP);
5748
5749 pd2.tot_len = ntohs(h2.ip_len);
5750 pd2.src = (struct pf_addr *)&h2.ip_src;
5751 pd2.dst = (struct pf_addr *)&h2.ip_dst;
5752 break;
5753 #ifdef INET6
5754 case AF_INET6:
5755 ipoff2 = pd->off + sizeof(struct icmp6_hdr);
5756
5757 if (!pf_pull_hdr(pd2.m, ipoff2, &h2_6, sizeof(h2_6),
5758 reason, pd2.af)) {
5759 DPFPRINTF(LOG_NOTICE,
5760 "ICMP error message too short (ip6)");
5761 return (PF_DROP);
5762 }
5763
5764 pd2.off = ipoff2;
5765 if (pf_walk_header6(&pd2, &h2_6, reason) != PF_PASS)
5766 return (PF_DROP);
5767
5768 pd2.tot_len = ntohs(h2_6.ip6_plen) +
5769 sizeof(struct ip6_hdr);
5770 pd2.src = (struct pf_addr *)&h2_6.ip6_src;
5771 pd2.dst = (struct pf_addr *)&h2_6.ip6_dst;
5772 break;
5773 #endif /* INET6 */
5774 default:
5775 unhandled_af(pd->af);
5776 }
5777
5778 if (PF_ANEQ(pd->dst, pd2.src, pd->af)) {
5779 if (pf_status.debug >= LOG_NOTICE) {
5780 log(LOG_NOTICE,
5781 "pf: BAD ICMP %d:%d outer dst: ",
5782 icmptype, icmpcode);
5783 pf_print_host(pd->src, 0, pd->af);
5784 addlog(" -> ");
5785 pf_print_host(pd->dst, 0, pd->af);
5786 addlog(" inner src: ");
5787 pf_print_host(pd2.src, 0, pd2.af);
5788 addlog(" -> ");
5789 pf_print_host(pd2.dst, 0, pd2.af);
5790 addlog("\n");
5791 }
5792 REASON_SET(reason, PFRES_BADSTATE);
5793 return (PF_DROP);
5794 }
5795
5796 switch (pd2.proto) {
5797 case IPPROTO_TCP: {
5798 struct tcphdr *th = &pd2.hdr.tcp;
5799 u_int32_t seq;
5800 struct pf_state_peer *src, *dst;
5801 u_int8_t dws;
5802 int action;
5803
5804 /*
5805 * Only the first 8 bytes of the TCP header are
5806 * guaranteed to be present. Don't access any field
5807 * after th_seq; an ackskew test is not possible.
5808 */
5809 if (!pf_pull_hdr(pd2.m, pd2.off, th, 8, reason,
5810 pd2.af)) {
5811 DPFPRINTF(LOG_NOTICE,
5812 "ICMP error message too short (tcp)");
5813 return (PF_DROP);
5814 }
5815
5816 key.af = pd2.af;
5817 key.proto = IPPROTO_TCP;
5818 key.rdomain = pd2.rdomain;
5819 pf_addrcpy(&key.addr[pd2.sidx], pd2.src, key.af);
5820 pf_addrcpy(&key.addr[pd2.didx], pd2.dst, key.af);
5821 key.port[pd2.sidx] = th->th_sport;
5822 key.port[pd2.didx] = th->th_dport;
5823 key.hash = pf_pkt_hash(pd2.af, pd2.proto,
5824 pd2.src, pd2.dst, th->th_sport, th->th_dport);
5825
5826 action = pf_find_state(&pd2, &key, stp);
5827 if (action != PF_MATCH)
5828 return (action);
5829
5830 if (pd2.dir == (*stp)->direction) {
5831 if (PF_REVERSED_KEY((*stp)->key, pd->af)) {
5832 src = &(*stp)->src;
5833 dst = &(*stp)->dst;
5834 } else {
5835 src = &(*stp)->dst;
5836 dst = &(*stp)->src;
5837 }
5838 } else {
5839 if (PF_REVERSED_KEY((*stp)->key, pd->af)) {
5840 src = &(*stp)->dst;
5841 dst = &(*stp)->src;
5842 } else {
5843 src = &(*stp)->src;
5844 dst = &(*stp)->dst;
5845 }
5846 }
5847
5848 if (src->wscale && dst->wscale)
5849 dws = dst->wscale & PF_WSCALE_MASK;
5850 else
5851 dws = 0;
5852
5853 /* Demodulate sequence number */
5854 seq = ntohl(th->th_seq) - src->seqdiff;
5855 if (src->seqdiff) {
5856 pf_patch_32(pd, &th->th_seq, htonl(seq));
5857 copyback = 1;
5858 }
5859
5860 if (!((*stp)->state_flags & PFSTATE_SLOPPY) &&
5861 (!SEQ_GEQ(src->seqhi, seq) || !SEQ_GEQ(seq,
5862 src->seqlo - (dst->max_win << dws)))) {
5863 if (pf_status.debug >= LOG_NOTICE) {
5864 log(LOG_NOTICE,
5865 "pf: BAD ICMP %d:%d ",
5866 icmptype, icmpcode);
5867 pf_print_host(pd->src, 0, pd->af);
5868 addlog(" -> ");
5869 pf_print_host(pd->dst, 0, pd->af);
5870 addlog(" state: ");
5871 pf_print_state(*stp);
5872 addlog(" seq=%u\n", seq);
5873 }
5874 REASON_SET(reason, PFRES_BADSTATE);
5875 return (PF_DROP);
5876 } else {
5877 if (pf_status.debug >= LOG_DEBUG) {
5878 log(LOG_DEBUG,
5879 "pf: OK ICMP %d:%d ",
5880 icmptype, icmpcode);
5881 pf_print_host(pd->src, 0, pd->af);
5882 addlog(" -> ");
5883 pf_print_host(pd->dst, 0, pd->af);
5884 addlog(" state: ");
5885 pf_print_state(*stp);
5886 addlog(" seq=%u\n", seq);
5887 }
5888 }
5889
5890 /* translate source/destination address, if necessary */
5891 if ((*stp)->key[PF_SK_WIRE] !=
5892 (*stp)->key[PF_SK_STACK]) {
5893 struct pf_state_key *nk;
5894 int afto, sidx, didx;
5895
5896 if (PF_REVERSED_KEY((*stp)->key, pd->af))
5897 nk = (*stp)->key[pd->sidx];
5898 else
5899 nk = (*stp)->key[pd->didx];
5900
5901 afto = pd->af != nk->af;
5902 sidx = afto ? pd2.didx : pd2.sidx;
5903 didx = afto ? pd2.sidx : pd2.didx;
5904
5905 #ifdef INET6
5906 if (afto) {
5907 if (pf_translate_icmp_af(pd, nk->af,
5908 &pd->hdr.icmp))
5909 return (PF_DROP);
5910 m_copyback(pd->m, pd->off,
5911 sizeof(struct icmp6_hdr),
5912 &pd->hdr.icmp6, M_NOWAIT);
5913 if (pf_change_icmp_af(pd->m, ipoff2,
5914 pd, &pd2, &nk->addr[sidx],
5915 &nk->addr[didx], pd->af, nk->af))
5916 return (PF_DROP);
5917 if (nk->af == AF_INET)
5918 pd->proto = IPPROTO_ICMP;
5919 else
5920 pd->proto = IPPROTO_ICMPV6;
5921 pd->m->m_pkthdr.ph_rtableid =
5922 nk->rdomain;
5923 pd->destchg = 1;
5924 pf_addrcpy(&pd->nsaddr,
5925 &nk->addr[pd2.sidx], nk->af);
5926 pf_addrcpy(&pd->ndaddr,
5927 &nk->addr[pd2.didx], nk->af);
5928 pd->naf = nk->af;
5929
5930 pf_patch_16(pd,
5931 &th->th_sport, nk->port[sidx]);
5932 pf_patch_16(pd,
5933 &th->th_dport, nk->port[didx]);
5934
5935 m_copyback(pd2.m, pd2.off, 8, th,
5936 M_NOWAIT);
5937 return (PF_AFRT);
5938 }
5939 #endif /* INET6 */
5940 if (PF_ANEQ(pd2.src,
5941 &nk->addr[pd2.sidx], pd2.af) ||
5942 nk->port[pd2.sidx] != th->th_sport)
5943 pf_translate_icmp(pd, pd2.src,
5944 &th->th_sport, pd->dst,
5945 &nk->addr[pd2.sidx],
5946 nk->port[pd2.sidx]);
5947
5948 if (PF_ANEQ(pd2.dst, &nk->addr[pd2.didx],
5949 pd2.af) || pd2.rdomain != nk->rdomain)
5950 pd->destchg = 1;
5951 pd->m->m_pkthdr.ph_rtableid = nk->rdomain;
5952
5953 if (PF_ANEQ(pd2.dst,
5954 &nk->addr[pd2.didx], pd2.af) ||
5955 nk->port[pd2.didx] != th->th_dport)
5956 pf_translate_icmp(pd, pd2.dst,
5957 &th->th_dport, pd->src,
5958 &nk->addr[pd2.didx],
5959 nk->port[pd2.didx]);
5960 copyback = 1;
5961 }
5962
5963 if (copyback) {
5964 switch (pd2.af) {
5965 case AF_INET:
5966 m_copyback(pd->m, pd->off, ICMP_MINLEN,
5967 &pd->hdr.icmp, M_NOWAIT);
5968 m_copyback(pd2.m, ipoff2, sizeof(h2),
5969 &h2, M_NOWAIT);
5970 break;
5971 #ifdef INET6
5972 case AF_INET6:
5973 m_copyback(pd->m, pd->off,
5974 sizeof(struct icmp6_hdr),
5975 &pd->hdr.icmp6, M_NOWAIT);
5976 m_copyback(pd2.m, ipoff2, sizeof(h2_6),
5977 &h2_6, M_NOWAIT);
5978 break;
5979 #endif /* INET6 */
5980 }
5981 m_copyback(pd2.m, pd2.off, 8, th, M_NOWAIT);
5982 }
5983 break;
5984 }
5985 case IPPROTO_UDP: {
5986 struct udphdr *uh = &pd2.hdr.udp;
5987 int action;
5988
5989 if (!pf_pull_hdr(pd2.m, pd2.off, uh, sizeof(*uh),
5990 reason, pd2.af)) {
5991 DPFPRINTF(LOG_NOTICE,
5992 "ICMP error message too short (udp)");
5993 return (PF_DROP);
5994 }
5995
5996 key.af = pd2.af;
5997 key.proto = IPPROTO_UDP;
5998 key.rdomain = pd2.rdomain;
5999 pf_addrcpy(&key.addr[pd2.sidx], pd2.src, key.af);
6000 pf_addrcpy(&key.addr[pd2.didx], pd2.dst, key.af);
6001 key.port[pd2.sidx] = uh->uh_sport;
6002 key.port[pd2.didx] = uh->uh_dport;
6003 key.hash = pf_pkt_hash(pd2.af, pd2.proto,
6004 pd2.src, pd2.dst, uh->uh_sport, uh->uh_dport);
6005
6006 action = pf_find_state(&pd2, &key, stp);
6007 if (action != PF_MATCH)
6008 return (action);
6009
6010 /* translate source/destination address, if necessary */
6011 if ((*stp)->key[PF_SK_WIRE] !=
6012 (*stp)->key[PF_SK_STACK]) {
6013 struct pf_state_key *nk;
6014 int afto, sidx, didx;
6015
6016 if (PF_REVERSED_KEY((*stp)->key, pd->af))
6017 nk = (*stp)->key[pd->sidx];
6018 else
6019 nk = (*stp)->key[pd->didx];
6020
6021 afto = pd->af != nk->af;
6022 sidx = afto ? pd2.didx : pd2.sidx;
6023 didx = afto ? pd2.sidx : pd2.didx;
6024
6025 #ifdef INET6
6026 if (afto) {
6027 if (pf_translate_icmp_af(pd, nk->af,
6028 &pd->hdr.icmp))
6029 return (PF_DROP);
6030 m_copyback(pd->m, pd->off,
6031 sizeof(struct icmp6_hdr),
6032 &pd->hdr.icmp6, M_NOWAIT);
6033 if (pf_change_icmp_af(pd->m, ipoff2,
6034 pd, &pd2, &nk->addr[sidx],
6035 &nk->addr[didx], pd->af, nk->af))
6036 return (PF_DROP);
6037 if (nk->af == AF_INET)
6038 pd->proto = IPPROTO_ICMP;
6039 else
6040 pd->proto = IPPROTO_ICMPV6;
6041 pd->m->m_pkthdr.ph_rtableid =
6042 nk->rdomain;
6043 pd->destchg = 1;
6044 pf_addrcpy(&pd->nsaddr,
6045 &nk->addr[pd2.sidx], nk->af);
6046 pf_addrcpy(&pd->ndaddr,
6047 &nk->addr[pd2.didx], nk->af);
6048 pd->naf = nk->af;
6049
6050 pf_patch_16(pd,
6051 &uh->uh_sport, nk->port[sidx]);
6052 pf_patch_16(pd,
6053 &uh->uh_dport, nk->port[didx]);
6054
6055 m_copyback(pd2.m, pd2.off, sizeof(*uh),
6056 uh, M_NOWAIT);
6057 return (PF_AFRT);
6058 }
6059 #endif /* INET6 */
6060
6061 if (PF_ANEQ(pd2.src,
6062 &nk->addr[pd2.sidx], pd2.af) ||
6063 nk->port[pd2.sidx] != uh->uh_sport)
6064 pf_translate_icmp(pd, pd2.src,
6065 &uh->uh_sport, pd->dst,
6066 &nk->addr[pd2.sidx],
6067 nk->port[pd2.sidx]);
6068
6069 if (PF_ANEQ(pd2.dst, &nk->addr[pd2.didx],
6070 pd2.af) || pd2.rdomain != nk->rdomain)
6071 pd->destchg = 1;
6072 pd->m->m_pkthdr.ph_rtableid = nk->rdomain;
6073
6074 if (PF_ANEQ(pd2.dst,
6075 &nk->addr[pd2.didx], pd2.af) ||
6076 nk->port[pd2.didx] != uh->uh_dport)
6077 pf_translate_icmp(pd, pd2.dst,
6078 &uh->uh_dport, pd->src,
6079 &nk->addr[pd2.didx],
6080 nk->port[pd2.didx]);
6081
6082 switch (pd2.af) {
6083 case AF_INET:
6084 m_copyback(pd->m, pd->off, ICMP_MINLEN,
6085 &pd->hdr.icmp, M_NOWAIT);
6086 m_copyback(pd2.m, ipoff2, sizeof(h2),
6087 &h2, M_NOWAIT);
6088 break;
6089 #ifdef INET6
6090 case AF_INET6:
6091 m_copyback(pd->m, pd->off,
6092 sizeof(struct icmp6_hdr),
6093 &pd->hdr.icmp6, M_NOWAIT);
6094 m_copyback(pd2.m, ipoff2, sizeof(h2_6),
6095 &h2_6, M_NOWAIT);
6096 break;
6097 #endif /* INET6 */
6098 }
6099 /* Avoid recomputing the quoted UDP checksum.
6100 * Note: a zero udp6 checksum is invalid per RFC 2460 p. 27,
6101 * but presumably nothing cares in this context. */
6102 pf_patch_16(pd, &uh->uh_sum, 0);
6103 m_copyback(pd2.m, pd2.off, sizeof(*uh), uh,
6104 M_NOWAIT);
6105 copyback = 1;
6106 }
6107 break;
6108 }
6109 case IPPROTO_ICMP: {
6110 struct icmp *iih = &pd2.hdr.icmp;
6111
6112 if (pd2.af != AF_INET) {
6113 REASON_SET(reason, PFRES_NORM);
6114 return (PF_DROP);
6115 }
6116
6117 if (!pf_pull_hdr(pd2.m, pd2.off, iih, ICMP_MINLEN,
6118 reason, pd2.af)) {
6119 DPFPRINTF(LOG_NOTICE,
6120 "ICMP error message too short (icmp)");
6121 return (PF_DROP);
6122 }
6123
6124 pf_icmp_mapping(&pd2, iih->icmp_type,
6125 &icmp_dir, &virtual_id, &virtual_type);
6126
6127 ret = pf_icmp_state_lookup(&pd2, &key, stp,
6128 virtual_id, virtual_type, icmp_dir, &iidx, 0, 1);
6129 if (ret >= 0)
6130 return (ret);
6131
6132 /* translate source/destination address, if necessary */
6133 if ((*stp)->key[PF_SK_WIRE] !=
6134 (*stp)->key[PF_SK_STACK]) {
6135 struct pf_state_key *nk;
6136 int afto, sidx, didx;
6137
6138 if (PF_REVERSED_KEY((*stp)->key, pd->af))
6139 nk = (*stp)->key[pd->sidx];
6140 else
6141 nk = (*stp)->key[pd->didx];
6142
6143 afto = pd->af != nk->af;
6144 sidx = afto ? pd2.didx : pd2.sidx;
6145 didx = afto ? pd2.sidx : pd2.didx;
6146 iidx = afto ? !iidx : iidx;
6147
6148 #ifdef INET6
6149 if (afto) {
6150 if (nk->af != AF_INET6)
6151 return (PF_DROP);
6152 if (pf_translate_icmp_af(pd, nk->af,
6153 &pd->hdr.icmp))
6154 return (PF_DROP);
6155 m_copyback(pd->m, pd->off,
6156 sizeof(struct icmp6_hdr),
6157 &pd->hdr.icmp6, M_NOWAIT);
6158 if (pf_change_icmp_af(pd->m, ipoff2,
6159 pd, &pd2, &nk->addr[sidx],
6160 &nk->addr[didx], pd->af, nk->af))
6161 return (PF_DROP);
6162 pd->proto = IPPROTO_ICMPV6;
6163 if (pf_translate_icmp_af(pd,
6164 nk->af, iih))
6165 return (PF_DROP);
6166 if (virtual_type == htons(ICMP_ECHO))
6167 pf_patch_16(pd, &iih->icmp_id,
6168 nk->port[iidx]);
6169 m_copyback(pd2.m, pd2.off, ICMP_MINLEN,
6170 iih, M_NOWAIT);
6171 pd->m->m_pkthdr.ph_rtableid =
6172 nk->rdomain;
6173 pd->destchg = 1;
6174 pf_addrcpy(&pd->nsaddr,
6175 &nk->addr[pd2.sidx], nk->af);
6176 pf_addrcpy(&pd->ndaddr,
6177 &nk->addr[pd2.didx], nk->af);
6178 pd->naf = nk->af;
6179 return (PF_AFRT);
6180 }
6181 #endif /* INET6 */
6182
6183 if (PF_ANEQ(pd2.src,
6184 &nk->addr[pd2.sidx], pd2.af) ||
6185 (virtual_type == htons(ICMP_ECHO) &&
6186 nk->port[iidx] != iih->icmp_id))
6187 pf_translate_icmp(pd, pd2.src,
6188 (virtual_type == htons(ICMP_ECHO)) ?
6189 &iih->icmp_id : NULL,
6190 pd->dst, &nk->addr[pd2.sidx],
6191 (virtual_type == htons(ICMP_ECHO)) ?
6192 nk->port[iidx] : 0);
6193
6194 if (PF_ANEQ(pd2.dst, &nk->addr[pd2.didx],
6195 pd2.af) || pd2.rdomain != nk->rdomain)
6196 pd->destchg = 1;
6197 pd->m->m_pkthdr.ph_rtableid = nk->rdomain;
6198
6199 if (PF_ANEQ(pd2.dst,
6200 &nk->addr[pd2.didx], pd2.af))
6201 pf_translate_icmp(pd, pd2.dst, NULL,
6202 pd->src, &nk->addr[pd2.didx], 0);
6203
6204 m_copyback(pd->m, pd->off, ICMP_MINLEN,
6205 &pd->hdr.icmp, M_NOWAIT);
6206 m_copyback(pd2.m, ipoff2, sizeof(h2), &h2,
6207 M_NOWAIT);
6208 m_copyback(pd2.m, pd2.off, ICMP_MINLEN, iih,
6209 M_NOWAIT);
6210 copyback = 1;
6211 }
6212 break;
6213 }
6214 #ifdef INET6
6215 case IPPROTO_ICMPV6: {
6216 struct icmp6_hdr *iih = &pd2.hdr.icmp6;
6217
6218 if (pd2.af != AF_INET6) {
6219 REASON_SET(reason, PFRES_NORM);
6220 return (PF_DROP);
6221 }
6222
6223 if (!pf_pull_hdr(pd2.m, pd2.off, iih,
6224 sizeof(struct icmp6_hdr), reason, pd2.af)) {
6225 DPFPRINTF(LOG_NOTICE,
6226 "ICMP error message too short (icmp6)");
6227 return (PF_DROP);
6228 }
6229
6230 pf_icmp_mapping(&pd2, iih->icmp6_type,
6231 &icmp_dir, &virtual_id, &virtual_type);
6232 ret = pf_icmp_state_lookup(&pd2, &key, stp,
6233 virtual_id, virtual_type, icmp_dir, &iidx, 0, 1);
6234 /* IPv6? try matching a multicast address */
6235 if (ret == PF_DROP && pd2.af == AF_INET6 &&
6236 icmp_dir == PF_OUT)
6237 ret = pf_icmp_state_lookup(&pd2, &key, stp,
6238 virtual_id, virtual_type, icmp_dir, &iidx,
6239 1, 1);
6240 if (ret >= 0)
6241 return (ret);
6242
6243 /* translate source/destination address, if necessary */
6244 if ((*stp)->key[PF_SK_WIRE] !=
6245 (*stp)->key[PF_SK_STACK]) {
6246 struct pf_state_key *nk;
6247 int afto, sidx, didx;
6248
6249 if (PF_REVERSED_KEY((*stp)->key, pd->af))
6250 nk = (*stp)->key[pd->sidx];
6251 else
6252 nk = (*stp)->key[pd->didx];
6253
6254 afto = pd->af != nk->af;
6255 sidx = afto ? pd2.didx : pd2.sidx;
6256 didx = afto ? pd2.sidx : pd2.didx;
6257 iidx = afto ? !iidx : iidx;
6258
6259 if (afto) {
6260 if (nk->af != AF_INET)
6261 return (PF_DROP);
6262 if (pf_translate_icmp_af(pd, nk->af,
6263 &pd->hdr.icmp))
6264 return (PF_DROP);
6265 m_copyback(pd->m, pd->off,
6266 sizeof(struct icmp6_hdr),
6267 &pd->hdr.icmp6, M_NOWAIT);
6268 if (pf_change_icmp_af(pd->m, ipoff2,
6269 pd, &pd2, &nk->addr[sidx],
6270 &nk->addr[didx], pd->af, nk->af))
6271 return (PF_DROP);
6272 pd->proto = IPPROTO_ICMP;
6273 if (pf_translate_icmp_af(pd,
6274 nk->af, iih))
6275 return (PF_DROP);
6276 if (virtual_type ==
6277 htons(ICMP6_ECHO_REQUEST))
6278 pf_patch_16(pd, &iih->icmp6_id,
6279 nk->port[iidx]);
6280 m_copyback(pd2.m, pd2.off,
6281 sizeof(struct icmp6_hdr), iih,
6282 M_NOWAIT);
6283 pd->m->m_pkthdr.ph_rtableid =
6284 nk->rdomain;
6285 pd->destchg = 1;
6286 pf_addrcpy(&pd->nsaddr,
6287 &nk->addr[pd2.sidx], nk->af);
6288 pf_addrcpy(&pd->ndaddr,
6289 &nk->addr[pd2.didx], nk->af);
6290 pd->naf = nk->af;
6291 return (PF_AFRT);
6292 }
6293
6294 if (PF_ANEQ(pd2.src,
6295 &nk->addr[pd2.sidx], pd2.af) ||
6296 ((virtual_type ==
6297 htons(ICMP6_ECHO_REQUEST)) &&
6298 nk->port[pd2.sidx] != iih->icmp6_id))
6299 pf_translate_icmp(pd, pd2.src,
6300 (virtual_type ==
6301 htons(ICMP6_ECHO_REQUEST))
6302 ? &iih->icmp6_id : NULL,
6303 pd->dst, &nk->addr[pd2.sidx],
6304 (virtual_type ==
6305 htons(ICMP6_ECHO_REQUEST))
6306 ? nk->port[iidx] : 0);
6307
6308 if (PF_ANEQ(pd2.dst, &nk->addr[pd2.didx],
6309 pd2.af) || pd2.rdomain != nk->rdomain)
6310 pd->destchg = 1;
6311 pd->m->m_pkthdr.ph_rtableid = nk->rdomain;
6312
6313 if (PF_ANEQ(pd2.dst,
6314 &nk->addr[pd2.didx], pd2.af))
6315 pf_translate_icmp(pd, pd2.dst, NULL,
6316 pd->src, &nk->addr[pd2.didx], 0);
6317
6318 m_copyback(pd->m, pd->off,
6319 sizeof(struct icmp6_hdr), &pd->hdr.icmp6,
6320 M_NOWAIT);
6321 m_copyback(pd2.m, ipoff2, sizeof(h2_6), &h2_6,
6322 M_NOWAIT);
6323 m_copyback(pd2.m, pd2.off,
6324 sizeof(struct icmp6_hdr), iih, M_NOWAIT);
6325 copyback = 1;
6326 }
6327 break;
6328 }
6329 #endif /* INET6 */
6330 default: {
6331 int action;
6332
6333 key.af = pd2.af;
6334 key.proto = pd2.proto;
6335 key.rdomain = pd2.rdomain;
6336 pf_addrcpy(&key.addr[pd2.sidx], pd2.src, key.af);
6337 pf_addrcpy(&key.addr[pd2.didx], pd2.dst, key.af);
6338 key.port[0] = key.port[1] = 0;
6339 key.hash = pf_pkt_hash(pd2.af, pd2.proto,
6340 pd2.src, pd2.dst, 0, 0);
6341
6342 action = pf_find_state(&pd2, &key, stp);
6343 if (action != PF_MATCH)
6344 return (action);
6345
6346 /* translate source/destination address, if necessary */
6347 if ((*stp)->key[PF_SK_WIRE] !=
6348 (*stp)->key[PF_SK_STACK]) {
6349 struct pf_state_key *nk =
6350 (*stp)->key[pd->didx];
6351
6352 if (PF_ANEQ(pd2.src,
6353 &nk->addr[pd2.sidx], pd2.af))
6354 pf_translate_icmp(pd, pd2.src, NULL,
6355 pd->dst, &nk->addr[pd2.sidx], 0);
6356
6357 if (PF_ANEQ(pd2.dst, &nk->addr[pd2.didx],
6358 pd2.af) || pd2.rdomain != nk->rdomain)
6359 pd->destchg = 1;
6360 pd->m->m_pkthdr.ph_rtableid = nk->rdomain;
6361
6362 if (PF_ANEQ(pd2.dst,
6363 &nk->addr[pd2.didx], pd2.af))
6364 pf_translate_icmp(pd, pd2.dst, NULL,
6365 pd->src, &nk->addr[pd2.didx], 0);
6366
6367 switch (pd2.af) {
6368 case AF_INET:
6369 m_copyback(pd->m, pd->off, ICMP_MINLEN,
6370 &pd->hdr.icmp, M_NOWAIT);
6371 m_copyback(pd2.m, ipoff2, sizeof(h2),
6372 &h2, M_NOWAIT);
6373 break;
6374 #ifdef INET6
6375 case AF_INET6:
6376 m_copyback(pd->m, pd->off,
6377 sizeof(struct icmp6_hdr),
6378 &pd->hdr.icmp6, M_NOWAIT);
6379 m_copyback(pd2.m, ipoff2, sizeof(h2_6),
6380 &h2_6, M_NOWAIT);
6381 break;
6382 #endif /* INET6 */
6383 }
6384 copyback = 1;
6385 }
6386 break;
6387 }
6388 }
6389 }
6390 if (copyback) {
6391 m_copyback(pd->m, pd->off, pd->hdrlen, &pd->hdr, M_NOWAIT);
6392 }
6393
6394 return (PF_PASS);
6395 }
6396
6397 /*
6398 * ipoff and off are measured from the start of the mbuf chain.
6399 * h must be at "ipoff" on the mbuf chain.
6400 */
6401 void *
6402 pf_pull_hdr(struct mbuf *m, int off, void *p, int len,
6403 u_short *reasonp, sa_family_t af)
6404 {
6405 int iplen = 0;
6406
6407 switch (af) {
6408 case AF_INET: {
6409 struct ip *h = mtod(m, struct ip *);
6410 u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
6411
6412 if (fragoff) {
6413 REASON_SET(reasonp, PFRES_FRAG);
6414 return (NULL);
6415 }
6416 iplen = ntohs(h->ip_len);
6417 break;
6418 }
6419 #ifdef INET6
6420 case AF_INET6: {
6421 struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
6422
6423 iplen = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
6424 break;
6425 }
6426 #endif /* INET6 */
6427 }
6428 if (m->m_pkthdr.len < off + len || iplen < off + len) {
6429 REASON_SET(reasonp, PFRES_SHORT);
6430 return (NULL);
6431 }
6432 m_copydata(m, off, len, p);
6433 return (p);
6434 }
6435
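/*
 * Check whether addr is routable in rtableid; returns 1 if it is.
 * If kif is given, additionally perform a uRPF check: the route (or,
 * with multipath, one of the routes) must point back at the input
 * interface or its carp parent.
 */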
6436 int
6437 pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kif *kif,
6438 int rtableid)
6439 {
6440 struct sockaddr_storage ss;
6441 struct sockaddr_in *dst;
6442 int ret = 1;
6443 int check_mpath;
6444 #ifdef INET6
6445 struct sockaddr_in6 *dst6;
6446 #endif /* INET6 */
6447 struct rtentry *rt = NULL;
6448
6449 check_mpath = 0;
6450 memset(&ss, 0, sizeof(ss));
6451 switch (af) {
6452 case AF_INET:
6453 dst = (struct sockaddr_in *)&ss;
6454 dst->sin_family = AF_INET;
6455 dst->sin_len = sizeof(*dst);
6456 dst->sin_addr = addr->v4;
6457 if (ipmultipath)
6458 check_mpath = 1;
6459 break;
6460 #ifdef INET6
6461 case AF_INET6:
6462 /*
6463 * Skip check for addresses with embedded interface scope,
6464 * as they would always match anyway.
6465 */
6466 if (IN6_IS_SCOPE_EMBED(&addr->v6))
6467 goto out;
6468 dst6 = (struct sockaddr_in6 *)&ss;
6469 dst6->sin6_family = AF_INET6;
6470 dst6->sin6_len = sizeof(*dst6);
6471 dst6->sin6_addr = addr->v6;
6472 if (ip6_multipath)
6473 check_mpath = 1;
6474 break;
6475 #endif /* INET6 */
6476 }
6477
6478 /* Skip checks for ipsec interfaces */
6479 if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
6480 goto out;
6481
6482 rt = rtalloc(sstosa(&ss), 0, rtableid);
6483 if (rt != NULL) {
6484 /* No interface given; this is a no-route check */
6485 if (kif == NULL)
6486 goto out;
6487
6488 if (kif->pfik_ifp == NULL) {
6489 ret = 0;
6490 goto out;
6491 }
6492
6493 /* Perform uRPF check if passed input interface */
6494 ret = 0;
6495 do {
6496 if (rt->rt_ifidx == kif->pfik_ifp->if_index) {
6497 ret = 1;
6498 #if NCARP > 0
6499 } else {
6500 struct ifnet *ifp;
6501
6502 ifp = if_get(rt->rt_ifidx);
6503 if (ifp != NULL && ifp->if_type == IFT_CARP &&
6504 ifp->if_carpdevidx ==
6505 kif->pfik_ifp->if_index)
6506 ret = 1;
6507 if_put(ifp);
6508 #endif /* NCARP */
6509 }
6510
6511 rt = rtable_iterate(rt);
6512 } while (check_mpath == 1 && rt != NULL && ret == 0);
6513 } else
6514 ret = 0;
6515 out:
6516 rtfree(rt);
6517 return (ret);
6518 }
6519
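/*
 * Return 1 if the route to addr in rtableid carries the route label
 * configured in aw, 0 otherwise.
 */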
6520 int
6521 pf_rtlabel_match(struct pf_addr *addr, sa_family_t af, struct pf_addr_wrap *aw,
6522 int rtableid)
6523 {
6524 struct sockaddr_storage ss;
6525 struct sockaddr_in *dst;
6526 #ifdef INET6
6527 struct sockaddr_in6 *dst6;
6528 #endif /* INET6 */
6529 struct rtentry *rt;
6530 int ret = 0;
6531
6532 memset(&ss, 0, sizeof(ss));
6533 switch (af) {
6534 case AF_INET:
6535 dst = (struct sockaddr_in *)&ss;
6536 dst->sin_family = AF_INET;
6537 dst->sin_len = sizeof(*dst);
6538 dst->sin_addr = addr->v4;
6539 break;
6540 #ifdef INET6
6541 case AF_INET6:
6542 dst6 = (struct sockaddr_in6 *)&ss;
6543 dst6->sin6_family = AF_INET6;
6544 dst6->sin6_len = sizeof(*dst6);
6545 dst6->sin6_addr = addr->v6;
6546 break;
6547 #endif /* INET6 */
6548 }
6549
6550 rt = rtalloc(sstosa(&ss), RT_RESOLVE, rtableid);
6551 if (rt != NULL) {
6552 if (rt->rt_labelid == aw->v.rtlabel)
6553 ret = 1;
6554 rtfree(rt);
6555 }
6556
6557 return (ret);
6558 }
6559
6560 /* pf_route() may change pd->m, adjust local copies after calling */
6561 void
6562 pf_route(struct pf_pdesc *pd, struct pf_state *st)
6563 {
6564 struct mbuf *m0;
6565 struct mbuf_list ml;
6566 struct sockaddr_in *dst, sin;
6567 struct rtentry *rt = NULL;
6568 struct ip *ip;
6569 struct ifnet *ifp = NULL;
6570 unsigned int rtableid;
6571
6572 if (pd->m->m_pkthdr.pf.routed++ > 3) {
6573 m_freem(pd->m);
6574 pd->m = NULL;
6575 return;
6576 }
6577
6578 if (st->rt == PF_DUPTO) {
6579 if ((m0 = m_dup_pkt(pd->m, max_linkhdr, M_NOWAIT)) == NULL)
6580 return;
6581 } else {
6582 if ((st->rt == PF_REPLYTO) == (st->direction == pd->dir))
6583 return;
6584 m0 = pd->m;
6585 pd->m = NULL;
6586 }
6587
6588 if (m0->m_len < sizeof(struct ip)) {
6589 DPFPRINTF(LOG_ERR,
6590 "%s: m0->m_len < sizeof(struct ip)", __func__);
6591 goto bad;
6592 }
6593
6594 ip = mtod(m0, struct ip *);
6595
6596 if (pd->dir == PF_IN) {
6597 if (ip->ip_ttl <= IPTTLDEC) {
6598 if (st->rt != PF_DUPTO) {
6599 pf_send_icmp(m0, ICMP_TIMXCEED,
6600 ICMP_TIMXCEED_INTRANS, 0,
6601 pd->af, st->rule.ptr, pd->rdomain);
6602 }
6603 goto bad;
6604 }
6605 ip->ip_ttl -= IPTTLDEC;
6606 }
6607
6608 memset(&sin, 0, sizeof(sin));
6609 dst = &sin;
6610 dst->sin_family = AF_INET;
6611 dst->sin_len = sizeof(*dst);
6612 dst->sin_addr = st->rt_addr.v4;
6613 rtableid = m0->m_pkthdr.ph_rtableid;
6614
6615 rt = rtalloc_mpath(sintosa(dst), &ip->ip_src.s_addr, rtableid);
6616 if (!rtisvalid(rt)) {
6617 if (st->rt != PF_DUPTO) {
6618 pf_send_icmp(m0, ICMP_UNREACH, ICMP_UNREACH_HOST,
6619 0, pd->af, st->rule.ptr, pd->rdomain);
6620 }
6621 ipstat_inc(ips_noroute);
6622 goto bad;
6623 }
6624
6625 ifp = if_get(rt->rt_ifidx);
6626 if (ifp == NULL)
6627 goto bad;
6628
6629 /* A locally generated packet may have an invalid source address. */
6630 if ((ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET &&
6631 (ifp->if_flags & IFF_LOOPBACK) == 0)
6632 ip->ip_src = ifatoia(rt->rt_ifa)->ia_addr.sin_addr;
6633
6634 if (st->rt != PF_DUPTO && pd->dir == PF_IN) {
6635 if (pf_test(AF_INET, PF_OUT, ifp, &m0) != PF_PASS)
6636 goto bad;
6637 else if (m0 == NULL)
6638 goto done;
6639 if (m0->m_len < sizeof(struct ip)) {
6640 DPFPRINTF(LOG_ERR,
6641 "%s: m0->m_len < sizeof(struct ip)", __func__);
6642 goto bad;
6643 }
6644 ip = mtod(m0, struct ip *);
6645 }
6646
6647 if (if_output_tso(ifp, &m0, sintosa(dst), rt, ifp->if_mtu) ||
6648 m0 == NULL)
6649 goto done;
6650
6651 /*
6652 * Too large for interface; fragment if possible.
6653 * Must be able to put at least 8 bytes per fragment.
6654 */
6655 if (ip->ip_off & htons(IP_DF)) {
6656 ipstat_inc(ips_cantfrag);
6657 if (st->rt != PF_DUPTO)
6658 pf_send_icmp(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG,
6659 ifp->if_mtu, pd->af, st->rule.ptr, pd->rdomain);
6660 goto bad;
6661 }
6662
6663 if (ip_fragment(m0, &ml, ifp, ifp->if_mtu) ||
6664 if_output_ml(ifp, &ml, sintosa(dst), rt))
6665 goto done;
6666 ipstat_inc(ips_fragmented);
6667
6668 done:
6669 if_put(ifp);
6670 rtfree(rt);
6671 return;
6672
6673 bad:
6674 m_freem(m0);
6675 goto done;
6676 }
6677
6678 #ifdef INET6
6679 /* pf_route6() may change pd->m, adjust local copies after calling */
6680 void
6681 pf_route6(struct pf_pdesc *pd, struct pf_state *st)
6682 {
6683 struct mbuf *m0;
6684 struct sockaddr_in6 *dst, sin6;
6685 struct rtentry *rt = NULL;
6686 struct ip6_hdr *ip6;
6687 struct ifnet *ifp = NULL;
6688 struct m_tag *mtag;
6689 unsigned int rtableid;
6690
6691 if (pd->m->m_pkthdr.pf.routed++ > 3) {
6692 m_freem(pd->m);
6693 pd->m = NULL;
6694 return;
6695 }
6696
6697 if (st->rt == PF_DUPTO) {
6698 if ((m0 = m_dup_pkt(pd->m, max_linkhdr, M_NOWAIT)) == NULL)
6699 return;
6700 } else {
6701 if ((st->rt == PF_REPLYTO) == (st->direction == pd->dir))
6702 return;
6703 m0 = pd->m;
6704 pd->m = NULL;
6705 }
6706
6707 if (m0->m_len < sizeof(struct ip6_hdr)) {
6708 DPFPRINTF(LOG_ERR,
6709 "%s: m0->m_len < sizeof(struct ip6_hdr)", __func__);
6710 goto bad;
6711 }
6712 ip6 = mtod(m0, struct ip6_hdr *);
6713
6714 if (pd->dir == PF_IN) {
6715 if (ip6->ip6_hlim <= IPV6_HLIMDEC) {
6716 if (st->rt != PF_DUPTO) {
6717 pf_send_icmp(m0, ICMP6_TIME_EXCEEDED,
6718 ICMP6_TIME_EXCEED_TRANSIT, 0,
6719 pd->af, st->rule.ptr, pd->rdomain);
6720 }
6721 goto bad;
6722 }
6723 ip6->ip6_hlim -= IPV6_HLIMDEC;
6724 }
6725
6726 memset(&sin6, 0, sizeof(sin6));
6727 dst = &sin6;
6728 dst->sin6_family = AF_INET6;
6729 dst->sin6_len = sizeof(*dst);
6730 dst->sin6_addr = st->rt_addr.v6;
6731 rtableid = m0->m_pkthdr.ph_rtableid;
6732
6733 rt = rtalloc_mpath(sin6tosa(dst), &ip6->ip6_src.s6_addr32[0],
6734 rtableid);
6735 if (!rtisvalid(rt)) {
6736 if (st->rt != PF_DUPTO) {
6737 pf_send_icmp(m0, ICMP6_DST_UNREACH,
6738 ICMP6_DST_UNREACH_NOROUTE, 0,
6739 pd->af, st->rule.ptr, pd->rdomain);
6740 }
6741 ip6stat_inc(ip6s_noroute);
6742 goto bad;
6743 }
6744
6745 ifp = if_get(rt->rt_ifidx);
6746 if (ifp == NULL)
6747 goto bad;
6748
6749 /* A locally generated packet may have an invalid source address. */
6750 if (IN6_IS_ADDR_LOOPBACK(&ip6->ip6_src) &&
6751 (ifp->if_flags & IFF_LOOPBACK) == 0)
6752 ip6->ip6_src = ifatoia6(rt->rt_ifa)->ia_addr.sin6_addr;
6753
6754 if (st->rt != PF_DUPTO && pd->dir == PF_IN) {
6755 if (pf_test(AF_INET6, PF_OUT, ifp, &m0) != PF_PASS)
6756 goto bad;
6757 else if (m0 == NULL)
6758 goto done;
6759 if (m0->m_len < sizeof(struct ip6_hdr)) {
6760 DPFPRINTF(LOG_ERR,
6761 "%s: m0->m_len < sizeof(struct ip6_hdr)", __func__);
6762 goto bad;
6763 }
6764 }
6765
6766 /*
6767 * If the packet has been reassembled by PF earlier, we have to
6768 * use pf_refragment6() here to turn it back into fragments.
6769 */
6770 if ((mtag = m_tag_find(m0, PACKET_TAG_PF_REASSEMBLED, NULL))) {
6771 (void) pf_refragment6(&m0, mtag, dst, ifp, rt);
6772 goto done;
6773 }
6774
6775 if (if_output_tso(ifp, &m0, sin6tosa(dst), rt, ifp->if_mtu) ||
6776 m0 == NULL)
6777 goto done;
6778
6779 ip6stat_inc(ip6s_cantfrag);
6780 if (st->rt != PF_DUPTO)
6781 pf_send_icmp(m0, ICMP6_PACKET_TOO_BIG, 0,
6782 ifp->if_mtu, pd->af, st->rule.ptr, pd->rdomain);
6783 goto bad;
6784
6785 done:
6786 if_put(ifp);
6787 rtfree(rt);
6788 return;
6789
6790 bad:
6791 m_freem(m0);
6792 goto done;
6793 }
6794 #endif /* INET6 */
6795
6796 /*
6797 * Check the TCP checksum and set the mbuf flag accordingly.
6798 * off is the offset where the protocol header starts and
6799 * len is the total length of protocol header plus payload.
6800 * Returns 0 when the checksum is valid, otherwise returns 1.
6801 * If the _OUT flag is set the checksum hasn't been computed yet; consider these ok.
6802 */
6803 int
6804 pf_check_tcp_cksum(struct mbuf *m, int off, int len, sa_family_t af)
6805 {
6806 u_int16_t sum;
6807
6808 if (m->m_pkthdr.csum_flags &
6809 (M_TCP_CSUM_IN_OK | M_TCP_CSUM_OUT)) {
6810 return (0);
6811 }
6812 if (m->m_pkthdr.csum_flags & M_TCP_CSUM_IN_BAD ||
6813 off < sizeof(struct ip) ||
6814 m->m_pkthdr.len < off + len) {
6815 return (1);
6816 }
6817
6818 /* need to do it in software */
6819 tcpstat_inc(tcps_inswcsum);
6820
6821 switch (af) {
6822 case AF_INET:
6823 if (m->m_len < sizeof(struct ip))
6824 return (1);
6825
6826 sum = in4_cksum(m, IPPROTO_TCP, off, len);
6827 break;
6828 #ifdef INET6
6829 case AF_INET6:
6830 if (m->m_len < sizeof(struct ip6_hdr))
6831 return (1);
6832
6833 sum = in6_cksum(m, IPPROTO_TCP, off, len);
6834 break;
6835 #endif /* INET6 */
6836 default:
6837 unhandled_af(af);
6838 }
6839 if (sum) {
6840 tcpstat_inc(tcps_rcvbadsum);
6841 m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_BAD;
6842 return (1);
6843 }
6844
6845 m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
6846 return (0);
6847 }
6848
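/*
 * pf_find_divert() returns the divert info attached to the mbuf, or
 * NULL.  pf_get_divert() attaches a zeroed divert tag first if none
 * is present yet; it returns NULL if the tag allocation fails.
 */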
6849 struct pf_divert *
6850 pf_find_divert(struct mbuf *m)
6851 {
6852 struct m_tag *mtag;
6853
6854 if ((mtag = m_tag_find(m, PACKET_TAG_PF_DIVERT, NULL)) == NULL)
6855 return (NULL);
6856
6857 return ((struct pf_divert *)(mtag + 1));
6858 }
6859
6860 struct pf_divert *
6861 pf_get_divert(struct mbuf *m)
6862 {
6863 struct m_tag *mtag;
6864
6865 if ((mtag = m_tag_find(m, PACKET_TAG_PF_DIVERT, NULL)) == NULL) {
6866 mtag = m_tag_get(PACKET_TAG_PF_DIVERT, sizeof(struct pf_divert),
6867 M_NOWAIT);
6868 if (mtag == NULL)
6869 return (NULL);
6870 memset(mtag + 1, 0, sizeof(struct pf_divert));
6871 m_tag_prepend(m, mtag);
6872 }
6873
6874 return ((struct pf_divert *)(mtag + 1));
6875 }
6876
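/*
 * Walk the IPv4 options of header h between offsets off and end and
 * record what was seen in pd->badopts.  Router alert is flagged
 * separately so that IGMP packets can be allowed later on.
 */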
6877 int
6878 pf_walk_option(struct pf_pdesc *pd, struct ip *h, int off, int end,
6879 u_short *reason)
6880 {
6881 uint8_t type, length, opts[15 * 4 - sizeof(struct ip)];
6882
6883 /* IP header in payload of ICMP packet may be too short */
6884 if (pd->m->m_pkthdr.len < end) {
6885 DPFPRINTF(LOG_NOTICE, "IP option too short");
6886 REASON_SET(reason, PFRES_SHORT);
6887 return (PF_DROP);
6888 }
6889
6890 KASSERT(end - off <= sizeof(opts));
6891 m_copydata(pd->m, off, end - off, opts);
6892 end -= off;
6893 off = 0;
6894
6895 while (off < end) {
6896 type = opts[off];
6897 if (type == IPOPT_EOL)
6898 break;
6899 if (type == IPOPT_NOP) {
6900 off++;
6901 continue;
6902 }
6903 if (off + 2 > end) {
6904 DPFPRINTF(LOG_NOTICE, "IP length opt");
6905 REASON_SET(reason, PFRES_IPOPTIONS);
6906 return (PF_DROP);
6907 }
6908 length = opts[off + 1];
6909 if (length < 2) {
6910 DPFPRINTF(LOG_NOTICE, "IP short opt");
6911 REASON_SET(reason, PFRES_IPOPTIONS);
6912 return (PF_DROP);
6913 }
6914 if (off + length > end) {
6915 DPFPRINTF(LOG_NOTICE, "IP long opt");
6916 REASON_SET(reason, PFRES_IPOPTIONS);
6917 return (PF_DROP);
6918 }
6919 switch (type) {
6920 case IPOPT_RA:
6921 SET(pd->badopts, PF_OPT_ROUTER_ALERT);
6922 break;
6923 default:
6924 SET(pd->badopts, PF_OPT_OTHER);
6925 break;
6926 }
6927 off += length;
6928 }
6929
6930 return (PF_PASS);
6931 }
6932
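/*
 * Parse the IPv4 header, validate its length, walk any options and
 * AH headers, and leave pd->off/pd->proto at the transport header
 * that follows.
 */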
6933 int
6934 pf_walk_header(struct pf_pdesc *pd, struct ip *h, u_short *reason)
6935 {
6936 struct ip6_ext ext;
6937 u_int32_t hlen, end;
6938 int hdr_cnt;
6939
6940 hlen = h->ip_hl << 2;
6941 if (hlen < sizeof(struct ip) || hlen > ntohs(h->ip_len)) {
6942 REASON_SET(reason, PFRES_SHORT);
6943 return (PF_DROP);
6944 }
6945 if (hlen != sizeof(struct ip)) {
6946 if (pf_walk_option(pd, h, pd->off + sizeof(struct ip),
6947 pd->off + hlen, reason) != PF_PASS)
6948 return (PF_DROP);
6949 /* header options which contain only padding are fishy */
6950 if (pd->badopts == 0)
6951 SET(pd->badopts, PF_OPT_OTHER);
6952 }
6953 end = pd->off + ntohs(h->ip_len);
6954 pd->off += hlen;
6955 pd->proto = h->ip_p;
6956 /* IGMP packets have router alert options, allow them */
6957 if (pd->proto == IPPROTO_IGMP) {
6958 /*
6959 * According to RFC 1112 the ttl must be set to 1 in all
6960 * IGMP packets sent to 224.0.0.1.
6961 */
6962 if ((h->ip_ttl != 1) &&
6963 (h->ip_dst.s_addr == INADDR_ALLHOSTS_GROUP)) {
6964 DPFPRINTF(LOG_NOTICE, "Invalid IGMP");
6965 REASON_SET(reason, PFRES_IPOPTIONS);
6966 return (PF_DROP);
6967 }
6968 CLR(pd->badopts, PF_OPT_ROUTER_ALERT);
6969 }
6970 /* stop walking over non-initial fragments */
6971 if ((h->ip_off & htons(IP_OFFMASK)) != 0)
6972 return (PF_PASS);
6973
6974 for (hdr_cnt = 0; hdr_cnt < pf_hdr_limit; hdr_cnt++) {
6975 switch (pd->proto) {
6976 case IPPROTO_AH:
6977 /* fragments may be short */
6978 if ((h->ip_off & htons(IP_MF | IP_OFFMASK)) != 0 &&
6979 end < pd->off + sizeof(ext))
6980 return (PF_PASS);
6981 if (!pf_pull_hdr(pd->m, pd->off, &ext, sizeof(ext),
6982 reason, AF_INET)) {
6983 DPFPRINTF(LOG_NOTICE, "IP short exthdr");
6984 return (PF_DROP);
6985 }
6986 pd->off += (ext.ip6e_len + 2) * 4;
6987 pd->proto = ext.ip6e_nxt;
6988 break;
6989 default:
6990 return (PF_PASS);
6991 }
6992 }
6993 DPFPRINTF(LOG_NOTICE, "IPv4 nested authentication header limit");
6994 REASON_SET(reason, PFRES_IPOPTIONS);
6995 return (PF_DROP);
6996 }
6997
6998 #ifdef INET6
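/*
 * Walk an IPv6 hop-by-hop options header between offsets off and end,
 * flagging jumbo payload and router alert options in pd->badopts and
 * extracting the jumbo payload length into pd->jumbolen.
 */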
6999 int
7000 pf_walk_option6(struct pf_pdesc *pd, struct ip6_hdr *h, int off, int end,
7001 u_short *reason)
7002 {
7003 struct ip6_opt opt;
7004 struct ip6_opt_jumbo jumbo;
7005
7006 while (off < end) {
7007 if (!pf_pull_hdr(pd->m, off, &opt.ip6o_type,
7008 sizeof(opt.ip6o_type), reason, AF_INET6)) {
7009 DPFPRINTF(LOG_NOTICE, "IPv6 short opt type");
7010 return (PF_DROP);
7011 }
7012 if (opt.ip6o_type == IP6OPT_PAD1) {
7013 off++;
7014 continue;
7015 }
7016 if (!pf_pull_hdr(pd->m, off, &opt, sizeof(opt),
7017 reason, AF_INET6)) {
7018 DPFPRINTF(LOG_NOTICE, "IPv6 short opt");
7019 return (PF_DROP);
7020 }
7021 if (off + sizeof(opt) + opt.ip6o_len > end) {
7022 DPFPRINTF(LOG_NOTICE, "IPv6 long opt");
7023 REASON_SET(reason, PFRES_IPOPTIONS);
7024 return (PF_DROP);
7025 }
7026 switch (opt.ip6o_type) {
7027 case IP6OPT_PADN:
7028 break;
7029 case IP6OPT_JUMBO:
7030 SET(pd->badopts, PF_OPT_JUMBO);
7031 if (pd->jumbolen != 0) {
7032 DPFPRINTF(LOG_NOTICE, "IPv6 multiple jumbo");
7033 REASON_SET(reason, PFRES_IPOPTIONS);
7034 return (PF_DROP);
7035 }
7036 if (ntohs(h->ip6_plen) != 0) {
7037 DPFPRINTF(LOG_NOTICE, "IPv6 bad jumbo plen");
7038 REASON_SET(reason, PFRES_IPOPTIONS);
7039 return (PF_DROP);
7040 }
7041 if (!pf_pull_hdr(pd->m, off, &jumbo, sizeof(jumbo),
7042 reason, AF_INET6)) {
7043 DPFPRINTF(LOG_NOTICE, "IPv6 short jumbo");
7044 return (PF_DROP);
7045 }
7046 memcpy(&pd->jumbolen, jumbo.ip6oj_jumbo_len,
7047 sizeof(pd->jumbolen));
7048 pd->jumbolen = ntohl(pd->jumbolen);
7049 if (pd->jumbolen < IPV6_MAXPACKET) {
7050 DPFPRINTF(LOG_NOTICE, "IPv6 short jumbolen");
7051 REASON_SET(reason, PFRES_IPOPTIONS);
7052 return (PF_DROP);
7053 }
7054 break;
7055 case IP6OPT_ROUTER_ALERT:
7056 SET(pd->badopts, PF_OPT_ROUTER_ALERT);
7057 break;
7058 default:
7059 SET(pd->badopts, PF_OPT_OTHER);
7060 break;
7061 }
7062 off += sizeof(opt) + opt.ip6o_len;
7063 }
7064
7065 return (PF_PASS);
7066 }
7067
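/*
 * Walk the IPv6 extension header chain, enforcing ordering and
 * duplication rules, and leave pd->off/pd->proto at the transport
 * header.  Fragment information is recorded in pd->fragoff and
 * pd->extoff for the reassembly code.
 */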
7068 int
7069 pf_walk_header6(struct pf_pdesc *pd, struct ip6_hdr *h, u_short *reason)
7070 {
7071 struct ip6_frag frag;
7072 struct ip6_ext ext;
7073 struct icmp6_hdr icmp6;
7074 struct ip6_rthdr rthdr;
7075 u_int32_t end;
7076 int hdr_cnt, fraghdr_cnt = 0, rthdr_cnt = 0;
7077
7078 pd->off += sizeof(struct ip6_hdr);
7079 end = pd->off + ntohs(h->ip6_plen);
7080 pd->fragoff = pd->extoff = pd->jumbolen = 0;
7081 pd->proto = h->ip6_nxt;
7082
7083 for (hdr_cnt = 0; hdr_cnt < pf_hdr_limit; hdr_cnt++) {
7084 switch (pd->proto) {
7085 case IPPROTO_ROUTING:
7086 case IPPROTO_DSTOPTS:
7087 SET(pd->badopts, PF_OPT_OTHER);
7088 break;
7089 case IPPROTO_HOPOPTS:
7090 if (!pf_pull_hdr(pd->m, pd->off, &ext, sizeof(ext),
7091 reason, AF_INET6)) {
7092 DPFPRINTF(LOG_NOTICE, "IPv6 short exthdr");
7093 return (PF_DROP);
7094 }
7095 if (pf_walk_option6(pd, h, pd->off + sizeof(ext),
7096 pd->off + (ext.ip6e_len + 1) * 8, reason)
7097 != PF_PASS)
7098 return (PF_DROP);
7099 /* an option header which contains only padding is fishy */
7100 if (pd->badopts == 0)
7101 SET(pd->badopts, PF_OPT_OTHER);
7102 break;
7103 }
7104 switch (pd->proto) {
7105 case IPPROTO_FRAGMENT:
7106 if (fraghdr_cnt++) {
7107 DPFPRINTF(LOG_NOTICE, "IPv6 multiple fragment");
7108 REASON_SET(reason, PFRES_FRAG);
7109 return (PF_DROP);
7110 }
7111 /* jumbo payload packets cannot be fragmented */
7112 if (pd->jumbolen != 0) {
7113 DPFPRINTF(LOG_NOTICE, "IPv6 fragmented jumbo");
7114 REASON_SET(reason, PFRES_FRAG);
7115 return (PF_DROP);
7116 }
7117 if (!pf_pull_hdr(pd->m, pd->off, &frag, sizeof(frag),
7118 reason, AF_INET6)) {
7119 DPFPRINTF(LOG_NOTICE, "IPv6 short fragment");
7120 return (PF_DROP);
7121 }
7122 /* stop walking over non-initial fragments */
7123 if (ntohs((frag.ip6f_offlg & IP6F_OFF_MASK)) != 0) {
7124 pd->fragoff = pd->off;
7125 return (PF_PASS);
7126 }
7127 /* RFC 6946: reassemble only non-atomic fragments */
7128 if (frag.ip6f_offlg & IP6F_MORE_FRAG)
7129 pd->fragoff = pd->off;
7130 pd->off += sizeof(frag);
7131 pd->proto = frag.ip6f_nxt;
7132 break;
7133 case IPPROTO_ROUTING:
7134 if (rthdr_cnt++) {
7135 DPFPRINTF(LOG_NOTICE, "IPv6 multiple rthdr");
7136 REASON_SET(reason, PFRES_IPOPTIONS);
7137 return (PF_DROP);
7138 }
7139 /* fragments may be short */
7140 if (pd->fragoff != 0 && end < pd->off + sizeof(rthdr)) {
7141 pd->off = pd->fragoff;
7142 pd->proto = IPPROTO_FRAGMENT;
7143 return (PF_PASS);
7144 }
7145 if (!pf_pull_hdr(pd->m, pd->off, &rthdr, sizeof(rthdr),
7146 reason, AF_INET6)) {
7147 DPFPRINTF(LOG_NOTICE, "IPv6 short rthdr");
7148 return (PF_DROP);
7149 }
7150 if (rthdr.ip6r_type == IPV6_RTHDR_TYPE_0) {
7151 DPFPRINTF(LOG_NOTICE, "IPv6 rthdr0");
7152 REASON_SET(reason, PFRES_IPOPTIONS);
7153 return (PF_DROP);
7154 }
7155 /* FALLTHROUGH */
7156 case IPPROTO_HOPOPTS:
7157 /* RFC2460 4.1: Hop-by-Hop only after IPv6 header */
7158 if (pd->proto == IPPROTO_HOPOPTS && hdr_cnt > 0) {
7159 DPFPRINTF(LOG_NOTICE, "IPv6 hopopts not first");
7160 REASON_SET(reason, PFRES_IPOPTIONS);
7161 return (PF_DROP);
7162 }
7163 /* FALLTHROUGH */
7164 case IPPROTO_AH:
7165 case IPPROTO_DSTOPTS:
7166 /* fragments may be short */
7167 if (pd->fragoff != 0 && end < pd->off + sizeof(ext)) {
7168 pd->off = pd->fragoff;
7169 pd->proto = IPPROTO_FRAGMENT;
7170 return (PF_PASS);
7171 }
7172 if (!pf_pull_hdr(pd->m, pd->off, &ext, sizeof(ext),
7173 reason, AF_INET6)) {
7174 DPFPRINTF(LOG_NOTICE, "IPv6 short exthdr");
7175 return (PF_DROP);
7176 }
7177 /* reassembly needs the ext header before the frag */
7178 if (pd->fragoff == 0)
7179 pd->extoff = pd->off;
7180 if (pd->proto == IPPROTO_HOPOPTS && pd->fragoff == 0 &&
7181 ntohs(h->ip6_plen) == 0 && pd->jumbolen != 0) {
7182 DPFPRINTF(LOG_NOTICE, "IPv6 missing jumbo");
7183 REASON_SET(reason, PFRES_IPOPTIONS);
7184 return (PF_DROP);
7185 }
7186 if (pd->proto == IPPROTO_AH)
7187 pd->off += (ext.ip6e_len + 2) * 4;
7188 else
7189 pd->off += (ext.ip6e_len + 1) * 8;
7190 pd->proto = ext.ip6e_nxt;
7191 break;
7192 case IPPROTO_ICMPV6:
7193 /* fragments may be short, ignore inner header then */
7194 if (pd->fragoff != 0 && end < pd->off + sizeof(icmp6)) {
7195 pd->off = pd->fragoff;
7196 pd->proto = IPPROTO_FRAGMENT;
7197 return (PF_PASS);
7198 }
7199 if (!pf_pull_hdr(pd->m, pd->off, &icmp6, sizeof(icmp6),
7200 reason, AF_INET6)) {
7201 DPFPRINTF(LOG_NOTICE, "IPv6 short icmp6hdr");
7202 return (PF_DROP);
7203 }
7204 /* ICMP multicast packets have router alert options */
7205 switch (icmp6.icmp6_type) {
7206 case MLD_LISTENER_QUERY:
7207 case MLD_LISTENER_REPORT:
7208 case MLD_LISTENER_DONE:
7209 case MLDV2_LISTENER_REPORT:
7210 /*
7211 * According to RFC 2710 all MLD messages are
7212 * sent with a hop limit (ttl) of 1 and a
7213 * link-local source address. If either is
7214 * missing, the MLD message is invalid and
7215 * should be discarded.
7216 */
7217 if ((h->ip6_hlim != 1) ||
7218 !IN6_IS_ADDR_LINKLOCAL(&h->ip6_src)) {
7219 DPFPRINTF(LOG_NOTICE, "Invalid MLD");
7220 REASON_SET(reason, PFRES_IPOPTIONS);
7221 return (PF_DROP);
7222 }
7223 CLR(pd->badopts, PF_OPT_ROUTER_ALERT);
7224 break;
7225 }
7226 return (PF_PASS);
7227 case IPPROTO_TCP:
7228 case IPPROTO_UDP:
7229 /* fragments may be short, ignore inner header then */
7230 if (pd->fragoff != 0 && end < pd->off +
7231 (pd->proto == IPPROTO_TCP ? sizeof(struct tcphdr) :
7232 pd->proto == IPPROTO_UDP ? sizeof(struct udphdr) :
7233 sizeof(struct icmp6_hdr))) {
7234 pd->off = pd->fragoff;
7235 pd->proto = IPPROTO_FRAGMENT;
7236 }
7237 /* FALLTHROUGH */
7238 default:
7239 return (PF_PASS);
7240 }
7241 }
7242 DPFPRINTF(LOG_NOTICE, "IPv6 nested extension header limit");
7243 REASON_SET(reason, PFRES_IPOPTIONS);
7244 return (PF_DROP);
7245 }
7246 #endif /* INET6 */
7247
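/*
 * Compute a flow hash over the address/port tuple.  XORing both
 * directions first makes the hash symmetric, e.g.
 * pf_pkt_hash(af, p, &a, &b, sp, dp) == pf_pkt_hash(af, p, &b, &a, dp, sp),
 * so both directions of a connection map to the same value.
 */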
7248 u_int16_t
7249 pf_pkt_hash(sa_family_t af, uint8_t proto,
7250 const struct pf_addr *src, const struct pf_addr *dst,
7251 uint16_t sport, uint16_t dport)
7252 {
7253 uint32_t hash;
7254
7255 hash = src->addr32[0] ^ dst->addr32[0];
7256 #ifdef INET6
7257 if (af == AF_INET6) {
7258 hash ^= src->addr32[1] ^ dst->addr32[1];
7259 hash ^= src->addr32[2] ^ dst->addr32[2];
7260 hash ^= src->addr32[3] ^ dst->addr32[3];
7261 }
7262 #endif
7263
7264 switch (proto) {
7265 case IPPROTO_TCP:
7266 case IPPROTO_UDP:
7267 hash ^= sport ^ dport;
7268 break;
7269 }
7270
7271 return stoeplitz_n32(hash);
7272 }
7273
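/*
 * Initialize the packet description pd from mbuf m: parse the IP and
 * transport headers, perform basic sanity checks and precompute the
 * flow hash.  Returns PF_DROP with a reason set if the packet is bad.
 */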
7274 int
7275 pf_setup_pdesc(struct pf_pdesc *pd, sa_family_t af, int dir,
7276 struct pfi_kif *kif, struct mbuf *m, u_short *reason)
7277 {
7278 memset(pd, 0, sizeof(*pd));
7279 pd->dir = dir;
7280 pd->kif = kif; /* kif is NULL when called by pflog */
7281 pd->m = m;
7282 pd->sidx = (dir == PF_IN) ? 0 : 1;
7283 pd->didx = (dir == PF_IN) ? 1 : 0;
7284 pd->af = pd->naf = af;
7285 pd->rdomain = rtable_l2(pd->m->m_pkthdr.ph_rtableid);
7286
7287 switch (pd->af) {
7288 case AF_INET: {
7289 struct ip *h;
7290
7291 /* Check for illegal packets */
7292 if (pd->m->m_pkthdr.len < (int)sizeof(struct ip)) {
7293 REASON_SET(reason, PFRES_SHORT);
7294 return (PF_DROP);
7295 }
7296
7297 h = mtod(pd->m, struct ip *);
7298 if (pd->m->m_pkthdr.len < ntohs(h->ip_len)) {
7299 REASON_SET(reason, PFRES_SHORT);
7300 return (PF_DROP);
7301 }
7302
7303 if (pf_walk_header(pd, h, reason) != PF_PASS)
7304 return (PF_DROP);
7305
7306 pd->src = (struct pf_addr *)&h->ip_src;
7307 pd->dst = (struct pf_addr *)&h->ip_dst;
7308 pd->tot_len = ntohs(h->ip_len);
7309 pd->tos = h->ip_tos & ~IPTOS_ECN_MASK;
7310 pd->ttl = h->ip_ttl;
7311 pd->virtual_proto = (h->ip_off & htons(IP_MF | IP_OFFMASK)) ?
7312 PF_VPROTO_FRAGMENT : pd->proto;
7313
7314 break;
7315 }
7316 #ifdef INET6
7317 case AF_INET6: {
7318 struct ip6_hdr *h;
7319
7320 /* Check for illegal packets */
7321 if (pd->m->m_pkthdr.len < (int)sizeof(struct ip6_hdr)) {
7322 REASON_SET(reason, PFRES_SHORT);
7323 return (PF_DROP);
7324 }
7325
7326 h = mtod(pd->m, struct ip6_hdr *);
7327 if (pd->m->m_pkthdr.len <
7328 sizeof(struct ip6_hdr) + ntohs(h->ip6_plen)) {
7329 REASON_SET(reason, PFRES_SHORT);
7330 return (PF_DROP);
7331 }
7332
7333 if (pf_walk_header6(pd, h, reason) != PF_PASS)
7334 return (PF_DROP);
7335
7336 #if 1
7337 /*
7338 * We do not support jumbograms yet. If we kept going, the zero
7339 * ip6_plen would do something bad, so drop the packet for now.
7340 */
7341 if (pd->jumbolen != 0) {
7342 REASON_SET(reason, PFRES_NORM);
7343 return (PF_DROP);
7344 }
7345 #endif /* 1 */
7346
7347 pd->src = (struct pf_addr *)&h->ip6_src;
7348 pd->dst = (struct pf_addr *)&h->ip6_dst;
7349 pd->tot_len = ntohs(h->ip6_plen) + sizeof(struct ip6_hdr);
7350 pd->tos = (ntohl(h->ip6_flow) & 0x0fc00000) >> 20;
7351 pd->ttl = h->ip6_hlim;
7352 pd->virtual_proto = (pd->fragoff != 0) ?
7353 PF_VPROTO_FRAGMENT : pd->proto;
7354
7355 break;
7356 }
7357 #endif /* INET6 */
7358 default:
7359 panic("pf_setup_pdesc called with illegal af %u", pd->af);
7360
7361 }
7362
7363 pf_addrcpy(&pd->nsaddr, pd->src, pd->af);
7364 pf_addrcpy(&pd->ndaddr, pd->dst, pd->af);
7365
7366 switch (pd->virtual_proto) {
7367 case IPPROTO_TCP: {
7368 struct tcphdr *th = &pd->hdr.tcp;
7369
7370 if (!pf_pull_hdr(pd->m, pd->off, th, sizeof(*th),
7371 reason, pd->af))
7372 return (PF_DROP);
7373 pd->hdrlen = sizeof(*th);
7374 if (th->th_dport == 0 ||
7375 pd->off + (th->th_off << 2) > pd->tot_len ||
7376 (th->th_off << 2) < sizeof(struct tcphdr)) {
7377 REASON_SET(reason, PFRES_SHORT);
7378 return (PF_DROP);
7379 }
7380 pd->p_len = pd->tot_len - pd->off - (th->th_off << 2);
7381 pd->sport = &th->th_sport;
7382 pd->dport = &th->th_dport;
7383 pd->pcksum = &th->th_sum;
7384 break;
7385 }
7386 case IPPROTO_UDP: {
7387 struct udphdr *uh = &pd->hdr.udp;
7388
7389 if (!pf_pull_hdr(pd->m, pd->off, uh, sizeof(*uh),
7390 reason, pd->af))
7391 return (PF_DROP);
7392 pd->hdrlen = sizeof(*uh);
7393 if (uh->uh_dport == 0 ||
7394 pd->off + ntohs(uh->uh_ulen) > pd->tot_len ||
7395 ntohs(uh->uh_ulen) < sizeof(struct udphdr)) {
7396 REASON_SET(reason, PFRES_SHORT);
7397 return (PF_DROP);
7398 }
7399 pd->sport = &uh->uh_sport;
7400 pd->dport = &uh->uh_dport;
7401 pd->pcksum = &uh->uh_sum;
7402 break;
7403 }
7404 case IPPROTO_ICMP: {
7405 if (!pf_pull_hdr(pd->m, pd->off, &pd->hdr.icmp, ICMP_MINLEN,
7406 reason, pd->af))
7407 return (PF_DROP);
7408 pd->hdrlen = ICMP_MINLEN;
7409 if (pd->off + pd->hdrlen > pd->tot_len) {
7410 REASON_SET(reason, PFRES_SHORT);
7411 return (PF_DROP);
7412 }
7413 pd->pcksum = &pd->hdr.icmp.icmp_cksum;
7414 break;
7415 }
7416 #ifdef INET6
7417 case IPPROTO_ICMPV6: {
7418 size_t icmp_hlen = sizeof(struct icmp6_hdr);
7419
7420 if (!pf_pull_hdr(pd->m, pd->off, &pd->hdr.icmp6, icmp_hlen,
7421 reason, pd->af))
7422 return (PF_DROP);
7423 /* ICMP header types that we look further into to match state */
7424 switch (pd->hdr.icmp6.icmp6_type) {
7425 case MLD_LISTENER_QUERY:
7426 case MLD_LISTENER_REPORT:
7427 icmp_hlen = sizeof(struct mld_hdr);
7428 break;
7429 case ND_NEIGHBOR_SOLICIT:
7430 case ND_NEIGHBOR_ADVERT:
7431 icmp_hlen = sizeof(struct nd_neighbor_solicit);
7432 /* FALLTHROUGH */
7433 case ND_ROUTER_SOLICIT:
7434 case ND_ROUTER_ADVERT:
7435 case ND_REDIRECT:
7436 if (pd->ttl != 255) {
7437 REASON_SET(reason, PFRES_NORM);
7438 return (PF_DROP);
7439 }
7440 break;
7441 }
7442 if (icmp_hlen > sizeof(struct icmp6_hdr) &&
7443 !pf_pull_hdr(pd->m, pd->off, &pd->hdr.icmp6, icmp_hlen,
7444 reason, pd->af))
7445 return (PF_DROP);
7446 pd->hdrlen = icmp_hlen;
7447 if (pd->off + pd->hdrlen > pd->tot_len) {
7448 REASON_SET(reason, PFRES_SHORT);
7449 return (PF_DROP);
7450 }
7451 pd->pcksum = &pd->hdr.icmp6.icmp6_cksum;
7452 break;
7453 }
7454 #endif /* INET6 */
7455 }
7456
7457 if (pd->sport)
7458 pd->osport = pd->nsport = *pd->sport;
7459 if (pd->dport)
7460 pd->odport = pd->ndport = *pd->dport;
7461
7462 pd->hash = pf_pkt_hash(pd->af, pd->proto,
7463 pd->src, pd->dst, pd->osport, pd->odport);
7464
7465 return (PF_PASS);
7466 }
7467
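/*
 * Update the interface, rule, state, source node and table counters
 * for a filtered packet.
 */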
7468 void
7469 pf_counters_inc(int action, struct pf_pdesc *pd, struct pf_state *st,
7470 struct pf_rule *r, struct pf_rule *a)
7471 {
7472 int dirndx;
7473 pd->kif->pfik_bytes[pd->af == AF_INET6][pd->dir == PF_OUT]
7474 [action != PF_PASS] += pd->tot_len;
7475 pd->kif->pfik_packets[pd->af == AF_INET6][pd->dir == PF_OUT]
7476 [action != PF_PASS]++;
7477
7478 if (action == PF_PASS || action == PF_AFRT || r->action == PF_DROP) {
7479 dirndx = (pd->dir == PF_OUT);
7480 r->packets[dirndx]++;
7481 r->bytes[dirndx] += pd->tot_len;
7482 if (a != NULL) {
7483 a->packets[dirndx]++;
7484 a->bytes[dirndx] += pd->tot_len;
7485 }
7486 if (st != NULL) {
7487 struct pf_rule_item *ri;
7488 struct pf_sn_item *sni;
7489
7490 SLIST_FOREACH(sni, &st->src_nodes, next) {
7491 sni->sn->packets[dirndx]++;
7492 sni->sn->bytes[dirndx] += pd->tot_len;
7493 }
7494 dirndx = (pd->dir == st->direction) ? 0 : 1;
7495 st->packets[dirndx]++;
7496 st->bytes[dirndx] += pd->tot_len;
7497
7498 SLIST_FOREACH(ri, &st->match_rules, entry) {
7499 ri->r->packets[dirndx]++;
7500 ri->r->bytes[dirndx] += pd->tot_len;
7501
7502 if (ri->r->src.addr.type == PF_ADDR_TABLE)
7503 pfr_update_stats(ri->r->src.addr.p.tbl,
7504 &st->key[(st->direction == PF_IN)]->
7505 addr[(st->direction == PF_OUT)],
7506 pd, ri->r->action, ri->r->src.neg);
7507 if (ri->r->dst.addr.type == PF_ADDR_TABLE)
7508 pfr_update_stats(ri->r->dst.addr.p.tbl,
7509 &st->key[(st->direction == PF_IN)]->
7510 addr[(st->direction == PF_IN)],
7511 pd, ri->r->action, ri->r->dst.neg);
7512 }
7513 }
7514 if (r->src.addr.type == PF_ADDR_TABLE)
7515 pfr_update_stats(r->src.addr.p.tbl,
7516 (st == NULL) ? pd->src :
7517 &st->key[(st->direction == PF_IN)]->
7518 addr[(st->direction == PF_OUT)],
7519 pd, r->action, r->src.neg);
7520 if (r->dst.addr.type == PF_ADDR_TABLE)
7521 pfr_update_stats(r->dst.addr.p.tbl,
7522 (st == NULL) ? pd->dst :
7523 &st->key[(st->direction == PF_IN)]->
7524 addr[(st->direction == PF_IN)],
7525 pd, r->action, r->dst.neg);
7526 }
7527 }
7528
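/*
 * Main packet filter entry point: run one packet through the filter
 * in direction fwdir on interface ifp.  May replace *m0 after
 * reassembly, or set it to NULL when the packet has been consumed.
 */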
7529 int
7530 pf_test(sa_family_t af, int fwdir, struct ifnet *ifp, struct mbuf **m0)
7531 {
7532 #if NCARP > 0
7533 struct ifnet *ifp0;
7534 #endif
7535 struct pfi_kif *kif;
7536 u_short action, reason = 0;
7537 struct pf_rule *a = NULL, *r = &pf_default_rule;
7538 struct pf_state *st = NULL;
7539 struct pf_state_key_cmp key;
7540 struct pf_ruleset *ruleset = NULL;
7541 struct pf_pdesc pd;
7542 int dir = (fwdir == PF_FWD) ? PF_OUT : fwdir;
7543 u_int32_t qid, pqid = 0;
7544 int have_pf_lock = 0;
7545
7546 if (!pf_status.running)
7547 return (PF_PASS);
7548
7549 #if NCARP > 0
7550 if (ifp->if_type == IFT_CARP &&
7551 (ifp0 = if_get(ifp->if_carpdevidx)) != NULL) {
7552 kif = (struct pfi_kif *)ifp0->if_pf_kif;
7553 if_put(ifp0);
7554 } else
7555 #endif /* NCARP */
7556 kif = (struct pfi_kif *)ifp->if_pf_kif;
7557
7558 if (kif == NULL) {
7559 DPFPRINTF(LOG_ERR,
7560 "%s: kif == NULL, if_xname %s", __func__, ifp->if_xname);
7561 return (PF_DROP);
7562 }
7563 if (kif->pfik_flags & PFI_IFLAG_SKIP)
7564 return (PF_PASS);
7565
7566 #ifdef DIAGNOSTIC
7567 if (((*m0)->m_flags & M_PKTHDR) == 0)
7568 panic("non-M_PKTHDR is passed to pf_test");
7569 #endif /* DIAGNOSTIC */
7570
7571 if ((*m0)->m_pkthdr.pf.flags & PF_TAG_GENERATED)
7572 return (PF_PASS);
7573
7574 if ((*m0)->m_pkthdr.pf.flags & PF_TAG_DIVERTED_PACKET) {
7575 (*m0)->m_pkthdr.pf.flags &= ~PF_TAG_DIVERTED_PACKET;
7576 return (PF_PASS);
7577 }
7578
7579 if ((*m0)->m_pkthdr.pf.flags & PF_TAG_REFRAGMENTED) {
7580 (*m0)->m_pkthdr.pf.flags &= ~PF_TAG_REFRAGMENTED;
7581 return (PF_PASS);
7582 }
7583
7584 action = pf_setup_pdesc(&pd, af, dir, kif, *m0, &reason);
7585 if (action != PF_PASS) {
7586 #if NPFLOG > 0
7587 pd.pflog |= PF_LOG_FORCE;
7588 #endif /* NPFLOG > 0 */
7589 goto done;
7590 }
7591
7592 /* packet normalization and reassembly */
7593 switch (pd.af) {
7594 case AF_INET:
7595 action = pf_normalize_ip(&pd, &reason);
7596 break;
7597 #ifdef INET6
7598 case AF_INET6:
7599 action = pf_normalize_ip6(&pd, &reason);
7600 break;
7601 #endif /* INET6 */
7602 }
7603 *m0 = pd.m;
7604 /* if packet sits in reassembly queue, return without error */
7605 if (pd.m == NULL)
7606 return PF_PASS;
7607
7608 if (action != PF_PASS) {
7609 #if NPFLOG > 0
7610 pd.pflog |= PF_LOG_FORCE;
7611 #endif /* NPFLOG > 0 */
7612 goto done;
7613 }
7614
7615 /* if packet has been reassembled, update packet description */
7616 if (pf_status.reass && pd.virtual_proto == PF_VPROTO_FRAGMENT) {
7617 action = pf_setup_pdesc(&pd, af, dir, kif, pd.m, &reason);
7618 if (action != PF_PASS) {
7619 #if NPFLOG > 0
7620 pd.pflog |= PF_LOG_FORCE;
7621 #endif /* NPFLOG > 0 */
7622 goto done;
7623 }
7624 }
7625 pd.m->m_pkthdr.pf.flags |= PF_TAG_PROCESSED;
7626
7627 /*
7628 * Avoid pcb-lookups from the forwarding path. They should never
7629 * match and would cause MP locking problems.
7630 */
7631 if (fwdir == PF_FWD) {
7632 pd.lookup.done = -1;
7633 pd.lookup.uid = -1;
7634 pd.lookup.gid = -1;
7635 pd.lookup.pid = NO_PID;
7636 }
7637
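	/*
	 * Dispatch on the virtual protocol: fragments that were not
	 * reassembled by normalization and ICMP errors get special
	 * treatment, everything else goes through the generic state
	 * lookup in the default case.
	 */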
	switch (pd.virtual_proto) {

	case PF_VPROTO_FRAGMENT: {
		/*
		 * handle fragments that aren't reassembled by
		 * normalization
		 */
		PF_LOCK();
		have_pf_lock = 1;
		action = pf_test_rule(&pd, &r, &st, &a, &ruleset, &reason);
		st = pf_state_ref(st);
		if (action != PF_PASS)
			REASON_SET(&reason, PFRES_FRAG);
		break;
	}

	case IPPROTO_ICMP: {
		if (pd.af != AF_INET) {
			action = PF_DROP;
			REASON_SET(&reason, PFRES_NORM);
			DPFPRINTF(LOG_NOTICE,
			    "dropping IPv6 packet with ICMPv4 payload");
			break;
		}
		PF_STATE_ENTER_READ();
		action = pf_test_state_icmp(&pd, &st, &reason);
		st = pf_state_ref(st);
		PF_STATE_EXIT_READ();
		if (action == PF_PASS || action == PF_AFRT) {
#if NPFSYNC > 0
			pfsync_update_state(st);
#endif /* NPFSYNC > 0 */
			r = st->rule.ptr;
			a = st->anchor.ptr;
#if NPFLOG > 0
			pd.pflog |= st->log;
#endif /* NPFLOG > 0 */
		} else if (st == NULL) {
			PF_LOCK();
			have_pf_lock = 1;
			action = pf_test_rule(&pd, &r, &st, &a, &ruleset,
			    &reason);
			st = pf_state_ref(st);
		}
		break;
	}

#ifdef INET6
	case IPPROTO_ICMPV6: {
		if (pd.af != AF_INET6) {
			action = PF_DROP;
			REASON_SET(&reason, PFRES_NORM);
			DPFPRINTF(LOG_NOTICE,
			    "dropping IPv4 packet with ICMPv6 payload");
			break;
		}
		PF_STATE_ENTER_READ();
		action = pf_test_state_icmp(&pd, &st, &reason);
		st = pf_state_ref(st);
		PF_STATE_EXIT_READ();
		if (action == PF_PASS || action == PF_AFRT) {
#if NPFSYNC > 0
			pfsync_update_state(st);
#endif /* NPFSYNC > 0 */
			r = st->rule.ptr;
			a = st->anchor.ptr;
#if NPFLOG > 0
			pd.pflog |= st->log;
#endif /* NPFLOG > 0 */
		} else if (st == NULL) {
			PF_LOCK();
			have_pf_lock = 1;
			action = pf_test_rule(&pd, &r, &st, &a, &ruleset,
			    &reason);
			st = pf_state_ref(st);
		}
		break;
	}
#endif /* INET6 */

	default:
		if (pd.virtual_proto == IPPROTO_TCP) {
			if (pd.dir == PF_IN && (pd.hdr.tcp.th_flags &
			    (TH_SYN|TH_ACK)) == TH_SYN &&
			    pf_synflood_check(&pd)) {
				PF_LOCK();
				have_pf_lock = 1;
				pf_syncookie_send(&pd);
				action = PF_DROP;
				break;
			}
			if ((pd.hdr.tcp.th_flags & TH_ACK) && pd.p_len == 0)
				pqid = 1;
			action = pf_normalize_tcp(&pd);
			if (action == PF_DROP)
				break;
		}

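		/*
		 * Build the state lookup key from the packet description;
		 * states are keyed on address family, virtual protocol,
		 * routing domain, addresses and the original ports.
		 */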
		key.af = pd.af;
		key.proto = pd.virtual_proto;
		key.rdomain = pd.rdomain;
		pf_addrcpy(&key.addr[pd.sidx], pd.src, key.af);
		pf_addrcpy(&key.addr[pd.didx], pd.dst, key.af);
		key.port[pd.sidx] = pd.osport;
		key.port[pd.didx] = pd.odport;
		key.hash = pd.hash;

		PF_STATE_ENTER_READ();
		action = pf_find_state(&pd, &key, &st);
		st = pf_state_ref(st);
		PF_STATE_EXIT_READ();

		/* check for syncookies if tcp ack and no active state */
		if (pd.dir == PF_IN && pd.virtual_proto == IPPROTO_TCP &&
		    (st == NULL || (st->src.state >= TCPS_FIN_WAIT_2 &&
		    st->dst.state >= TCPS_FIN_WAIT_2)) &&
		    (pd.hdr.tcp.th_flags & (TH_SYN|TH_ACK|TH_RST)) == TH_ACK &&
		    pf_syncookie_validate(&pd)) {
			struct mbuf *msyn = pf_syncookie_recreate_syn(&pd);
			if (msyn) {
				action = pf_test(af, fwdir, ifp, &msyn);
				m_freem(msyn);
				if (action == PF_PASS || action == PF_AFRT) {
					PF_STATE_ENTER_READ();
					pf_state_unref(st);
					action = pf_find_state(&pd, &key, &st);
					st = pf_state_ref(st);
					PF_STATE_EXIT_READ();
					if (st == NULL)
						return (PF_DROP);
					st->src.seqhi = st->dst.seqhi =
					    ntohl(pd.hdr.tcp.th_ack) - 1;
					st->src.seqlo =
					    ntohl(pd.hdr.tcp.th_seq) - 1;
					pf_set_protostate(st, PF_PEER_SRC,
					    PF_TCPS_PROXY_DST);
				}
			} else
				action = PF_DROP;
		}

		if (action == PF_MATCH)
			action = pf_test_state(&pd, &st, &reason);

		if (action == PF_PASS || action == PF_AFRT) {
#if NPFSYNC > 0
			pfsync_update_state(st);
#endif /* NPFSYNC > 0 */
			r = st->rule.ptr;
			a = st->anchor.ptr;
#if NPFLOG > 0
			pd.pflog |= st->log;
#endif /* NPFLOG > 0 */
		} else if (st == NULL) {
			PF_LOCK();
			have_pf_lock = 1;
			action = pf_test_rule(&pd, &r, &st, &a, &ruleset,
			    &reason);
			st = pf_state_ref(st);
		}

		if (pd.virtual_proto == IPPROTO_TCP) {
			if (st) {
				if (st->max_mss)
					pf_normalize_mss(&pd, st->max_mss);
			} else if (r->max_mss)
				pf_normalize_mss(&pd, r->max_mss);
		}

		break;
	}

	if (have_pf_lock != 0)
		PF_UNLOCK();

	/*
	 * At the moment, we rely on NET_LOCK() to prevent removal of items
	 * we've collected above ('r', 'anchor' and 'ruleset').  They'll have
	 * to be refcounted when NET_LOCK() is gone.
	 */

done:
	if (action != PF_DROP) {
		if (st) {
			/* The non-state case is handled in pf_test_rule() */
			if (action == PF_PASS && pd.badopts != 0 &&
			    !(st->state_flags & PFSTATE_ALLOWOPTS)) {
				action = PF_DROP;
				REASON_SET(&reason, PFRES_IPOPTIONS);
#if NPFLOG > 0
				pd.pflog |= PF_LOG_FORCE;
#endif /* NPFLOG > 0 */
				DPFPRINTF(LOG_NOTICE, "dropping packet with "
				    "ip/ipv6 options in pf_test()");
			}

			pf_scrub(pd.m, st->state_flags, pd.af, st->min_ttl,
			    st->set_tos);
			pf_tag_packet(pd.m, st->tag, st->rtableid[pd.didx]);
			if (pqid || (pd.tos & IPTOS_LOWDELAY)) {
				qid = st->pqid;
				if (st->state_flags & PFSTATE_SETPRIO) {
					pd.m->m_pkthdr.pf.prio =
					    st->set_prio[1];
				}
			} else {
				qid = st->qid;
				if (st->state_flags & PFSTATE_SETPRIO) {
					pd.m->m_pkthdr.pf.prio =
					    st->set_prio[0];
				}
			}
			pd.m->m_pkthdr.pf.delay = st->delay;
		} else {
			pf_scrub(pd.m, r->scrub_flags, pd.af, r->min_ttl,
			    r->set_tos);
			if (pqid || (pd.tos & IPTOS_LOWDELAY)) {
				qid = r->pqid;
				if (r->scrub_flags & PFSTATE_SETPRIO)
					pd.m->m_pkthdr.pf.prio = r->set_prio[1];
			} else {
				qid = r->qid;
				if (r->scrub_flags & PFSTATE_SETPRIO)
					pd.m->m_pkthdr.pf.prio = r->set_prio[0];
			}
			pd.m->m_pkthdr.pf.delay = r->delay;
		}
	}

	if (action == PF_PASS && qid)
		pd.m->m_pkthdr.pf.qid = qid;
	if (pd.dir == PF_IN && st && st->key[PF_SK_STACK])
		pf_mbuf_link_state_key(pd.m, st->key[PF_SK_STACK]);
	if (pd.dir == PF_OUT && st && st->key[PF_SK_STACK])
		pf_state_key_link_inpcb(st->key[PF_SK_STACK],
		    pd.m->m_pkthdr.pf.inp);

	if (st != NULL && !ISSET(pd.m->m_pkthdr.csum_flags, M_FLOWID)) {
		pd.m->m_pkthdr.ph_flowid = st->key[PF_SK_WIRE]->hash;
		SET(pd.m->m_pkthdr.csum_flags, M_FLOWID);
	}

	/*
	 * connections redirected to loopback should not match sockets
	 * bound specifically to loopback due to security implications,
	 * see in_pcblookup_listen().
	 */
	if (pd.destchg)
		if ((pd.af == AF_INET && (ntohl(pd.dst->v4.s_addr) >>
		    IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) ||
		    (pd.af == AF_INET6 && IN6_IS_ADDR_LOOPBACK(&pd.dst->v6)))
			pd.m->m_pkthdr.pf.flags |= PF_TAG_TRANSLATE_LOCALHOST;
	/* We need to redo the route lookup on outgoing routes. */
	if (pd.destchg && pd.dir == PF_OUT)
		pd.m->m_pkthdr.pf.flags |= PF_TAG_REROUTE;

	if (pd.dir == PF_IN && action == PF_PASS &&
	    (r->divert.type == PF_DIVERT_TO ||
	    r->divert.type == PF_DIVERT_REPLY)) {
		struct pf_divert *divert;

		if ((divert = pf_get_divert(pd.m))) {
			pd.m->m_pkthdr.pf.flags |= PF_TAG_DIVERTED;
			divert->addr = r->divert.addr;
			divert->port = r->divert.port;
			divert->rdomain = pd.rdomain;
			divert->type = r->divert.type;
		}
	}

	if (action == PF_PASS && r->divert.type == PF_DIVERT_PACKET)
		action = PF_DIVERT;

#if NPFLOG > 0
	if (pd.pflog) {
		struct pf_rule_item *ri;

		if (pd.pflog & PF_LOG_FORCE || r->log & PF_LOG_ALL)
			pflog_packet(&pd, reason, r, a, ruleset, NULL);
		if (st) {
			SLIST_FOREACH(ri, &st->match_rules, entry)
				if (ri->r->log & PF_LOG_ALL)
					pflog_packet(&pd, reason, ri->r, a,
					    ruleset, NULL);
		}
	}
#endif /* NPFLOG > 0 */

	pf_counters_inc(action, &pd, st, r, a);

	switch (action) {
	case PF_SYNPROXY_DROP:
		m_freem(pd.m);
		/* FALLTHROUGH */
	case PF_DEFER:
		pd.m = NULL;
		action = PF_PASS;
		break;
	case PF_DIVERT:
		switch (pd.af) {
		case AF_INET:
			divert_packet(pd.m, pd.dir, r->divert.port);
			pd.m = NULL;
			break;
#ifdef INET6
		case AF_INET6:
			divert6_packet(pd.m, pd.dir, r->divert.port);
			pd.m = NULL;
			break;
#endif /* INET6 */
		}
		action = PF_PASS;
		break;
#ifdef INET6
	case PF_AFRT:
		if (pf_translate_af(&pd)) {
			action = PF_DROP;
			break;
		}
		pd.m->m_pkthdr.pf.flags |= PF_TAG_GENERATED;
		switch (pd.naf) {
		case AF_INET:
			if (pd.dir == PF_IN) {
				int flags;

				if (ip_forwarding == 0) {
					ipstat_inc(ips_cantforward);
					action = PF_DROP;
					break;
				}
				flags = IP_FORWARDING | IP_REDIRECT;
				if (ip_directedbcast)
					SET(flags, IP_ALLOWBROADCAST);
				ip_forward(pd.m, ifp, NULL, flags);
			} else
				ip_output(pd.m, NULL, NULL, 0, NULL, NULL, 0);
			break;
		case AF_INET6:
			if (pd.dir == PF_IN) {
				if (ip6_forwarding == 0) {
					ip6stat_inc(ip6s_cantforward);
					action = PF_DROP;
					break;
				}
				ip6_forward(pd.m, NULL, 1);
			} else
				ip6_output(pd.m, NULL, NULL, 0, NULL, NULL);
			break;
		}
		if (action != PF_DROP) {
			pd.m = NULL;
			action = PF_PASS;
		}
		break;
#endif /* INET6 */
	case PF_DROP:
		m_freem(pd.m);
		pd.m = NULL;
		break;
	default:
		if (st && st->rt) {
			switch (pd.af) {
			case AF_INET:
				pf_route(&pd, st);
				break;
#ifdef INET6
			case AF_INET6:
				pf_route6(&pd, st);
				break;
#endif /* INET6 */
			}
		}
		break;
	}

#ifdef INET6
	/* if reassembled packet passed, create new fragments */
	if (pf_status.reass && action == PF_PASS && pd.m && fwdir == PF_FWD &&
	    pd.af == AF_INET6) {
		struct m_tag *mtag;

		if ((mtag = m_tag_find(pd.m, PACKET_TAG_PF_REASSEMBLED, NULL)))
			action = pf_refragment6(&pd.m, mtag, NULL, NULL, NULL);
	}
#endif /* INET6 */
	if (st && action != PF_DROP) {
		if (!st->if_index_in && dir == PF_IN)
			st->if_index_in = ifp->if_index;
		else if (!st->if_index_out && dir == PF_OUT)
			st->if_index_out = ifp->if_index;
	}

	*m0 = pd.m;

	pf_state_unref(st);

	return (action);
}

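/*
 * Decide whether a packet is destined to a local socket: diverted
 * packets and packets whose cached state key is linked to an inpcb are
 * ours.  Returns 1 if the packet is known to be local, -1 if it cannot
 * tell, leaving the decision to the stack's own lookup.
 */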
int
pf_ouraddr(struct mbuf *m)
{
	struct pf_state_key *sk;

	if (m->m_pkthdr.pf.flags & PF_TAG_DIVERTED)
		return (1);

	sk = m->m_pkthdr.pf.statekey;
	if (sk != NULL) {
		if (READ_ONCE(sk->sk_inp) != NULL)
			return (1);
	}

	return (-1);
}

/*
 * Must be called whenever any addressing information, such as
 * address, port or protocol, has changed.
 */
void
pf_pkt_addr_changed(struct mbuf *m)
{
	pf_mbuf_unlink_state_key(m);
	pf_mbuf_unlink_inpcb(m);
}

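/*
 * Look up the inpcb cached via the mbuf's state key and take a
 * reference on it.  A state key that is no longer valid is unlinked
 * from the mbuf instead.
 */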
struct inpcb *
pf_inp_lookup(struct mbuf *m)
{
	struct inpcb *inp = NULL;
	struct pf_state_key *sk = m->m_pkthdr.pf.statekey;

	if (!pf_state_key_isvalid(sk))
		pf_mbuf_unlink_state_key(m);
	else if (READ_ONCE(sk->sk_inp) != NULL) {
		mtx_enter(&pf_inp_mtx);
		inp = in_pcbref(sk->sk_inp);
		mtx_leave(&pf_inp_mtx);
	}

	return (inp);
}

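/*
 * Link the inpcb the stack resolved for this mbuf to the state key, so
 * that later packets on the same connection can skip the pcb lookup.
 */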
void
pf_inp_link(struct mbuf *m, struct inpcb *inp)
{
	struct pf_state_key *sk = m->m_pkthdr.pf.statekey;

	if (!pf_state_key_isvalid(sk)) {
		pf_mbuf_unlink_state_key(m);
		return;
	}

	/*
	 * We don't need to grab the PF lock here.  In the worst case we
	 * link the inp to a state that is just being marked as deleted by
	 * another thread.
	 */
	pf_state_key_link_inpcb(sk, inp);

	/*
	 * The state key has served its purpose of finding the inp; it is
	 * no longer needed.
	 */
	pf_mbuf_unlink_state_key(m);
}

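/*
 * Break the state key/inpcb association from the socket side, dropping
 * the references both objects hold on each other.
 */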
void
pf_inp_unlink(struct inpcb *inp)
{
	struct pf_state_key *sk;

	if (READ_ONCE(inp->inp_pf_sk) == NULL)
		return;

	mtx_enter(&pf_inp_mtx);
	sk = inp->inp_pf_sk;
	if (sk == NULL) {
		mtx_leave(&pf_inp_mtx);
		return;
	}
	KASSERT(sk->sk_inp == inp);
	sk->sk_inp = NULL;
	inp->inp_pf_sk = NULL;
	mtx_leave(&pf_inp_mtx);

	pf_state_key_unref(sk);
	in_pcbunref(inp);
}

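/*
 * Link two state keys as reverse peers of each other, using compare
 * and swap so that concurrent callers cannot link a key twice.  Each
 * side takes a reference on the other.
 */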
void
pf_state_key_link_reverse(struct pf_state_key *sk, struct pf_state_key *skrev)
{
	struct pf_state_key *old_reverse;

	old_reverse = atomic_cas_ptr(&sk->sk_reverse, NULL, skrev);
	if (old_reverse != NULL)
		KASSERT(old_reverse == skrev);
	else {
		pf_state_key_ref(skrev);

		/*
		 * NOTE: if sk == skrev, then the KASSERT() below holds; we
		 * still want to grab a reference in that case, because
		 * pf_state_key_unlink_reverse() does not check whether the
		 * keys are identical.
		 */
		old_reverse = atomic_cas_ptr(&skrev->sk_reverse, NULL, sk);
		if (old_reverse != NULL)
			KASSERT(old_reverse == sk);

		pf_state_key_ref(sk);
	}
}

#if NPFLOG > 0
void
pf_log_matches(struct pf_pdesc *pd, struct pf_rule *rm, struct pf_rule *am,
    struct pf_ruleset *ruleset, struct pf_rule_slist *matchrules)
{
	struct pf_rule_item *ri;

	/* if this is the log(matches) rule, packet has been logged already */
	if (rm->log & PF_LOG_MATCHES)
		return;

	SLIST_FOREACH(ri, matchrules, entry)
		if (ri->r->log & PF_LOG_MATCHES)
			pflog_packet(pd, PFRES_MATCH, rm, am, ruleset, ri->r);
}
#endif /* NPFLOG > 0 */

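/*
 * State keys are reference counted: pf_state_key_ref() takes a
 * reference (NULL is tolerated), pf_state_key_unref() drops one and
 * frees the key once the last reference is gone.
 */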
struct pf_state_key *
pf_state_key_ref(struct pf_state_key *sk)
{
	if (sk != NULL)
		PF_REF_TAKE(sk->sk_refcnt);

	return (sk);
}

void
pf_state_key_unref(struct pf_state_key *sk)
{
	if (PF_REF_RELE(sk->sk_refcnt)) {
		/* state key must be removed from tree */
		KASSERT(!pf_state_key_isvalid(sk));
		/* state key must be unlinked from reverse key */
		KASSERT(sk->sk_reverse == NULL);
		/* state key must be unlinked from socket */
		KASSERT(sk->sk_inp == NULL);
		pool_put(&pf_state_key_pl, sk);
	}
}

int
pf_state_key_isvalid(struct pf_state_key *sk)
{
	return ((sk != NULL) && (sk->sk_removed == 0));
}

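/*
 * Helpers to cache a state key or an inpcb in the mbuf packet header;
 * the link side takes its own reference and the unlink side drops it
 * again.
 */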
void
pf_mbuf_link_state_key(struct mbuf *m, struct pf_state_key *sk)
{
	KASSERT(m->m_pkthdr.pf.statekey == NULL);
	m->m_pkthdr.pf.statekey = pf_state_key_ref(sk);
}

void
pf_mbuf_unlink_state_key(struct mbuf *m)
{
	struct pf_state_key *sk = m->m_pkthdr.pf.statekey;

	if (sk != NULL) {
		m->m_pkthdr.pf.statekey = NULL;
		pf_state_key_unref(sk);
	}
}

void
pf_mbuf_link_inpcb(struct mbuf *m, struct inpcb *inp)
{
	KASSERT(m->m_pkthdr.pf.inp == NULL);
	m->m_pkthdr.pf.inp = in_pcbref(inp);
}

void
pf_mbuf_unlink_inpcb(struct mbuf *m)
{
	struct inpcb *inp = m->m_pkthdr.pf.inp;

	if (inp != NULL) {
		m->m_pkthdr.pf.inp = NULL;
		in_pcbunref(inp);
	}
}

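/*
 * Cross-link a state key and an inpcb under pf_inp_mtx; if either side
 * is already linked the existing association is left untouched.
 */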
void
pf_state_key_link_inpcb(struct pf_state_key *sk, struct inpcb *inp)
{
	if (inp == NULL || READ_ONCE(sk->sk_inp) != NULL)
		return;

	mtx_enter(&pf_inp_mtx);
	if (inp->inp_pf_sk != NULL || sk->sk_inp != NULL) {
		mtx_leave(&pf_inp_mtx);
		return;
	}
	sk->sk_inp = in_pcbref(inp);
	inp->inp_pf_sk = pf_state_key_ref(sk);
	mtx_leave(&pf_inp_mtx);
}

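/*
 * Break the state key/inpcb association from the state side; the
 * lockless READ_ONCE() check avoids taking pf_inp_mtx when there is
 * nothing to do.
 */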
void
pf_state_key_unlink_inpcb(struct pf_state_key *sk)
{
	struct inpcb *inp;

	if (READ_ONCE(sk->sk_inp) == NULL)
		return;

	mtx_enter(&pf_inp_mtx);
	inp = sk->sk_inp;
	if (inp == NULL) {
		mtx_leave(&pf_inp_mtx);
		return;
	}
	KASSERT(inp->inp_pf_sk == sk);
	sk->sk_inp = NULL;
	inp->inp_pf_sk = NULL;
	mtx_leave(&pf_inp_mtx);

	pf_state_key_unref(sk);
	in_pcbunref(inp);
}

void
pf_state_key_unlink_reverse(struct pf_state_key *sk)
{
	struct pf_state_key *skrev = sk->sk_reverse;

	/* Note that sk and skrev may be equal, in which case we unref twice. */
	if (skrev != NULL) {
		KASSERT(skrev->sk_reverse == sk);
		sk->sk_reverse = NULL;
		skrev->sk_reverse = NULL;
		pf_state_key_unref(skrev);
		pf_state_key_unref(sk);
	}
}

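/*
 * States are reference counted like state keys; dropping the last
 * reference asserts that the state was unlinked from the pfsync and
 * state lists and releases the wire and stack keys.
 */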
struct pf_state *
pf_state_ref(struct pf_state *st)
{
	if (st != NULL)
		PF_REF_TAKE(st->refcnt);
	return (st);
}

void
pf_state_unref(struct pf_state *st)
{
	if ((st != NULL) && PF_REF_RELE(st->refcnt)) {
		/* never inserted or removed */
#if NPFSYNC > 0
		KASSERT((TAILQ_NEXT(st, sync_list) == NULL) ||
		    ((TAILQ_NEXT(st, sync_list) == _Q_INVALID) &&
		    (st->sync_state >= PFSYNC_S_NONE)));
#endif /* NPFSYNC */
		KASSERT((TAILQ_NEXT(st, entry_list) == NULL) ||
		    (TAILQ_NEXT(st, entry_list) == _Q_INVALID));

		pf_state_key_unref(st->key[PF_SK_WIRE]);
		pf_state_key_unref(st->key[PF_SK_STACK]);

		pool_put(&pf_state_pl, st);
	}
}

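/*
 * Delay transmission of a packet: park the mbuf in a pf_pktdelay and
 * re-enqueue it from a timeout once the delay taken from the packet
 * header has elapsed.  The mbuf is freed if no delay slot can be
 * allocated.
 */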
int
pf_delay_pkt(struct mbuf *m, u_int ifidx)
{
	struct pf_pktdelay *pdy;

	if ((pdy = pool_get(&pf_pktdelay_pl, PR_NOWAIT)) == NULL) {
		m_freem(m);
		return (ENOBUFS);
	}
	pdy->ifidx = ifidx;
	pdy->m = m;
	timeout_set(&pdy->to, pf_pktenqueue_delayed, pdy);
	timeout_add_msec(&pdy->to, m->m_pkthdr.pf.delay);
	m->m_pkthdr.pf.delay = 0;
	return (0);
}

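/*
 * Timeout handler for pf_delay_pkt(): hand the delayed mbuf to the
 * interface it was headed for, or free it if the interface is gone.
 */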
void
pf_pktenqueue_delayed(void *arg)
{
	struct pf_pktdelay *pdy = arg;
	struct ifnet *ifp;

	ifp = if_get(pdy->ifidx);
	if (ifp != NULL) {
		if_enqueue(ifp, pdy->m);
		if_put(ifp);
	} else
		m_freem(pdy->m);

	pool_put(&pf_pktdelay_pl, pdy);
}
