sys/netpfil/pf/pf.c
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2001 Daniel Hartmeier
5 * Copyright (c) 2002 - 2008 Henning Brauer
6 * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 *
13 * - Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * - Redistributions in binary form must reproduce the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer in the documentation and/or other materials provided
18 * with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 *
33 * Effort sponsored in part by the Defense Advanced Research Projects
34 * Agency (DARPA) and Air Force Research Laboratory, Air Force
35 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
36 *
37 * $OpenBSD: pf.c,v 1.634 2009/02/27 12:37:45 henning Exp $
38 */
39
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
42
43 #include "opt_bpf.h"
44 #include "opt_inet.h"
45 #include "opt_inet6.h"
46 #include "opt_pf.h"
47 #include "opt_sctp.h"
48
49 #include <sys/param.h>
50 #include <sys/bus.h>
51 #include <sys/endian.h>
52 #include <sys/gsb_crc32.h>
53 #include <sys/hash.h>
54 #include <sys/interrupt.h>
55 #include <sys/kernel.h>
56 #include <sys/kthread.h>
57 #include <sys/limits.h>
58 #include <sys/mbuf.h>
59 #include <sys/md5.h>
60 #include <sys/random.h>
61 #include <sys/refcount.h>
62 #include <sys/sdt.h>
63 #include <sys/socket.h>
64 #include <sys/sysctl.h>
65 #include <sys/taskqueue.h>
66 #include <sys/ucred.h>
67
68 #include <net/if.h>
69 #include <net/if_var.h>
70 #include <net/if_types.h>
71 #include <net/if_vlan_var.h>
72 #include <net/route.h>
73 #include <net/route/nhop.h>
74 #include <net/vnet.h>
75
76 #include <net/pfil.h>
77 #include <net/pfvar.h>
78 #include <net/if_pflog.h>
79 #include <net/if_pfsync.h>
80
81 #include <netinet/in_pcb.h>
82 #include <netinet/in_var.h>
83 #include <netinet/in_fib.h>
84 #include <netinet/ip.h>
85 #include <netinet/ip_fw.h>
86 #include <netinet/ip_icmp.h>
87 #include <netinet/icmp_var.h>
88 #include <netinet/ip_var.h>
89 #include <netinet/tcp.h>
90 #include <netinet/tcp_fsm.h>
91 #include <netinet/tcp_seq.h>
92 #include <netinet/tcp_timer.h>
93 #include <netinet/tcp_var.h>
94 #include <netinet/udp.h>
95 #include <netinet/udp_var.h>
96
97 /* dummynet */
98 #include <netinet/ip_dummynet.h>
99 #include <netinet/ip_fw.h>
100 #include <netpfil/ipfw/dn_heap.h>
101 #include <netpfil/ipfw/ip_fw_private.h>
102 #include <netpfil/ipfw/ip_dn_private.h>
103
104 #ifdef INET6
105 #include <netinet/ip6.h>
106 #include <netinet/icmp6.h>
107 #include <netinet6/nd6.h>
108 #include <netinet6/ip6_var.h>
109 #include <netinet6/in6_pcb.h>
110 #include <netinet6/in6_fib.h>
111 #include <netinet6/scope6_var.h>
112 #endif /* INET6 */
113
114 #if defined(SCTP) || defined(SCTP_SUPPORT)
115 #include <netinet/sctp_crc32.h>
116 #endif
117
118 #include <machine/in_cksum.h>
119 #include <security/mac/mac_framework.h>
120
121 #define DPFPRINTF(n, x) if (V_pf_status.debug >= (n)) printf x
122
123 SDT_PROVIDER_DEFINE(pf);
124 SDT_PROBE_DEFINE4(pf, ip, test, done, "int", "int", "struct pf_krule *",
125 "struct pf_kstate *");
126 SDT_PROBE_DEFINE4(pf, ip, test6, done, "int", "int", "struct pf_krule *",
127 "struct pf_kstate *");
128 SDT_PROBE_DEFINE5(pf, ip, state, lookup, "struct pfi_kkif *",
129 "struct pf_state_key_cmp *", "int", "struct pf_pdesc *",
130 "struct pf_kstate *");
131
132 SDT_PROBE_DEFINE3(pf, eth, test_rule, entry, "int", "struct ifnet *",
133 "struct mbuf *");
134 SDT_PROBE_DEFINE2(pf, eth, test_rule, test, "int", "struct pf_keth_rule *");
135 SDT_PROBE_DEFINE3(pf, eth, test_rule, mismatch,
136 "int", "struct pf_keth_rule *", "char *");
137 SDT_PROBE_DEFINE2(pf, eth, test_rule, match, "int", "struct pf_keth_rule *");
138 SDT_PROBE_DEFINE2(pf, eth, test_rule, final_match,
139 "int", "struct pf_keth_rule *");
140
141 /*
142 * Global variables
143 */
144
145 /* state tables */
146 VNET_DEFINE(struct pf_altqqueue, pf_altqs[4]);
147 VNET_DEFINE(struct pf_kpalist, pf_pabuf);
148 VNET_DEFINE(struct pf_altqqueue *, pf_altqs_active);
149 VNET_DEFINE(struct pf_altqqueue *, pf_altq_ifs_active);
150 VNET_DEFINE(struct pf_altqqueue *, pf_altqs_inactive);
151 VNET_DEFINE(struct pf_altqqueue *, pf_altq_ifs_inactive);
152 VNET_DEFINE(struct pf_kstatus, pf_status);
153
154 VNET_DEFINE(u_int32_t, ticket_altqs_active);
155 VNET_DEFINE(u_int32_t, ticket_altqs_inactive);
156 VNET_DEFINE(int, altqs_inactive_open);
157 VNET_DEFINE(u_int32_t, ticket_pabuf);
158
159 VNET_DEFINE(MD5_CTX, pf_tcp_secret_ctx);
160 #define V_pf_tcp_secret_ctx VNET(pf_tcp_secret_ctx)
161 VNET_DEFINE(u_char, pf_tcp_secret[16]);
162 #define V_pf_tcp_secret VNET(pf_tcp_secret)
163 VNET_DEFINE(int, pf_tcp_secret_init);
164 #define V_pf_tcp_secret_init VNET(pf_tcp_secret_init)
165 VNET_DEFINE(int, pf_tcp_iss_off);
166 #define V_pf_tcp_iss_off VNET(pf_tcp_iss_off)
167 VNET_DECLARE(int, pf_vnet_active);
168 #define V_pf_vnet_active VNET(pf_vnet_active)
169
170 VNET_DEFINE_STATIC(uint32_t, pf_purge_idx);
171 #define V_pf_purge_idx VNET(pf_purge_idx)
172
173 #ifdef PF_WANT_32_TO_64_COUNTER
174 VNET_DEFINE_STATIC(uint32_t, pf_counter_periodic_iter);
175 #define V_pf_counter_periodic_iter VNET(pf_counter_periodic_iter)
176
177 VNET_DEFINE(struct allrulelist_head, pf_allrulelist);
178 VNET_DEFINE(size_t, pf_allrulecount);
179 VNET_DEFINE(struct pf_krule *, pf_rulemarker);
180 #endif
181
182 /*
183 * Queue for pf_intr() sends.
184 */
185 static MALLOC_DEFINE(M_PFTEMP, "pf_temp", "pf(4) temporary allocations");
186 struct pf_send_entry {
187 STAILQ_ENTRY(pf_send_entry) pfse_next;
188 struct mbuf *pfse_m;
189 enum {
190 PFSE_IP,
191 PFSE_IP6,
192 PFSE_ICMP,
193 PFSE_ICMP6,
194 } pfse_type;
195 struct {
196 int type;
197 int code;
198 int mtu;
199 } icmpopts;
200 };
201
202 STAILQ_HEAD(pf_send_head, pf_send_entry);
203 VNET_DEFINE_STATIC(struct pf_send_head, pf_sendqueue);
204 #define V_pf_sendqueue VNET(pf_sendqueue)
205
206 static struct mtx_padalign pf_sendqueue_mtx;
207 MTX_SYSINIT(pf_sendqueue_mtx, &pf_sendqueue_mtx, "pf send queue", MTX_DEF);
208 #define PF_SENDQ_LOCK() mtx_lock(&pf_sendqueue_mtx)
209 #define PF_SENDQ_UNLOCK() mtx_unlock(&pf_sendqueue_mtx)
210
211 /*
212 * Queue for pf_overload_task() tasks.
213 */
214 struct pf_overload_entry {
215 SLIST_ENTRY(pf_overload_entry) next;
216 struct pf_addr addr;
217 sa_family_t af;
218 uint8_t dir;
219 struct pf_krule *rule;
220 };
221
222 SLIST_HEAD(pf_overload_head, pf_overload_entry);
223 VNET_DEFINE_STATIC(struct pf_overload_head, pf_overloadqueue);
224 #define V_pf_overloadqueue VNET(pf_overloadqueue)
225 VNET_DEFINE_STATIC(struct task, pf_overloadtask);
226 #define V_pf_overloadtask VNET(pf_overloadtask)
227
228 static struct mtx_padalign pf_overloadqueue_mtx;
229 MTX_SYSINIT(pf_overloadqueue_mtx, &pf_overloadqueue_mtx,
230 "pf overload/flush queue", MTX_DEF);
231 #define PF_OVERLOADQ_LOCK() mtx_lock(&pf_overloadqueue_mtx)
232 #define PF_OVERLOADQ_UNLOCK() mtx_unlock(&pf_overloadqueue_mtx)
233
234 VNET_DEFINE(struct pf_krulequeue, pf_unlinked_rules);
235 struct mtx_padalign pf_unlnkdrules_mtx;
236 MTX_SYSINIT(pf_unlnkdrules_mtx, &pf_unlnkdrules_mtx, "pf unlinked rules",
237 MTX_DEF);
238
239 struct sx pf_config_lock;
240 SX_SYSINIT(pf_config_lock, &pf_config_lock, "pf config");
241
242 struct mtx_padalign pf_table_stats_lock;
243 MTX_SYSINIT(pf_table_stats_lock, &pf_table_stats_lock, "pf table stats",
244 MTX_DEF);
245
246 VNET_DEFINE_STATIC(uma_zone_t, pf_sources_z);
247 #define V_pf_sources_z VNET(pf_sources_z)
248 uma_zone_t pf_mtag_z;
249 VNET_DEFINE(uma_zone_t, pf_state_z);
250 VNET_DEFINE(uma_zone_t, pf_state_key_z);
251
252 VNET_DEFINE(struct unrhdr64, pf_stateid);
253
254 static void pf_src_tree_remove_state(struct pf_kstate *);
255 static void pf_init_threshold(struct pf_threshold *, u_int32_t,
256 u_int32_t);
257 static void pf_add_threshold(struct pf_threshold *);
258 static int pf_check_threshold(struct pf_threshold *);
259
260 static void pf_change_ap(struct mbuf *, struct pf_addr *, u_int16_t *,
261 u_int16_t *, u_int16_t *, struct pf_addr *,
262 u_int16_t, u_int8_t, sa_family_t);
263 static int pf_modulate_sack(struct mbuf *, int, struct pf_pdesc *,
264 struct tcphdr *, struct pf_state_peer *);
265 static void pf_change_icmp(struct pf_addr *, u_int16_t *,
266 struct pf_addr *, struct pf_addr *, u_int16_t,
267 u_int16_t *, u_int16_t *, u_int16_t *,
268 u_int16_t *, u_int8_t, sa_family_t);
269 static void pf_send_icmp(struct mbuf *, u_int8_t, u_int8_t,
270 sa_family_t, struct pf_krule *);
271 static void pf_detach_state(struct pf_kstate *);
272 static int pf_state_key_attach(struct pf_state_key *,
273 struct pf_state_key *, struct pf_kstate *);
274 static void pf_state_key_detach(struct pf_kstate *, int);
275 static int pf_state_key_ctor(void *, int, void *, int);
276 static u_int32_t pf_tcp_iss(struct pf_pdesc *);
277 void pf_rule_to_actions(struct pf_krule *,
278 struct pf_rule_actions *);
279 static int pf_dummynet(struct pf_pdesc *, int, struct pf_kstate *,
280 struct pf_krule *, struct mbuf **);
281 static int pf_dummynet_route(struct pf_pdesc *, int,
282 struct pf_kstate *, struct pf_krule *,
283 struct ifnet *, struct sockaddr *, struct mbuf **);
284 static int pf_test_eth_rule(int, struct pfi_kkif *,
285 struct mbuf **);
286 static int pf_test_rule(struct pf_krule **, struct pf_kstate **,
287 int, struct pfi_kkif *, struct mbuf *, int,
288 struct pf_pdesc *, struct pf_krule **,
289 struct pf_kruleset **, struct inpcb *);
290 static int pf_create_state(struct pf_krule *, struct pf_krule *,
291 struct pf_krule *, struct pf_pdesc *,
292 struct pf_ksrc_node *, struct pf_state_key *,
293 struct pf_state_key *, struct mbuf *, int,
294 u_int16_t, u_int16_t, int *, struct pfi_kkif *,
295 struct pf_kstate **, int, u_int16_t, u_int16_t,
296 int);
297 static int pf_test_fragment(struct pf_krule **, int,
298 struct pfi_kkif *, struct mbuf *, void *,
299 struct pf_pdesc *, struct pf_krule **,
300 struct pf_kruleset **);
301 static int pf_tcp_track_full(struct pf_kstate **,
302 struct pfi_kkif *, struct mbuf *, int,
303 struct pf_pdesc *, u_short *, int *);
304 static int pf_tcp_track_sloppy(struct pf_kstate **,
305 struct pf_pdesc *, u_short *);
306 static int pf_test_state_tcp(struct pf_kstate **, int,
307 struct pfi_kkif *, struct mbuf *, int,
308 void *, struct pf_pdesc *, u_short *);
309 static int pf_test_state_udp(struct pf_kstate **, int,
310 struct pfi_kkif *, struct mbuf *, int,
311 void *, struct pf_pdesc *);
312 static int pf_test_state_icmp(struct pf_kstate **, int,
313 struct pfi_kkif *, struct mbuf *, int,
314 void *, struct pf_pdesc *, u_short *);
315 static int pf_test_state_other(struct pf_kstate **, int,
316 struct pfi_kkif *, struct mbuf *, struct pf_pdesc *);
317 static u_int16_t pf_calc_mss(struct pf_addr *, sa_family_t,
318 int, u_int16_t);
319 static int pf_check_proto_cksum(struct mbuf *, int, int,
320 u_int8_t, sa_family_t);
321 static void pf_print_state_parts(struct pf_kstate *,
322 struct pf_state_key *, struct pf_state_key *);
323 static int pf_addr_wrap_neq(struct pf_addr_wrap *,
324 struct pf_addr_wrap *);
325 static void pf_patch_8(struct mbuf *, u_int16_t *, u_int8_t *, u_int8_t,
326 bool, u_int8_t);
327 static struct pf_kstate *pf_find_state(struct pfi_kkif *,
328 struct pf_state_key_cmp *, u_int);
329 static int pf_src_connlimit(struct pf_kstate **);
330 static void pf_overload_task(void *v, int pending);
331 static int pf_insert_src_node(struct pf_ksrc_node **,
332 struct pf_krule *, struct pf_addr *, sa_family_t);
333 static u_int pf_purge_expired_states(u_int, int);
334 static void pf_purge_unlinked_rules(void);
335 static int pf_mtag_uminit(void *, int, int);
336 static void pf_mtag_free(struct m_tag *);
337 static void pf_packet_rework_nat(struct mbuf *, struct pf_pdesc *,
338 int, struct pf_state_key *);
339 #ifdef INET
340 static void pf_route(struct mbuf **, struct pf_krule *, int,
341 struct ifnet *, struct pf_kstate *,
342 struct pf_pdesc *, struct inpcb *);
343 #endif /* INET */
344 #ifdef INET6
345 static void pf_change_a6(struct pf_addr *, u_int16_t *,
346 struct pf_addr *, u_int8_t);
347 static void pf_route6(struct mbuf **, struct pf_krule *, int,
348 struct ifnet *, struct pf_kstate *,
349 struct pf_pdesc *, struct inpcb *);
350 #endif /* INET6 */
351 static __inline void pf_set_protostate(struct pf_kstate *, int, u_int8_t);
352
353 int in4_cksum(struct mbuf *m, u_int8_t nxt, int off, int len);
354
355 extern int pf_end_threads;
356 extern struct proc *pf_purge_proc;
357
358 VNET_DEFINE(struct pf_limit, pf_limits[PF_LIMIT_MAX]);
359
360 #define PACKET_UNDO_NAT(_m, _pd, _off, _s, _dir) \
361 do { \
362 struct pf_state_key *nk; \
363 if ((_dir) == PF_OUT) \
364 nk = (_s)->key[PF_SK_STACK]; \
365 else \
366 nk = (_s)->key[PF_SK_WIRE]; \
367 pf_packet_rework_nat(_m, _pd, _off, nk); \
368 } while (0)
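    /*
     * Note on the key choice above: the selected key is the one that
     * describes the packet before pf's translation -- the stack-side key
     * for outbound packets, the wire-side key for inbound packets.
     * Rewriting the packet against that key is what undoes the NAT.
     */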
369
370 #define PACKET_LOOPED(pd) ((pd)->pf_mtag && \
371 (pd)->pf_mtag->flags & PF_PACKET_LOOPED)
372
373 #define STATE_LOOKUP(i, k, d, s, pd) \
374 do { \
375 (s) = pf_find_state((i), (k), (d)); \
376 SDT_PROBE5(pf, ip, state, lookup, i, k, d, pd, (s)); \
377 if ((s) == NULL) \
378 return (PF_DROP); \
379 if (PACKET_LOOPED(pd)) \
380 return (PF_PASS); \
381 } while (0)
382
383 #define BOUND_IFACE(r, k) \
384 ((r)->rule_flag & PFRULE_IFBOUND) ? (k) : V_pfi_all
385
386 #define STATE_INC_COUNTERS(s) \
387 do { \
388 counter_u64_add(s->rule.ptr->states_cur, 1); \
389 counter_u64_add(s->rule.ptr->states_tot, 1); \
390 if (s->anchor.ptr != NULL) { \
391 counter_u64_add(s->anchor.ptr->states_cur, 1); \
392 counter_u64_add(s->anchor.ptr->states_tot, 1); \
393 } \
394 if (s->nat_rule.ptr != NULL) { \
395 counter_u64_add(s->nat_rule.ptr->states_cur, 1);\
396 counter_u64_add(s->nat_rule.ptr->states_tot, 1);\
397 } \
398 } while (0)
399
400 #define STATE_DEC_COUNTERS(s) \
401 do { \
402 if (s->nat_rule.ptr != NULL) \
403 counter_u64_add(s->nat_rule.ptr->states_cur, -1);\
404 if (s->anchor.ptr != NULL) \
405 counter_u64_add(s->anchor.ptr->states_cur, -1); \
406 counter_u64_add(s->rule.ptr->states_cur, -1); \
407 } while (0)
408
409 MALLOC_DEFINE(M_PFHASH, "pf_hash", "pf(4) hash header structures");
410 VNET_DEFINE(struct pf_keyhash *, pf_keyhash);
411 VNET_DEFINE(struct pf_idhash *, pf_idhash);
412 VNET_DEFINE(struct pf_srchash *, pf_srchash);
413
414 SYSCTL_NODE(_net, OID_AUTO, pf, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
415 "pf(4)");
416
417 u_long pf_hashmask;
418 u_long pf_srchashmask;
419 static u_long pf_hashsize;
420 static u_long pf_srchashsize;
421 u_long pf_ioctl_maxcount = 65535;
422
423 SYSCTL_ULONG(_net_pf, OID_AUTO, states_hashsize, CTLFLAG_RDTUN,
424 &pf_hashsize, 0, "Size of pf(4) states hashtable");
425 SYSCTL_ULONG(_net_pf, OID_AUTO, source_nodes_hashsize, CTLFLAG_RDTUN,
426 &pf_srchashsize, 0, "Size of pf(4) source nodes hashtable");
427 SYSCTL_ULONG(_net_pf, OID_AUTO, request_maxcount, CTLFLAG_RWTUN,
428 &pf_ioctl_maxcount, 0, "Maximum number of tables, addresses, ... in a single ioctl() call");
429
430 VNET_DEFINE(void *, pf_swi_cookie);
431 VNET_DEFINE(struct intr_event *, pf_swi_ie);
432
433 VNET_DEFINE(uint32_t, pf_hashseed);
434 #define V_pf_hashseed VNET(pf_hashseed)
435
436 int
437 pf_addr_cmp(struct pf_addr *a, struct pf_addr *b, sa_family_t af)
438 {
439
440 switch (af) {
441 #ifdef INET
442 case AF_INET:
443 if (a->addr32[0] > b->addr32[0])
444 return (1);
445 if (a->addr32[0] < b->addr32[0])
446 return (-1);
447 break;
448 #endif /* INET */
449 #ifdef INET6
450 case AF_INET6:
451 if (a->addr32[3] > b->addr32[3])
452 return (1);
453 if (a->addr32[3] < b->addr32[3])
454 return (-1);
455 if (a->addr32[2] > b->addr32[2])
456 return (1);
457 if (a->addr32[2] < b->addr32[2])
458 return (-1);
459 if (a->addr32[1] > b->addr32[1])
460 return (1);
461 if (a->addr32[1] < b->addr32[1])
462 return (-1);
463 if (a->addr32[0] > b->addr32[0])
464 return (1);
465 if (a->addr32[0] < b->addr32[0])
466 return (-1);
467 break;
468 #endif /* INET6 */
469 default:
470 panic("%s: unknown address family %u", __func__, af);
471 }
472 return (0);
473 }
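    /*
     * Illustrative sketch (not part of the original file): pf_addr_cmp()
     * returns -1/0/1 like memcmp(), so an equality check is simply a test
     * for zero.  The helper below is hypothetical and only shows intended
     * usage.
     */
    #if 0
    static __inline bool
    pf_addr_eq_sketch(struct pf_addr *a, struct pf_addr *b, sa_family_t af)
    {

    	return (pf_addr_cmp(a, b, af) == 0);
    }
    #endif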
474
475 static void
476 pf_packet_rework_nat(struct mbuf *m, struct pf_pdesc *pd, int off,
477 struct pf_state_key *nk)
478 {
479
480 switch (pd->proto) {
481 case IPPROTO_TCP: {
482 struct tcphdr *th = &pd->hdr.tcp;
483
484 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af))
485 pf_change_ap(m, pd->src, &th->th_sport, pd->ip_sum,
486 &th->th_sum, &nk->addr[pd->sidx],
487 nk->port[pd->sidx], 0, pd->af);
488 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af))
489 pf_change_ap(m, pd->dst, &th->th_dport, pd->ip_sum,
490 &th->th_sum, &nk->addr[pd->didx],
491 nk->port[pd->didx], 0, pd->af);
492 m_copyback(m, off, sizeof(*th), (caddr_t)th);
493 break;
494 }
495 case IPPROTO_UDP: {
496 struct udphdr *uh = &pd->hdr.udp;
497
498 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af))
499 pf_change_ap(m, pd->src, &uh->uh_sport, pd->ip_sum,
500 &uh->uh_sum, &nk->addr[pd->sidx],
501 nk->port[pd->sidx], 1, pd->af);
502 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af))
503 pf_change_ap(m, pd->dst, &uh->uh_dport, pd->ip_sum,
504 &uh->uh_sum, &nk->addr[pd->didx],
505 nk->port[pd->didx], 1, pd->af);
506 m_copyback(m, off, sizeof(*uh), (caddr_t)uh);
507 break;
508 }
509 case IPPROTO_ICMP: {
510 struct icmp *ih = &pd->hdr.icmp;
511
512 if (nk->port[pd->sidx] != ih->icmp_id) {
513 pd->hdr.icmp.icmp_cksum = pf_cksum_fixup(
514 ih->icmp_cksum, ih->icmp_id,
515 nk->port[pd->sidx], 0);
516 ih->icmp_id = nk->port[pd->sidx];
517 pd->sport = &ih->icmp_id;
518
519 m_copyback(m, off, ICMP_MINLEN, (caddr_t)ih);
520 }
521 /* FALLTHROUGH */
522 }
523 default:
524 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af)) {
525 switch (pd->af) {
526 case AF_INET:
527 pf_change_a(&pd->src->v4.s_addr,
528 pd->ip_sum, nk->addr[pd->sidx].v4.s_addr,
529 0);
530 break;
531 case AF_INET6:
532 PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af);
533 break;
534 }
535 }
536 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af)) {
537 switch (pd->af) {
538 case AF_INET:
539 pf_change_a(&pd->dst->v4.s_addr,
540 pd->ip_sum, nk->addr[pd->didx].v4.s_addr,
541 0);
542 break;
543 case AF_INET6:
544 PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af);
545 break;
546 }
547 }
548 break;
549 }
550 }
551
552 static __inline uint32_t
553 pf_hashkey(struct pf_state_key *sk)
554 {
555 uint32_t h;
556
557 h = murmur3_32_hash32((uint32_t *)sk,
558 sizeof(struct pf_state_key_cmp)/sizeof(uint32_t),
559 V_pf_hashseed);
560
561 return (h & pf_hashmask);
562 }
563
564 static __inline uint32_t
565 pf_hashsrc(struct pf_addr *addr, sa_family_t af)
566 {
567 uint32_t h;
568
569 switch (af) {
570 case AF_INET:
571 h = murmur3_32_hash32((uint32_t *)&addr->v4,
572 sizeof(addr->v4)/sizeof(uint32_t), V_pf_hashseed);
573 break;
574 case AF_INET6:
575 h = murmur3_32_hash32((uint32_t *)&addr->v6,
576 sizeof(addr->v6)/sizeof(uint32_t), V_pf_hashseed);
577 break;
578 default:
579 panic("%s: unknown address family %u", __func__, af);
580 }
581
582 return (h & pf_srchashmask);
583 }
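    /*
     * Illustrative sketch (not part of the original file): both hash
     * functions return an already-masked row index, so callers index the
     * hash arrays directly, e.g.:
     *
     *	struct pf_srchash *sh = &V_pf_srchash[pf_hashsrc(src, af)];
     *	struct pf_keyhash *kh = &V_pf_keyhash[pf_hashkey(sk)];
     *
     * Masking instead of taking a modulo is why pf_initialize() forces
     * the table sizes to powers of two.
     */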
584
585 #ifdef ALTQ
586 static int
587 pf_state_hash(struct pf_kstate *s)
588 {
589 u_int32_t hv = (intptr_t)s / sizeof(*s);
590
591 hv ^= crc32(&s->src, sizeof(s->src));
592 hv ^= crc32(&s->dst, sizeof(s->dst));
593 if (hv == 0)
594 hv = 1;
595 return (hv);
596 }
597 #endif
598
599 static __inline void
600 pf_set_protostate(struct pf_kstate *s, int which, u_int8_t newstate)
601 {
602 if (which == PF_PEER_DST || which == PF_PEER_BOTH)
603 s->dst.state = newstate;
604 if (which == PF_PEER_DST)
605 return;
606 if (s->src.state == newstate)
607 return;
608 if (s->creatorid == V_pf_status.hostid &&
609 s->key[PF_SK_STACK] != NULL &&
610 s->key[PF_SK_STACK]->proto == IPPROTO_TCP &&
611 !(TCPS_HAVEESTABLISHED(s->src.state) ||
612 s->src.state == TCPS_CLOSED) &&
613 (TCPS_HAVEESTABLISHED(newstate) || newstate == TCPS_CLOSED))
614 atomic_add_32(&V_pf_status.states_halfopen, -1);
615
616 s->src.state = newstate;
617 }
618
619 #ifdef INET6
620 void
621 pf_addrcpy(struct pf_addr *dst, struct pf_addr *src, sa_family_t af)
622 {
623 switch (af) {
624 #ifdef INET
625 case AF_INET:
626 dst->addr32[0] = src->addr32[0];
627 break;
628 #endif /* INET */
629 case AF_INET6:
630 dst->addr32[0] = src->addr32[0];
631 dst->addr32[1] = src->addr32[1];
632 dst->addr32[2] = src->addr32[2];
633 dst->addr32[3] = src->addr32[3];
634 break;
635 }
636 }
637 #endif /* INET6 */
638
639 static void
640 pf_init_threshold(struct pf_threshold *threshold,
641 u_int32_t limit, u_int32_t seconds)
642 {
643 threshold->limit = limit * PF_THRESHOLD_MULT;
644 threshold->seconds = seconds;
645 threshold->count = 0;
646 threshold->last = time_uptime;
647 }
648
649 static void
650 pf_add_threshold(struct pf_threshold *threshold)
651 {
652 u_int32_t t = time_uptime, diff = t - threshold->last;
653
654 if (diff >= threshold->seconds)
655 threshold->count = 0;
656 else
657 threshold->count -= threshold->count * diff /
658 threshold->seconds;
659 threshold->count += PF_THRESHOLD_MULT;
660 threshold->last = t;
661 }
662
663 static int
664 pf_check_threshold(struct pf_threshold *threshold)
665 {
666 return (threshold->count > threshold->limit);
667 }
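    /*
     * Worked example (illustrative, not from the original source): the
     * threshold is a fixed-point counter that decays linearly to zero over
     * "seconds" of inactivity and gains PF_THRESHOLD_MULT per event.  With
     * limit = 100 and seconds = 10, threshold->limit is
     * 100 * PF_THRESHOLD_MULT, so pf_check_threshold() starts returning
     * true once roughly more than 100 events arrive within a sliding
     * 10 second window.
     */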
668
669 static int
670 pf_src_connlimit(struct pf_kstate **state)
671 {
672 struct pf_overload_entry *pfoe;
673 int bad = 0;
674
675 PF_STATE_LOCK_ASSERT(*state);
676
677 (*state)->src_node->conn++;
678 (*state)->src.tcp_est = 1;
679 pf_add_threshold(&(*state)->src_node->conn_rate);
680
681 if ((*state)->rule.ptr->max_src_conn &&
682 (*state)->rule.ptr->max_src_conn <
683 (*state)->src_node->conn) {
684 counter_u64_add(V_pf_status.lcounters[LCNT_SRCCONN], 1);
685 bad++;
686 }
687
688 if ((*state)->rule.ptr->max_src_conn_rate.limit &&
689 pf_check_threshold(&(*state)->src_node->conn_rate)) {
690 counter_u64_add(V_pf_status.lcounters[LCNT_SRCCONNRATE], 1);
691 bad++;
692 }
693
694 if (!bad)
695 return (0);
696
697 /* Kill this state. */
698 (*state)->timeout = PFTM_PURGE;
699 pf_set_protostate(*state, PF_PEER_BOTH, TCPS_CLOSED);
700
701 if ((*state)->rule.ptr->overload_tbl == NULL)
702 return (1);
703
704 /* Schedule overloading and flushing task. */
705 pfoe = malloc(sizeof(*pfoe), M_PFTEMP, M_NOWAIT);
706 if (pfoe == NULL)
707 return (1); /* too bad :( */
708
709 bcopy(&(*state)->src_node->addr, &pfoe->addr, sizeof(pfoe->addr));
710 pfoe->af = (*state)->key[PF_SK_WIRE]->af;
711 pfoe->rule = (*state)->rule.ptr;
712 pfoe->dir = (*state)->direction;
713 PF_OVERLOADQ_LOCK();
714 SLIST_INSERT_HEAD(&V_pf_overloadqueue, pfoe, next);
715 PF_OVERLOADQ_UNLOCK();
716 taskqueue_enqueue(taskqueue_swi, &V_pf_overloadtask);
717
718 return (1);
719 }
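    /*
     * Illustrative pf.conf fragment (hypothetical, not taken from this
     * file) showing the ruleset side of pf_src_connlimit(): max-src-conn
     * and max-src-conn-rate arm the checks above, while "overload ...
     * flush global" supplies the table and flush flags later consumed by
     * pf_overload_task().
     *
     *	table <bad_hosts> persist
     *	pass in proto tcp to any port ssh keep state \
     *	    (max-src-conn 10, max-src-conn-rate 5/30, \
     *	    overload <bad_hosts> flush global)
     */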
720
721 static void
722 pf_overload_task(void *v, int pending)
723 {
724 struct pf_overload_head queue;
725 struct pfr_addr p;
726 struct pf_overload_entry *pfoe, *pfoe1;
727 uint32_t killed = 0;
728
729 CURVNET_SET((struct vnet *)v);
730
731 PF_OVERLOADQ_LOCK();
732 queue = V_pf_overloadqueue;
733 SLIST_INIT(&V_pf_overloadqueue);
734 PF_OVERLOADQ_UNLOCK();
735
736 bzero(&p, sizeof(p));
737 SLIST_FOREACH(pfoe, &queue, next) {
738 counter_u64_add(V_pf_status.lcounters[LCNT_OVERLOAD_TABLE], 1);
739 if (V_pf_status.debug >= PF_DEBUG_MISC) {
740 printf("%s: blocking address ", __func__);
741 pf_print_host(&pfoe->addr, 0, pfoe->af);
742 printf("\n");
743 }
744
745 p.pfra_af = pfoe->af;
746 switch (pfoe->af) {
747 #ifdef INET
748 case AF_INET:
749 p.pfra_net = 32;
750 p.pfra_ip4addr = pfoe->addr.v4;
751 break;
752 #endif
753 #ifdef INET6
754 case AF_INET6:
755 p.pfra_net = 128;
756 p.pfra_ip6addr = pfoe->addr.v6;
757 break;
758 #endif
759 }
760
761 PF_RULES_WLOCK();
762 pfr_insert_kentry(pfoe->rule->overload_tbl, &p, time_second);
763 PF_RULES_WUNLOCK();
764 }
765
766 /*
 767 	 * Remove those entries that don't need flushing.
768 */
769 SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
770 if (pfoe->rule->flush == 0) {
771 SLIST_REMOVE(&queue, pfoe, pf_overload_entry, next);
772 free(pfoe, M_PFTEMP);
773 } else
774 counter_u64_add(
775 V_pf_status.lcounters[LCNT_OVERLOAD_FLUSH], 1);
776
777 /* If nothing to flush, return. */
778 if (SLIST_EMPTY(&queue)) {
779 CURVNET_RESTORE();
780 return;
781 }
782
783 for (int i = 0; i <= pf_hashmask; i++) {
784 struct pf_idhash *ih = &V_pf_idhash[i];
785 struct pf_state_key *sk;
786 struct pf_kstate *s;
787
788 PF_HASHROW_LOCK(ih);
789 LIST_FOREACH(s, &ih->states, entry) {
790 sk = s->key[PF_SK_WIRE];
791 SLIST_FOREACH(pfoe, &queue, next)
792 if (sk->af == pfoe->af &&
793 ((pfoe->rule->flush & PF_FLUSH_GLOBAL) ||
794 pfoe->rule == s->rule.ptr) &&
795 ((pfoe->dir == PF_OUT &&
796 PF_AEQ(&pfoe->addr, &sk->addr[1], sk->af)) ||
797 (pfoe->dir == PF_IN &&
798 PF_AEQ(&pfoe->addr, &sk->addr[0], sk->af)))) {
799 s->timeout = PFTM_PURGE;
800 pf_set_protostate(s, PF_PEER_BOTH, TCPS_CLOSED);
801 killed++;
802 }
803 }
804 PF_HASHROW_UNLOCK(ih);
805 }
806 SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
807 free(pfoe, M_PFTEMP);
808 if (V_pf_status.debug >= PF_DEBUG_MISC)
 809 		printf("%s: %u states killed\n", __func__, killed);
810
811 CURVNET_RESTORE();
812 }
813
814 /*
815 * Can return locked on failure, so that we can consistently
816 * allocate and insert a new one.
817 */
818 struct pf_ksrc_node *
819 pf_find_src_node(struct pf_addr *src, struct pf_krule *rule, sa_family_t af,
820 int returnlocked)
821 {
822 struct pf_srchash *sh;
823 struct pf_ksrc_node *n;
824
825 counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_SEARCH], 1);
826
827 sh = &V_pf_srchash[pf_hashsrc(src, af)];
828 PF_HASHROW_LOCK(sh);
829 LIST_FOREACH(n, &sh->nodes, entry)
830 if (n->rule.ptr == rule && n->af == af &&
831 ((af == AF_INET && n->addr.v4.s_addr == src->v4.s_addr) ||
832 (af == AF_INET6 && bcmp(&n->addr, src, sizeof(*src)) == 0)))
833 break;
834 if (n != NULL) {
835 n->states++;
836 PF_HASHROW_UNLOCK(sh);
837 } else if (returnlocked == 0)
838 PF_HASHROW_UNLOCK(sh);
839
840 return (n);
841 }
842
843 static void
844 pf_free_src_node(struct pf_ksrc_node *sn)
845 {
846
847 for (int i = 0; i < 2; i++) {
848 counter_u64_free(sn->bytes[i]);
849 counter_u64_free(sn->packets[i]);
850 }
851 uma_zfree(V_pf_sources_z, sn);
852 }
853
854 static int
855 pf_insert_src_node(struct pf_ksrc_node **sn, struct pf_krule *rule,
856 struct pf_addr *src, sa_family_t af)
857 {
858
859 KASSERT((rule->rule_flag & PFRULE_SRCTRACK ||
860 rule->rpool.opts & PF_POOL_STICKYADDR),
861 ("%s for non-tracking rule %p", __func__, rule));
862
863 if (*sn == NULL)
864 *sn = pf_find_src_node(src, rule, af, 1);
865
866 if (*sn == NULL) {
867 struct pf_srchash *sh = &V_pf_srchash[pf_hashsrc(src, af)];
868
869 PF_HASHROW_ASSERT(sh);
870
871 if (!rule->max_src_nodes ||
872 counter_u64_fetch(rule->src_nodes) < rule->max_src_nodes)
873 (*sn) = uma_zalloc(V_pf_sources_z, M_NOWAIT | M_ZERO);
874 else
875 counter_u64_add(V_pf_status.lcounters[LCNT_SRCNODES],
876 1);
877 if ((*sn) == NULL) {
878 PF_HASHROW_UNLOCK(sh);
879 return (-1);
880 }
881
882 for (int i = 0; i < 2; i++) {
883 (*sn)->bytes[i] = counter_u64_alloc(M_NOWAIT);
884 (*sn)->packets[i] = counter_u64_alloc(M_NOWAIT);
885
886 if ((*sn)->bytes[i] == NULL || (*sn)->packets[i] == NULL) {
887 pf_free_src_node(*sn);
888 PF_HASHROW_UNLOCK(sh);
889 return (-1);
890 }
891 }
892
893 pf_init_threshold(&(*sn)->conn_rate,
894 rule->max_src_conn_rate.limit,
895 rule->max_src_conn_rate.seconds);
896
897 (*sn)->af = af;
898 (*sn)->rule.ptr = rule;
899 PF_ACPY(&(*sn)->addr, src, af);
900 LIST_INSERT_HEAD(&sh->nodes, *sn, entry);
901 (*sn)->creation = time_uptime;
902 (*sn)->ruletype = rule->action;
903 (*sn)->states = 1;
904 if ((*sn)->rule.ptr != NULL)
905 counter_u64_add((*sn)->rule.ptr->src_nodes, 1);
906 PF_HASHROW_UNLOCK(sh);
907 counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_INSERT], 1);
908 } else {
909 if (rule->max_src_states &&
910 (*sn)->states >= rule->max_src_states) {
911 counter_u64_add(V_pf_status.lcounters[LCNT_SRCSTATES],
912 1);
913 return (-1);
914 }
915 }
916 return (0);
917 }
918
919 void
920 pf_unlink_src_node(struct pf_ksrc_node *src)
921 {
922
923 PF_HASHROW_ASSERT(&V_pf_srchash[pf_hashsrc(&src->addr, src->af)]);
924 LIST_REMOVE(src, entry);
925 if (src->rule.ptr)
926 counter_u64_add(src->rule.ptr->src_nodes, -1);
927 }
928
929 u_int
930 pf_free_src_nodes(struct pf_ksrc_node_list *head)
931 {
932 struct pf_ksrc_node *sn, *tmp;
933 u_int count = 0;
934
935 LIST_FOREACH_SAFE(sn, head, entry, tmp) {
936 pf_free_src_node(sn);
937 count++;
938 }
939
940 counter_u64_add(V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], count);
941
942 return (count);
943 }
944
945 void
946 pf_mtag_initialize(void)
947 {
948
949 pf_mtag_z = uma_zcreate("pf mtags", sizeof(struct m_tag) +
950 sizeof(struct pf_mtag), NULL, NULL, pf_mtag_uminit, NULL,
951 UMA_ALIGN_PTR, 0);
952 }
953
954 /* Per-vnet data storage structures initialization. */
955 void
956 pf_initialize(void)
957 {
958 struct pf_keyhash *kh;
959 struct pf_idhash *ih;
960 struct pf_srchash *sh;
961 u_int i;
962
963 if (pf_hashsize == 0 || !powerof2(pf_hashsize))
964 pf_hashsize = PF_HASHSIZ;
965 if (pf_srchashsize == 0 || !powerof2(pf_srchashsize))
966 pf_srchashsize = PF_SRCHASHSIZ;
967
968 V_pf_hashseed = arc4random();
969
970 /* States and state keys storage. */
971 V_pf_state_z = uma_zcreate("pf states", sizeof(struct pf_kstate),
972 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
973 V_pf_limits[PF_LIMIT_STATES].zone = V_pf_state_z;
974 uma_zone_set_max(V_pf_state_z, PFSTATE_HIWAT);
975 uma_zone_set_warning(V_pf_state_z, "PF states limit reached");
976
977 V_pf_state_key_z = uma_zcreate("pf state keys",
978 sizeof(struct pf_state_key), pf_state_key_ctor, NULL, NULL, NULL,
979 UMA_ALIGN_PTR, 0);
980
981 V_pf_keyhash = mallocarray(pf_hashsize, sizeof(struct pf_keyhash),
982 M_PFHASH, M_NOWAIT | M_ZERO);
983 V_pf_idhash = mallocarray(pf_hashsize, sizeof(struct pf_idhash),
984 M_PFHASH, M_NOWAIT | M_ZERO);
985 if (V_pf_keyhash == NULL || V_pf_idhash == NULL) {
986 printf("pf: Unable to allocate memory for "
987 "state_hashsize %lu.\n", pf_hashsize);
988
989 free(V_pf_keyhash, M_PFHASH);
990 free(V_pf_idhash, M_PFHASH);
991
992 pf_hashsize = PF_HASHSIZ;
993 V_pf_keyhash = mallocarray(pf_hashsize,
994 sizeof(struct pf_keyhash), M_PFHASH, M_WAITOK | M_ZERO);
995 V_pf_idhash = mallocarray(pf_hashsize,
996 sizeof(struct pf_idhash), M_PFHASH, M_WAITOK | M_ZERO);
997 }
998
999 pf_hashmask = pf_hashsize - 1;
1000 for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= pf_hashmask;
1001 i++, kh++, ih++) {
1002 mtx_init(&kh->lock, "pf_keyhash", NULL, MTX_DEF | MTX_DUPOK);
1003 mtx_init(&ih->lock, "pf_idhash", NULL, MTX_DEF);
1004 }
1005
1006 /* Source nodes. */
1007 V_pf_sources_z = uma_zcreate("pf source nodes",
1008 sizeof(struct pf_ksrc_node), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1009 0);
1010 V_pf_limits[PF_LIMIT_SRC_NODES].zone = V_pf_sources_z;
1011 uma_zone_set_max(V_pf_sources_z, PFSNODE_HIWAT);
1012 uma_zone_set_warning(V_pf_sources_z, "PF source nodes limit reached");
1013
1014 V_pf_srchash = mallocarray(pf_srchashsize,
1015 sizeof(struct pf_srchash), M_PFHASH, M_NOWAIT | M_ZERO);
1016 if (V_pf_srchash == NULL) {
1017 printf("pf: Unable to allocate memory for "
1018 "source_hashsize %lu.\n", pf_srchashsize);
1019
1020 pf_srchashsize = PF_SRCHASHSIZ;
1021 V_pf_srchash = mallocarray(pf_srchashsize,
1022 sizeof(struct pf_srchash), M_PFHASH, M_WAITOK | M_ZERO);
1023 }
1024
1025 pf_srchashmask = pf_srchashsize - 1;
1026 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++)
1027 mtx_init(&sh->lock, "pf_srchash", NULL, MTX_DEF);
1028
1029 /* ALTQ */
1030 TAILQ_INIT(&V_pf_altqs[0]);
1031 TAILQ_INIT(&V_pf_altqs[1]);
1032 TAILQ_INIT(&V_pf_altqs[2]);
1033 TAILQ_INIT(&V_pf_altqs[3]);
1034 TAILQ_INIT(&V_pf_pabuf);
1035 V_pf_altqs_active = &V_pf_altqs[0];
1036 V_pf_altq_ifs_active = &V_pf_altqs[1];
1037 V_pf_altqs_inactive = &V_pf_altqs[2];
1038 V_pf_altq_ifs_inactive = &V_pf_altqs[3];
1039
1040 /* Send & overload+flush queues. */
1041 STAILQ_INIT(&V_pf_sendqueue);
1042 SLIST_INIT(&V_pf_overloadqueue);
1043 TASK_INIT(&V_pf_overloadtask, 0, pf_overload_task, curvnet);
1044
1045 	/* Unlinked rules that may still be referenced. */
1046 TAILQ_INIT(&V_pf_unlinked_rules);
1047 }
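    /*
     * Illustrative note (not part of the original file): both hash sizes
     * are boot-time tunables (CTLFLAG_RDTUN above), e.g. in loader.conf:
     *
     *	net.pf.states_hashsize="131072"
     *	net.pf.source_nodes_hashsize="16384"
     *
     * Values that are zero or not a power of two fall back to the
     * defaults, as enforced at the top of pf_initialize().
     */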
1048
1049 void
1050 pf_mtag_cleanup(void)
1051 {
1052
1053 uma_zdestroy(pf_mtag_z);
1054 }
1055
1056 void
1057 pf_cleanup(void)
1058 {
1059 struct pf_keyhash *kh;
1060 struct pf_idhash *ih;
1061 struct pf_srchash *sh;
1062 struct pf_send_entry *pfse, *next;
1063 u_int i;
1064
1065 for (i = 0, kh = V_pf_keyhash, ih = V_pf_idhash; i <= pf_hashmask;
1066 i++, kh++, ih++) {
1067 KASSERT(LIST_EMPTY(&kh->keys), ("%s: key hash not empty",
1068 __func__));
1069 KASSERT(LIST_EMPTY(&ih->states), ("%s: id hash not empty",
1070 __func__));
1071 mtx_destroy(&kh->lock);
1072 mtx_destroy(&ih->lock);
1073 }
1074 free(V_pf_keyhash, M_PFHASH);
1075 free(V_pf_idhash, M_PFHASH);
1076
1077 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) {
1078 KASSERT(LIST_EMPTY(&sh->nodes),
1079 ("%s: source node hash not empty", __func__));
1080 mtx_destroy(&sh->lock);
1081 }
1082 free(V_pf_srchash, M_PFHASH);
1083
1084 STAILQ_FOREACH_SAFE(pfse, &V_pf_sendqueue, pfse_next, next) {
1085 m_freem(pfse->pfse_m);
1086 free(pfse, M_PFTEMP);
1087 }
1088
1089 uma_zdestroy(V_pf_sources_z);
1090 uma_zdestroy(V_pf_state_z);
1091 uma_zdestroy(V_pf_state_key_z);
1092 }
1093
1094 static int
1095 pf_mtag_uminit(void *mem, int size, int how)
1096 {
1097 struct m_tag *t;
1098
1099 t = (struct m_tag *)mem;
1100 t->m_tag_cookie = MTAG_ABI_COMPAT;
1101 t->m_tag_id = PACKET_TAG_PF;
1102 t->m_tag_len = sizeof(struct pf_mtag);
1103 t->m_tag_free = pf_mtag_free;
1104
1105 return (0);
1106 }
1107
1108 static void
1109 pf_mtag_free(struct m_tag *t)
1110 {
1111
1112 uma_zfree(pf_mtag_z, t);
1113 }
1114
1115 struct pf_mtag *
1116 pf_get_mtag(struct mbuf *m)
1117 {
1118 struct m_tag *mtag;
1119
1120 if ((mtag = m_tag_find(m, PACKET_TAG_PF, NULL)) != NULL)
1121 return ((struct pf_mtag *)(mtag + 1));
1122
1123 mtag = uma_zalloc(pf_mtag_z, M_NOWAIT);
1124 if (mtag == NULL)
1125 return (NULL);
1126 bzero(mtag + 1, sizeof(struct pf_mtag));
1127 m_tag_prepend(m, mtag);
1128
1129 return ((struct pf_mtag *)(mtag + 1));
1130 }
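    /*
     * Illustrative sketch (not part of the original file): pf_get_mtag()
     * allocates the tag on demand with M_NOWAIT, so callers must handle a
     * NULL return, typically by dropping the packet.
     */
    #if 0
    	struct pf_mtag *pf_mtag;

    	if ((pf_mtag = pf_get_mtag(m)) == NULL)
    		return (PF_DROP);	/* tag allocation failed */
    	pf_mtag->flags |= PF_PACKET_LOOPED;	/* example use of a flag */
    #endif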
1131
1132 static int
1133 pf_state_key_attach(struct pf_state_key *skw, struct pf_state_key *sks,
1134 struct pf_kstate *s)
1135 {
1136 struct pf_keyhash *khs, *khw, *kh;
1137 struct pf_state_key *sk, *cur;
1138 struct pf_kstate *si, *olds = NULL;
1139 int idx;
1140
1141 KASSERT(s->refs == 0, ("%s: state not pristine", __func__));
1142 KASSERT(s->key[PF_SK_WIRE] == NULL, ("%s: state has key", __func__));
1143 KASSERT(s->key[PF_SK_STACK] == NULL, ("%s: state has key", __func__));
1144
1145 /*
1146 * We need to lock hash slots of both keys. To avoid deadlock
1147 * we always lock the slot with lower address first. Unlock order
1148 * isn't important.
1149 *
1150 * We also need to lock ID hash slot before dropping key
1151 * locks. On success we return with ID hash slot locked.
1152 */
1153
1154 if (skw == sks) {
1155 khs = khw = &V_pf_keyhash[pf_hashkey(skw)];
1156 PF_HASHROW_LOCK(khs);
1157 } else {
1158 khs = &V_pf_keyhash[pf_hashkey(sks)];
1159 khw = &V_pf_keyhash[pf_hashkey(skw)];
1160 if (khs == khw) {
1161 PF_HASHROW_LOCK(khs);
1162 } else if (khs < khw) {
1163 PF_HASHROW_LOCK(khs);
1164 PF_HASHROW_LOCK(khw);
1165 } else {
1166 PF_HASHROW_LOCK(khw);
1167 PF_HASHROW_LOCK(khs);
1168 }
1169 }
1170
1171 #define KEYS_UNLOCK() do { \
1172 if (khs != khw) { \
1173 PF_HASHROW_UNLOCK(khs); \
1174 PF_HASHROW_UNLOCK(khw); \
1175 } else \
1176 PF_HASHROW_UNLOCK(khs); \
1177 } while (0)
1178
1179 /*
1180 * First run: start with wire key.
1181 */
1182 sk = skw;
1183 kh = khw;
1184 idx = PF_SK_WIRE;
1185
1186 MPASS(s->lock == NULL);
1187 s->lock = &V_pf_idhash[PF_IDHASH(s)].lock;
1188
1189 keyattach:
1190 LIST_FOREACH(cur, &kh->keys, entry)
1191 if (bcmp(cur, sk, sizeof(struct pf_state_key_cmp)) == 0)
1192 break;
1193
1194 if (cur != NULL) {
1195 /* Key exists. Check for same kif, if none, add to key. */
1196 TAILQ_FOREACH(si, &cur->states[idx], key_list[idx]) {
1197 struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(si)];
1198
1199 PF_HASHROW_LOCK(ih);
1200 if (si->kif == s->kif &&
1201 si->direction == s->direction) {
1202 if (sk->proto == IPPROTO_TCP &&
1203 si->src.state >= TCPS_FIN_WAIT_2 &&
1204 si->dst.state >= TCPS_FIN_WAIT_2) {
1205 /*
1206 * New state matches an old >FIN_WAIT_2
1207 * state. We can't drop key hash locks,
1208 * thus we can't unlink it properly.
1209 *
1210 * As a workaround we drop it into
1211 * TCPS_CLOSED state, schedule purge
1212 * ASAP and push it into the very end
1213 * of the slot TAILQ, so that it won't
1214 * conflict with our new state.
1215 */
1216 pf_set_protostate(si, PF_PEER_BOTH,
1217 TCPS_CLOSED);
1218 si->timeout = PFTM_PURGE;
1219 olds = si;
1220 } else {
1221 if (V_pf_status.debug >= PF_DEBUG_MISC) {
1222 printf("pf: %s key attach "
1223 "failed on %s: ",
1224 (idx == PF_SK_WIRE) ?
1225 "wire" : "stack",
1226 s->kif->pfik_name);
1227 pf_print_state_parts(s,
1228 (idx == PF_SK_WIRE) ?
1229 sk : NULL,
1230 (idx == PF_SK_STACK) ?
1231 sk : NULL);
1232 printf(", existing: ");
1233 pf_print_state_parts(si,
1234 (idx == PF_SK_WIRE) ?
1235 sk : NULL,
1236 (idx == PF_SK_STACK) ?
1237 sk : NULL);
1238 printf("\n");
1239 }
1240 PF_HASHROW_UNLOCK(ih);
1241 KEYS_UNLOCK();
1242 uma_zfree(V_pf_state_key_z, sk);
1243 if (idx == PF_SK_STACK)
1244 pf_detach_state(s);
1245 return (EEXIST); /* collision! */
1246 }
1247 }
1248 PF_HASHROW_UNLOCK(ih);
1249 }
1250 uma_zfree(V_pf_state_key_z, sk);
1251 s->key[idx] = cur;
1252 } else {
1253 LIST_INSERT_HEAD(&kh->keys, sk, entry);
1254 s->key[idx] = sk;
1255 }
1256
1257 stateattach:
1258 /* List is sorted, if-bound states before floating. */
1259 if (s->kif == V_pfi_all)
1260 TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], s, key_list[idx]);
1261 else
1262 TAILQ_INSERT_HEAD(&s->key[idx]->states[idx], s, key_list[idx]);
1263
1264 if (olds) {
1265 TAILQ_REMOVE(&s->key[idx]->states[idx], olds, key_list[idx]);
1266 TAILQ_INSERT_TAIL(&s->key[idx]->states[idx], olds,
1267 key_list[idx]);
1268 olds = NULL;
1269 }
1270
1271 /*
1272 	 * Attach done. Now decide whether (and how) we should
1273 	 * attach a second key.
1274 */
1275 if (sks == skw) {
1276 s->key[PF_SK_STACK] = s->key[PF_SK_WIRE];
1277 idx = PF_SK_STACK;
1278 sks = NULL;
1279 goto stateattach;
1280 } else if (sks != NULL) {
1281 /*
1282 * Continue attaching with stack key.
1283 */
1284 sk = sks;
1285 kh = khs;
1286 idx = PF_SK_STACK;
1287 sks = NULL;
1288 goto keyattach;
1289 }
1290
1291 PF_STATE_LOCK(s);
1292 KEYS_UNLOCK();
1293
1294 KASSERT(s->key[PF_SK_WIRE] != NULL && s->key[PF_SK_STACK] != NULL,
1295 ("%s failure", __func__));
1296
1297 return (0);
1298 #undef KEYS_UNLOCK
1299 }
1300
1301 static void
1302 pf_detach_state(struct pf_kstate *s)
1303 {
1304 struct pf_state_key *sks = s->key[PF_SK_STACK];
1305 struct pf_keyhash *kh;
1306
1307 if (sks != NULL) {
1308 kh = &V_pf_keyhash[pf_hashkey(sks)];
1309 PF_HASHROW_LOCK(kh);
1310 if (s->key[PF_SK_STACK] != NULL)
1311 pf_state_key_detach(s, PF_SK_STACK);
1312 /*
1313 		 * If both point to the same key, then we are done.
1314 */
1315 if (sks == s->key[PF_SK_WIRE]) {
1316 pf_state_key_detach(s, PF_SK_WIRE);
1317 PF_HASHROW_UNLOCK(kh);
1318 return;
1319 }
1320 PF_HASHROW_UNLOCK(kh);
1321 }
1322
1323 if (s->key[PF_SK_WIRE] != NULL) {
1324 kh = &V_pf_keyhash[pf_hashkey(s->key[PF_SK_WIRE])];
1325 PF_HASHROW_LOCK(kh);
1326 if (s->key[PF_SK_WIRE] != NULL)
1327 pf_state_key_detach(s, PF_SK_WIRE);
1328 PF_HASHROW_UNLOCK(kh);
1329 }
1330 }
1331
1332 static void
1333 pf_state_key_detach(struct pf_kstate *s, int idx)
1334 {
1335 struct pf_state_key *sk = s->key[idx];
1336 #ifdef INVARIANTS
1337 struct pf_keyhash *kh = &V_pf_keyhash[pf_hashkey(sk)];
1338
1339 PF_HASHROW_ASSERT(kh);
1340 #endif
1341 TAILQ_REMOVE(&sk->states[idx], s, key_list[idx]);
1342 s->key[idx] = NULL;
1343
1344 if (TAILQ_EMPTY(&sk->states[0]) && TAILQ_EMPTY(&sk->states[1])) {
1345 LIST_REMOVE(sk, entry);
1346 uma_zfree(V_pf_state_key_z, sk);
1347 }
1348 }
1349
1350 static int
1351 pf_state_key_ctor(void *mem, int size, void *arg, int flags)
1352 {
1353 struct pf_state_key *sk = mem;
1354
1355 bzero(sk, sizeof(struct pf_state_key_cmp));
1356 TAILQ_INIT(&sk->states[PF_SK_WIRE]);
1357 TAILQ_INIT(&sk->states[PF_SK_STACK]);
1358
1359 return (0);
1360 }
1361
1362 struct pf_state_key *
1363 pf_state_key_setup(struct pf_pdesc *pd, struct pf_addr *saddr,
1364 struct pf_addr *daddr, u_int16_t sport, u_int16_t dport)
1365 {
1366 struct pf_state_key *sk;
1367
1368 sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
1369 if (sk == NULL)
1370 return (NULL);
1371
1372 PF_ACPY(&sk->addr[pd->sidx], saddr, pd->af);
1373 PF_ACPY(&sk->addr[pd->didx], daddr, pd->af);
1374 sk->port[pd->sidx] = sport;
1375 sk->port[pd->didx] = dport;
1376 sk->proto = pd->proto;
1377 sk->af = pd->af;
1378
1379 return (sk);
1380 }
1381
1382 struct pf_state_key *
1383 pf_state_key_clone(struct pf_state_key *orig)
1384 {
1385 struct pf_state_key *sk;
1386
1387 sk = uma_zalloc(V_pf_state_key_z, M_NOWAIT);
1388 if (sk == NULL)
1389 return (NULL);
1390
1391 bcopy(orig, sk, sizeof(struct pf_state_key_cmp));
1392
1393 return (sk);
1394 }
1395
1396 int
1397 pf_state_insert(struct pfi_kkif *kif, struct pfi_kkif *orig_kif,
1398 struct pf_state_key *skw, struct pf_state_key *sks, struct pf_kstate *s)
1399 {
1400 struct pf_idhash *ih;
1401 struct pf_kstate *cur;
1402 int error;
1403
1404 KASSERT(TAILQ_EMPTY(&sks->states[0]) && TAILQ_EMPTY(&sks->states[1]),
1405 ("%s: sks not pristine", __func__));
1406 KASSERT(TAILQ_EMPTY(&skw->states[0]) && TAILQ_EMPTY(&skw->states[1]),
1407 ("%s: skw not pristine", __func__));
1408 KASSERT(s->refs == 0, ("%s: state not pristine", __func__));
1409
1410 s->kif = kif;
1411 s->orig_kif = orig_kif;
1412
1413 if (s->id == 0 && s->creatorid == 0) {
1414 s->id = alloc_unr64(&V_pf_stateid);
1415 s->id = htobe64(s->id);
1416 s->creatorid = V_pf_status.hostid;
1417 }
1418
1419 /* Returns with ID locked on success. */
1420 if ((error = pf_state_key_attach(skw, sks, s)) != 0)
1421 return (error);
1422
1423 ih = &V_pf_idhash[PF_IDHASH(s)];
1424 PF_HASHROW_ASSERT(ih);
1425 LIST_FOREACH(cur, &ih->states, entry)
1426 if (cur->id == s->id && cur->creatorid == s->creatorid)
1427 break;
1428
1429 if (cur != NULL) {
1430 PF_HASHROW_UNLOCK(ih);
1431 if (V_pf_status.debug >= PF_DEBUG_MISC) {
1432 printf("pf: state ID collision: "
1433 "id: %016llx creatorid: %08x\n",
1434 (unsigned long long)be64toh(s->id),
1435 ntohl(s->creatorid));
1436 }
1437 pf_detach_state(s);
1438 return (EEXIST);
1439 }
1440 LIST_INSERT_HEAD(&ih->states, s, entry);
1441 /* One for keys, one for ID hash. */
1442 refcount_init(&s->refs, 2);
1443
1444 pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_INSERT], 1);
1445 if (V_pfsync_insert_state_ptr != NULL)
1446 V_pfsync_insert_state_ptr(s);
1447
1448 /* Returns locked. */
1449 return (0);
1450 }
1451
1452 /*
1453 * Find state by ID: returns with locked row on success.
1454 */
1455 struct pf_kstate *
1456 pf_find_state_byid(uint64_t id, uint32_t creatorid)
1457 {
1458 struct pf_idhash *ih;
1459 struct pf_kstate *s;
1460
1461 pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);
1462
1463 ih = &V_pf_idhash[(be64toh(id) % (pf_hashmask + 1))];
1464
1465 PF_HASHROW_LOCK(ih);
1466 LIST_FOREACH(s, &ih->states, entry)
1467 if (s->id == id && s->creatorid == creatorid)
1468 break;
1469
1470 if (s == NULL)
1471 PF_HASHROW_UNLOCK(ih);
1472
1473 return (s);
1474 }
1475
1476 /*
1477 * Find state by key.
1478 * Returns with ID hash slot locked on success.
1479 */
1480 static struct pf_kstate *
1481 pf_find_state(struct pfi_kkif *kif, struct pf_state_key_cmp *key, u_int dir)
1482 {
1483 struct pf_keyhash *kh;
1484 struct pf_state_key *sk;
1485 struct pf_kstate *s;
1486 int idx;
1487
1488 pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);
1489
1490 kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)];
1491
1492 PF_HASHROW_LOCK(kh);
1493 LIST_FOREACH(sk, &kh->keys, entry)
1494 if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)
1495 break;
1496 if (sk == NULL) {
1497 PF_HASHROW_UNLOCK(kh);
1498 return (NULL);
1499 }
1500
1501 idx = (dir == PF_IN ? PF_SK_WIRE : PF_SK_STACK);
1502
1503 /* List is sorted, if-bound states before floating ones. */
1504 TAILQ_FOREACH(s, &sk->states[idx], key_list[idx])
1505 if (s->kif == V_pfi_all || s->kif == kif) {
1506 PF_STATE_LOCK(s);
1507 PF_HASHROW_UNLOCK(kh);
1508 if (__predict_false(s->timeout >= PFTM_MAX)) {
1509 /*
1510 * State is either being processed by
1511 				 * pf_unlink_state() in another thread, or
1512 * is scheduled for immediate expiry.
1513 */
1514 PF_STATE_UNLOCK(s);
1515 return (NULL);
1516 }
1517 return (s);
1518 }
1519 PF_HASHROW_UNLOCK(kh);
1520
1521 return (NULL);
1522 }
1523
1524 /*
1525 * Returns with ID hash slot locked on success.
1526 */
1527 struct pf_kstate *
1528 pf_find_state_all(struct pf_state_key_cmp *key, u_int dir, int *more)
1529 {
1530 struct pf_keyhash *kh;
1531 struct pf_state_key *sk;
1532 struct pf_kstate *s, *ret = NULL;
1533 int idx, inout = 0;
1534
1535 pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_SEARCH], 1);
1536
1537 kh = &V_pf_keyhash[pf_hashkey((struct pf_state_key *)key)];
1538
1539 PF_HASHROW_LOCK(kh);
1540 LIST_FOREACH(sk, &kh->keys, entry)
1541 if (bcmp(sk, key, sizeof(struct pf_state_key_cmp)) == 0)
1542 break;
1543 if (sk == NULL) {
1544 PF_HASHROW_UNLOCK(kh);
1545 return (NULL);
1546 }
1547 switch (dir) {
1548 case PF_IN:
1549 idx = PF_SK_WIRE;
1550 break;
1551 case PF_OUT:
1552 idx = PF_SK_STACK;
1553 break;
1554 case PF_INOUT:
1555 idx = PF_SK_WIRE;
1556 inout = 1;
1557 break;
1558 default:
1559 panic("%s: dir %u", __func__, dir);
1560 }
1561 second_run:
1562 TAILQ_FOREACH(s, &sk->states[idx], key_list[idx]) {
1563 if (more == NULL) {
1564 PF_STATE_LOCK(s);
1565 PF_HASHROW_UNLOCK(kh);
1566 return (s);
1567 }
1568
1569 if (ret)
1570 (*more)++;
1571 else {
1572 ret = s;
1573 PF_STATE_LOCK(s);
1574 }
1575 }
1576 if (inout == 1) {
1577 inout = 0;
1578 idx = PF_SK_STACK;
1579 goto second_run;
1580 }
1581 PF_HASHROW_UNLOCK(kh);
1582
1583 return (ret);
1584 }
1585
1586 /*
1587 * FIXME
1588 * This routine is inefficient -- locks the state only to unlock immediately on
1589 * return.
1590 * It is racy -- after the state is unlocked nothing stops other threads from
1591 * removing it.
1592 */
1593 bool
1594 pf_find_state_all_exists(struct pf_state_key_cmp *key, u_int dir)
1595 {
1596 struct pf_kstate *s;
1597
1598 s = pf_find_state_all(key, dir, NULL);
1599 if (s != NULL) {
1600 PF_STATE_UNLOCK(s);
1601 return (true);
1602 }
1603 return (false);
1604 }
1605
1606 /* END state table stuff */
1607
1608 static void
1609 pf_send(struct pf_send_entry *pfse)
1610 {
1611
1612 PF_SENDQ_LOCK();
1613 STAILQ_INSERT_TAIL(&V_pf_sendqueue, pfse, pfse_next);
1614 PF_SENDQ_UNLOCK();
1615 swi_sched(V_pf_swi_cookie, 0);
1616 }
1617
1618 static bool
1619 pf_isforlocal(struct mbuf *m, int af)
1620 {
1621 switch (af) {
1622 #ifdef INET
1623 case AF_INET: {
1624 struct ip *ip = mtod(m, struct ip *);
1625
1626 return (in_localip(ip->ip_dst));
1627 }
1628 #endif
1629 #ifdef INET6
1630 case AF_INET6: {
1631 struct ip6_hdr *ip6;
1632 struct in6_ifaddr *ia;
1633 ip6 = mtod(m, struct ip6_hdr *);
1634 ia = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */, false);
1635 if (ia == NULL)
1636 return (false);
1637 return (! (ia->ia6_flags & IN6_IFF_NOTREADY));
1638 }
1639 #endif
1640 default:
1641 panic("Unsupported af %d", af);
1642 }
1643
1644 return (false);
1645 }
1646
1647 void
1648 pf_intr(void *v)
1649 {
1650 struct epoch_tracker et;
1651 struct pf_send_head queue;
1652 struct pf_send_entry *pfse, *next;
1653
1654 CURVNET_SET((struct vnet *)v);
1655
1656 PF_SENDQ_LOCK();
1657 queue = V_pf_sendqueue;
1658 STAILQ_INIT(&V_pf_sendqueue);
1659 PF_SENDQ_UNLOCK();
1660
1661 NET_EPOCH_ENTER(et);
1662
1663 STAILQ_FOREACH_SAFE(pfse, &queue, pfse_next, next) {
1664 switch (pfse->pfse_type) {
1665 #ifdef INET
1666 case PFSE_IP: {
1667 if (pf_isforlocal(pfse->pfse_m, AF_INET)) {
1668 pfse->pfse_m->m_flags |= M_SKIP_FIREWALL;
1669 pfse->pfse_m->m_pkthdr.csum_flags |=
1670 CSUM_IP_VALID | CSUM_IP_CHECKED;
1671 ip_input(pfse->pfse_m);
1672 } else {
1673 ip_output(pfse->pfse_m, NULL, NULL, 0, NULL,
1674 NULL);
1675 }
1676 break;
1677 }
1678 case PFSE_ICMP:
1679 icmp_error(pfse->pfse_m, pfse->icmpopts.type,
1680 pfse->icmpopts.code, 0, pfse->icmpopts.mtu);
1681 break;
1682 #endif /* INET */
1683 #ifdef INET6
1684 case PFSE_IP6:
1685 if (pf_isforlocal(pfse->pfse_m, AF_INET6)) {
1686 pfse->pfse_m->m_flags |= M_SKIP_FIREWALL;
1687 ip6_input(pfse->pfse_m);
1688 } else {
1689 ip6_output(pfse->pfse_m, NULL, NULL, 0, NULL,
1690 NULL, NULL);
1691 }
1692 break;
1693 case PFSE_ICMP6:
1694 icmp6_error(pfse->pfse_m, pfse->icmpopts.type,
1695 pfse->icmpopts.code, pfse->icmpopts.mtu);
1696 break;
1697 #endif /* INET6 */
1698 default:
1699 panic("%s: unknown type", __func__);
1700 }
1701 free(pfse, M_PFTEMP);
1702 }
1703 NET_EPOCH_EXIT(et);
1704 CURVNET_RESTORE();
1705 }
1706
1707 #define pf_purge_thread_period (hz / 10)
1708
1709 #ifdef PF_WANT_32_TO_64_COUNTER
1710 static void
1711 pf_status_counter_u64_periodic(void)
1712 {
1713
1714 PF_RULES_RASSERT();
1715
1716 if ((V_pf_counter_periodic_iter % (pf_purge_thread_period * 10 * 60)) != 0) {
1717 return;
1718 }
1719
1720 for (int i = 0; i < FCNT_MAX; i++) {
1721 pf_counter_u64_periodic(&V_pf_status.fcounters[i]);
1722 }
1723 }
1724
1725 static void
1726 pf_kif_counter_u64_periodic(void)
1727 {
1728 struct pfi_kkif *kif;
1729 size_t r, run;
1730
1731 PF_RULES_RASSERT();
1732
1733 if (__predict_false(V_pf_allkifcount == 0)) {
1734 return;
1735 }
1736
1737 if ((V_pf_counter_periodic_iter % (pf_purge_thread_period * 10 * 300)) != 0) {
1738 return;
1739 }
1740
1741 run = V_pf_allkifcount / 10;
1742 if (run < 5)
1743 run = 5;
1744
1745 for (r = 0; r < run; r++) {
1746 kif = LIST_NEXT(V_pf_kifmarker, pfik_allkiflist);
1747 if (kif == NULL) {
1748 LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);
1749 LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist);
1750 break;
1751 }
1752
1753 LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);
1754 LIST_INSERT_AFTER(kif, V_pf_kifmarker, pfik_allkiflist);
1755
1756 for (int i = 0; i < 2; i++) {
1757 for (int j = 0; j < 2; j++) {
1758 for (int k = 0; k < 2; k++) {
1759 pf_counter_u64_periodic(&kif->pfik_packets[i][j][k]);
1760 pf_counter_u64_periodic(&kif->pfik_bytes[i][j][k]);
1761 }
1762 }
1763 }
1764 }
1765 }
1766
1767 static void
1768 pf_rule_counter_u64_periodic(void)
1769 {
1770 struct pf_krule *rule;
1771 size_t r, run;
1772
1773 PF_RULES_RASSERT();
1774
1775 if (__predict_false(V_pf_allrulecount == 0)) {
1776 return;
1777 }
1778
1779 if ((V_pf_counter_periodic_iter % (pf_purge_thread_period * 10 * 300)) != 0) {
1780 return;
1781 }
1782
1783 run = V_pf_allrulecount / 10;
1784 if (run < 5)
1785 run = 5;
1786
1787 for (r = 0; r < run; r++) {
1788 rule = LIST_NEXT(V_pf_rulemarker, allrulelist);
1789 if (rule == NULL) {
1790 LIST_REMOVE(V_pf_rulemarker, allrulelist);
1791 LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist);
1792 break;
1793 }
1794
1795 LIST_REMOVE(V_pf_rulemarker, allrulelist);
1796 LIST_INSERT_AFTER(rule, V_pf_rulemarker, allrulelist);
1797
1798 pf_counter_u64_periodic(&rule->evaluations);
1799 for (int i = 0; i < 2; i++) {
1800 pf_counter_u64_periodic(&rule->packets[i]);
1801 pf_counter_u64_periodic(&rule->bytes[i]);
1802 }
1803 }
1804 }
1805
1806 static void
1807 pf_counter_u64_periodic_main(void)
1808 {
1809 PF_RULES_RLOCK_TRACKER;
1810
1811 V_pf_counter_periodic_iter++;
1812
1813 PF_RULES_RLOCK();
1814 pf_counter_u64_critical_enter();
1815 pf_status_counter_u64_periodic();
1816 pf_kif_counter_u64_periodic();
1817 pf_rule_counter_u64_periodic();
1818 pf_counter_u64_critical_exit();
1819 PF_RULES_RUNLOCK();
1820 }
1821 #else
1822 #define pf_counter_u64_periodic_main() do { } while (0)
1823 #endif
1824
1825 void
1826 pf_purge_thread(void *unused __unused)
1827 {
1828 VNET_ITERATOR_DECL(vnet_iter);
1829
1830 sx_xlock(&pf_end_lock);
1831 while (pf_end_threads == 0) {
1832 sx_sleep(pf_purge_thread, &pf_end_lock, 0, "pftm", pf_purge_thread_period);
1833
1834 VNET_LIST_RLOCK();
1835 VNET_FOREACH(vnet_iter) {
1836 CURVNET_SET(vnet_iter);
1837
1838 /* Wait until V_pf_default_rule is initialized. */
1839 if (V_pf_vnet_active == 0) {
1840 CURVNET_RESTORE();
1841 continue;
1842 }
1843
1844 pf_counter_u64_periodic_main();
1845
1846 /*
1847 * Process 1/interval fraction of the state
1848 * table every run.
1849 */
1850 V_pf_purge_idx =
1851 pf_purge_expired_states(V_pf_purge_idx, pf_hashmask /
1852 (V_pf_default_rule.timeout[PFTM_INTERVAL] * 10));
1853
1854 /*
1855 * Purge other expired types every
1856 * PFTM_INTERVAL seconds.
1857 */
1858 if (V_pf_purge_idx == 0) {
1859 /*
1860 * Order is important:
1861 * - states and src nodes reference rules
1862 * - states and rules reference kifs
1863 */
1864 pf_purge_expired_fragments();
1865 pf_purge_expired_src_nodes();
1866 pf_purge_unlinked_rules();
1867 pfi_kkif_purge();
1868 }
1869 CURVNET_RESTORE();
1870 }
1871 VNET_LIST_RUNLOCK();
1872 }
1873
1874 pf_end_threads++;
1875 sx_xunlock(&pf_end_lock);
1876 kproc_exit(0);
1877 }
1878
1879 void
1880 pf_unload_vnet_purge(void)
1881 {
1882
1883 /*
1884 	 * To clean up all kifs and rules we need
1885 	 * two runs: the first one clears the reference
1886 	 * flags, pf_purge_expired_states() then doesn't
1887 	 * raise them again, and the second run frees.
1888 */
1889 pf_purge_unlinked_rules();
1890 pfi_kkif_purge();
1891
1892 /*
1893 * Now purge everything.
1894 */
1895 pf_purge_expired_states(0, pf_hashmask);
1896 pf_purge_fragments(UINT_MAX);
1897 pf_purge_expired_src_nodes();
1898
1899 /*
1900 * Now all kifs & rules should be unreferenced,
1901 * thus should be successfully freed.
1902 */
1903 pf_purge_unlinked_rules();
1904 pfi_kkif_purge();
1905 }
1906
1907 u_int32_t
1908 pf_state_expires(const struct pf_kstate *state)
1909 {
1910 u_int32_t timeout;
1911 u_int32_t start;
1912 u_int32_t end;
1913 u_int32_t states;
1914
1915 /* handle all PFTM_* > PFTM_MAX here */
1916 if (state->timeout == PFTM_PURGE)
1917 return (time_uptime);
1918 KASSERT(state->timeout != PFTM_UNLINKED,
1919 ("pf_state_expires: timeout == PFTM_UNLINKED"));
1920 KASSERT((state->timeout < PFTM_MAX),
1921 ("pf_state_expires: timeout > PFTM_MAX"));
1922 timeout = state->rule.ptr->timeout[state->timeout];
1923 if (!timeout)
1924 timeout = V_pf_default_rule.timeout[state->timeout];
1925 start = state->rule.ptr->timeout[PFTM_ADAPTIVE_START];
1926 if (start && state->rule.ptr != &V_pf_default_rule) {
1927 end = state->rule.ptr->timeout[PFTM_ADAPTIVE_END];
1928 states = counter_u64_fetch(state->rule.ptr->states_cur);
1929 } else {
1930 start = V_pf_default_rule.timeout[PFTM_ADAPTIVE_START];
1931 end = V_pf_default_rule.timeout[PFTM_ADAPTIVE_END];
1932 states = V_pf_status.states;
1933 }
1934 if (end && states > start && start < end) {
1935 if (states < end) {
1936 timeout = (u_int64_t)timeout * (end - states) /
1937 (end - start);
1938 return (state->expire + timeout);
1939 }
1940 else
1941 return (time_uptime);
1942 }
1943 return (state->expire + timeout);
1944 }
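    /*
     * Worked example (illustrative, not from the original source): with a
     * base timeout of 86400 seconds, adaptive start = 6000 and end = 12000,
     * a ruleset holding 9000 states scales the timeout by
     * (12000 - 9000) / (12000 - 6000) = 1/2, i.e. to 43200 seconds; at
     * 12000 states or more the state is reported as already expired.
     */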
1945
1946 void
1947 pf_purge_expired_src_nodes(void)
1948 {
1949 struct pf_ksrc_node_list freelist;
1950 struct pf_srchash *sh;
1951 struct pf_ksrc_node *cur, *next;
1952 int i;
1953
1954 LIST_INIT(&freelist);
1955 for (i = 0, sh = V_pf_srchash; i <= pf_srchashmask; i++, sh++) {
1956 PF_HASHROW_LOCK(sh);
1957 LIST_FOREACH_SAFE(cur, &sh->nodes, entry, next)
1958 if (cur->states == 0 && cur->expire <= time_uptime) {
1959 pf_unlink_src_node(cur);
1960 LIST_INSERT_HEAD(&freelist, cur, entry);
1961 } else if (cur->rule.ptr != NULL)
1962 cur->rule.ptr->rule_ref |= PFRULE_REFS;
1963 PF_HASHROW_UNLOCK(sh);
1964 }
1965
1966 pf_free_src_nodes(&freelist);
1967
1968 V_pf_status.src_nodes = uma_zone_get_cur(V_pf_sources_z);
1969 }
1970
1971 static void
1972 pf_src_tree_remove_state(struct pf_kstate *s)
1973 {
1974 struct pf_ksrc_node *sn;
1975 struct pf_srchash *sh;
1976 uint32_t timeout;
1977
1978 timeout = s->rule.ptr->timeout[PFTM_SRC_NODE] ?
1979 s->rule.ptr->timeout[PFTM_SRC_NODE] :
1980 V_pf_default_rule.timeout[PFTM_SRC_NODE];
1981
1982 if (s->src_node != NULL) {
1983 sn = s->src_node;
1984 sh = &V_pf_srchash[pf_hashsrc(&sn->addr, sn->af)];
1985 PF_HASHROW_LOCK(sh);
1986 if (s->src.tcp_est)
1987 --sn->conn;
1988 if (--sn->states == 0)
1989 sn->expire = time_uptime + timeout;
1990 PF_HASHROW_UNLOCK(sh);
1991 }
1992 if (s->nat_src_node != s->src_node && s->nat_src_node != NULL) {
1993 sn = s->nat_src_node;
1994 sh = &V_pf_srchash[pf_hashsrc(&sn->addr, sn->af)];
1995 PF_HASHROW_LOCK(sh);
1996 if (--sn->states == 0)
1997 sn->expire = time_uptime + timeout;
1998 PF_HASHROW_UNLOCK(sh);
1999 }
2000 s->src_node = s->nat_src_node = NULL;
2001 }
2002
2003 /*
2004  * Unlink and potentially free a state. The function may be
2005  * called with the ID hash row locked, but it always returns
2006  * unlocked, since it needs to go through key hash locking.
2007 */
2008 int
2009 pf_unlink_state(struct pf_kstate *s)
2010 {
2011 struct pf_idhash *ih = &V_pf_idhash[PF_IDHASH(s)];
2012
2013 PF_HASHROW_ASSERT(ih);
2014
2015 if (s->timeout == PFTM_UNLINKED) {
2016 /*
2017 		 * The state is already being
2018 		 * processed by pf_unlink_state()
2019 		 * in another thread.
2020 */
2021 PF_HASHROW_UNLOCK(ih);
2022 return (0); /* XXXGL: undefined actually */
2023 }
2024
2025 if (s->src.state == PF_TCPS_PROXY_DST) {
2026 /* XXX wire key the right one? */
2027 pf_send_tcp(s->rule.ptr, s->key[PF_SK_WIRE]->af,
2028 &s->key[PF_SK_WIRE]->addr[1],
2029 &s->key[PF_SK_WIRE]->addr[0],
2030 s->key[PF_SK_WIRE]->port[1],
2031 s->key[PF_SK_WIRE]->port[0],
2032 s->src.seqhi, s->src.seqlo + 1,
2033 TH_RST|TH_ACK, 0, 0, 0, 1, s->tag);
2034 }
2035
2036 LIST_REMOVE(s, entry);
2037 pf_src_tree_remove_state(s);
2038
2039 if (V_pfsync_delete_state_ptr != NULL)
2040 V_pfsync_delete_state_ptr(s);
2041
2042 STATE_DEC_COUNTERS(s);
2043
2044 s->timeout = PFTM_UNLINKED;
2045
2046 /* Ensure we remove it from the list of halfopen states, if needed. */
2047 if (s->key[PF_SK_STACK] != NULL &&
2048 s->key[PF_SK_STACK]->proto == IPPROTO_TCP)
2049 pf_set_protostate(s, PF_PEER_BOTH, TCPS_CLOSED);
2050
2051 PF_HASHROW_UNLOCK(ih);
2052
2053 pf_detach_state(s);
2054 /* pf_state_insert() initialises refs to 2 */
2055 return (pf_release_staten(s, 2));
2056 }
2057
2058 struct pf_kstate *
2059 pf_alloc_state(int flags)
2060 {
2061
2062 return (uma_zalloc(V_pf_state_z, flags | M_ZERO));
2063 }
2064
2065 void
2066 pf_free_state(struct pf_kstate *cur)
2067 {
2068
2069 KASSERT(cur->refs == 0, ("%s: %p has refs", __func__, cur));
2070 KASSERT(cur->timeout == PFTM_UNLINKED, ("%s: timeout %u", __func__,
2071 cur->timeout));
2072
2073 pf_normalize_tcp_cleanup(cur);
2074 uma_zfree(V_pf_state_z, cur);
2075 pf_counter_u64_add(&V_pf_status.fcounters[FCNT_STATE_REMOVALS], 1);
2076 }
2077
2078 /*
2079 * Called only from pf_purge_thread(), thus serialized.
2080 */
2081 static u_int
2082 pf_purge_expired_states(u_int i, int maxcheck)
2083 {
2084 struct pf_idhash *ih;
2085 struct pf_kstate *s;
2086
2087 V_pf_status.states = uma_zone_get_cur(V_pf_state_z);
2088
2089 /*
2090 * Go through hash and unlink states that expire now.
2091 */
2092 while (maxcheck > 0) {
2093 ih = &V_pf_idhash[i];
2094
2095 /* only take the lock if we expect to do work */
2096 if (!LIST_EMPTY(&ih->states)) {
2097 relock:
2098 PF_HASHROW_LOCK(ih);
2099 LIST_FOREACH(s, &ih->states, entry) {
2100 if (pf_state_expires(s) <= time_uptime) {
2101 V_pf_status.states -=
2102 pf_unlink_state(s);
2103 goto relock;
2104 }
2105 s->rule.ptr->rule_ref |= PFRULE_REFS;
2106 if (s->nat_rule.ptr != NULL)
2107 s->nat_rule.ptr->rule_ref |= PFRULE_REFS;
2108 if (s->anchor.ptr != NULL)
2109 s->anchor.ptr->rule_ref |= PFRULE_REFS;
2110 s->kif->pfik_flags |= PFI_IFLAG_REFS;
2111 if (s->rt_kif)
2112 s->rt_kif->pfik_flags |= PFI_IFLAG_REFS;
2113 }
2114 PF_HASHROW_UNLOCK(ih);
2115 }
2116
2117 /* Return when we hit end of hash. */
2118 if (++i > pf_hashmask) {
2119 V_pf_status.states = uma_zone_get_cur(V_pf_state_z);
2120 return (0);
2121 }
2122
2123 maxcheck--;
2124 }
2125
2126 V_pf_status.states = uma_zone_get_cur(V_pf_state_z);
2127
2128 return (i);
2129 }
2130
2131 static void
2132 pf_purge_unlinked_rules(void)
2133 {
2134 struct pf_krulequeue tmpq;
2135 struct pf_krule *r, *r1;
2136
2137 /*
2138 	 * If we have an overload task pending, then we'd
2139 	 * better skip purging this time. There is a tiny
2140 	 * probability that the overload task references
2141 	 * an already unlinked rule.
2142 */
2143 PF_OVERLOADQ_LOCK();
2144 if (!SLIST_EMPTY(&V_pf_overloadqueue)) {
2145 PF_OVERLOADQ_UNLOCK();
2146 return;
2147 }
2148 PF_OVERLOADQ_UNLOCK();
2149
2150 /*
2151 * Do naive mark-and-sweep garbage collecting of old rules.
2152 * Reference flag is raised by pf_purge_expired_states()
2153 * and pf_purge_expired_src_nodes().
2154 *
2155 * To avoid LOR between PF_UNLNKDRULES_LOCK/PF_RULES_WLOCK,
2156 * use a temporary queue.
2157 */
2158 TAILQ_INIT(&tmpq);
2159 PF_UNLNKDRULES_LOCK();
2160 TAILQ_FOREACH_SAFE(r, &V_pf_unlinked_rules, entries, r1) {
2161 if (!(r->rule_ref & PFRULE_REFS)) {
2162 TAILQ_REMOVE(&V_pf_unlinked_rules, r, entries);
2163 TAILQ_INSERT_TAIL(&tmpq, r, entries);
2164 } else
2165 r->rule_ref &= ~PFRULE_REFS;
2166 }
2167 PF_UNLNKDRULES_UNLOCK();
2168
2169 if (!TAILQ_EMPTY(&tmpq)) {
2170 PF_CONFIG_LOCK();
2171 PF_RULES_WLOCK();
2172 TAILQ_FOREACH_SAFE(r, &tmpq, entries, r1) {
2173 TAILQ_REMOVE(&tmpq, r, entries);
2174 pf_free_rule(r);
2175 }
2176 PF_RULES_WUNLOCK();
2177 PF_CONFIG_UNLOCK();
2178 }
2179 }
2180
2181 void
2182 pf_print_host(struct pf_addr *addr, u_int16_t p, sa_family_t af)
2183 {
2184 switch (af) {
2185 #ifdef INET
2186 case AF_INET: {
2187 u_int32_t a = ntohl(addr->addr32[0]);
2188 printf("%u.%u.%u.%u", (a>>24)&255, (a>>16)&255,
2189 (a>>8)&255, a&255);
2190 if (p) {
2191 p = ntohs(p);
2192 printf(":%u", p);
2193 }
2194 break;
2195 }
2196 #endif /* INET */
2197 #ifdef INET6
2198 case AF_INET6: {
2199 u_int16_t b;
2200 u_int8_t i, curstart, curend, maxstart, maxend;
2201 curstart = curend = maxstart = maxend = 255;
2202 for (i = 0; i < 8; i++) {
2203 if (!addr->addr16[i]) {
2204 if (curstart == 255)
2205 curstart = i;
2206 curend = i;
2207 } else {
2208 if ((curend - curstart) >
2209 (maxend - maxstart)) {
2210 maxstart = curstart;
2211 maxend = curend;
2212 }
2213 curstart = curend = 255;
2214 }
2215 }
2216 if ((curend - curstart) >
2217 (maxend - maxstart)) {
2218 maxstart = curstart;
2219 maxend = curend;
2220 }
2221 for (i = 0; i < 8; i++) {
2222 if (i >= maxstart && i <= maxend) {
2223 if (i == 0)
2224 printf(":");
2225 if (i == maxend)
2226 printf(":");
2227 } else {
2228 b = ntohs(addr->addr16[i]);
2229 printf("%x", b);
2230 if (i < 7)
2231 printf(":");
2232 }
2233 }
2234 if (p) {
2235 p = ntohs(p);
2236 printf("[%u]", p);
2237 }
2238 break;
2239 }
2240 #endif /* INET6 */
2241 }
2242 }
2243
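/*
 * The IPv6 branch above mimics the usual "::" compression: it records the
 * longest run of zero 16-bit groups and prints a bare "::" in its place.
 * A minimal sketch (address and port hypothetical):
 *
 *	pf_print_host(&a, htons(80), AF_INET6);
 *	prints: 2001:db8::1[80]    for the address 2001:db8:0:0:0:0:0:1
 */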
2244 void
2245 pf_print_state(struct pf_kstate *s)
2246 {
2247 pf_print_state_parts(s, NULL, NULL);
2248 }
2249
2250 static void
2251 pf_print_state_parts(struct pf_kstate *s,
2252 struct pf_state_key *skwp, struct pf_state_key *sksp)
2253 {
2254 struct pf_state_key *skw, *sks;
2255 u_int8_t proto, dir;
2256
2257 /* Do our best to fill these, but they're skipped if NULL */
2258 skw = skwp ? skwp : (s ? s->key[PF_SK_WIRE] : NULL);
2259 sks = sksp ? sksp : (s ? s->key[PF_SK_STACK] : NULL);
2260 proto = skw ? skw->proto : (sks ? sks->proto : 0);
2261 dir = s ? s->direction : 0;
2262
2263 switch (proto) {
2264 case IPPROTO_IPV4:
2265 printf("IPv4");
2266 break;
2267 case IPPROTO_IPV6:
2268 printf("IPv6");
2269 break;
2270 case IPPROTO_TCP:
2271 printf("TCP");
2272 break;
2273 case IPPROTO_UDP:
2274 printf("UDP");
2275 break;
2276 case IPPROTO_ICMP:
2277 printf("ICMP");
2278 break;
2279 case IPPROTO_ICMPV6:
2280 printf("ICMPv6");
2281 break;
2282 default:
2283 printf("%u", proto);
2284 break;
2285 }
2286 switch (dir) {
2287 case PF_IN:
2288 printf(" in");
2289 break;
2290 case PF_OUT:
2291 printf(" out");
2292 break;
2293 }
2294 if (skw) {
2295 printf(" wire: ");
2296 pf_print_host(&skw->addr[0], skw->port[0], skw->af);
2297 printf(" ");
2298 pf_print_host(&skw->addr[1], skw->port[1], skw->af);
2299 }
2300 if (sks) {
2301 printf(" stack: ");
2302 if (sks != skw) {
2303 pf_print_host(&sks->addr[0], sks->port[0], sks->af);
2304 printf(" ");
2305 pf_print_host(&sks->addr[1], sks->port[1], sks->af);
2306 } else
2307 printf("-");
2308 }
2309 if (s) {
2310 if (proto == IPPROTO_TCP) {
2311 printf(" [lo=%u high=%u win=%u modulator=%u",
2312 s->src.seqlo, s->src.seqhi,
2313 s->src.max_win, s->src.seqdiff);
2314 if (s->src.wscale && s->dst.wscale)
2315 printf(" wscale=%u",
2316 s->src.wscale & PF_WSCALE_MASK);
2317 printf("]");
2318 printf(" [lo=%u high=%u win=%u modulator=%u",
2319 s->dst.seqlo, s->dst.seqhi,
2320 s->dst.max_win, s->dst.seqdiff);
2321 if (s->src.wscale && s->dst.wscale)
2322 printf(" wscale=%u",
2323 s->dst.wscale & PF_WSCALE_MASK);
2324 printf("]");
2325 }
2326 printf(" %u:%u", s->src.state, s->dst.state);
2327 }
2328 }
2329
2330 void
2331 pf_print_flags(u_int8_t f)
2332 {
2333 if (f)
2334 printf(" ");
2335 if (f & TH_FIN)
2336 printf("F");
2337 if (f & TH_SYN)
2338 printf("S");
2339 if (f & TH_RST)
2340 printf("R");
2341 if (f & TH_PUSH)
2342 printf("P");
2343 if (f & TH_ACK)
2344 printf("A");
2345 if (f & TH_URG)
2346 printf("U");
2347 if (f & TH_ECE)
2348 printf("E");
2349 if (f & TH_CWR)
2350 printf("W");
2351 }
2352
2353 #define PF_SET_SKIP_STEPS(i) \
2354 do { \
2355 while (head[i] != cur) { \
2356 head[i]->skip[i].ptr = cur; \
2357 head[i] = TAILQ_NEXT(head[i], entries); \
2358 } \
2359 } while (0)
2360
2361 void
2362 pf_calc_skip_steps(struct pf_krulequeue *rules)
2363 {
2364 struct pf_krule *cur, *prev, *head[PF_SKIP_COUNT];
2365 int i;
2366
2367 cur = TAILQ_FIRST(rules);
2368 prev = cur;
2369 for (i = 0; i < PF_SKIP_COUNT; ++i)
2370 head[i] = cur;
2371 while (cur != NULL) {
2372 if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
2373 PF_SET_SKIP_STEPS(PF_SKIP_IFP);
2374 if (cur->direction != prev->direction)
2375 PF_SET_SKIP_STEPS(PF_SKIP_DIR);
2376 if (cur->af != prev->af)
2377 PF_SET_SKIP_STEPS(PF_SKIP_AF);
2378 if (cur->proto != prev->proto)
2379 PF_SET_SKIP_STEPS(PF_SKIP_PROTO);
2380 if (cur->src.neg != prev->src.neg ||
2381 pf_addr_wrap_neq(&cur->src.addr, &prev->src.addr))
2382 PF_SET_SKIP_STEPS(PF_SKIP_SRC_ADDR);
2383 if (cur->src.port[0] != prev->src.port[0] ||
2384 cur->src.port[1] != prev->src.port[1] ||
2385 cur->src.port_op != prev->src.port_op)
2386 PF_SET_SKIP_STEPS(PF_SKIP_SRC_PORT);
2387 if (cur->dst.neg != prev->dst.neg ||
2388 pf_addr_wrap_neq(&cur->dst.addr, &prev->dst.addr))
2389 PF_SET_SKIP_STEPS(PF_SKIP_DST_ADDR);
2390 if (cur->dst.port[0] != prev->dst.port[0] ||
2391 cur->dst.port[1] != prev->dst.port[1] ||
2392 cur->dst.port_op != prev->dst.port_op)
2393 PF_SET_SKIP_STEPS(PF_SKIP_DST_PORT);
2394
2395 prev = cur;
2396 cur = TAILQ_NEXT(cur, entries);
2397 }
2398 for (i = 0; i < PF_SKIP_COUNT; ++i)
2399 PF_SET_SKIP_STEPS(i);
2400 }
2401
2402 static int
2403 pf_addr_wrap_neq(struct pf_addr_wrap *aw1, struct pf_addr_wrap *aw2)
2404 {
2405 if (aw1->type != aw2->type)
2406 return (1);
2407 switch (aw1->type) {
2408 case PF_ADDR_ADDRMASK:
2409 case PF_ADDR_RANGE:
2410 if (PF_ANEQ(&aw1->v.a.addr, &aw2->v.a.addr, AF_INET6))
2411 return (1);
2412 if (PF_ANEQ(&aw1->v.a.mask, &aw2->v.a.mask, AF_INET6))
2413 return (1);
2414 return (0);
2415 case PF_ADDR_DYNIFTL:
2416 return (aw1->p.dyn->pfid_kt != aw2->p.dyn->pfid_kt);
2417 case PF_ADDR_NOROUTE:
2418 case PF_ADDR_URPFFAILED:
2419 return (0);
2420 case PF_ADDR_TABLE:
2421 return (aw1->p.tbl != aw2->p.tbl);
2422 default:
2423 printf("invalid address type: %d\n", aw1->type);
2424 return (1);
2425 }
2426 }
2427
2428 /**
2429 * Checksum updates are a little complicated because the checksum in the TCP/UDP
2430 * header isn't always a full checksum. In some cases (i.e. output) it's a
2431 * pseudo-header checksum, which is a partial checksum over src/dst IP
2432 * addresses, protocol number and length.
2433 *
2434 * That means we have the following cases:
2435 * * Input or forwarding: we don't have TSO, the checksum fields are full
2436 * checksums, we need to update the checksum whenever we change anything.
2437 * * Output (i.e. the checksum is a pseudo-header checksum):
2438 * x The field being updated is src/dst address or affects the length of
2439 * the packet. We need to update the pseudo-header checksum (note that this
2440 * checksum is not ones' complement).
2441 * x Some other field is being modified (e.g. src/dst port numbers): We
2442 * don't have to update anything.
2443 **/
2444 u_int16_t
2445 pf_cksum_fixup(u_int16_t cksum, u_int16_t old, u_int16_t new, u_int8_t udp)
2446 {
2447 u_int32_t x;
2448
2449 x = cksum + old - new;
2450 x = (x + (x >> 16)) & 0xffff;
2451
2452 /* optimise: eliminate a branch when not udp */
2453 if (udp && cksum == 0x0000)
2454 return cksum;
2455 if (udp && x == 0x0000)
2456 x = 0xffff;
2457
2458 return (u_int16_t)(x);
2459 }
2460
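/*
 * Minimal usage sketch for pf_cksum_fixup() (illustrative only; the port
 * value is hypothetical).  Folding the old/new difference into the existing
 * sum avoids re-checksumming the whole packet when a single field changes:
 *
 *	u_int16_t old = th->th_sport;
 *	th->th_sport = htons(12345);
 *	th->th_sum = pf_cksum_fixup(th->th_sum, old, th->th_sport, 0);
 *
 * The udp argument preserves the special UDP encoding where 0x0000 means
 * "no checksum", mapping a computed zero back to 0xffff.
 */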
2461 static void
2462 pf_patch_8(struct mbuf *m, u_int16_t *cksum, u_int8_t *f, u_int8_t v, bool hi,
2463 u_int8_t udp)
2464 {
2465 u_int16_t old = htons(hi ? (*f << 8) : *f);
2466 u_int16_t new = htons(hi ? ( v << 8) : v);
2467
2468 if (*f == v)
2469 return;
2470
2471 *f = v;
2472
2473 if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6))
2474 return;
2475
2476 *cksum = pf_cksum_fixup(*cksum, old, new, udp);
2477 }
2478
2479 void
2480 pf_patch_16_unaligned(struct mbuf *m, u_int16_t *cksum, void *f, u_int16_t v,
2481 bool hi, u_int8_t udp)
2482 {
2483 u_int8_t *fb = (u_int8_t *)f;
2484 u_int8_t *vb = (u_int8_t *)&v;
2485
2486 pf_patch_8(m, cksum, fb++, *vb++, hi, udp);
2487 pf_patch_8(m, cksum, fb++, *vb++, !hi, udp);
2488 }
2489
2490 void
2491 pf_patch_32_unaligned(struct mbuf *m, u_int16_t *cksum, void *f, u_int32_t v,
2492 bool hi, u_int8_t udp)
2493 {
2494 u_int8_t *fb = (u_int8_t *)f;
2495 u_int8_t *vb = (u_int8_t *)&v;
2496
2497 pf_patch_8(m, cksum, fb++, *vb++, hi, udp);
2498 pf_patch_8(m, cksum, fb++, *vb++, !hi, udp);
2499 pf_patch_8(m, cksum, fb++, *vb++, hi, udp);
2500 pf_patch_8(m, cksum, fb++, *vb++, !hi, udp);
2501 }
2502
2503 u_int16_t
2504 pf_proto_cksum_fixup(struct mbuf *m, u_int16_t cksum, u_int16_t old,
2505 u_int16_t new, u_int8_t udp)
2506 {
2507 if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6))
2508 return (cksum);
2509
2510 return (pf_cksum_fixup(cksum, old, new, udp));
2511 }
2512
2513 static void
2514 pf_change_ap(struct mbuf *m, struct pf_addr *a, u_int16_t *p, u_int16_t *ic,
2515 u_int16_t *pc, struct pf_addr *an, u_int16_t pn, u_int8_t u,
2516 sa_family_t af)
2517 {
2518 struct pf_addr ao;
2519 u_int16_t po = *p;
2520
2521 PF_ACPY(&ao, a, af);
2522 PF_ACPY(a, an, af);
2523
2524 if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6))
2525 *pc = ~*pc;
2526
2527 *p = pn;
2528
2529 switch (af) {
2530 #ifdef INET
2531 case AF_INET:
2532 *ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
2533 ao.addr16[0], an->addr16[0], 0),
2534 ao.addr16[1], an->addr16[1], 0);
2535 *p = pn;
2536
2537 *pc = pf_cksum_fixup(pf_cksum_fixup(*pc,
2538 ao.addr16[0], an->addr16[0], u),
2539 ao.addr16[1], an->addr16[1], u);
2540
2541 *pc = pf_proto_cksum_fixup(m, *pc, po, pn, u);
2542 break;
2543 #endif /* INET */
2544 #ifdef INET6
2545 case AF_INET6:
2546 *pc = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2547 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2548 pf_cksum_fixup(pf_cksum_fixup(*pc,
2549 ao.addr16[0], an->addr16[0], u),
2550 ao.addr16[1], an->addr16[1], u),
2551 ao.addr16[2], an->addr16[2], u),
2552 ao.addr16[3], an->addr16[3], u),
2553 ao.addr16[4], an->addr16[4], u),
2554 ao.addr16[5], an->addr16[5], u),
2555 ao.addr16[6], an->addr16[6], u),
2556 ao.addr16[7], an->addr16[7], u);
2557
2558 *pc = pf_proto_cksum_fixup(m, *pc, po, pn, u);
2559 break;
2560 #endif /* INET6 */
2561 }
2562
2563 if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA |
2564 CSUM_DELAY_DATA_IPV6)) {
2565 *pc = ~*pc;
2566 if (! *pc)
2567 *pc = 0xffff;
2568 }
2569 }
2570
2571 /* Changes a u_int32_t.  Uses a void * so there are no alignment restrictions. */
2572 void
2573 pf_change_a(void *a, u_int16_t *c, u_int32_t an, u_int8_t u)
2574 {
2575 u_int32_t ao;
2576
2577 memcpy(&ao, a, sizeof(ao));
2578 memcpy(a, &an, sizeof(u_int32_t));
2579 *c = pf_cksum_fixup(pf_cksum_fixup(*c, ao / 65536, an / 65536, u),
2580 ao % 65536, an % 65536, u);
2581 }
2582
2583 void
2584 pf_change_proto_a(struct mbuf *m, void *a, u_int16_t *c, u_int32_t an, u_int8_t udp)
2585 {
2586 u_int32_t ao;
2587
2588 memcpy(&ao, a, sizeof(ao));
2589 memcpy(a, &an, sizeof(u_int32_t));
2590
2591 *c = pf_proto_cksum_fixup(m,
2592 pf_proto_cksum_fixup(m, *c, ao / 65536, an / 65536, udp),
2593 ao % 65536, an % 65536, udp);
2594 }
2595
2596 #ifdef INET6
2597 static void
2598 pf_change_a6(struct pf_addr *a, u_int16_t *c, struct pf_addr *an, u_int8_t u)
2599 {
2600 struct pf_addr ao;
2601
2602 PF_ACPY(&ao, a, AF_INET6);
2603 PF_ACPY(a, an, AF_INET6);
2604
2605 *c = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2606 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2607 pf_cksum_fixup(pf_cksum_fixup(*c,
2608 ao.addr16[0], an->addr16[0], u),
2609 ao.addr16[1], an->addr16[1], u),
2610 ao.addr16[2], an->addr16[2], u),
2611 ao.addr16[3], an->addr16[3], u),
2612 ao.addr16[4], an->addr16[4], u),
2613 ao.addr16[5], an->addr16[5], u),
2614 ao.addr16[6], an->addr16[6], u),
2615 ao.addr16[7], an->addr16[7], u);
2616 }
2617 #endif /* INET6 */
2618
2619 static void
2620 pf_change_icmp(struct pf_addr *ia, u_int16_t *ip, struct pf_addr *oa,
2621 struct pf_addr *na, u_int16_t np, u_int16_t *pc, u_int16_t *h2c,
2622 u_int16_t *ic, u_int16_t *hc, u_int8_t u, sa_family_t af)
2623 {
2624 struct pf_addr oia, ooa;
2625
2626 PF_ACPY(&oia, ia, af);
2627 if (oa)
2628 PF_ACPY(&ooa, oa, af);
2629
2630 /* Change inner protocol port, fix inner protocol checksum. */
2631 if (ip != NULL) {
2632 u_int16_t oip = *ip;
2633 u_int32_t opc;
2634
2635 if (pc != NULL)
2636 opc = *pc;
2637 *ip = np;
2638 if (pc != NULL)
2639 *pc = pf_cksum_fixup(*pc, oip, *ip, u);
2640 *ic = pf_cksum_fixup(*ic, oip, *ip, 0);
2641 if (pc != NULL)
2642 *ic = pf_cksum_fixup(*ic, opc, *pc, 0);
2643 }
2644 /* Change inner ip address, fix inner ip and icmp checksums. */
2645 PF_ACPY(ia, na, af);
2646 switch (af) {
2647 #ifdef INET
2648 case AF_INET: {
2649 u_int32_t oh2c = *h2c;
2650
2651 *h2c = pf_cksum_fixup(pf_cksum_fixup(*h2c,
2652 oia.addr16[0], ia->addr16[0], 0),
2653 oia.addr16[1], ia->addr16[1], 0);
2654 *ic = pf_cksum_fixup(pf_cksum_fixup(*ic,
2655 oia.addr16[0], ia->addr16[0], 0),
2656 oia.addr16[1], ia->addr16[1], 0);
2657 *ic = pf_cksum_fixup(*ic, oh2c, *h2c, 0);
2658 break;
2659 }
2660 #endif /* INET */
2661 #ifdef INET6
2662 case AF_INET6:
2663 *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2664 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2665 pf_cksum_fixup(pf_cksum_fixup(*ic,
2666 oia.addr16[0], ia->addr16[0], u),
2667 oia.addr16[1], ia->addr16[1], u),
2668 oia.addr16[2], ia->addr16[2], u),
2669 oia.addr16[3], ia->addr16[3], u),
2670 oia.addr16[4], ia->addr16[4], u),
2671 oia.addr16[5], ia->addr16[5], u),
2672 oia.addr16[6], ia->addr16[6], u),
2673 oia.addr16[7], ia->addr16[7], u);
2674 break;
2675 #endif /* INET6 */
2676 }
2677 /* Outer ip address, fix outer ip or icmpv6 checksum, if necessary. */
2678 if (oa) {
2679 PF_ACPY(oa, na, af);
2680 switch (af) {
2681 #ifdef INET
2682 case AF_INET:
2683 *hc = pf_cksum_fixup(pf_cksum_fixup(*hc,
2684 ooa.addr16[0], oa->addr16[0], 0),
2685 ooa.addr16[1], oa->addr16[1], 0);
2686 break;
2687 #endif /* INET */
2688 #ifdef INET6
2689 case AF_INET6:
2690 *ic = pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2691 pf_cksum_fixup(pf_cksum_fixup(pf_cksum_fixup(
2692 pf_cksum_fixup(pf_cksum_fixup(*ic,
2693 ooa.addr16[0], oa->addr16[0], u),
2694 ooa.addr16[1], oa->addr16[1], u),
2695 ooa.addr16[2], oa->addr16[2], u),
2696 ooa.addr16[3], oa->addr16[3], u),
2697 ooa.addr16[4], oa->addr16[4], u),
2698 ooa.addr16[5], oa->addr16[5], u),
2699 ooa.addr16[6], oa->addr16[6], u),
2700 ooa.addr16[7], oa->addr16[7], u);
2701 break;
2702 #endif /* INET6 */
2703 }
2704 }
2705 }
2706
2707 /*
2708 * Need to modulate the sequence numbers in the TCP SACK option
2709 * (credits to Krzysztof Pfaff for report and patch)
2710 */
2711 static int
2712 pf_modulate_sack(struct mbuf *m, int off, struct pf_pdesc *pd,
2713 struct tcphdr *th, struct pf_state_peer *dst)
2714 {
2715 int hlen = (th->th_off << 2) - sizeof(*th), thoptlen = hlen;
2716 u_int8_t opts[TCP_MAXOLEN], *opt = opts;
2717 int copyback = 0, i, olen;
2718 struct sackblk sack;
2719
2720 #define TCPOLEN_SACKLEN (TCPOLEN_SACK + 2)
2721 if (hlen < TCPOLEN_SACKLEN ||
2722 !pf_pull_hdr(m, off + sizeof(*th), opts, hlen, NULL, NULL, pd->af))
2723 return 0;
2724
2725 while (hlen >= TCPOLEN_SACKLEN) {
2726 size_t startoff = opt - opts;
2727 olen = opt[1];
2728 switch (*opt) {
2729 case TCPOPT_EOL: /* FALLTHROUGH */
2730 case TCPOPT_NOP:
2731 opt++;
2732 hlen--;
2733 break;
2734 case TCPOPT_SACK:
2735 if (olen > hlen)
2736 olen = hlen;
2737 if (olen >= TCPOLEN_SACKLEN) {
2738 for (i = 2; i + TCPOLEN_SACK <= olen;
2739 i += TCPOLEN_SACK) {
2740 memcpy(&sack, &opt[i], sizeof(sack));
2741 pf_patch_32_unaligned(m,
2742 &th->th_sum, &sack.start,
2743 htonl(ntohl(sack.start) - dst->seqdiff),
2744 PF_ALGNMNT(startoff),
2745 0);
2746 pf_patch_32_unaligned(m, &th->th_sum,
2747 &sack.end,
2748 htonl(ntohl(sack.end) - dst->seqdiff),
2749 PF_ALGNMNT(startoff),
2750 0);
2751 memcpy(&opt[i], &sack, sizeof(sack));
2752 }
2753 copyback = 1;
2754 }
2755 /* FALLTHROUGH */
2756 default:
2757 if (olen < 2)
2758 olen = 2;
2759 hlen -= olen;
2760 opt += olen;
2761 }
2762 }
2763
2764 if (copyback)
2765 m_copyback(m, off + sizeof(*th), thoptlen, (caddr_t)opts);
2766 return (copyback);
2767 }
2768
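/*
 * Illustrative example of the rebasing above (values hypothetical): if this
 * state modulates the peer's sequence numbers by seqdiff = 1000, a returning
 * SACK block of 5000-6000 must be rewritten to 4000-5000 so it refers to the
 * sequence space the other end actually used:
 *
 *	sack.start = htonl(ntohl(sack.start) - dst->seqdiff);
 *	sack.end   = htonl(ntohl(sack.end)   - dst->seqdiff);
 *
 * pf_patch_32_unaligned() performs the same edit while keeping th_sum valid.
 */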
2769 struct mbuf *
2770 pf_build_tcp(const struct pf_krule *r, sa_family_t af,
2771 const struct pf_addr *saddr, const struct pf_addr *daddr,
2772 u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
2773 u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
2774 u_int16_t rtag)
2775 {
2776 struct mbuf *m;
2777 int len, tlen;
2778 #ifdef INET
2779 struct ip *h = NULL;
2780 #endif /* INET */
2781 #ifdef INET6
2782 struct ip6_hdr *h6 = NULL;
2783 #endif /* INET6 */
2784 struct tcphdr *th;
2785 char *opt;
2786 struct pf_mtag *pf_mtag;
2787
2788 len = 0;
2789 th = NULL;
2790
2791 /* maximum segment size tcp option */
2792 tlen = sizeof(struct tcphdr);
2793 if (mss)
2794 tlen += 4;
2795
2796 switch (af) {
2797 #ifdef INET
2798 case AF_INET:
2799 len = sizeof(struct ip) + tlen;
2800 break;
2801 #endif /* INET */
2802 #ifdef INET6
2803 case AF_INET6:
2804 len = sizeof(struct ip6_hdr) + tlen;
2805 break;
2806 #endif /* INET6 */
2807 default:
2808 panic("%s: unsupported af %d", __func__, af);
2809 }
2810
2811 m = m_gethdr(M_NOWAIT, MT_DATA);
2812 if (m == NULL)
2813 return (NULL);
2814
2815 #ifdef MAC
2816 mac_netinet_firewall_send(m);
2817 #endif
2818 if ((pf_mtag = pf_get_mtag(m)) == NULL) {
2819 m_freem(m);
2820 return (NULL);
2821 }
2822 if (tag)
2823 m->m_flags |= M_SKIP_FIREWALL;
2824 pf_mtag->tag = rtag;
2825
2826 if (r != NULL && r->rtableid >= 0)
2827 M_SETFIB(m, r->rtableid);
2828
2829 #ifdef ALTQ
2830 if (r != NULL && r->qid) {
2831 pf_mtag->qid = r->qid;
2832
2833 /* add hints for ecn */
2834 pf_mtag->hdr = mtod(m, struct ip *);
2835 }
2836 #endif /* ALTQ */
2837 m->m_data += max_linkhdr;
2838 m->m_pkthdr.len = m->m_len = len;
2839 	/* The rest of the stack assumes an rcvif, so provide one.
2840 	 * This is a locally generated packet, so ... close enough. */
2841 m->m_pkthdr.rcvif = V_loif;
2842 bzero(m->m_data, len);
2843 switch (af) {
2844 #ifdef INET
2845 case AF_INET:
2846 h = mtod(m, struct ip *);
2847
2848 /* IP header fields included in the TCP checksum */
2849 h->ip_p = IPPROTO_TCP;
2850 h->ip_len = htons(tlen);
2851 h->ip_src.s_addr = saddr->v4.s_addr;
2852 h->ip_dst.s_addr = daddr->v4.s_addr;
2853
2854 th = (struct tcphdr *)((caddr_t)h + sizeof(struct ip));
2855 break;
2856 #endif /* INET */
2857 #ifdef INET6
2858 case AF_INET6:
2859 h6 = mtod(m, struct ip6_hdr *);
2860
2861 /* IP header fields included in the TCP checksum */
2862 h6->ip6_nxt = IPPROTO_TCP;
2863 h6->ip6_plen = htons(tlen);
2864 memcpy(&h6->ip6_src, &saddr->v6, sizeof(struct in6_addr));
2865 memcpy(&h6->ip6_dst, &daddr->v6, sizeof(struct in6_addr));
2866
2867 th = (struct tcphdr *)((caddr_t)h6 + sizeof(struct ip6_hdr));
2868 break;
2869 #endif /* INET6 */
2870 }
2871
2872 /* TCP header */
2873 th->th_sport = sport;
2874 th->th_dport = dport;
2875 th->th_seq = htonl(seq);
2876 th->th_ack = htonl(ack);
2877 th->th_off = tlen >> 2;
2878 th->th_flags = flags;
2879 th->th_win = htons(win);
2880
2881 if (mss) {
2882 opt = (char *)(th + 1);
2883 opt[0] = TCPOPT_MAXSEG;
2884 opt[1] = 4;
2885 HTONS(mss);
2886 bcopy((caddr_t)&mss, (caddr_t)(opt + 2), 2);
2887 }
2888
2889 switch (af) {
2890 #ifdef INET
2891 case AF_INET:
2892 /* TCP checksum */
2893 th->th_sum = in_cksum(m, len);
2894
2895 /* Finish the IP header */
2896 h->ip_v = 4;
2897 h->ip_hl = sizeof(*h) >> 2;
2898 h->ip_tos = IPTOS_LOWDELAY;
2899 h->ip_off = htons(V_path_mtu_discovery ? IP_DF : 0);
2900 h->ip_len = htons(len);
2901 h->ip_ttl = ttl ? ttl : V_ip_defttl;
2902 h->ip_sum = 0;
2903 break;
2904 #endif /* INET */
2905 #ifdef INET6
2906 case AF_INET6:
2907 /* TCP checksum */
2908 th->th_sum = in6_cksum(m, IPPROTO_TCP,
2909 sizeof(struct ip6_hdr), tlen);
2910
2911 h6->ip6_vfc |= IPV6_VERSION;
2912 h6->ip6_hlim = IPV6_DEFHLIM;
2913 break;
2914 #endif /* INET6 */
2915 }
2916
2917 return (m);
2918 }
2919
2920 void
2921 pf_send_tcp(const struct pf_krule *r, sa_family_t af,
2922 const struct pf_addr *saddr, const struct pf_addr *daddr,
2923 u_int16_t sport, u_int16_t dport, u_int32_t seq, u_int32_t ack,
2924 u_int8_t flags, u_int16_t win, u_int16_t mss, u_int8_t ttl, int tag,
2925 u_int16_t rtag)
2926 {
2927 struct pf_send_entry *pfse;
2928 struct mbuf *m;
2929
2930 m = pf_build_tcp(r, af, saddr, daddr, sport, dport, seq, ack, flags,
2931 win, mss, ttl, tag, rtag);
2932 if (m == NULL)
2933 return;
2934
2935 /* Allocate outgoing queue entry, mbuf and mbuf tag. */
2936 pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
2937 if (pfse == NULL) {
2938 m_freem(m);
2939 return;
2940 }
2941
2942 switch (af) {
2943 #ifdef INET
2944 case AF_INET:
2945 pfse->pfse_type = PFSE_IP;
2946 break;
2947 #endif /* INET */
2948 #ifdef INET6
2949 case AF_INET6:
2950 pfse->pfse_type = PFSE_IP6;
2951 break;
2952 #endif /* INET6 */
2953 }
2954
2955 pfse->pfse_m = m;
2956 pf_send(pfse);
2957 }
2958
2959 static void
2960 pf_return(struct pf_krule *r, struct pf_krule *nr, struct pf_pdesc *pd,
2961 struct pf_state_key *sk, int off, struct mbuf *m, struct tcphdr *th,
2962 struct pfi_kkif *kif, u_int16_t bproto_sum, u_int16_t bip_sum, int hdrlen,
2963 u_short *reason)
2964 {
2965 struct pf_addr * const saddr = pd->src;
2966 struct pf_addr * const daddr = pd->dst;
2967 sa_family_t af = pd->af;
2968
2969 /* undo NAT changes, if they have taken place */
2970 if (nr != NULL) {
2971 PF_ACPY(saddr, &sk->addr[pd->sidx], af);
2972 PF_ACPY(daddr, &sk->addr[pd->didx], af);
2973 if (pd->sport)
2974 *pd->sport = sk->port[pd->sidx];
2975 if (pd->dport)
2976 *pd->dport = sk->port[pd->didx];
2977 if (pd->proto_sum)
2978 *pd->proto_sum = bproto_sum;
2979 if (pd->ip_sum)
2980 *pd->ip_sum = bip_sum;
2981 m_copyback(m, off, hdrlen, pd->hdr.any);
2982 }
2983 if (pd->proto == IPPROTO_TCP &&
2984 ((r->rule_flag & PFRULE_RETURNRST) ||
2985 (r->rule_flag & PFRULE_RETURN)) &&
2986 !(th->th_flags & TH_RST)) {
2987 u_int32_t ack = ntohl(th->th_seq) + pd->p_len;
2988 int len = 0;
2989 #ifdef INET
2990 struct ip *h4;
2991 #endif
2992 #ifdef INET6
2993 struct ip6_hdr *h6;
2994 #endif
2995
2996 switch (af) {
2997 #ifdef INET
2998 case AF_INET:
2999 h4 = mtod(m, struct ip *);
3000 len = ntohs(h4->ip_len) - off;
3001 break;
3002 #endif
3003 #ifdef INET6
3004 case AF_INET6:
3005 h6 = mtod(m, struct ip6_hdr *);
3006 len = ntohs(h6->ip6_plen) - (off - sizeof(*h6));
3007 break;
3008 #endif
3009 }
3010
3011 if (pf_check_proto_cksum(m, off, len, IPPROTO_TCP, af))
3012 REASON_SET(reason, PFRES_PROTCKSUM);
3013 else {
3014 if (th->th_flags & TH_SYN)
3015 ack++;
3016 if (th->th_flags & TH_FIN)
3017 ack++;
3018 pf_send_tcp(r, af, pd->dst,
3019 pd->src, th->th_dport, th->th_sport,
3020 ntohl(th->th_ack), ack, TH_RST|TH_ACK, 0, 0,
3021 r->return_ttl, 1, 0);
3022 }
3023 } else if (pd->proto != IPPROTO_ICMP && af == AF_INET &&
3024 r->return_icmp)
3025 pf_send_icmp(m, r->return_icmp >> 8,
3026 r->return_icmp & 255, af, r);
3027 else if (pd->proto != IPPROTO_ICMPV6 && af == AF_INET6 &&
3028 r->return_icmp6)
3029 pf_send_icmp(m, r->return_icmp6 >> 8,
3030 r->return_icmp6 & 255, af, r);
3031 }
3032
3033 static int
3034 pf_match_ieee8021q_pcp(u_int8_t prio, struct mbuf *m)
3035 {
3036 struct m_tag *mtag;
3037 u_int8_t mpcp;
3038
3039 mtag = m_tag_locate(m, MTAG_8021Q, MTAG_8021Q_PCP_IN, NULL);
3040 if (mtag == NULL)
3041 return (0);
3042
3043 if (prio == PF_PRIO_ZERO)
3044 prio = 0;
3045
3046 mpcp = *(uint8_t *)(mtag + 1);
3047
3048 return (mpcp == prio);
3049 }
3050
3051 static int
3052 pf_icmp_to_bandlim(uint8_t type)
3053 {
3054 switch (type) {
3055 case ICMP_ECHO:
3056 case ICMP_ECHOREPLY:
3057 return (BANDLIM_ICMP_ECHO);
3058 case ICMP_TSTAMP:
3059 case ICMP_TSTAMPREPLY:
3060 return (BANDLIM_ICMP_TSTAMP);
3061 case ICMP_UNREACH:
3062 default:
3063 return (BANDLIM_ICMP_UNREACH);
3064 }
3065 }
3066
3067 static void
3068 pf_send_icmp(struct mbuf *m, u_int8_t type, u_int8_t code, sa_family_t af,
3069 struct pf_krule *r)
3070 {
3071 struct pf_send_entry *pfse;
3072 struct mbuf *m0;
3073 struct pf_mtag *pf_mtag;
3074
3075 /* ICMP packet rate limitation. */
3076 #ifdef INET6
3077 if (af == AF_INET6) {
3078 if (icmp6_ratelimit(NULL, type, code))
3079 return;
3080 }
3081 #endif
3082 #ifdef INET
3083 if (af == AF_INET) {
3084 if (badport_bandlim(pf_icmp_to_bandlim(type)) != 0)
3085 return;
3086 }
3087 #endif
3088
3089 /* Allocate outgoing queue entry, mbuf and mbuf tag. */
3090 pfse = malloc(sizeof(*pfse), M_PFTEMP, M_NOWAIT);
3091 if (pfse == NULL)
3092 return;
3093
3094 if ((m0 = m_copypacket(m, M_NOWAIT)) == NULL) {
3095 free(pfse, M_PFTEMP);
3096 return;
3097 }
3098
3099 if ((pf_mtag = pf_get_mtag(m0)) == NULL) {
3100 free(pfse, M_PFTEMP);
3101 return;
3102 }
3103 /* XXX: revisit */
3104 m0->m_flags |= M_SKIP_FIREWALL;
3105
3106 if (r->rtableid >= 0)
3107 M_SETFIB(m0, r->rtableid);
3108
3109 #ifdef ALTQ
3110 if (r->qid) {
3111 pf_mtag->qid = r->qid;
3112 /* add hints for ecn */
3113 pf_mtag->hdr = mtod(m0, struct ip *);
3114 }
3115 #endif /* ALTQ */
3116
3117 switch (af) {
3118 #ifdef INET
3119 case AF_INET:
3120 pfse->pfse_type = PFSE_ICMP;
3121 break;
3122 #endif /* INET */
3123 #ifdef INET6
3124 case AF_INET6:
3125 pfse->pfse_type = PFSE_ICMP6;
3126 break;
3127 #endif /* INET6 */
3128 }
3129 pfse->pfse_m = m0;
3130 pfse->icmpopts.type = type;
3131 pfse->icmpopts.code = code;
3132 pf_send(pfse);
3133 }
3134
3135 /*
3136 * Return 1 if the addresses a and b match (with mask m), otherwise return 0.
3137 * If n is 0, they match if they are equal. If n is != 0, they match if they
3138 * are different.
3139 */
3140 int
3141 pf_match_addr(u_int8_t n, struct pf_addr *a, struct pf_addr *m,
3142 struct pf_addr *b, sa_family_t af)
3143 {
3144 int match = 0;
3145
3146 switch (af) {
3147 #ifdef INET
3148 case AF_INET:
3149 if ((a->addr32[0] & m->addr32[0]) ==
3150 (b->addr32[0] & m->addr32[0]))
3151 match++;
3152 break;
3153 #endif /* INET */
3154 #ifdef INET6
3155 case AF_INET6:
3156 if (((a->addr32[0] & m->addr32[0]) ==
3157 (b->addr32[0] & m->addr32[0])) &&
3158 ((a->addr32[1] & m->addr32[1]) ==
3159 (b->addr32[1] & m->addr32[1])) &&
3160 ((a->addr32[2] & m->addr32[2]) ==
3161 (b->addr32[2] & m->addr32[2])) &&
3162 ((a->addr32[3] & m->addr32[3]) ==
3163 (b->addr32[3] & m->addr32[3])))
3164 match++;
3165 break;
3166 #endif /* INET6 */
3167 }
3168 if (match) {
3169 if (n)
3170 return (0);
3171 else
3172 return (1);
3173 } else {
3174 if (n)
3175 return (1);
3176 else
3177 return (0);
3178 }
3179 }
3180
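/*
 * Illustrative example (addresses hypothetical): 192.0.2.55 matches the
 * network 192.0.2.0 under a /24 mask, and the n argument simply inverts
 * the verdict for negated ("!") rule addresses:
 *
 *	(a->addr32[0] & m->addr32[0]) == (b->addr32[0] & m->addr32[0])
 *	0xc0000237    & 0xffffff00    == 0xc0000200    & 0xffffff00
 *	          0xc0000200          ==          0xc0000200
 */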
3181 /*
3182 * Return 1 if b <= a <= e, otherwise return 0.
3183 */
3184 int
3185 pf_match_addr_range(struct pf_addr *b, struct pf_addr *e,
3186 struct pf_addr *a, sa_family_t af)
3187 {
3188 switch (af) {
3189 #ifdef INET
3190 case AF_INET:
3191 if ((ntohl(a->addr32[0]) < ntohl(b->addr32[0])) ||
3192 (ntohl(a->addr32[0]) > ntohl(e->addr32[0])))
3193 return (0);
3194 break;
3195 #endif /* INET */
3196 #ifdef INET6
3197 case AF_INET6: {
3198 int i;
3199
3200 /* check a >= b */
3201 for (i = 0; i < 4; ++i)
3202 if (ntohl(a->addr32[i]) > ntohl(b->addr32[i]))
3203 break;
3204 else if (ntohl(a->addr32[i]) < ntohl(b->addr32[i]))
3205 return (0);
3206 /* check a <= e */
3207 for (i = 0; i < 4; ++i)
3208 if (ntohl(a->addr32[i]) < ntohl(e->addr32[i]))
3209 break;
3210 else if (ntohl(a->addr32[i]) > ntohl(e->addr32[i]))
3211 return (0);
3212 break;
3213 }
3214 #endif /* INET6 */
3215 }
3216 return (1);
3217 }
3218
3219 static int
3220 pf_match(u_int8_t op, u_int32_t a1, u_int32_t a2, u_int32_t p)
3221 {
3222 switch (op) {
3223 case PF_OP_IRG:
3224 return ((p > a1) && (p < a2));
3225 case PF_OP_XRG:
3226 return ((p < a1) || (p > a2));
3227 case PF_OP_RRG:
3228 return ((p >= a1) && (p <= a2));
3229 case PF_OP_EQ:
3230 return (p == a1);
3231 case PF_OP_NE:
3232 return (p != a1);
3233 case PF_OP_LT:
3234 return (p < a1);
3235 case PF_OP_LE:
3236 return (p <= a1);
3237 case PF_OP_GT:
3238 return (p > a1);
3239 case PF_OP_GE:
3240 return (p >= a1);
3241 }
3242 return (0); /* never reached */
3243 }
3244
3245 int
3246 pf_match_port(u_int8_t op, u_int16_t a1, u_int16_t a2, u_int16_t p)
3247 {
3248 NTOHS(a1);
3249 NTOHS(a2);
3250 NTOHS(p);
3251 return (pf_match(op, a1, a2, p));
3252 }
3253
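/*
 * Usage sketch (port values hypothetical): a rule like "port 1000:2000"
 * compiles to PF_OP_RRG, an inclusive range check.  pf_match_port()
 * converts its arguments to host byte order before comparing:
 *
 *	if (pf_match_port(PF_OP_RRG, htons(1000), htons(2000), th->th_sport))
 *		the port lies within 1000-2000 inclusive
 *
 * PF_OP_IRG is the exclusive range operator ("><") and PF_OP_XRG the
 * "except range" operator ("<>").
 */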
3254 static int
3255 pf_match_uid(u_int8_t op, uid_t a1, uid_t a2, uid_t u)
3256 {
3257 if (u == UID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
3258 return (0);
3259 return (pf_match(op, a1, a2, u));
3260 }
3261
3262 static int
3263 pf_match_gid(u_int8_t op, gid_t a1, gid_t a2, gid_t g)
3264 {
3265 if (g == GID_MAX && op != PF_OP_EQ && op != PF_OP_NE)
3266 return (0);
3267 return (pf_match(op, a1, a2, g));
3268 }
3269
3270 int
3271 pf_match_tag(struct mbuf *m, struct pf_krule *r, int *tag, int mtag)
3272 {
3273 if (*tag == -1)
3274 *tag = mtag;
3275
3276 return ((!r->match_tag_not && r->match_tag == *tag) ||
3277 (r->match_tag_not && r->match_tag != *tag));
3278 }
3279
3280 int
3281 pf_tag_packet(struct mbuf *m, struct pf_pdesc *pd, int tag)
3282 {
3283
3284 KASSERT(tag > 0, ("%s: tag %d", __func__, tag));
3285
3286 if (pd->pf_mtag == NULL && ((pd->pf_mtag = pf_get_mtag(m)) == NULL))
3287 return (ENOMEM);
3288
3289 pd->pf_mtag->tag = tag;
3290
3291 return (0);
3292 }
3293
3294 #define PF_ANCHOR_STACKSIZE 32
3295 struct pf_kanchor_stackframe {
3296 struct pf_kruleset *rs;
3297 struct pf_krule *r; /* XXX: + match bit */
3298 struct pf_kanchor *child;
3299 };
3300
3301 /*
3302 * XXX: We rely on malloc(9) returning pointer aligned addresses.
3303 */
3304 #define PF_ANCHORSTACK_MATCH 0x00000001
3305 #define PF_ANCHORSTACK_MASK (PF_ANCHORSTACK_MATCH)
3306
3307 #define PF_ANCHOR_MATCH(f) ((uintptr_t)(f)->r & PF_ANCHORSTACK_MATCH)
3308 #define PF_ANCHOR_RULE(f) (struct pf_krule *) \
3309 ((uintptr_t)(f)->r & ~PF_ANCHORSTACK_MASK)
3310 #define PF_ANCHOR_SET_MATCH(f) do { (f)->r = (void *) \
3311 ((uintptr_t)(f)->r | PF_ANCHORSTACK_MATCH); \
3312 } while (0)
3313
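/*
 * Sketch of the pointer tagging used above (illustrative only).  Since rule
 * pointers are at least pointer-aligned, bit 0 is free to carry the
 * per-frame "match" flag:
 *
 *	f->r = (struct pf_krule *)((uintptr_t)r | PF_ANCHORSTACK_MATCH);
 *	r = PF_ANCHOR_RULE(f);		masks the flag back off
 *	if (PF_ANCHOR_MATCH(f))		tests it without disturbing the pointer
 */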
3314 void
3315 pf_step_into_anchor(struct pf_kanchor_stackframe *stack, int *depth,
3316 struct pf_kruleset **rs, int n, struct pf_krule **r, struct pf_krule **a,
3317 int *match)
3318 {
3319 struct pf_kanchor_stackframe *f;
3320
3321 PF_RULES_RASSERT();
3322
3323 if (match)
3324 *match = 0;
3325 if (*depth >= PF_ANCHOR_STACKSIZE) {
3326 printf("%s: anchor stack overflow on %s\n",
3327 __func__, (*r)->anchor->name);
3328 *r = TAILQ_NEXT(*r, entries);
3329 return;
3330 } else if (*depth == 0 && a != NULL)
3331 *a = *r;
3332 f = stack + (*depth)++;
3333 f->rs = *rs;
3334 f->r = *r;
3335 if ((*r)->anchor_wildcard) {
3336 struct pf_kanchor_node *parent = &(*r)->anchor->children;
3337
3338 if ((f->child = RB_MIN(pf_kanchor_node, parent)) == NULL) {
3339 *r = NULL;
3340 return;
3341 }
3342 *rs = &f->child->ruleset;
3343 } else {
3344 f->child = NULL;
3345 *rs = &(*r)->anchor->ruleset;
3346 }
3347 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
3348 }
3349
3350 int
3351 pf_step_out_of_anchor(struct pf_kanchor_stackframe *stack, int *depth,
3352 struct pf_kruleset **rs, int n, struct pf_krule **r, struct pf_krule **a,
3353 int *match)
3354 {
3355 struct pf_kanchor_stackframe *f;
3356 struct pf_krule *fr;
3357 int quick = 0;
3358
3359 PF_RULES_RASSERT();
3360
3361 do {
3362 if (*depth <= 0)
3363 break;
3364 f = stack + *depth - 1;
3365 fr = PF_ANCHOR_RULE(f);
3366 if (f->child != NULL) {
3367 /*
3368 * This block traverses through
3369 * a wildcard anchor.
3370 */
3371 if (match != NULL && *match) {
3372 /*
3373 * If any of "*" matched, then
3374 * "foo/ *" matched, mark frame
3375 * appropriately.
3376 */
3377 PF_ANCHOR_SET_MATCH(f);
3378 *match = 0;
3379 }
3380 f->child = RB_NEXT(pf_kanchor_node,
3381 &fr->anchor->children, f->child);
3382 if (f->child != NULL) {
3383 *rs = &f->child->ruleset;
3384 *r = TAILQ_FIRST((*rs)->rules[n].active.ptr);
3385 if (*r == NULL)
3386 continue;
3387 else
3388 break;
3389 }
3390 }
3391 (*depth)--;
3392 if (*depth == 0 && a != NULL)
3393 *a = NULL;
3394 *rs = f->rs;
3395 if (PF_ANCHOR_MATCH(f) || (match != NULL && *match))
3396 quick = fr->quick;
3397 *r = TAILQ_NEXT(fr, entries);
3398 } while (*r == NULL);
3399
3400 return (quick);
3401 }
3402
3403 struct pf_keth_anchor_stackframe {
3404 struct pf_keth_ruleset *rs;
3405 struct pf_keth_rule *r; /* XXX: + match bit */
3406 struct pf_keth_anchor *child;
3407 };
3408
3409 #define PF_ETH_ANCHOR_MATCH(f) ((uintptr_t)(f)->r & PF_ANCHORSTACK_MATCH)
3410 #define PF_ETH_ANCHOR_RULE(f) (struct pf_keth_rule *) \
3411 ((uintptr_t)(f)->r & ~PF_ANCHORSTACK_MASK)
3412 #define PF_ETH_ANCHOR_SET_MATCH(f) do { (f)->r = (void *) \
3413 ((uintptr_t)(f)->r | PF_ANCHORSTACK_MATCH); \
3414 } while (0)
3415
3416 void
3417 pf_step_into_keth_anchor(struct pf_keth_anchor_stackframe *stack, int *depth,
3418 struct pf_keth_ruleset **rs, struct pf_keth_rule **r,
3419 struct pf_keth_rule **a, int *match)
3420 {
3421 struct pf_keth_anchor_stackframe *f;
3422
3423 NET_EPOCH_ASSERT();
3424
3425 if (match)
3426 *match = 0;
3427 if (*depth >= PF_ANCHOR_STACKSIZE) {
3428 printf("%s: anchor stack overflow on %s\n",
3429 __func__, (*r)->anchor->name);
3430 *r = TAILQ_NEXT(*r, entries);
3431 return;
3432 } else if (*depth == 0 && a != NULL)
3433 *a = *r;
3434 f = stack + (*depth)++;
3435 f->rs = *rs;
3436 f->r = *r;
3437 if ((*r)->anchor_wildcard) {
3438 struct pf_keth_anchor_node *parent = &(*r)->anchor->children;
3439
3440 if ((f->child = RB_MIN(pf_keth_anchor_node, parent)) == NULL) {
3441 *r = NULL;
3442 return;
3443 }
3444 *rs = &f->child->ruleset;
3445 } else {
3446 f->child = NULL;
3447 *rs = &(*r)->anchor->ruleset;
3448 }
3449 *r = TAILQ_FIRST((*rs)->active.rules);
3450 }
3451
3452 int
3453 pf_step_out_of_keth_anchor(struct pf_keth_anchor_stackframe *stack, int *depth,
3454 struct pf_keth_ruleset **rs, struct pf_keth_rule **r,
3455 struct pf_keth_rule **a, int *match)
3456 {
3457 struct pf_keth_anchor_stackframe *f;
3458 struct pf_keth_rule *fr;
3459 int quick = 0;
3460
3461 NET_EPOCH_ASSERT();
3462
3463 do {
3464 if (*depth <= 0)
3465 break;
3466 f = stack + *depth - 1;
3467 fr = PF_ETH_ANCHOR_RULE(f);
3468 if (f->child != NULL) {
3469 /*
3470 * This block traverses through
3471 * a wildcard anchor.
3472 */
3473 if (match != NULL && *match) {
3474 /*
3475 * If any of "*" matched, then
3476 * "foo/ *" matched, mark frame
3477 * appropriately.
3478 */
3479 PF_ETH_ANCHOR_SET_MATCH(f);
3480 *match = 0;
3481 }
3482 f->child = RB_NEXT(pf_keth_anchor_node,
3483 &fr->anchor->children, f->child);
3484 if (f->child != NULL) {
3485 *rs = &f->child->ruleset;
3486 *r = TAILQ_FIRST((*rs)->active.rules);
3487 if (*r == NULL)
3488 continue;
3489 else
3490 break;
3491 }
3492 }
3493 (*depth)--;
3494 if (*depth == 0 && a != NULL)
3495 *a = NULL;
3496 *rs = f->rs;
3497 if (PF_ETH_ANCHOR_MATCH(f) || (match != NULL && *match))
3498 quick = fr->quick;
3499 *r = TAILQ_NEXT(fr, entries);
3500 } while (*r == NULL);
3501
3502 return (quick);
3503 }
3504
3505 #ifdef INET6
3506 void
3507 pf_poolmask(struct pf_addr *naddr, struct pf_addr *raddr,
3508 struct pf_addr *rmask, struct pf_addr *saddr, sa_family_t af)
3509 {
3510 switch (af) {
3511 #ifdef INET
3512 case AF_INET:
3513 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
3514 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
3515 break;
3516 #endif /* INET */
3517 case AF_INET6:
3518 naddr->addr32[0] = (raddr->addr32[0] & rmask->addr32[0]) |
3519 ((rmask->addr32[0] ^ 0xffffffff ) & saddr->addr32[0]);
3520 naddr->addr32[1] = (raddr->addr32[1] & rmask->addr32[1]) |
3521 ((rmask->addr32[1] ^ 0xffffffff ) & saddr->addr32[1]);
3522 naddr->addr32[2] = (raddr->addr32[2] & rmask->addr32[2]) |
3523 ((rmask->addr32[2] ^ 0xffffffff ) & saddr->addr32[2]);
3524 naddr->addr32[3] = (raddr->addr32[3] & rmask->addr32[3]) |
3525 ((rmask->addr32[3] ^ 0xffffffff ) & saddr->addr32[3]);
3526 break;
3527 }
3528 }
3529
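/*
 * Illustrative example (addresses hypothetical): with the bitmask pool
 * option the pool address supplies the bits covered by the mask and the
 * original address supplies the rest, so for raddr 198.51.100.0, a /24
 * rmask and saddr 10.0.0.7 the result is 198.51.100.7:
 *
 *	naddr = (raddr & rmask) | (saddr & ~rmask)
 */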
3530 void
3531 pf_addr_inc(struct pf_addr *addr, sa_family_t af)
3532 {
3533 switch (af) {
3534 #ifdef INET
3535 case AF_INET:
3536 addr->addr32[0] = htonl(ntohl(addr->addr32[0]) + 1);
3537 break;
3538 #endif /* INET */
3539 case AF_INET6:
3540 if (addr->addr32[3] == 0xffffffff) {
3541 addr->addr32[3] = 0;
3542 if (addr->addr32[2] == 0xffffffff) {
3543 addr->addr32[2] = 0;
3544 if (addr->addr32[1] == 0xffffffff) {
3545 addr->addr32[1] = 0;
3546 addr->addr32[0] =
3547 htonl(ntohl(addr->addr32[0]) + 1);
3548 } else
3549 addr->addr32[1] =
3550 htonl(ntohl(addr->addr32[1]) + 1);
3551 } else
3552 addr->addr32[2] =
3553 htonl(ntohl(addr->addr32[2]) + 1);
3554 } else
3555 addr->addr32[3] =
3556 htonl(ntohl(addr->addr32[3]) + 1);
3557 break;
3558 }
3559 }
3560 #endif /* INET6 */
3561
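/*
 * The IPv6 case of pf_addr_inc() above ripples a carry through the four
 * 32-bit words while keeping them in network byte order.  For example
 * (illustrative only), incrementing 2001:db8::ffff:ffff yields
 * 2001:db8::1:0:0 -- addr32[3] overflows to zero and addr32[2] is bumped
 * by one.
 */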
3562 void
3563 pf_rule_to_actions(struct pf_krule *r, struct pf_rule_actions *a)
3564 {
3565 if (r->qid)
3566 a->qid = r->qid;
3567 if (r->pqid)
3568 a->pqid = r->pqid;
3569 if (r->dnpipe)
3570 a->dnpipe = r->dnpipe;
3571 if (r->dnrpipe)
3572 a->dnrpipe = r->dnrpipe;
3573 if (r->dnpipe || r->dnrpipe) {
3574 if (r->free_flags & PFRULE_DN_IS_PIPE)
3575 a->flags |= PFRULE_DN_IS_PIPE;
3576 else
3577 a->flags &= ~PFRULE_DN_IS_PIPE;
3578 }
3579 }
3580
3581 int
3582 pf_socket_lookup(int direction, struct pf_pdesc *pd, struct mbuf *m)
3583 {
3584 struct pf_addr *saddr, *daddr;
3585 u_int16_t sport, dport;
3586 struct inpcbinfo *pi;
3587 struct inpcb *inp;
3588
3589 pd->lookup.uid = UID_MAX;
3590 pd->lookup.gid = GID_MAX;
3591
3592 switch (pd->proto) {
3593 case IPPROTO_TCP:
3594 sport = pd->hdr.tcp.th_sport;
3595 dport = pd->hdr.tcp.th_dport;
3596 pi = &V_tcbinfo;
3597 break;
3598 case IPPROTO_UDP:
3599 sport = pd->hdr.udp.uh_sport;
3600 dport = pd->hdr.udp.uh_dport;
3601 pi = &V_udbinfo;
3602 break;
3603 default:
3604 return (-1);
3605 }
3606 if (direction == PF_IN) {
3607 saddr = pd->src;
3608 daddr = pd->dst;
3609 } else {
3610 u_int16_t p;
3611
3612 p = sport;
3613 sport = dport;
3614 dport = p;
3615 saddr = pd->dst;
3616 daddr = pd->src;
3617 }
3618 switch (pd->af) {
3619 #ifdef INET
3620 case AF_INET:
3621 inp = in_pcblookup_mbuf(pi, saddr->v4, sport, daddr->v4,
3622 dport, INPLOOKUP_RLOCKPCB, NULL, m);
3623 if (inp == NULL) {
3624 inp = in_pcblookup_mbuf(pi, saddr->v4, sport,
3625 daddr->v4, dport, INPLOOKUP_WILDCARD |
3626 INPLOOKUP_RLOCKPCB, NULL, m);
3627 if (inp == NULL)
3628 return (-1);
3629 }
3630 break;
3631 #endif /* INET */
3632 #ifdef INET6
3633 case AF_INET6:
3634 inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport, &daddr->v6,
3635 dport, INPLOOKUP_RLOCKPCB, NULL, m);
3636 if (inp == NULL) {
3637 inp = in6_pcblookup_mbuf(pi, &saddr->v6, sport,
3638 &daddr->v6, dport, INPLOOKUP_WILDCARD |
3639 INPLOOKUP_RLOCKPCB, NULL, m);
3640 if (inp == NULL)
3641 return (-1);
3642 }
3643 break;
3644 #endif /* INET6 */
3645
3646 default:
3647 return (-1);
3648 }
3649 INP_RLOCK_ASSERT(inp);
3650 pd->lookup.uid = inp->inp_cred->cr_uid;
3651 pd->lookup.gid = inp->inp_cred->cr_groups[0];
3652 INP_RUNLOCK(inp);
3653
3654 return (1);
3655 }
3656
3657 u_int8_t
3658 pf_get_wscale(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
3659 {
3660 int hlen;
3661 u_int8_t hdr[60];
3662 u_int8_t *opt, optlen;
3663 u_int8_t wscale = 0;
3664
3665 hlen = th_off << 2; /* hlen <= sizeof(hdr) */
3666 if (hlen <= sizeof(struct tcphdr))
3667 return (0);
3668 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
3669 return (0);
3670 opt = hdr + sizeof(struct tcphdr);
3671 hlen -= sizeof(struct tcphdr);
3672 while (hlen >= 3) {
3673 switch (*opt) {
3674 case TCPOPT_EOL:
3675 case TCPOPT_NOP:
3676 ++opt;
3677 --hlen;
3678 break;
3679 case TCPOPT_WINDOW:
3680 wscale = opt[2];
3681 if (wscale > TCP_MAX_WINSHIFT)
3682 wscale = TCP_MAX_WINSHIFT;
3683 wscale |= PF_WSCALE_FLAG;
3684 /* FALLTHROUGH */
3685 default:
3686 optlen = opt[1];
3687 if (optlen < 2)
3688 optlen = 2;
3689 hlen -= optlen;
3690 opt += optlen;
3691 break;
3692 }
3693 }
3694 return (wscale);
3695 }
3696
3697 u_int16_t
3698 pf_get_mss(struct mbuf *m, int off, u_int16_t th_off, sa_family_t af)
3699 {
3700 int hlen;
3701 u_int8_t hdr[60];
3702 u_int8_t *opt, optlen;
3703 u_int16_t mss = V_tcp_mssdflt;
3704
3705 hlen = th_off << 2; /* hlen <= sizeof(hdr) */
3706 if (hlen <= sizeof(struct tcphdr))
3707 return (0);
3708 if (!pf_pull_hdr(m, off, hdr, hlen, NULL, NULL, af))
3709 return (0);
3710 opt = hdr + sizeof(struct tcphdr);
3711 hlen -= sizeof(struct tcphdr);
3712 while (hlen >= TCPOLEN_MAXSEG) {
3713 switch (*opt) {
3714 case TCPOPT_EOL:
3715 case TCPOPT_NOP:
3716 ++opt;
3717 --hlen;
3718 break;
3719 case TCPOPT_MAXSEG:
3720 bcopy((caddr_t)(opt + 2), (caddr_t)&mss, 2);
3721 NTOHS(mss);
3722 /* FALLTHROUGH */
3723 default:
3724 optlen = opt[1];
3725 if (optlen < 2)
3726 optlen = 2;
3727 hlen -= optlen;
3728 opt += optlen;
3729 break;
3730 }
3731 }
3732 return (mss);
3733 }
3734
3735 static u_int16_t
3736 pf_calc_mss(struct pf_addr *addr, sa_family_t af, int rtableid, u_int16_t offer)
3737 {
3738 struct nhop_object *nh;
3739 #ifdef INET6
3740 struct in6_addr dst6;
3741 uint32_t scopeid;
3742 #endif /* INET6 */
3743 int hlen = 0;
3744 uint16_t mss = 0;
3745
3746 NET_EPOCH_ASSERT();
3747
3748 switch (af) {
3749 #ifdef INET
3750 case AF_INET:
3751 hlen = sizeof(struct ip);
3752 nh = fib4_lookup(rtableid, addr->v4, 0, 0, 0);
3753 if (nh != NULL)
3754 mss = nh->nh_mtu - hlen - sizeof(struct tcphdr);
3755 break;
3756 #endif /* INET */
3757 #ifdef INET6
3758 case AF_INET6:
3759 hlen = sizeof(struct ip6_hdr);
3760 in6_splitscope(&addr->v6, &dst6, &scopeid);
3761 nh = fib6_lookup(rtableid, &dst6, scopeid, 0, 0);
3762 if (nh != NULL)
3763 mss = nh->nh_mtu - hlen - sizeof(struct tcphdr);
3764 break;
3765 #endif /* INET6 */
3766 }
3767
3768 mss = max(V_tcp_mssdflt, mss);
3769 mss = min(mss, offer);
3770 mss = max(mss, 64); /* sanity - at least max opt space */
3771 return (mss);
3772 }
3773
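/*
 * Worked example for the MSS calculation above (the route MTU is assumed
 * to be a standard Ethernet 1500): 1500 - 20 (struct ip) - 20
 * (struct tcphdr) = 1460 for IPv4, or 1500 - 40 - 20 = 1440 for IPv6.
 * The result is then clamped to at least tcp.mssdflt, at most the peer's
 * offer, and never below 64 bytes.
 */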
3774 static u_int32_t
3775 pf_tcp_iss(struct pf_pdesc *pd)
3776 {
3777 MD5_CTX ctx;
3778 u_int32_t digest[4];
3779
3780 if (V_pf_tcp_secret_init == 0) {
3781 arc4random_buf(&V_pf_tcp_secret, sizeof(V_pf_tcp_secret));
3782 MD5Init(&V_pf_tcp_secret_ctx);
3783 MD5Update(&V_pf_tcp_secret_ctx, V_pf_tcp_secret,
3784 sizeof(V_pf_tcp_secret));
3785 V_pf_tcp_secret_init = 1;
3786 }
3787
3788 ctx = V_pf_tcp_secret_ctx;
3789
3790 MD5Update(&ctx, (char *)&pd->hdr.tcp.th_sport, sizeof(u_short));
3791 MD5Update(&ctx, (char *)&pd->hdr.tcp.th_dport, sizeof(u_short));
3792 if (pd->af == AF_INET6) {
3793 MD5Update(&ctx, (char *)&pd->src->v6, sizeof(struct in6_addr));
3794 MD5Update(&ctx, (char *)&pd->dst->v6, sizeof(struct in6_addr));
3795 } else {
3796 MD5Update(&ctx, (char *)&pd->src->v4, sizeof(struct in_addr));
3797 MD5Update(&ctx, (char *)&pd->dst->v4, sizeof(struct in_addr));
3798 }
3799 MD5Final((u_char *)digest, &ctx);
3800 V_pf_tcp_iss_off += 4096;
3801 #define ISN_RANDOM_INCREMENT (4096 - 1)
3802 return (digest[0] + (arc4random() & ISN_RANDOM_INCREMENT) +
3803 V_pf_tcp_iss_off);
3804 #undef ISN_RANDOM_INCREMENT
3805 }
3806
3807 static bool
3808 pf_match_eth_addr(const uint8_t *a, const struct pf_keth_rule_addr *r)
3809 {
3810 bool match = true;
3811
3812 /* Always matches if not set */
3813 if (! r->isset)
3814 return (!r->neg);
3815
3816 for (int i = 0; i < ETHER_ADDR_LEN; i++) {
3817 if ((a[i] & r->mask[i]) != (r->addr[i] & r->mask[i])) {
3818 match = false;
3819 break;
3820 }
3821 }
3822
3823 return (match ^ r->neg);
3824 }
3825
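/*
 * Illustrative example (addresses hypothetical): an ethernet rule carrying
 * address 00:11:22:00:00:00 with mask ff:ff:ff:00:00:00 matches any MAC
 * with the 00:11:22 OUI, since only the masked octets must agree; r->neg
 * flips the result for negated rules.
 */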
3826 static int
3827 pf_match_eth_tag(struct mbuf *m, struct pf_keth_rule *r, int *tag, int mtag)
3828 {
3829 if (*tag == -1)
3830 *tag = mtag;
3831
3832 return ((!r->match_tag_not && r->match_tag == *tag) ||
3833 (r->match_tag_not && r->match_tag != *tag));
3834 }
3835
3836 static void
3837 pf_bridge_to(struct pfi_kkif *kif, struct mbuf *m)
3838 {
3839 struct ifnet *ifp = kif->pfik_ifp;
3840
3841 	/* If we don't have the interface, drop the packet. */
3842 if (ifp == NULL) {
3843 m_freem(m);
3844 return;
3845 }
3846
3847 switch (ifp->if_type) {
3848 case IFT_ETHER:
3849 case IFT_XETHER:
3850 case IFT_L2VLAN:
3851 case IFT_BRIDGE:
3852 case IFT_IEEE8023ADLAG:
3853 break;
3854 default:
3855 m_freem(m);
3856 return;
3857 }
3858
3859 kif->pfik_ifp->if_transmit(kif->pfik_ifp, m);
3860 }
3861
3862 static int
3863 pf_test_eth_rule(int dir, struct pfi_kkif *kif, struct mbuf **m0)
3864 {
3865 #ifdef INET
3866 struct ip ip;
3867 #endif
3868 #ifdef INET6
3869 struct ip6_hdr ip6;
3870 #endif
3871 struct mbuf *m = *m0;
3872 struct ether_header *e;
3873 struct pf_keth_rule *r, *rm, *a = NULL;
3874 struct pf_keth_ruleset *ruleset = NULL;
3875 struct pf_mtag *mtag;
3876 struct pf_keth_ruleq *rules;
3877 struct pf_addr *src = NULL, *dst = NULL;
3878 sa_family_t af = 0;
3879 uint16_t proto;
3880 int asd = 0, match = 0;
3881 int tag = -1;
3882 uint8_t action;
3883 struct pf_keth_anchor_stackframe anchor_stack[PF_ANCHOR_STACKSIZE];
3884
3885 MPASS(kif->pfik_ifp->if_vnet == curvnet);
3886 NET_EPOCH_ASSERT();
3887
3888 PF_RULES_RLOCK_TRACKER;
3889
3890 SDT_PROBE3(pf, eth, test_rule, entry, dir, kif->pfik_ifp, m);
3891
3892 mtag = pf_find_mtag(m);
3893 if (mtag != NULL && mtag->flags & PF_TAG_DUMMYNET) {
3894 /* Dummynet re-injects packets after they've
3895 * completed their delay. We've already
3896 * processed them, so pass unconditionally. */
3897
3898 /* But only once. We may see the packet multiple times (e.g.
3899 * PFIL_IN/PFIL_OUT). */
3900 mtag->flags &= ~PF_TAG_DUMMYNET;
3901
3902 return (PF_PASS);
3903 }
3904
3905 ruleset = V_pf_keth;
3906 rules = ck_pr_load_ptr(&ruleset->active.rules);
3907 r = TAILQ_FIRST(rules);
3908 rm = NULL;
3909
3910 e = mtod(m, struct ether_header *);
3911 proto = ntohs(e->ether_type);
3912
3913 switch (proto) {
3914 #ifdef INET
3915 case ETHERTYPE_IP: {
3916 if (m_length(m, NULL) < (sizeof(struct ether_header) +
3917 sizeof(ip)))
3918 return (PF_DROP);
3919
3920 af = AF_INET;
3921 m_copydata(m, sizeof(struct ether_header), sizeof(ip),
3922 (caddr_t)&ip);
3923 src = (struct pf_addr *)&ip.ip_src;
3924 dst = (struct pf_addr *)&ip.ip_dst;
3925 break;
3926 }
3927 #endif /* INET */
3928 #ifdef INET6
3929 case ETHERTYPE_IPV6: {
3930 if (m_length(m, NULL) < (sizeof(struct ether_header) +
3931 sizeof(ip6)))
3932 return (PF_DROP);
3933
3934 af = AF_INET6;
3935 m_copydata(m, sizeof(struct ether_header), sizeof(ip6),
3936 (caddr_t)&ip6);
3937 src = (struct pf_addr *)&ip6.ip6_src;
3938 dst = (struct pf_addr *)&ip6.ip6_dst;
3939 break;
3940 }
3941 #endif /* INET6 */
3942 }
3943
3944 PF_RULES_RLOCK();
3945
3946 while (r != NULL) {
3947 counter_u64_add(r->evaluations, 1);
3948 SDT_PROBE2(pf, eth, test_rule, test, r->nr, r);
3949
3950 if (pfi_kkif_match(r->kif, kif) == r->ifnot) {
3951 SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r,
3952 "kif");
3953 r = r->skip[PFE_SKIP_IFP].ptr;
3954 }
3955 else if (r->direction && r->direction != dir) {
3956 SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r,
3957 "dir");
3958 r = r->skip[PFE_SKIP_DIR].ptr;
3959 }
3960 else if (r->proto && r->proto != proto) {
3961 SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r,
3962 "proto");
3963 r = r->skip[PFE_SKIP_PROTO].ptr;
3964 }
3965 else if (! pf_match_eth_addr(e->ether_shost, &r->src)) {
3966 SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r,
3967 "src");
3968 r = r->skip[PFE_SKIP_SRC_ADDR].ptr;
3969 }
3970 else if (! pf_match_eth_addr(e->ether_dhost, &r->dst)) {
3971 SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r,
3972 "dst");
3973 r = TAILQ_NEXT(r, entries);
3974 }
3975 else if (src != NULL && PF_MISMATCHAW(&r->ipsrc.addr, src, af,
3976 r->ipsrc.neg, kif, M_GETFIB(m))) {
3977 SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r,
3978 "ip_src");
3979 r = TAILQ_NEXT(r, entries);
3980 }
3981 else if (dst != NULL && PF_MISMATCHAW(&r->ipdst.addr, dst, af,
3982 r->ipdst.neg, kif, M_GETFIB(m))) {
3983 SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r,
3984 "ip_dst");
3985 r = TAILQ_NEXT(r, entries);
3986 }
3987 else if (r->match_tag && !pf_match_eth_tag(m, r, &tag,
3988 mtag ? mtag->tag : 0)) {
3989 SDT_PROBE3(pf, eth, test_rule, mismatch, r->nr, r,
3990 "match_tag");
3991 r = TAILQ_NEXT(r, entries);
3992 }
3993 else {
3994 if (r->tag)
3995 tag = r->tag;
3996 if (r->anchor == NULL) {
3997 /* Rule matches */
3998 rm = r;
3999
4000 SDT_PROBE2(pf, eth, test_rule, match, r->nr, r);
4001
4002 if (r->quick)
4003 break;
4004
4005 r = TAILQ_NEXT(r, entries);
4006 } else {
4007 pf_step_into_keth_anchor(anchor_stack, &asd,
4008 &ruleset, &r, &a, &match);
4009 }
4010 }
4011 if (r == NULL && pf_step_out_of_keth_anchor(anchor_stack, &asd,
4012 &ruleset, &r, &a, &match))
4013 break;
4014 }
4015
4016 r = rm;
4017
4018 SDT_PROBE2(pf, eth, test_rule, final_match, (r != NULL ? r->nr : -1), r);
4019
4020 /* Default to pass. */
4021 if (r == NULL) {
4022 PF_RULES_RUNLOCK();
4023 return (PF_PASS);
4024 }
4025
4026 /* Execute action. */
4027 counter_u64_add(r->packets[dir == PF_OUT], 1);
4028 counter_u64_add(r->bytes[dir == PF_OUT], m_length(m, NULL));
4029 pf_update_timestamp(r);
4030
4031 /* Shortcut. Don't tag if we're just going to drop anyway. */
4032 if (r->action == PF_DROP) {
4033 PF_RULES_RUNLOCK();
4034 return (PF_DROP);
4035 }
4036
4037 if (tag > 0) {
4038 if (mtag == NULL)
4039 mtag = pf_get_mtag(m);
4040 if (mtag == NULL) {
4041 PF_RULES_RUNLOCK();
4042 counter_u64_add(V_pf_status.counters[PFRES_MEMORY], 1);
4043 return (PF_DROP);
4044 }
4045 mtag->tag = tag;
4046 }
4047
4048 if (r->qid != 0) {
4049 if (mtag == NULL)
4050 mtag = pf_get_mtag(m);
4051 if (mtag == NULL) {
4052 PF_RULES_RUNLOCK();
4053 counter_u64_add(V_pf_status.counters[PFRES_MEMORY], 1);
4054 return (PF_DROP);
4055 }
4056 mtag->qid = r->qid;
4057 }
4058
4059 /* Dummynet */
4060 if (r->dnpipe) {
4061 struct ip_fw_args dnflow;
4062
4063 /* Drop packet if dummynet is not loaded. */
4064 if (ip_dn_io_ptr == NULL) {
4065 PF_RULES_RUNLOCK();
4066 m_freem(m);
4067 counter_u64_add(V_pf_status.counters[PFRES_MEMORY], 1);
4068 return (PF_DROP);
4069 }
4070 if (mtag == NULL)
4071 mtag = pf_get_mtag(m);
4072 if (mtag == NULL) {
4073 PF_RULES_RUNLOCK();
4074 counter_u64_add(V_pf_status.counters[PFRES_MEMORY], 1);
4075 return (PF_DROP);
4076 }
4077
4078 bzero(&dnflow, sizeof(dnflow));
4079
4080 		/* We don't have port numbers here, so we set them to 0.  That
4081 		 * means we'll be somewhat limited in distinguishing flows (i.e.
4082 * only based on IP addresses, not based on port numbers), but
4083 * it's better than nothing. */
4084 dnflow.f_id.dst_port = 0;
4085 dnflow.f_id.src_port = 0;
4086 dnflow.f_id.proto = 0;
4087
4088 dnflow.rule.info = r->dnpipe;
4089 dnflow.rule.info |= IPFW_IS_DUMMYNET;
4090 if (r->dnflags & PFRULE_DN_IS_PIPE)
4091 dnflow.rule.info |= IPFW_IS_PIPE;
4092
4093 dnflow.f_id.extra = dnflow.rule.info;
4094
4095 dnflow.flags = dir == PF_IN ? IPFW_ARGS_IN : IPFW_ARGS_OUT;
4096 dnflow.flags |= IPFW_ARGS_ETHER;
4097 dnflow.ifp = kif->pfik_ifp;
4098
4099 switch (af) {
4100 case AF_INET:
4101 dnflow.f_id.addr_type = 4;
4102 dnflow.f_id.src_ip = src->v4.s_addr;
4103 dnflow.f_id.dst_ip = dst->v4.s_addr;
4104 break;
4105 case AF_INET6:
4106 dnflow.flags |= IPFW_ARGS_IP6;
4107 dnflow.f_id.addr_type = 6;
4108 dnflow.f_id.src_ip6 = src->v6;
4109 dnflow.f_id.dst_ip6 = dst->v6;
4110 break;
4111 }
4112
4113 mtag->flags |= PF_TAG_DUMMYNET;
4114 ip_dn_io_ptr(m0, &dnflow);
4115 if (*m0 != NULL)
4116 mtag->flags &= ~PF_TAG_DUMMYNET;
4117 }
4118
4119 action = r->action;
4120
4121 if (action == PF_PASS && r->bridge_to) {
4122 pf_bridge_to(r->bridge_to, *m0);
4123 *m0 = NULL; /* We've eaten the packet. */
4124 }
4125
4126 PF_RULES_RUNLOCK();
4127
4128 return (action);
4129 }
4130
4131 static int
4132 pf_test_rule(struct pf_krule **rm, struct pf_kstate **sm, int direction,
4133 struct pfi_kkif *kif, struct mbuf *m, int off, struct pf_pdesc *pd,
4134 struct pf_krule **am, struct pf_kruleset **rsm, struct inpcb *inp)
4135 {
4136 struct pf_krule *nr = NULL;
4137 struct pf_addr * const saddr = pd->src;
4138 struct pf_addr * const daddr = pd->dst;
4139 sa_family_t af = pd->af;
4140 struct pf_krule *r, *a = NULL;
4141 struct pf_kruleset *ruleset = NULL;
4142 struct pf_ksrc_node *nsn = NULL;
4143 struct tcphdr *th = &pd->hdr.tcp;
4144 struct pf_state_key *sk = NULL, *nk = NULL;
4145 u_short reason;
4146 int rewrite = 0, hdrlen = 0;
4147 int tag = -1, rtableid = -1;
4148 int asd = 0;
4149 int match = 0;
4150 int state_icmp = 0;
4151 u_int16_t sport = 0, dport = 0;
4152 u_int16_t bproto_sum = 0, bip_sum = 0;
4153 u_int8_t icmptype = 0, icmpcode = 0;
4154 struct pf_kanchor_stackframe anchor_stack[PF_ANCHOR_STACKSIZE];
4155
4156 PF_RULES_RASSERT();
4157
4158 if (inp != NULL) {
4159 INP_LOCK_ASSERT(inp);
4160 pd->lookup.uid = inp->inp_cred->cr_uid;
4161 pd->lookup.gid = inp->inp_cred->cr_groups[0];
4162 pd->lookup.done = 1;
4163 }
4164
4165 switch (pd->proto) {
4166 case IPPROTO_TCP:
4167 sport = th->th_sport;
4168 dport = th->th_dport;
4169 hdrlen = sizeof(*th);
4170 break;
4171 case IPPROTO_UDP:
4172 sport = pd->hdr.udp.uh_sport;
4173 dport = pd->hdr.udp.uh_dport;
4174 hdrlen = sizeof(pd->hdr.udp);
4175 break;
4176 #ifdef INET
4177 case IPPROTO_ICMP:
4178 if (pd->af != AF_INET)
4179 break;
4180 sport = dport = pd->hdr.icmp.icmp_id;
4181 hdrlen = sizeof(pd->hdr.icmp);
4182 icmptype = pd->hdr.icmp.icmp_type;
4183 icmpcode = pd->hdr.icmp.icmp_code;
4184
4185 if (icmptype == ICMP_UNREACH ||
4186 icmptype == ICMP_SOURCEQUENCH ||
4187 icmptype == ICMP_REDIRECT ||
4188 icmptype == ICMP_TIMXCEED ||
4189 icmptype == ICMP_PARAMPROB)
4190 state_icmp++;
4191 break;
4192 #endif /* INET */
4193 #ifdef INET6
4194 case IPPROTO_ICMPV6:
4195 if (af != AF_INET6)
4196 break;
4197 sport = dport = pd->hdr.icmp6.icmp6_id;
4198 hdrlen = sizeof(pd->hdr.icmp6);
4199 icmptype = pd->hdr.icmp6.icmp6_type;
4200 icmpcode = pd->hdr.icmp6.icmp6_code;
4201
4202 if (icmptype == ICMP6_DST_UNREACH ||
4203 icmptype == ICMP6_PACKET_TOO_BIG ||
4204 icmptype == ICMP6_TIME_EXCEEDED ||
4205 icmptype == ICMP6_PARAM_PROB)
4206 state_icmp++;
4207 break;
4208 #endif /* INET6 */
4209 default:
4210 sport = dport = hdrlen = 0;
4211 break;
4212 }
4213
4214 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
4215
4216 /* check packet for BINAT/NAT/RDR */
4217 if ((nr = pf_get_translation(pd, m, off, direction, kif, &nsn, &sk,
4218 &nk, saddr, daddr, sport, dport, anchor_stack)) != NULL) {
4219 KASSERT(sk != NULL, ("%s: null sk", __func__));
4220 KASSERT(nk != NULL, ("%s: null nk", __func__));
4221
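/*
 * nk describes the packet after translation. The code below rewrites
 * the addresses/ports in place and saves the original IP and protocol
 * checksums (bip_sum/bproto_sum) so the translation can be undone if
 * the packet ends up being returned (pf_return()) or synproxied.
 */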
4222 if (nr->log) {
4223 PFLOG_PACKET(kif, m, af, direction, PFRES_MATCH, nr, a,
4224 ruleset, pd, 1);
4225 }
4226
4227 if (pd->ip_sum)
4228 bip_sum = *pd->ip_sum;
4229
4230 switch (pd->proto) {
4231 case IPPROTO_TCP:
4232 bproto_sum = th->th_sum;
4233 pd->proto_sum = &th->th_sum;
4234
4235 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
4236 nk->port[pd->sidx] != sport) {
4237 pf_change_ap(m, saddr, &th->th_sport, pd->ip_sum,
4238 &th->th_sum, &nk->addr[pd->sidx],
4239 nk->port[pd->sidx], 0, af);
4240 pd->sport = &th->th_sport;
4241 sport = th->th_sport;
4242 }
4243
4244 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
4245 nk->port[pd->didx] != dport) {
4246 pf_change_ap(m, daddr, &th->th_dport, pd->ip_sum,
4247 &th->th_sum, &nk->addr[pd->didx],
4248 nk->port[pd->didx], 0, af);
4249 dport = th->th_dport;
4250 pd->dport = &th->th_dport;
4251 }
4252 rewrite++;
4253 break;
4254 case IPPROTO_UDP:
4255 bproto_sum = pd->hdr.udp.uh_sum;
4256 pd->proto_sum = &pd->hdr.udp.uh_sum;
4257
4258 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], af) ||
4259 nk->port[pd->sidx] != sport) {
4260 pf_change_ap(m, saddr, &pd->hdr.udp.uh_sport,
4261 pd->ip_sum, &pd->hdr.udp.uh_sum,
4262 &nk->addr[pd->sidx],
4263 nk->port[pd->sidx], 1, af);
4264 sport = pd->hdr.udp.uh_sport;
4265 pd->sport = &pd->hdr.udp.uh_sport;
4266 }
4267
4268 if (PF_ANEQ(daddr, &nk->addr[pd->didx], af) ||
4269 nk->port[pd->didx] != dport) {
4270 pf_change_ap(m, daddr, &pd->hdr.udp.uh_dport,
4271 pd->ip_sum, &pd->hdr.udp.uh_sum,
4272 &nk->addr[pd->didx],
4273 nk->port[pd->didx], 1, af);
4274 dport = pd->hdr.udp.uh_dport;
4275 pd->dport = &pd->hdr.udp.uh_dport;
4276 }
4277 rewrite++;
4278 break;
4279 #ifdef INET
4280 case IPPROTO_ICMP:
4281 nk->port[0] = nk->port[1];
4282 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET))
4283 pf_change_a(&saddr->v4.s_addr, pd->ip_sum,
4284 nk->addr[pd->sidx].v4.s_addr, 0);
4285
4286 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET))
4287 pf_change_a(&daddr->v4.s_addr, pd->ip_sum,
4288 nk->addr[pd->didx].v4.s_addr, 0);
4289
4290 if (nk->port[1] != pd->hdr.icmp.icmp_id) {
4291 pd->hdr.icmp.icmp_cksum = pf_cksum_fixup(
4292 pd->hdr.icmp.icmp_cksum, sport,
4293 nk->port[1], 0);
4294 pd->hdr.icmp.icmp_id = nk->port[1];
4295 pd->sport = &pd->hdr.icmp.icmp_id;
4296 }
4297 m_copyback(m, off, ICMP_MINLEN, (caddr_t)&pd->hdr.icmp);
4298 break;
4299 #endif /* INET */
4300 #ifdef INET6
4301 case IPPROTO_ICMPV6:
4302 nk->port[0] = nk->port[1];
4303 if (PF_ANEQ(saddr, &nk->addr[pd->sidx], AF_INET6))
4304 pf_change_a6(saddr, &pd->hdr.icmp6.icmp6_cksum,
4305 &nk->addr[pd->sidx], 0);
4306
4307 if (PF_ANEQ(daddr, &nk->addr[pd->didx], AF_INET6))
4308 pf_change_a6(daddr, &pd->hdr.icmp6.icmp6_cksum,
4309 &nk->addr[pd->didx], 0);
4310 rewrite++;
4311 break;
4312 #endif /* INET6 */
4313 default:
4314 switch (af) {
4315 #ifdef INET
4316 case AF_INET:
4317 if (PF_ANEQ(saddr,
4318 &nk->addr[pd->sidx], AF_INET))
4319 pf_change_a(&saddr->v4.s_addr,
4320 pd->ip_sum,
4321 nk->addr[pd->sidx].v4.s_addr, 0);
4322
4323 if (PF_ANEQ(daddr,
4324 &nk->addr[pd->didx], AF_INET))
4325 pf_change_a(&daddr->v4.s_addr,
4326 pd->ip_sum,
4327 nk->addr[pd->didx].v4.s_addr, 0);
4328 break;
4329 #endif /* INET */
4330 #ifdef INET6
4331 case AF_INET6:
4332 if (PF_ANEQ(saddr,
4333 &nk->addr[pd->sidx], AF_INET6))
4334 PF_ACPY(saddr, &nk->addr[pd->sidx], af);
4335
4336 if (PF_ANEQ(daddr,
4337 &nk->addr[pd->didx], AF_INET6))
4338 PF_ACPY(daddr, &nk->addr[pd->didx], af);
4339 break;
4340 #endif /* INET6 */
4341 }
4342 break;
4343 }
4344 if (nr->natpass)
4345 r = NULL;
4346 pd->nat_rule = nr;
4347 }
4348
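/*
 * Evaluate the filter rules. The skip[] pointers are precomputed when
 * the ruleset is loaded and let the loop jump over whole runs of rules
 * that fail the same criterion (interface, direction, af, proto,
 * addresses, ports) instead of testing every rule individually.
 */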
4349 while (r != NULL) {
4350 pf_counter_u64_add(&r->evaluations, 1);
4351 if (pfi_kkif_match(r->kif, kif) == r->ifnot)
4352 r = r->skip[PF_SKIP_IFP].ptr;
4353 else if (r->direction && r->direction != direction)
4354 r = r->skip[PF_SKIP_DIR].ptr;
4355 else if (r->af && r->af != af)
4356 r = r->skip[PF_SKIP_AF].ptr;
4357 else if (r->proto && r->proto != pd->proto)
4358 r = r->skip[PF_SKIP_PROTO].ptr;
4359 else if (PF_MISMATCHAW(&r->src.addr, saddr, af,
4360 r->src.neg, kif, M_GETFIB(m)))
4361 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
4362 /* tcp/udp only. port_op always 0 in other cases */
4363 else if (r->src.port_op && !pf_match_port(r->src.port_op,
4364 r->src.port[0], r->src.port[1], sport))
4365 r = r->skip[PF_SKIP_SRC_PORT].ptr;
4366 else if (PF_MISMATCHAW(&r->dst.addr, daddr, af,
4367 r->dst.neg, NULL, M_GETFIB(m)))
4368 r = r->skip[PF_SKIP_DST_ADDR].ptr;
4369 /* tcp/udp only. port_op always 0 in other cases */
4370 else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
4371 r->dst.port[0], r->dst.port[1], dport))
4372 r = r->skip[PF_SKIP_DST_PORT].ptr;
4373 /* icmp only. type always 0 in other cases */
4374 else if (r->type && r->type != icmptype + 1)
4375 r = TAILQ_NEXT(r, entries);
4376 /* icmp only. type always 0 in other cases */
4377 else if (r->code && r->code != icmpcode + 1)
4378 r = TAILQ_NEXT(r, entries);
4379 else if (r->tos && !(r->tos == pd->tos))
4380 r = TAILQ_NEXT(r, entries);
4381 else if (r->rule_flag & PFRULE_FRAGMENT)
4382 r = TAILQ_NEXT(r, entries);
4383 else if (pd->proto == IPPROTO_TCP &&
4384 (r->flagset & th->th_flags) != r->flags)
4385 r = TAILQ_NEXT(r, entries);
4386 /* tcp/udp only. uid.op always 0 in other cases */
4387 else if (r->uid.op && (pd->lookup.done || (pd->lookup.done =
4388 pf_socket_lookup(direction, pd, m), 1)) &&
4389 !pf_match_uid(r->uid.op, r->uid.uid[0], r->uid.uid[1],
4390 pd->lookup.uid))
4391 r = TAILQ_NEXT(r, entries);
4392 /* tcp/udp only. gid.op always 0 in other cases */
4393 else if (r->gid.op && (pd->lookup.done || (pd->lookup.done =
4394 pf_socket_lookup(direction, pd, m), 1)) &&
4395 !pf_match_gid(r->gid.op, r->gid.gid[0], r->gid.gid[1],
4396 pd->lookup.gid))
4397 r = TAILQ_NEXT(r, entries);
4398 else if (r->prio &&
4399 !pf_match_ieee8021q_pcp(r->prio, m))
4400 r = TAILQ_NEXT(r, entries);
4401 else if (r->prob &&
4402 r->prob <= arc4random())
4403 r = TAILQ_NEXT(r, entries);
4404 else if (r->match_tag && !pf_match_tag(m, r, &tag,
4405 pd->pf_mtag ? pd->pf_mtag->tag : 0))
4406 r = TAILQ_NEXT(r, entries);
4407 else if (r->os_fingerprint != PF_OSFP_ANY &&
4408 (pd->proto != IPPROTO_TCP || !pf_osfp_match(
4409 pf_osfp_fingerprint(pd, m, off, th),
4410 r->os_fingerprint)))
4411 r = TAILQ_NEXT(r, entries);
4412 else {
4413 if (r->tag)
4414 tag = r->tag;
4415 if (r->rtableid >= 0)
4416 rtableid = r->rtableid;
4417 if (r->anchor == NULL) {
4418 if (r->action == PF_MATCH) {
4419 pf_counter_u64_critical_enter();
4420 pf_counter_u64_add_protected(&r->packets[direction == PF_OUT], 1);
4421 pf_counter_u64_add_protected(&r->bytes[direction == PF_OUT], pd->tot_len);
4422 pf_counter_u64_critical_exit();
4423 pf_rule_to_actions(r, &pd->act);
4424 if (r->log)
4425 PFLOG_PACKET(kif, m, af,
4426 direction, PFRES_MATCH, r,
4427 a, ruleset, pd, 1);
4428 } else {
4429 match = 1;
4430 *rm = r;
4431 *am = a;
4432 *rsm = ruleset;
4433 }
4434 if ((*rm)->quick)
4435 break;
4436 r = TAILQ_NEXT(r, entries);
4437 } else
4438 pf_step_into_anchor(anchor_stack, &asd,
4439 &ruleset, PF_RULESET_FILTER, &r, &a,
4440 &match);
4441 }
4442 if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd,
4443 &ruleset, PF_RULESET_FILTER, &r, &a, &match))
4444 break;
4445 }
4446 r = *rm;
4447 a = *am;
4448 ruleset = *rsm;
4449
4450 REASON_SET(&reason, PFRES_MATCH);
4451
4452 /* apply actions for last matching pass/block rule */
4453 pf_rule_to_actions(r, &pd->act);
4454
4455 if (r->log) {
4456 if (rewrite)
4457 m_copyback(m, off, hdrlen, pd->hdr.any);
4458 PFLOG_PACKET(kif, m, af, direction, reason, r, a,
4459 ruleset, pd, 1);
4460 }
4461
4462 if ((r->action == PF_DROP) &&
4463 ((r->rule_flag & PFRULE_RETURNRST) ||
4464 (r->rule_flag & PFRULE_RETURNICMP) ||
4465 (r->rule_flag & PFRULE_RETURN))) {
4466 pf_return(r, nr, pd, sk, off, m, th, kif, bproto_sum,
4467 bip_sum, hdrlen, &reason);
4468 }
4469
4470 if (r->action == PF_DROP)
4471 goto cleanup;
4472
4473 if (tag > 0 && pf_tag_packet(m, pd, tag)) {
4474 REASON_SET(&reason, PFRES_MEMORY);
4475 goto cleanup;
4476 }
4477 if (rtableid >= 0)
4478 M_SETFIB(m, rtableid);
4479
4480 if (!state_icmp && (r->keep_state || nr != NULL ||
4481 (pd->flags & PFDESC_TCP_NORM))) {
4482 int action;
4483 action = pf_create_state(r, nr, a, pd, nsn, nk, sk, m, off,
4484 sport, dport, &rewrite, kif, sm, tag, bproto_sum, bip_sum,
4485 hdrlen);
4486 if (action != PF_PASS) {
4487 if (action == PF_DROP &&
4488 (r->rule_flag & PFRULE_RETURN))
4489 pf_return(r, nr, pd, sk, off, m, th, kif,
4490 bproto_sum, bip_sum, hdrlen, &reason);
4491 return (action);
4492 }
4493 } else {
4494 if (sk != NULL)
4495 uma_zfree(V_pf_state_key_z, sk);
4496 if (nk != NULL)
4497 uma_zfree(V_pf_state_key_z, nk);
4498 }
4499
4500 /* copy back packet headers if we performed NAT operations */
4501 if (rewrite)
4502 m_copyback(m, off, hdrlen, pd->hdr.any);
4503
4504 if (*sm != NULL && !((*sm)->state_flags & PFSTATE_NOSYNC) &&
4505 direction == PF_OUT &&
4506 V_pfsync_defer_ptr != NULL && V_pfsync_defer_ptr(*sm, m))
4507 /*
4508 * We want the state created, but we don't
4509 * want to send this packet yet, in case a
4510 * partner firewall has to learn about the
4511 * state first to allow replies through it.
4512 */
4513 return (PF_DEFER);
4514
4515 return (PF_PASS);
4516
4517 cleanup:
4518 if (sk != NULL)
4519 uma_zfree(V_pf_state_key_z, sk);
4520 if (nk != NULL)
4521 uma_zfree(V_pf_state_key_z, nk);
4522 return (PF_DROP);
4523 }
4524
4525 static int
4526 pf_create_state(struct pf_krule *r, struct pf_krule *nr, struct pf_krule *a,
4527 struct pf_pdesc *pd, struct pf_ksrc_node *nsn, struct pf_state_key *nk,
4528 struct pf_state_key *sk, struct mbuf *m, int off, u_int16_t sport,
4529 u_int16_t dport, int *rewrite, struct pfi_kkif *kif, struct pf_kstate **sm,
4530 int tag, u_int16_t bproto_sum, u_int16_t bip_sum, int hdrlen)
4531 {
4532 struct pf_kstate *s = NULL;
4533 struct pf_ksrc_node *sn = NULL;
4534 struct tcphdr *th = &pd->hdr.tcp;
4535 u_int16_t mss = V_tcp_mssdflt;
4536 u_short reason;
4537
4538 /* check maximums */
4539 if (r->max_states &&
4540 (counter_u64_fetch(r->states_cur) >= r->max_states)) {
4541 counter_u64_add(V_pf_status.lcounters[LCNT_STATES], 1);
4542 REASON_SET(&reason, PFRES_MAXSTATES);
4543 goto csfailed;
4544 }
4545 /* src node for filter rule */
4546 if ((r->rule_flag & PFRULE_SRCTRACK ||
4547 r->rpool.opts & PF_POOL_STICKYADDR) &&
4548 pf_insert_src_node(&sn, r, pd->src, pd->af) != 0) {
4549 REASON_SET(&reason, PFRES_SRCLIMIT);
4550 goto csfailed;
4551 }
4552 /* src node for translation rule */
4553 if (nr != NULL && (nr->rpool.opts & PF_POOL_STICKYADDR) &&
4554 pf_insert_src_node(&nsn, nr, &sk->addr[pd->sidx], pd->af)) {
4555 REASON_SET(&reason, PFRES_SRCLIMIT);
4556 goto csfailed;
4557 }
4558 s = pf_alloc_state(M_NOWAIT);
4559 if (s == NULL) {
4560 REASON_SET(&reason, PFRES_MEMORY);
4561 goto csfailed;
4562 }
4563 s->rule.ptr = r;
4564 s->nat_rule.ptr = nr;
4565 s->anchor.ptr = a;
4566 STATE_INC_COUNTERS(s);
4567 if (r->allow_opts)
4568 s->state_flags |= PFSTATE_ALLOWOPTS;
4569 if (r->rule_flag & PFRULE_STATESLOPPY)
4570 s->state_flags |= PFSTATE_SLOPPY;
4571 s->log = r->log & PF_LOG_ALL;
4572 s->sync_state = PFSYNC_S_NONE;
4573 s->qid = pd->act.qid;
4574 s->pqid = pd->act.pqid;
4575 s->dnpipe = pd->act.dnpipe;
4576 s->dnrpipe = pd->act.dnrpipe;
4577 s->state_flags |= pd->act.flags;
4578 if (nr != NULL)
4579 s->log |= nr->log & PF_LOG_ALL;
4580 switch (pd->proto) {
4581 case IPPROTO_TCP:
4582 s->src.seqlo = ntohl(th->th_seq);
4583 s->src.seqhi = s->src.seqlo + pd->p_len + 1;
4584 if ((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN &&
4585 r->keep_state == PF_STATE_MODULATE) {
4586 /* Generate sequence number modulator */
4587 if ((s->src.seqdiff = pf_tcp_iss(pd) - s->src.seqlo) ==
4588 0)
4589 s->src.seqdiff = 1;
4590 pf_change_proto_a(m, &th->th_seq, &th->th_sum,
4591 htonl(s->src.seqlo + s->src.seqdiff), 0);
4592 *rewrite = 1;
4593 } else
4594 s->src.seqdiff = 0;
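/*
 * With a modulated state every sequence number this peer sends is
 * offset by seqdiff on the wire; pf_tcp_track_full() keeps applying
 * and removing that offset on all subsequent packets of the
 * connection.
 */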
4595 if (th->th_flags & TH_SYN) {
4596 s->src.seqhi++;
4597 s->src.wscale = pf_get_wscale(m, off,
4598 th->th_off, pd->af);
4599 }
4600 s->src.max_win = MAX(ntohs(th->th_win), 1);
4601 if (s->src.wscale & PF_WSCALE_MASK) {
4602 /* Remove scale factor from initial window */
4603 int win = s->src.max_win;
4604 win += 1 << (s->src.wscale & PF_WSCALE_MASK);
4605 s->src.max_win = (win - 1) >>
4606 (s->src.wscale & PF_WSCALE_MASK);
4607 }
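/*
 * The computation above is ceil(win / 2^wscale): the SYN's window
 * field is not yet scaled, so dividing stores max_win in scaled units
 * and later uses of (max_win << wscale) recover at least the
 * advertised window. E.g. win 1000 with wscale 2:
 * (1000 + 4 - 1) >> 2 = 250, and 250 << 2 = 1000.
 */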
4608 if (th->th_flags & TH_FIN)
4609 s->src.seqhi++;
4610 s->dst.seqhi = 1;
4611 s->dst.max_win = 1;
4612 pf_set_protostate(s, PF_PEER_SRC, TCPS_SYN_SENT);
4613 pf_set_protostate(s, PF_PEER_DST, TCPS_CLOSED);
4614 s->timeout = PFTM_TCP_FIRST_PACKET;
4615 atomic_add_32(&V_pf_status.states_halfopen, 1);
4616 break;
4617 case IPPROTO_UDP:
4618 pf_set_protostate(s, PF_PEER_SRC, PFUDPS_SINGLE);
4619 pf_set_protostate(s, PF_PEER_DST, PFUDPS_NO_TRAFFIC);
4620 s->timeout = PFTM_UDP_FIRST_PACKET;
4621 break;
4622 case IPPROTO_ICMP:
4623 #ifdef INET6
4624 case IPPROTO_ICMPV6:
4625 #endif
4626 s->timeout = PFTM_ICMP_FIRST_PACKET;
4627 break;
4628 default:
4629 pf_set_protostate(s, PF_PEER_SRC, PFOTHERS_SINGLE);
4630 pf_set_protostate(s, PF_PEER_DST, PFOTHERS_NO_TRAFFIC);
4631 s->timeout = PFTM_OTHER_FIRST_PACKET;
4632 }
4633
4634 if (r->rt) {
4635 if (pf_map_addr(pd->af, r, pd->src, &s->rt_addr, NULL, &sn)) {
4636 REASON_SET(&reason, PFRES_MAPFAILED);
4637 pf_src_tree_remove_state(s);
4638 s->timeout = PFTM_UNLINKED;
4639 STATE_DEC_COUNTERS(s);
4640 pf_free_state(s);
4641 goto csfailed;
4642 }
4643 s->rt_kif = r->rpool.cur->kif;
4644 }
4645
4646 s->creation = time_uptime;
4647 s->expire = time_uptime;
4648
4649 if (sn != NULL)
4650 s->src_node = sn;
4651 if (nsn != NULL) {
4652 /* XXX We only modify one side for now. */
4653 PF_ACPY(&nsn->raddr, &nk->addr[1], pd->af);
4654 s->nat_src_node = nsn;
4655 }
4656 if (pd->proto == IPPROTO_TCP) {
4657 if ((pd->flags & PFDESC_TCP_NORM) && pf_normalize_tcp_init(m,
4658 off, pd, th, &s->src, &s->dst)) {
4659 REASON_SET(&reason, PFRES_MEMORY);
4660 pf_src_tree_remove_state(s);
4661 s->timeout = PFTM_UNLINKED;
4662 STATE_DEC_COUNTERS(s);
4663 pf_free_state(s);
4664 return (PF_DROP);
4665 }
4666 if ((pd->flags & PFDESC_TCP_NORM) && s->src.scrub &&
4667 pf_normalize_tcp_stateful(m, off, pd, &reason, th, s,
4668 &s->src, &s->dst, rewrite)) {
4669 /* This really shouldn't happen!!! */
4670 DPFPRINTF(PF_DEBUG_URGENT,
4671 ("pf_normalize_tcp_stateful failed on first "
4672 "pkt\n"));
4673 pf_src_tree_remove_state(s);
4674 s->timeout = PFTM_UNLINKED;
4675 STATE_DEC_COUNTERS(s);
4676 pf_free_state(s);
4677 return (PF_DROP);
4678 }
4679 }
4680 s->direction = pd->dir;
4681
4682 /*
4683 * sk/nk could have already been set up by pf_get_translation().
4684 */
4685 if (nr == NULL) {
4686 KASSERT((sk == NULL && nk == NULL), ("%s: nr %p sk %p, nk %p",
4687 __func__, nr, sk, nk));
4688 sk = pf_state_key_setup(pd, pd->src, pd->dst, sport, dport);
4689 if (sk == NULL)
4690 goto csfailed;
4691 nk = sk;
4692 } else
4693 KASSERT((sk != NULL && nk != NULL), ("%s: nr %p sk %p, nk %p",
4694 __func__, nr, sk, nk));
4695
4696 /* Swap sk/nk for PF_OUT. */
4697 if (pf_state_insert(BOUND_IFACE(r, kif), kif,
4698 (pd->dir == PF_IN) ? sk : nk,
4699 (pd->dir == PF_IN) ? nk : sk, s)) {
4700 REASON_SET(&reason, PFRES_STATEINS);
4701 pf_src_tree_remove_state(s);
4702 s->timeout = PFTM_UNLINKED;
4703 STATE_DEC_COUNTERS(s);
4704 pf_free_state(s);
4705 return (PF_DROP);
4706 } else
4707 *sm = s;
4708
4709 if (tag > 0)
4710 s->tag = tag;
4711 if (pd->proto == IPPROTO_TCP && (th->th_flags & (TH_SYN|TH_ACK)) ==
4712 TH_SYN && r->keep_state == PF_STATE_SYNPROXY) {
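/*
 * synproxy, step one: answer the client's SYN ourselves using the
 * random ISN chosen below and drop the original packet. The handshake
 * with the real destination is completed later by pf_synproxy() once
 * the client has finished its side.
 */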
4713 pf_set_protostate(s, PF_PEER_SRC, PF_TCPS_PROXY_SRC);
4714 /* undo NAT changes, if they have taken place */
4715 if (nr != NULL) {
4716 struct pf_state_key *skt = s->key[PF_SK_WIRE];
4717 if (pd->dir == PF_OUT)
4718 skt = s->key[PF_SK_STACK];
4719 PF_ACPY(pd->src, &skt->addr[pd->sidx], pd->af);
4720 PF_ACPY(pd->dst, &skt->addr[pd->didx], pd->af);
4721 if (pd->sport)
4722 *pd->sport = skt->port[pd->sidx];
4723 if (pd->dport)
4724 *pd->dport = skt->port[pd->didx];
4725 if (pd->proto_sum)
4726 *pd->proto_sum = bproto_sum;
4727 if (pd->ip_sum)
4728 *pd->ip_sum = bip_sum;
4729 m_copyback(m, off, hdrlen, pd->hdr.any);
4730 }
4731 s->src.seqhi = htonl(arc4random());
4732 /* Find mss option */
4733 int rtid = M_GETFIB(m);
4734 mss = pf_get_mss(m, off, th->th_off, pd->af);
4735 mss = pf_calc_mss(pd->src, pd->af, rtid, mss);
4736 mss = pf_calc_mss(pd->dst, pd->af, rtid, mss);
4737 s->src.mss = mss;
4738 pf_send_tcp(r, pd->af, pd->dst, pd->src, th->th_dport,
4739 th->th_sport, s->src.seqhi, ntohl(th->th_seq) + 1,
4740 TH_SYN|TH_ACK, 0, s->src.mss, 0, 1, 0);
4741 REASON_SET(&reason, PFRES_SYNPROXY);
4742 return (PF_SYNPROXY_DROP);
4743 }
4744
4745 return (PF_PASS);
4746
4747 csfailed:
4748 if (sk != NULL)
4749 uma_zfree(V_pf_state_key_z, sk);
4750 if (nk != NULL)
4751 uma_zfree(V_pf_state_key_z, nk);
4752
4753 if (sn != NULL) {
4754 struct pf_srchash *sh;
4755
4756 sh = &V_pf_srchash[pf_hashsrc(&sn->addr, sn->af)];
4757 PF_HASHROW_LOCK(sh);
4758 if (--sn->states == 0 && sn->expire == 0) {
4759 pf_unlink_src_node(sn);
4760 uma_zfree(V_pf_sources_z, sn);
4761 counter_u64_add(
4762 V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], 1);
4763 }
4764 PF_HASHROW_UNLOCK(sh);
4765 }
4766
4767 if (nsn != sn && nsn != NULL) {
4768 struct pf_srchash *sh;
4769
4770 sh = &V_pf_srchash[pf_hashsrc(&nsn->addr, nsn->af)];
4771 PF_HASHROW_LOCK(sh);
4772 if (--nsn->states == 0 && nsn->expire == 0) {
4773 pf_unlink_src_node(nsn);
4774 uma_zfree(V_pf_sources_z, nsn);
4775 counter_u64_add(
4776 V_pf_status.scounters[SCNT_SRC_NODE_REMOVALS], 1);
4777 }
4778 PF_HASHROW_UNLOCK(sh);
4779 }
4780
4781 return (PF_DROP);
4782 }
4783
4784 static int
4785 pf_test_fragment(struct pf_krule **rm, int direction, struct pfi_kkif *kif,
4786 struct mbuf *m, void *h, struct pf_pdesc *pd, struct pf_krule **am,
4787 struct pf_kruleset **rsm)
4788 {
4789 struct pf_krule *r, *a = NULL;
4790 struct pf_kruleset *ruleset = NULL;
4791 sa_family_t af = pd->af;
4792 u_short reason;
4793 int tag = -1;
4794 int asd = 0;
4795 int match = 0;
4796 struct pf_kanchor_stackframe anchor_stack[PF_ANCHOR_STACKSIZE];
4797
4798 PF_RULES_RASSERT();
4799
4800 r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_FILTER].active.ptr);
4801 while (r != NULL) {
4802 pf_counter_u64_add(&r->evaluations, 1);
4803 if (pfi_kkif_match(r->kif, kif) == r->ifnot)
4804 r = r->skip[PF_SKIP_IFP].ptr;
4805 else if (r->direction && r->direction != direction)
4806 r = r->skip[PF_SKIP_DIR].ptr;
4807 else if (r->af && r->af != af)
4808 r = r->skip[PF_SKIP_AF].ptr;
4809 else if (r->proto && r->proto != pd->proto)
4810 r = r->skip[PF_SKIP_PROTO].ptr;
4811 else if (PF_MISMATCHAW(&r->src.addr, pd->src, af,
4812 r->src.neg, kif, M_GETFIB(m)))
4813 r = r->skip[PF_SKIP_SRC_ADDR].ptr;
4814 else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af,
4815 r->dst.neg, NULL, M_GETFIB(m)))
4816 r = r->skip[PF_SKIP_DST_ADDR].ptr;
4817 else if (r->tos && !(r->tos == pd->tos))
4818 r = TAILQ_NEXT(r, entries);
4819 else if (r->os_fingerprint != PF_OSFP_ANY)
4820 r = TAILQ_NEXT(r, entries);
4821 else if (pd->proto == IPPROTO_UDP &&
4822 (r->src.port_op || r->dst.port_op))
4823 r = TAILQ_NEXT(r, entries);
4824 else if (pd->proto == IPPROTO_TCP &&
4825 (r->src.port_op || r->dst.port_op || r->flagset))
4826 r = TAILQ_NEXT(r, entries);
4827 else if ((pd->proto == IPPROTO_ICMP ||
4828 pd->proto == IPPROTO_ICMPV6) &&
4829 (r->type || r->code))
4830 r = TAILQ_NEXT(r, entries);
4831 else if (r->prio &&
4832 !pf_match_ieee8021q_pcp(r->prio, m))
4833 r = TAILQ_NEXT(r, entries);
4834 else if (r->prob && r->prob <=
4835 (arc4random() % (UINT_MAX - 1) + 1))
4836 r = TAILQ_NEXT(r, entries);
4837 else if (r->match_tag && !pf_match_tag(m, r, &tag,
4838 pd->pf_mtag ? pd->pf_mtag->tag : 0))
4839 r = TAILQ_NEXT(r, entries);
4840 else {
4841 if (r->anchor == NULL) {
4842 if (r->action == PF_MATCH) {
4843 pf_counter_u64_critical_enter();
4844 pf_counter_u64_add_protected(&r->packets[direction == PF_OUT], 1);
4845 pf_counter_u64_add_protected(&r->bytes[direction == PF_OUT], pd->tot_len);
4846 pf_counter_u64_critical_exit();
4847 pf_rule_to_actions(r, &pd->act);
4848 if (r->log)
4849 PFLOG_PACKET(kif, m, af,
4850 direction, PFRES_MATCH, r,
4851 a, ruleset, pd, 1);
4852 } else {
4853 match = 1;
4854 *rm = r;
4855 *am = a;
4856 *rsm = ruleset;
4857 }
4858 if ((*rm)->quick)
4859 break;
4860 r = TAILQ_NEXT(r, entries);
4861 } else
4862 pf_step_into_anchor(anchor_stack, &asd,
4863 &ruleset, PF_RULESET_FILTER, &r, &a,
4864 &match);
4865 }
4866 if (r == NULL && pf_step_out_of_anchor(anchor_stack, &asd,
4867 &ruleset, PF_RULESET_FILTER, &r, &a, &match))
4868 break;
4869 }
4870 r = *rm;
4871 a = *am;
4872 ruleset = *rsm;
4873
4874 REASON_SET(&reason, PFRES_MATCH);
4875
4876 /* apply actions for last matching pass/block rule */
4877 pf_rule_to_actions(r, &pd->act);
4878
4879 if (r->log)
4880 PFLOG_PACKET(kif, m, af, direction, reason, r, a, ruleset, pd,
4881 1);
4882
4883 if (r->action != PF_PASS)
4884 return (PF_DROP);
4885
4886 if (tag > 0 && pf_tag_packet(m, pd, tag)) {
4887 REASON_SET(&reason, PFRES_MEMORY);
4888 return (PF_DROP);
4889 }
4890
4891 return (PF_PASS);
4892 }
4893
4894 static int
4895 pf_tcp_track_full(struct pf_kstate **state, struct pfi_kkif *kif,
4896 struct mbuf *m, int off, struct pf_pdesc *pd, u_short *reason,
4897 int *copyback)
4898 {
4899 struct tcphdr *th = &pd->hdr.tcp;
4900 struct pf_state_peer *src, *dst;
4901 u_int16_t win = ntohs(th->th_win);
4902 u_int32_t ack, end, seq, orig_seq;
4903 u_int8_t sws, dws, psrc, pdst;
4904 int ackskew;
4905
4906 if (pd->dir == (*state)->direction) {
4907 src = &(*state)->src;
4908 dst = &(*state)->dst;
4909 psrc = PF_PEER_SRC;
4910 pdst = PF_PEER_DST;
4911 } else {
4912 src = &(*state)->dst;
4913 dst = &(*state)->src;
4914 psrc = PF_PEER_DST;
4915 pdst = PF_PEER_SRC;
4916 }
4917
4918 if (src->wscale && dst->wscale && !(th->th_flags & TH_SYN)) {
4919 sws = src->wscale & PF_WSCALE_MASK;
4920 dws = dst->wscale & PF_WSCALE_MASK;
4921 } else
4922 sws = dws = 0;
4923
4924 /*
4925 * Sequence tracking algorithm from Guido van Rooij's paper:
4926 * http://www.madison-gurkha.com/publications/tcp_filtering/
4927 * tcp_filtering.ps
4928 */
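/*
 * In short: for each peer we track the highest sequence number seen
 * (seqlo), the far edge of the window the other peer has advertised
 * (seqhi) and the largest window seen (max_win). A segment is only
 * accepted if it lies within the peer's announced window and its ACK
 * is within roughly one window of data already tracked; anything else
 * falls through to the loose-match and failure branches further down.
 */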
4929
4930 orig_seq = seq = ntohl(th->th_seq);
4931 if (src->seqlo == 0) {
4932 /* First packet from this end. Set its state */
4933
4934 if ((pd->flags & PFDESC_TCP_NORM || dst->scrub) &&
4935 src->scrub == NULL) {
4936 if (pf_normalize_tcp_init(m, off, pd, th, src, dst)) {
4937 REASON_SET(reason, PFRES_MEMORY);
4938 return (PF_DROP);
4939 }
4940 }
4941
4942 /* Deferred generation of sequence number modulator */
4943 if (dst->seqdiff && !src->seqdiff) {
4944 /* use random iss for the TCP server */
4945 while ((src->seqdiff = arc4random() - seq) == 0)
4946 ;
4947 ack = ntohl(th->th_ack) - dst->seqdiff;
4948 pf_change_proto_a(m, &th->th_seq, &th->th_sum, htonl(seq +
4949 src->seqdiff), 0);
4950 pf_change_proto_a(m, &th->th_ack, &th->th_sum, htonl(ack), 0);
4951 *copyback = 1;
4952 } else {
4953 ack = ntohl(th->th_ack);
4954 }
4955
4956 end = seq + pd->p_len;
4957 if (th->th_flags & TH_SYN) {
4958 end++;
4959 if (dst->wscale & PF_WSCALE_FLAG) {
4960 src->wscale = pf_get_wscale(m, off, th->th_off,
4961 pd->af);
4962 if (src->wscale & PF_WSCALE_FLAG) {
4963 /* Remove scale factor from initial
4964 * window */
4965 sws = src->wscale & PF_WSCALE_MASK;
4966 win = ((u_int32_t)win + (1 << sws) - 1)
4967 >> sws;
4968 dws = dst->wscale & PF_WSCALE_MASK;
4969 } else {
4970 /* fixup other window */
4971 dst->max_win <<= dst->wscale &
4972 PF_WSCALE_MASK;
4973 /* in case of a retrans SYN|ACK */
4974 dst->wscale = 0;
4975 }
4976 }
4977 }
4978 if (th->th_flags & TH_FIN)
4979 end++;
4980
4981 src->seqlo = seq;
4982 if (src->state < TCPS_SYN_SENT)
4983 pf_set_protostate(*state, psrc, TCPS_SYN_SENT);
4984
4985 /*
4986 * May need to slide the window (seqhi may have been set by
4987 * the crappy stack check or if we picked up the connection
4988 * after establishment)
4989 */
4990 if (src->seqhi == 1 ||
4991 SEQ_GEQ(end + MAX(1, dst->max_win << dws), src->seqhi))
4992 src->seqhi = end + MAX(1, dst->max_win << dws);
4993 if (win > src->max_win)
4994 src->max_win = win;
4995
4996 } else {
4997 ack = ntohl(th->th_ack) - dst->seqdiff;
4998 if (src->seqdiff) {
4999 /* Modulate sequence numbers */
5000 pf_change_proto_a(m, &th->th_seq, &th->th_sum, htonl(seq +
5001 src->seqdiff), 0);
5002 pf_change_proto_a(m, &th->th_ack, &th->th_sum, htonl(ack), 0);
5003 *copyback = 1;
5004 }
5005 end = seq + pd->p_len;
5006 if (th->th_flags & TH_SYN)
5007 end++;
5008 if (th->th_flags & TH_FIN)
5009 end++;
5010 }
5011
5012 if ((th->th_flags & TH_ACK) == 0) {
5013 /* Let it pass through the ack skew check */
5014 ack = dst->seqlo;
5015 } else if ((ack == 0 &&
5016 (th->th_flags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) ||
5017 /* broken tcp stacks do not set ack */
5018 (dst->state < TCPS_SYN_SENT)) {
5019 /*
5020 * Many stacks (ours included) will set the ACK number in a
5021 * FIN|ACK if the SYN times out -- no sequence to ACK.
5022 */
5023 ack = dst->seqlo;
5024 }
5025
5026 if (seq == end) {
5027 /* Ease sequencing restrictions on no data packets */
5028 seq = src->seqlo;
5029 end = seq;
5030 }
5031
5032 ackskew = dst->seqlo - ack;
5033
5034 /*
5035 * Need to demodulate the sequence numbers in any TCP SACK options
5036 * (Selective ACK). We could optionally validate the SACK values
5037 * against the current ACK window, either forwards or backwards, but
5038 * I'm not confident that SACK has been implemented properly
5039 * everywhere. It wouldn't surprise me if several stacks accidentally
5040 * SACK too far backwards of previously ACKed data. There really aren't
5041 * any security implications of bad SACKing unless the target stack
5042 * doesn't validate the option length correctly. Someone trying to
5043 * spoof into a TCP connection won't bother blindly sending SACK
5044 * options anyway.
5045 */
5046 if (dst->seqdiff && (th->th_off << 2) > sizeof(struct tcphdr)) {
5047 if (pf_modulate_sack(m, off, pd, th, dst))
5048 *copyback = 1;
5049 }
5050
5051 #define MAXACKWINDOW (0xffff + 1500) /* 1500 is an arbitrary fudge factor */
5052 if (SEQ_GEQ(src->seqhi, end) &&
5053 /* Last octet inside other's window space */
5054 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) &&
5055 /* Retrans: not more than one window back */
5056 (ackskew >= -MAXACKWINDOW) &&
5057 /* Acking not more than one reassembled fragment backwards */
5058 (ackskew <= (MAXACKWINDOW << sws)) &&
5059 /* Acking not more than one window forward */
5060 ((th->th_flags & TH_RST) == 0 || orig_seq == src->seqlo ||
5061 (orig_seq == src->seqlo + 1) || (orig_seq + 1 == src->seqlo) ||
5062 (pd->flags & PFDESC_IP_REAS) == 0)) {
5063 /* Require an exact/+1 sequence match on resets when possible */
5064
5065 if (dst->scrub || src->scrub) {
5066 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
5067 *state, src, dst, copyback))
5068 return (PF_DROP);
5069 }
5070
5071 /* update max window */
5072 if (src->max_win < win)
5073 src->max_win = win;
5074 /* synchronize sequencing */
5075 if (SEQ_GT(end, src->seqlo))
5076 src->seqlo = end;
5077 /* slide the window of what the other end can send */
5078 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
5079 dst->seqhi = ack + MAX((win << sws), 1);
5080
5081 /* update states */
5082 if (th->th_flags & TH_SYN)
5083 if (src->state < TCPS_SYN_SENT)
5084 pf_set_protostate(*state, psrc, TCPS_SYN_SENT);
5085 if (th->th_flags & TH_FIN)
5086 if (src->state < TCPS_CLOSING)
5087 pf_set_protostate(*state, psrc, TCPS_CLOSING);
5088 if (th->th_flags & TH_ACK) {
5089 if (dst->state == TCPS_SYN_SENT) {
5090 pf_set_protostate(*state, pdst,
5091 TCPS_ESTABLISHED);
5092 if (src->state == TCPS_ESTABLISHED &&
5093 (*state)->src_node != NULL &&
5094 pf_src_connlimit(state)) {
5095 REASON_SET(reason, PFRES_SRCLIMIT);
5096 return (PF_DROP);
5097 }
5098 } else if (dst->state == TCPS_CLOSING)
5099 pf_set_protostate(*state, pdst,
5100 TCPS_FIN_WAIT_2);
5101 }
5102 if (th->th_flags & TH_RST)
5103 pf_set_protostate(*state, PF_PEER_BOTH, TCPS_TIME_WAIT);
5104
5105 /* update expire time */
5106 (*state)->expire = time_uptime;
5107 if (src->state >= TCPS_FIN_WAIT_2 &&
5108 dst->state >= TCPS_FIN_WAIT_2)
5109 (*state)->timeout = PFTM_TCP_CLOSED;
5110 else if (src->state >= TCPS_CLOSING &&
5111 dst->state >= TCPS_CLOSING)
5112 (*state)->timeout = PFTM_TCP_FIN_WAIT;
5113 else if (src->state < TCPS_ESTABLISHED ||
5114 dst->state < TCPS_ESTABLISHED)
5115 (*state)->timeout = PFTM_TCP_OPENING;
5116 else if (src->state >= TCPS_CLOSING ||
5117 dst->state >= TCPS_CLOSING)
5118 (*state)->timeout = PFTM_TCP_CLOSING;
5119 else
5120 (*state)->timeout = PFTM_TCP_ESTABLISHED;
5121
5122 /* Fall through to PASS packet */
5123
5124 } else if ((dst->state < TCPS_SYN_SENT ||
5125 dst->state >= TCPS_FIN_WAIT_2 ||
5126 src->state >= TCPS_FIN_WAIT_2) &&
5127 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) &&
5128 /* Within a window forward of the originating packet */
5129 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW)) {
5130 /* Within a window backward of the originating packet */
5131
5132 /*
5133 * This currently handles three situations:
5134 * 1) Stupid stacks will shotgun SYNs before their peer
5135 * replies.
5136 * 2) When PF catches an already established stream (the
5137 * firewall rebooted, the state table was flushed, routes
5138 * changed...)
5139 * 3) Packets get funky immediately after the connection
5140 * closes (this should catch Solaris spurious ACK|FINs
5141 * that web servers like to spew after a close)
5142 *
5143 * This must be a little more careful than the above code
5144 * since packet floods will also be caught here. We don't
5145 * update the TTL here to mitigate the damage of a packet
5146 * flood and so the same code can handle awkward establishment
5147 * and a loosened connection close.
5148 * In the establishment case, a correct peer response will
5149 * validate the connection, go through the normal state code
5150 * and keep updating the state TTL.
5151 */
5152
5153 if (V_pf_status.debug >= PF_DEBUG_MISC) {
5154 printf("pf: loose state match: ");
5155 pf_print_state(*state);
5156 pf_print_flags(th->th_flags);
5157 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
5158 "pkts=%llu:%llu dir=%s,%s\n", seq, orig_seq, ack,
5159 pd->p_len, ackskew, (unsigned long long)(*state)->packets[0],
5160 (unsigned long long)(*state)->packets[1],
5161 pd->dir == PF_IN ? "in" : "out",
5162 pd->dir == (*state)->direction ? "fwd" : "rev");
5163 }
5164
5165 if (dst->scrub || src->scrub) {
5166 if (pf_normalize_tcp_stateful(m, off, pd, reason, th,
5167 *state, src, dst, copyback))
5168 return (PF_DROP);
5169 }
5170
5171 /* update max window */
5172 if (src->max_win < win)
5173 src->max_win = win;
5174 /* synchronize sequencing */
5175 if (SEQ_GT(end, src->seqlo))
5176 src->seqlo = end;
5177 /* slide the window of what the other end can send */
5178 if (SEQ_GEQ(ack + (win << sws), dst->seqhi))
5179 dst->seqhi = ack + MAX((win << sws), 1);
5180
5181 /*
5182 * Cannot set dst->seqhi here since this could be a shotgunned
5183 * SYN and not an already established connection.
5184 */
5185
5186 if (th->th_flags & TH_FIN)
5187 if (src->state < TCPS_CLOSING)
5188 pf_set_protostate(*state, psrc, TCPS_CLOSING);
5189 if (th->th_flags & TH_RST)
5190 pf_set_protostate(*state, PF_PEER_BOTH, TCPS_TIME_WAIT);
5191
5192 /* Fall through to PASS packet */
5193
5194 } else {
5195 if ((*state)->dst.state == TCPS_SYN_SENT &&
5196 (*state)->src.state == TCPS_SYN_SENT) {
5197 /* Send RST for state mismatches during handshake */
5198 if (!(th->th_flags & TH_RST))
5199 pf_send_tcp((*state)->rule.ptr, pd->af,
5200 pd->dst, pd->src, th->th_dport,
5201 th->th_sport, ntohl(th->th_ack), 0,
5202 TH_RST, 0, 0,
5203 (*state)->rule.ptr->return_ttl, 1, 0);
5204 src->seqlo = 0;
5205 src->seqhi = 1;
5206 src->max_win = 1;
5207 } else if (V_pf_status.debug >= PF_DEBUG_MISC) {
5208 printf("pf: BAD state: ");
5209 pf_print_state(*state);
5210 pf_print_flags(th->th_flags);
5211 printf(" seq=%u (%u) ack=%u len=%u ackskew=%d "
5212 "pkts=%llu:%llu dir=%s,%s\n",
5213 seq, orig_seq, ack, pd->p_len, ackskew,
5214 (unsigned long long)(*state)->packets[0],
5215 (unsigned long long)(*state)->packets[1],
5216 pd->dir == PF_IN ? "in" : "out",
5217 pd->dir == (*state)->direction ? "fwd" : "rev");
5218 printf("pf: State failure on: %c %c %c %c | %c %c\n",
5219 SEQ_GEQ(src->seqhi, end) ? ' ' : '1',
5220 SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)) ?
5221 ' ': '2',
5222 (ackskew >= -MAXACKWINDOW) ? ' ' : '3',
5223 (ackskew <= (MAXACKWINDOW << sws)) ? ' ' : '4',
5224 SEQ_GEQ(src->seqhi + MAXACKWINDOW, end) ?' ' :'5',
5225 SEQ_GEQ(seq, src->seqlo - MAXACKWINDOW) ?' ' :'6');
5226 }
5227 REASON_SET(reason, PFRES_BADSTATE);
5228 return (PF_DROP);
5229 }
5230
5231 return (PF_PASS);
5232 }
5233
5234 static int
5235 pf_tcp_track_sloppy(struct pf_kstate **state, struct pf_pdesc *pd, u_short *reason)
5236 {
5237 struct tcphdr *th = &pd->hdr.tcp;
5238 struct pf_state_peer *src, *dst;
5239 u_int8_t psrc, pdst;
5240
5241 if (pd->dir == (*state)->direction) {
5242 src = &(*state)->src;
5243 dst = &(*state)->dst;
5244 psrc = PF_PEER_SRC;
5245 pdst = PF_PEER_DST;
5246 } else {
5247 src = &(*state)->dst;
5248 dst = &(*state)->src;
5249 psrc = PF_PEER_DST;
5250 pdst = PF_PEER_SRC;
5251 }
5252
5253 if (th->th_flags & TH_SYN)
5254 if (src->state < TCPS_SYN_SENT)
5255 pf_set_protostate(*state, psrc, TCPS_SYN_SENT);
5256 if (th->th_flags & TH_FIN)
5257 if (src->state < TCPS_CLOSING)
5258 pf_set_protostate(*state, psrc, TCPS_CLOSING);
5259 if (th->th_flags & TH_ACK) {
5260 if (dst->state == TCPS_SYN_SENT) {
5261 pf_set_protostate(*state, pdst, TCPS_ESTABLISHED);
5262 if (src->state == TCPS_ESTABLISHED &&
5263 (*state)->src_node != NULL &&
5264 pf_src_connlimit(state)) {
5265 REASON_SET(reason, PFRES_SRCLIMIT);
5266 return (PF_DROP);
5267 }
5268 } else if (dst->state == TCPS_CLOSING) {
5269 pf_set_protostate(*state, pdst, TCPS_FIN_WAIT_2);
5270 } else if (src->state == TCPS_SYN_SENT &&
5271 dst->state < TCPS_SYN_SENT) {
5272 /*
5273 * Handle a special sloppy case where we only see one
5274 * half of the connection. If there is an ACK after
5275 * the initial SYN without ever seeing a packet from
5276 * the destination, set the connection to established.
5277 */
5278 pf_set_protostate(*state, PF_PEER_BOTH,
5279 TCPS_ESTABLISHED);
5280 dst->state = src->state = TCPS_ESTABLISHED;
5281 if ((*state)->src_node != NULL &&
5282 pf_src_connlimit(state)) {
5283 REASON_SET(reason, PFRES_SRCLIMIT);
5284 return (PF_DROP);
5285 }
5286 } else if (src->state == TCPS_CLOSING &&
5287 dst->state == TCPS_ESTABLISHED &&
5288 dst->seqlo == 0) {
5289 /*
5290 * Handle the closing of half connections where we
5291 * don't see the full bidirectional FIN/ACK+ACK
5292 * handshake.
5293 */
5294 pf_set_protostate(*state, pdst, TCPS_CLOSING);
5295 }
5296 }
5297 if (th->th_flags & TH_RST)
5298 pf_set_protostate(*state, PF_PEER_BOTH, TCPS_TIME_WAIT);
5299
5300 /* update expire time */
5301 (*state)->expire = time_uptime;
5302 if (src->state >= TCPS_FIN_WAIT_2 &&
5303 dst->state >= TCPS_FIN_WAIT_2)
5304 (*state)->timeout = PFTM_TCP_CLOSED;
5305 else if (src->state >= TCPS_CLOSING &&
5306 dst->state >= TCPS_CLOSING)
5307 (*state)->timeout = PFTM_TCP_FIN_WAIT;
5308 else if (src->state < TCPS_ESTABLISHED ||
5309 dst->state < TCPS_ESTABLISHED)
5310 (*state)->timeout = PFTM_TCP_OPENING;
5311 else if (src->state >= TCPS_CLOSING ||
5312 dst->state >= TCPS_CLOSING)
5313 (*state)->timeout = PFTM_TCP_CLOSING;
5314 else
5315 (*state)->timeout = PFTM_TCP_ESTABLISHED;
5316
5317 return (PF_PASS);
5318 }
5319
5320 static int
5321 pf_synproxy(struct pf_pdesc *pd, struct pf_kstate **state, u_short *reason)
5322 {
5323 struct pf_state_key *sk = (*state)->key[pd->didx];
5324 struct tcphdr *th = &pd->hdr.tcp;
5325
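/*
 * Two phases: in PF_TCPS_PROXY_SRC we are still completing the
 * three-way handshake with the client on behalf of the destination;
 * in PF_TCPS_PROXY_DST the client side is done, we open the real
 * connection to the destination and then splice the two halves
 * together by recording seqdiff offsets for both peers.
 */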
5326 if ((*state)->src.state == PF_TCPS_PROXY_SRC) {
5327 if (pd->dir != (*state)->direction) {
5328 REASON_SET(reason, PFRES_SYNPROXY);
5329 return (PF_SYNPROXY_DROP);
5330 }
5331 if (th->th_flags & TH_SYN) {
5332 if (ntohl(th->th_seq) != (*state)->src.seqlo) {
5333 REASON_SET(reason, PFRES_SYNPROXY);
5334 return (PF_DROP);
5335 }
5336 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
5337 pd->src, th->th_dport, th->th_sport,
5338 (*state)->src.seqhi, ntohl(th->th_seq) + 1,
5339 TH_SYN|TH_ACK, 0, (*state)->src.mss, 0, 1, 0);
5340 REASON_SET(reason, PFRES_SYNPROXY);
5341 return (PF_SYNPROXY_DROP);
5342 } else if ((th->th_flags & (TH_ACK|TH_RST|TH_FIN)) != TH_ACK ||
5343 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
5344 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
5345 REASON_SET(reason, PFRES_SYNPROXY);
5346 return (PF_DROP);
5347 } else if ((*state)->src_node != NULL &&
5348 pf_src_connlimit(state)) {
5349 REASON_SET(reason, PFRES_SRCLIMIT);
5350 return (PF_DROP);
5351 } else
5352 pf_set_protostate(*state, PF_PEER_SRC,
5353 PF_TCPS_PROXY_DST);
5354 }
5355 if ((*state)->src.state == PF_TCPS_PROXY_DST) {
5356 if (pd->dir == (*state)->direction) {
5357 if (((th->th_flags & (TH_SYN|TH_ACK)) != TH_ACK) ||
5358 (ntohl(th->th_ack) != (*state)->src.seqhi + 1) ||
5359 (ntohl(th->th_seq) != (*state)->src.seqlo + 1)) {
5360 REASON_SET(reason, PFRES_SYNPROXY);
5361 return (PF_DROP);
5362 }
5363 (*state)->src.max_win = MAX(ntohs(th->th_win), 1);
5364 if ((*state)->dst.seqhi == 1)
5365 (*state)->dst.seqhi = htonl(arc4random());
5366 pf_send_tcp((*state)->rule.ptr, pd->af,
5367 &sk->addr[pd->sidx], &sk->addr[pd->didx],
5368 sk->port[pd->sidx], sk->port[pd->didx],
5369 (*state)->dst.seqhi, 0, TH_SYN, 0,
5370 (*state)->src.mss, 0, 0, (*state)->tag);
5371 REASON_SET(reason, PFRES_SYNPROXY);
5372 return (PF_SYNPROXY_DROP);
5373 } else if (((th->th_flags & (TH_SYN|TH_ACK)) !=
5374 (TH_SYN|TH_ACK)) ||
5375 (ntohl(th->th_ack) != (*state)->dst.seqhi + 1)) {
5376 REASON_SET(reason, PFRES_SYNPROXY);
5377 return (PF_DROP);
5378 } else {
5379 (*state)->dst.max_win = MAX(ntohs(th->th_win), 1);
5380 (*state)->dst.seqlo = ntohl(th->th_seq);
5381 pf_send_tcp((*state)->rule.ptr, pd->af, pd->dst,
5382 pd->src, th->th_dport, th->th_sport,
5383 ntohl(th->th_ack), ntohl(th->th_seq) + 1,
5384 TH_ACK, (*state)->src.max_win, 0, 0, 0,
5385 (*state)->tag);
5386 pf_send_tcp((*state)->rule.ptr, pd->af,
5387 &sk->addr[pd->sidx], &sk->addr[pd->didx],
5388 sk->port[pd->sidx], sk->port[pd->didx],
5389 (*state)->src.seqhi + 1, (*state)->src.seqlo + 1,
5390 TH_ACK, (*state)->dst.max_win, 0, 0, 1, 0);
5391 (*state)->src.seqdiff = (*state)->dst.seqhi -
5392 (*state)->src.seqlo;
5393 (*state)->dst.seqdiff = (*state)->src.seqhi -
5394 (*state)->dst.seqlo;
5395 (*state)->src.seqhi = (*state)->src.seqlo +
5396 (*state)->dst.max_win;
5397 (*state)->dst.seqhi = (*state)->dst.seqlo +
5398 (*state)->src.max_win;
5399 (*state)->src.wscale = (*state)->dst.wscale = 0;
5400 pf_set_protostate(*state, PF_PEER_BOTH,
5401 TCPS_ESTABLISHED);
5402 REASON_SET(reason, PFRES_SYNPROXY);
5403 return (PF_SYNPROXY_DROP);
5404 }
5405 }
5406
5407 return (PF_PASS);
5408 }
5409
5410 static int
5411 pf_test_state_tcp(struct pf_kstate **state, int direction, struct pfi_kkif *kif,
5412 struct mbuf *m, int off, void *h, struct pf_pdesc *pd,
5413 u_short *reason)
5414 {
5415 struct pf_state_key_cmp key;
5416 struct tcphdr *th = &pd->hdr.tcp;
5417 int copyback = 0;
5418 int action;
5419 struct pf_state_peer *src, *dst;
5420
5421 bzero(&key, sizeof(key));
5422 key.af = pd->af;
5423 key.proto = IPPROTO_TCP;
5424 if (direction == PF_IN) { /* wire side, straight */
5425 PF_ACPY(&key.addr[0], pd->src, key.af);
5426 PF_ACPY(&key.addr[1], pd->dst, key.af);
5427 key.port[0] = th->th_sport;
5428 key.port[1] = th->th_dport;
5429 } else { /* stack side, reverse */
5430 PF_ACPY(&key.addr[1], pd->src, key.af);
5431 PF_ACPY(&key.addr[0], pd->dst, key.af);
5432 key.port[1] = th->th_sport;
5433 key.port[0] = th->th_dport;
5434 }
5435
5436 STATE_LOOKUP(kif, &key, direction, *state, pd);
5437
5438 if (direction == (*state)->direction) {
5439 src = &(*state)->src;
5440 dst = &(*state)->dst;
5441 } else {
5442 src = &(*state)->dst;
5443 dst = &(*state)->src;
5444 }
5445
5446 if ((action = pf_synproxy(pd, state, reason)) != PF_PASS)
5447 return (action);
5448
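/*
 * Detect reuse of a closed connection's address/port tuple: if both
 * peers have reached FIN_WAIT_2 or later and a fresh SYN (or a valid
 * syncookie ACK) arrives, tear down the stale state. The dropped
 * packet will be retransmitted and can then create a new state
 * through normal rule evaluation.
 */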
5449 if (dst->state >= TCPS_FIN_WAIT_2 &&
5450 src->state >= TCPS_FIN_WAIT_2 &&
5451 (((th->th_flags & (TH_SYN|TH_ACK)) == TH_SYN) ||
5452 ((th->th_flags & (TH_SYN|TH_ACK|TH_RST)) == TH_ACK &&
5453 pf_syncookie_check(pd) && pd->dir == PF_IN))) {
5454 if (V_pf_status.debug >= PF_DEBUG_MISC) {
5455 printf("pf: state reuse ");
5456 pf_print_state(*state);
5457 pf_print_flags(th->th_flags);
5458 printf("\n");
5459 }
5460 /* XXX make sure it's the same direction ?? */
5461 pf_set_protostate(*state, PF_PEER_BOTH, TCPS_CLOSED);
5462 pf_unlink_state(*state);
5463 *state = NULL;
5464 return (PF_DROP);
5465 }
5466
5467 if ((*state)->state_flags & PFSTATE_SLOPPY) {
5468 if (pf_tcp_track_sloppy(state, pd, reason) == PF_DROP)
5469 return (PF_DROP);
5470 } else {
5471 if (pf_tcp_track_full(state, kif, m, off, pd, reason,
5472 &copyback) == PF_DROP)
5473 return (PF_DROP);
5474 }
5475
5476 /* translate source/destination address, if necessary */
5477 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
5478 struct pf_state_key *nk = (*state)->key[pd->didx];
5479
5480 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
5481 nk->port[pd->sidx] != th->th_sport)
5482 pf_change_ap(m, pd->src, &th->th_sport,
5483 pd->ip_sum, &th->th_sum, &nk->addr[pd->sidx],
5484 nk->port[pd->sidx], 0, pd->af);
5485
5486 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
5487 nk->port[pd->didx] != th->th_dport)
5488 pf_change_ap(m, pd->dst, &th->th_dport,
5489 pd->ip_sum, &th->th_sum, &nk->addr[pd->didx],
5490 nk->port[pd->didx], 0, pd->af);
5491 copyback = 1;
5492 }
5493
5494 /* Copyback sequence modulation or stateful scrub changes if needed */
5495 if (copyback)
5496 m_copyback(m, off, sizeof(*th), (caddr_t)th);
5497
5498 return (PF_PASS);
5499 }
5500
5501 static int
5502 pf_test_state_udp(struct pf_kstate **state, int direction, struct pfi_kkif *kif,
5503 struct mbuf *m, int off, void *h, struct pf_pdesc *pd)
5504 {
5505 struct pf_state_peer *src, *dst;
5506 struct pf_state_key_cmp key;
5507 struct udphdr *uh = &pd->hdr.udp;
5508 uint8_t psrc, pdst;
5509
5510 bzero(&key, sizeof(key));
5511 key.af = pd->af;
5512 key.proto = IPPROTO_UDP;
5513 if (direction == PF_IN) { /* wire side, straight */
5514 PF_ACPY(&key.addr[0], pd->src, key.af);
5515 PF_ACPY(&key.addr[1], pd->dst, key.af);
5516 key.port[0] = uh->uh_sport;
5517 key.port[1] = uh->uh_dport;
5518 } else { /* stack side, reverse */
5519 PF_ACPY(&key.addr[1], pd->src, key.af);
5520 PF_ACPY(&key.addr[0], pd->dst, key.af);
5521 key.port[1] = uh->uh_sport;
5522 key.port[0] = uh->uh_dport;
5523 }
5524
5525 STATE_LOOKUP(kif, &key, direction, *state, pd);
5526
5527 if (direction == (*state)->direction) {
5528 src = &(*state)->src;
5529 dst = &(*state)->dst;
5530 psrc = PF_PEER_SRC;
5531 pdst = PF_PEER_DST;
5532 } else {
5533 src = &(*state)->dst;
5534 dst = &(*state)->src;
5535 psrc = PF_PEER_DST;
5536 pdst = PF_PEER_SRC;
5537 }
5538
5539 /* update states */
5540 if (src->state < PFUDPS_SINGLE)
5541 pf_set_protostate(*state, psrc, PFUDPS_SINGLE);
5542 if (dst->state == PFUDPS_SINGLE)
5543 pf_set_protostate(*state, pdst, PFUDPS_MULTIPLE);
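/*
 * UDP has no handshake, so the pseudo-states only record whether one
 * or both peers have sent traffic; once both have, the state is
 * considered bidirectional and gets the longer PFTM_UDP_MULTIPLE
 * timeout below.
 */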
5544
5545 /* update expire time */
5546 (*state)->expire = time_uptime;
5547 if (src->state == PFUDPS_MULTIPLE && dst->state == PFUDPS_MULTIPLE)
5548 (*state)->timeout = PFTM_UDP_MULTIPLE;
5549 else
5550 (*state)->timeout = PFTM_UDP_SINGLE;
5551
5552 /* translate source/destination address, if necessary */
5553 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
5554 struct pf_state_key *nk = (*state)->key[pd->didx];
5555
5556 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], pd->af) ||
5557 nk->port[pd->sidx] != uh->uh_sport)
5558 pf_change_ap(m, pd->src, &uh->uh_sport, pd->ip_sum,
5559 &uh->uh_sum, &nk->addr[pd->sidx],
5560 nk->port[pd->sidx], 1, pd->af);
5561
5562 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], pd->af) ||
5563 nk->port[pd->didx] != uh->uh_dport)
5564 pf_change_ap(m, pd->dst, &uh->uh_dport, pd->ip_sum,
5565 &uh->uh_sum, &nk->addr[pd->didx],
5566 nk->port[pd->didx], 1, pd->af);
5567 m_copyback(m, off, sizeof(*uh), (caddr_t)uh);
5568 }
5569
5570 return (PF_PASS);
5571 }
5572
5573 static int
5574 pf_test_state_icmp(struct pf_kstate **state, int direction, struct pfi_kkif *kif,
5575 struct mbuf *m, int off, void *h, struct pf_pdesc *pd, u_short *reason)
5576 {
5577 struct pf_addr *saddr = pd->src, *daddr = pd->dst;
5578 u_int16_t icmpid = 0, *icmpsum;
5579 u_int8_t icmptype, icmpcode;
5580 int state_icmp = 0;
5581 struct pf_state_key_cmp key;
5582
5583 bzero(&key, sizeof(key));
5584 switch (pd->proto) {
5585 #ifdef INET
5586 case IPPROTO_ICMP:
5587 icmptype = pd->hdr.icmp.icmp_type;
5588 icmpcode = pd->hdr.icmp.icmp_code;
5589 icmpid = pd->hdr.icmp.icmp_id;
5590 icmpsum = &pd->hdr.icmp.icmp_cksum;
5591
5592 if (icmptype == ICMP_UNREACH ||
5593 icmptype == ICMP_SOURCEQUENCH ||
5594 icmptype == ICMP_REDIRECT ||
5595 icmptype == ICMP_TIMXCEED ||
5596 icmptype == ICMP_PARAMPROB)
5597 state_icmp++;
5598 break;
5599 #endif /* INET */
5600 #ifdef INET6
5601 case IPPROTO_ICMPV6:
5602 icmptype = pd->hdr.icmp6.icmp6_type;
5603 icmpcode = pd->hdr.icmp6.icmp6_code;
5604 icmpid = pd->hdr.icmp6.icmp6_id;
5605 icmpsum = &pd->hdr.icmp6.icmp6_cksum;
5606
5607 if (icmptype == ICMP6_DST_UNREACH ||
5608 icmptype == ICMP6_PACKET_TOO_BIG ||
5609 icmptype == ICMP6_TIME_EXCEEDED ||
5610 icmptype == ICMP6_PARAM_PROB)
5611 state_icmp++;
5612 break;
5613 #endif /* INET6 */
5614 }
5615
5616 if (!state_icmp) {
5617 /*
5618 * ICMP query/reply message not related to a TCP/UDP packet.
5619 * Search for an ICMP state.
5620 */
5621 key.af = pd->af;
5622 key.proto = pd->proto;
5623 key.port[0] = key.port[1] = icmpid;
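/*
 * For ICMP queries the query/echo ID takes the place of the port
 * numbers in the state key, so the matching reply (same ID, reversed
 * addresses) finds the same state.
 */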
5624 if (direction == PF_IN) { /* wire side, straight */
5625 PF_ACPY(&key.addr[0], pd->src, key.af);
5626 PF_ACPY(&key.addr[1], pd->dst, key.af);
5627 } else { /* stack side, reverse */
5628 PF_ACPY(&key.addr[1], pd->src, key.af);
5629 PF_ACPY(&key.addr[0], pd->dst, key.af);
5630 }
5631
5632 STATE_LOOKUP(kif, &key, direction, *state, pd);
5633
5634 (*state)->expire = time_uptime;
5635 (*state)->timeout = PFTM_ICMP_ERROR_REPLY;
5636
5637 /* translate source/destination address, if necessary */
5638 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
5639 struct pf_state_key *nk = (*state)->key[pd->didx];
5640
5641 switch (pd->af) {
5642 #ifdef INET
5643 case AF_INET:
5644 if (PF_ANEQ(pd->src,
5645 &nk->addr[pd->sidx], AF_INET))
5646 pf_change_a(&saddr->v4.s_addr,
5647 pd->ip_sum,
5648 nk->addr[pd->sidx].v4.s_addr, 0);
5649
5650 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx],
5651 AF_INET))
5652 pf_change_a(&daddr->v4.s_addr,
5653 pd->ip_sum,
5654 nk->addr[pd->didx].v4.s_addr, 0);
5655
5656 if (nk->port[0] !=
5657 pd->hdr.icmp.icmp_id) {
5658 pd->hdr.icmp.icmp_cksum =
5659 pf_cksum_fixup(
5660 pd->hdr.icmp.icmp_cksum, icmpid,
5661 nk->port[pd->sidx], 0);
5662 pd->hdr.icmp.icmp_id =
5663 nk->port[pd->sidx];
5664 }
5665
5666 m_copyback(m, off, ICMP_MINLEN,
5667 (caddr_t )&pd->hdr.icmp);
5668 break;
5669 #endif /* INET */
5670 #ifdef INET6
5671 case AF_INET6:
5672 if (PF_ANEQ(pd->src,
5673 &nk->addr[pd->sidx], AF_INET6))
5674 pf_change_a6(saddr,
5675 &pd->hdr.icmp6.icmp6_cksum,
5676 &nk->addr[pd->sidx], 0);
5677
5678 if (PF_ANEQ(pd->dst,
5679 &nk->addr[pd->didx], AF_INET6))
5680 pf_change_a6(daddr,
5681 &pd->hdr.icmp6.icmp6_cksum,
5682 &nk->addr[pd->didx], 0);
5683
5684 m_copyback(m, off, sizeof(struct icmp6_hdr),
5685 (caddr_t )&pd->hdr.icmp6);
5686 break;
5687 #endif /* INET6 */
5688 }
5689 }
5690 return (PF_PASS);
5691
5692 } else {
5693 /*
5694 * ICMP error message in response to a TCP/UDP packet.
5695 * Extract the inner TCP/UDP header and search for that state.
5696 */
5697
5698 struct pf_pdesc pd2;
5699 bzero(&pd2, sizeof pd2);
5700 #ifdef INET
5701 struct ip h2;
5702 #endif /* INET */
5703 #ifdef INET6
5704 struct ip6_hdr h2_6;
5705 int terminal = 0;
5706 #endif /* INET6 */
5707 int ipoff2 = 0;
5708 int off2 = 0;
5709
5710 pd2.af = pd->af;
5711 /* Payload packet is from the opposite direction. */
5712 pd2.sidx = (direction == PF_IN) ? 1 : 0;
5713 pd2.didx = (direction == PF_IN) ? 0 : 1;
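/*
 * For example, an inbound ICMP error (PF_IN) quotes a packet that we
 * originally sent out, so the quoted packet's source/destination
 * indices are swapped relative to the outer packet; the nk->addr[]
 * lookups below rely on this to pick the right translated addresses.
 */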
5714 switch (pd->af) {
5715 #ifdef INET
5716 case AF_INET:
5717 /* offset of h2 in mbuf chain */
5718 ipoff2 = off + ICMP_MINLEN;
5719
5720 if (!pf_pull_hdr(m, ipoff2, &h2, sizeof(h2),
5721 NULL, reason, pd2.af)) {
5722 DPFPRINTF(PF_DEBUG_MISC,
5723 ("pf: ICMP error message too short "
5724 "(ip)\n"));
5725 return (PF_DROP);
5726 }
5727 /*
5728 * ICMP error messages don't refer to non-first
5729 * fragments
5730 */
5731 if (h2.ip_off & htons(IP_OFFMASK)) {
5732 REASON_SET(reason, PFRES_FRAG);
5733 return (PF_DROP);
5734 }
5735
5736 /* offset of protocol header that follows h2 */
5737 off2 = ipoff2 + (h2.ip_hl << 2);
5738
5739 pd2.proto = h2.ip_p;
5740 pd2.src = (struct pf_addr *)&h2.ip_src;
5741 pd2.dst = (struct pf_addr *)&h2.ip_dst;
5742 pd2.ip_sum = &h2.ip_sum;
5743 break;
5744 #endif /* INET */
5745 #ifdef INET6
5746 case AF_INET6:
5747 ipoff2 = off + sizeof(struct icmp6_hdr);
5748
5749 if (!pf_pull_hdr(m, ipoff2, &h2_6, sizeof(h2_6),
5750 NULL, reason, pd2.af)) {
5751 DPFPRINTF(PF_DEBUG_MISC,
5752 ("pf: ICMP error message too short "
5753 "(ip6)\n"));
5754 return (PF_DROP);
5755 }
5756 pd2.proto = h2_6.ip6_nxt;
5757 pd2.src = (struct pf_addr *)&h2_6.ip6_src;
5758 pd2.dst = (struct pf_addr *)&h2_6.ip6_dst;
5759 pd2.ip_sum = NULL;
5760 off2 = ipoff2 + sizeof(h2_6);
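/*
 * Walk the quoted packet's IPv6 extension header chain until a
 * transport header is reached; a fragment header is treated as an
 * error and the packet is dropped in the loop below.
 */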
5761 do {
5762 switch (pd2.proto) {
5763 case IPPROTO_FRAGMENT:
5764 /*
5765 * ICMPv6 error messages for
5766 * non-first fragments
5767 */
5768 REASON_SET(reason, PFRES_FRAG);
5769 return (PF_DROP);
5770 case IPPROTO_AH:
5771 case IPPROTO_HOPOPTS:
5772 case IPPROTO_ROUTING:
5773 case IPPROTO_DSTOPTS: {
5774 /* get next header and header length */
5775 struct ip6_ext opt6;
5776
5777 if (!pf_pull_hdr(m, off2, &opt6,
5778 sizeof(opt6), NULL, reason,
5779 pd2.af)) {
5780 DPFPRINTF(PF_DEBUG_MISC,
5781 ("pf: ICMPv6 short opt\n"));
5782 return (PF_DROP);
5783 }
5784 if (pd2.proto == IPPROTO_AH)
5785 off2 += (opt6.ip6e_len + 2) * 4;
5786 else
5787 off2 += (opt6.ip6e_len + 1) * 8;
5788 pd2.proto = opt6.ip6e_nxt;
5789 /* go to the next header */
5790 break;
5791 }
5792 default:
5793 terminal++;
5794 break;
5795 }
5796 } while (!terminal);
5797 break;
5798 #endif /* INET6 */
5799 }
5800
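/*
 * Sanity check: an ICMP error is addressed to the source of the
 * packet that triggered it, so the outer destination must equal the
 * quoted packet's source address.
 */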
5801 if (PF_ANEQ(pd->dst, pd2.src, pd->af)) {
5802 if (V_pf_status.debug >= PF_DEBUG_MISC) {
5803 printf("pf: BAD ICMP %d:%d outer dst: ",
5804 icmptype, icmpcode);
5805 pf_print_host(pd->src, 0, pd->af);
5806 printf(" -> ");
5807 pf_print_host(pd->dst, 0, pd->af);
5808 printf(" inner src: ");
5809 pf_print_host(pd2.src, 0, pd2.af);
5810 printf(" -> ");
5811 pf_print_host(pd2.dst, 0, pd2.af);
5812 printf("\n");
5813 }
5814 REASON_SET(reason, PFRES_BADSTATE);
5815 return (PF_DROP);
5816 }
5817
5818 switch (pd2.proto) {
5819 case IPPROTO_TCP: {
5820 struct tcphdr th;
5821 u_int32_t seq;
5822 struct pf_state_peer *src, *dst;
5823 u_int8_t dws;
5824 int copyback = 0;
5825
5826 /*
5827 * Only the first 8 bytes of the TCP header are guaranteed
5828 * to be present. Don't access any TCP header fields after
5829 * th_seq; an ackskew test is not possible.
5830 */
5831 if (!pf_pull_hdr(m, off2, &th, 8, NULL, reason,
5832 pd2.af)) {
5833 DPFPRINTF(PF_DEBUG_MISC,
5834 ("pf: ICMP error message too short "
5835 "(tcp)\n"));
5836 return (PF_DROP);
5837 }
5838
5839 key.af = pd2.af;
5840 key.proto = IPPROTO_TCP;
5841 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5842 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5843 key.port[pd2.sidx] = th.th_sport;
5844 key.port[pd2.didx] = th.th_dport;
5845
5846 STATE_LOOKUP(kif, &key, direction, *state, pd);
5847
5848 if (direction == (*state)->direction) {
5849 src = &(*state)->dst;
5850 dst = &(*state)->src;
5851 } else {
5852 src = &(*state)->src;
5853 dst = &(*state)->dst;
5854 }
5855
5856 if (src->wscale && dst->wscale)
5857 dws = dst->wscale & PF_WSCALE_MASK;
5858 else
5859 dws = 0;
5860
5861 /* Demodulate sequence number */
5862 seq = ntohl(th.th_seq) - src->seqdiff;
5863 if (src->seqdiff) {
5864 pf_change_a(&th.th_seq, icmpsum,
5865 htonl(seq), 0);
5866 copyback = 1;
5867 }
5868
5869 if (!((*state)->state_flags & PFSTATE_SLOPPY) &&
5870 (!SEQ_GEQ(src->seqhi, seq) ||
5871 !SEQ_GEQ(seq, src->seqlo - (dst->max_win << dws)))) {
5872 if (V_pf_status.debug >= PF_DEBUG_MISC) {
5873 printf("pf: BAD ICMP %d:%d ",
5874 icmptype, icmpcode);
5875 pf_print_host(pd->src, 0, pd->af);
5876 printf(" -> ");
5877 pf_print_host(pd->dst, 0, pd->af);
5878 printf(" state: ");
5879 pf_print_state(*state);
5880 printf(" seq=%u\n", seq);
5881 }
5882 REASON_SET(reason, PFRES_BADSTATE);
5883 return (PF_DROP);
5884 } else {
5885 if (V_pf_status.debug >= PF_DEBUG_MISC) {
5886 printf("pf: OK ICMP %d:%d ",
5887 icmptype, icmpcode);
5888 pf_print_host(pd->src, 0, pd->af);
5889 printf(" -> ");
5890 pf_print_host(pd->dst, 0, pd->af);
5891 printf(" state: ");
5892 pf_print_state(*state);
5893 printf(" seq=%u\n", seq);
5894 }
5895 }
5896
5897 /* translate source/destination address, if necessary */
5898 if ((*state)->key[PF_SK_WIRE] !=
5899 (*state)->key[PF_SK_STACK]) {
5900 struct pf_state_key *nk =
5901 (*state)->key[pd->didx];
5902
5903 if (PF_ANEQ(pd2.src,
5904 &nk->addr[pd2.sidx], pd2.af) ||
5905 nk->port[pd2.sidx] != th.th_sport)
5906 pf_change_icmp(pd2.src, &th.th_sport,
5907 daddr, &nk->addr[pd2.sidx],
5908 nk->port[pd2.sidx], NULL,
5909 pd2.ip_sum, icmpsum,
5910 pd->ip_sum, 0, pd2.af);
5911
5912 if (PF_ANEQ(pd2.dst,
5913 &nk->addr[pd2.didx], pd2.af) ||
5914 nk->port[pd2.didx] != th.th_dport)
5915 pf_change_icmp(pd2.dst, &th.th_dport,
5916 saddr, &nk->addr[pd2.didx],
5917 nk->port[pd2.didx], NULL,
5918 pd2.ip_sum, icmpsum,
5919 pd->ip_sum, 0, pd2.af);
5920 copyback = 1;
5921 }
5922
5923 if (copyback) {
5924 switch (pd2.af) {
5925 #ifdef INET
5926 case AF_INET:
5927 m_copyback(m, off, ICMP_MINLEN,
5928 (caddr_t )&pd->hdr.icmp);
5929 m_copyback(m, ipoff2, sizeof(h2),
5930 (caddr_t )&h2);
5931 break;
5932 #endif /* INET */
5933 #ifdef INET6
5934 case AF_INET6:
5935 m_copyback(m, off,
5936 sizeof(struct icmp6_hdr),
5937 (caddr_t )&pd->hdr.icmp6);
5938 m_copyback(m, ipoff2, sizeof(h2_6),
5939 (caddr_t )&h2_6);
5940 break;
5941 #endif /* INET6 */
5942 }
5943 m_copyback(m, off2, 8, (caddr_t)&th);
5944 }
5945
5946 return (PF_PASS);
5947 break;
5948 }
5949 case IPPROTO_UDP: {
5950 struct udphdr uh;
5951
5952 if (!pf_pull_hdr(m, off2, &uh, sizeof(uh),
5953 NULL, reason, pd2.af)) {
5954 DPFPRINTF(PF_DEBUG_MISC,
5955 ("pf: ICMP error message too short "
5956 "(udp)\n"));
5957 return (PF_DROP);
5958 }
5959
5960 key.af = pd2.af;
5961 key.proto = IPPROTO_UDP;
5962 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
5963 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
5964 key.port[pd2.sidx] = uh.uh_sport;
5965 key.port[pd2.didx] = uh.uh_dport;
5966
5967 STATE_LOOKUP(kif, &key, direction, *state, pd);
5968
5969 /* translate source/destination address, if necessary */
5970 if ((*state)->key[PF_SK_WIRE] !=
5971 (*state)->key[PF_SK_STACK]) {
5972 struct pf_state_key *nk =
5973 (*state)->key[pd->didx];
5974
5975 if (PF_ANEQ(pd2.src,
5976 &nk->addr[pd2.sidx], pd2.af) ||
5977 nk->port[pd2.sidx] != uh.uh_sport)
5978 pf_change_icmp(pd2.src, &uh.uh_sport,
5979 daddr, &nk->addr[pd2.sidx],
5980 nk->port[pd2.sidx], &uh.uh_sum,
5981 pd2.ip_sum, icmpsum,
5982 pd->ip_sum, 1, pd2.af);
5983
5984 if (PF_ANEQ(pd2.dst,
5985 &nk->addr[pd2.didx], pd2.af) ||
5986 nk->port[pd2.didx] != uh.uh_dport)
5987 pf_change_icmp(pd2.dst, &uh.uh_dport,
5988 saddr, &nk->addr[pd2.didx],
5989 nk->port[pd2.didx], &uh.uh_sum,
5990 pd2.ip_sum, icmpsum,
5991 pd->ip_sum, 1, pd2.af);
5992
5993 switch (pd2.af) {
5994 #ifdef INET
5995 case AF_INET:
5996 m_copyback(m, off, ICMP_MINLEN,
5997 (caddr_t )&pd->hdr.icmp);
5998 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
5999 break;
6000 #endif /* INET */
6001 #ifdef INET6
6002 case AF_INET6:
6003 m_copyback(m, off,
6004 sizeof(struct icmp6_hdr),
6005 (caddr_t )&pd->hdr.icmp6);
6006 m_copyback(m, ipoff2, sizeof(h2_6),
6007 (caddr_t )&h2_6);
6008 break;
6009 #endif /* INET6 */
6010 }
6011 m_copyback(m, off2, sizeof(uh), (caddr_t)&uh);
6012 }
6013 return (PF_PASS);
6014 break;
6015 }
6016 #ifdef INET
6017 case IPPROTO_ICMP: {
6018 struct icmp iih;
6019
6020 if (!pf_pull_hdr(m, off2, &iih, ICMP_MINLEN,
6021 NULL, reason, pd2.af)) {
6022 DPFPRINTF(PF_DEBUG_MISC,
 6023 			    ("pf: ICMP error message too short "
6024 "(icmp)\n"));
6025 return (PF_DROP);
6026 }
6027
6028 key.af = pd2.af;
6029 key.proto = IPPROTO_ICMP;
6030 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
6031 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
6032 key.port[0] = key.port[1] = iih.icmp_id;
6033
6034 STATE_LOOKUP(kif, &key, direction, *state, pd);
6035
6036 /* translate source/destination address, if necessary */
6037 if ((*state)->key[PF_SK_WIRE] !=
6038 (*state)->key[PF_SK_STACK]) {
6039 struct pf_state_key *nk =
6040 (*state)->key[pd->didx];
6041
6042 if (PF_ANEQ(pd2.src,
6043 &nk->addr[pd2.sidx], pd2.af) ||
6044 nk->port[pd2.sidx] != iih.icmp_id)
6045 pf_change_icmp(pd2.src, &iih.icmp_id,
6046 daddr, &nk->addr[pd2.sidx],
6047 nk->port[pd2.sidx], NULL,
6048 pd2.ip_sum, icmpsum,
6049 pd->ip_sum, 0, AF_INET);
6050
6051 if (PF_ANEQ(pd2.dst,
6052 &nk->addr[pd2.didx], pd2.af) ||
6053 nk->port[pd2.didx] != iih.icmp_id)
6054 pf_change_icmp(pd2.dst, &iih.icmp_id,
6055 saddr, &nk->addr[pd2.didx],
6056 nk->port[pd2.didx], NULL,
6057 pd2.ip_sum, icmpsum,
6058 pd->ip_sum, 0, AF_INET);
6059
6060 m_copyback(m, off, ICMP_MINLEN, (caddr_t)&pd->hdr.icmp);
6061 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
6062 m_copyback(m, off2, ICMP_MINLEN, (caddr_t)&iih);
6063 }
6064 return (PF_PASS);
6065 break;
6066 }
6067 #endif /* INET */
6068 #ifdef INET6
6069 case IPPROTO_ICMPV6: {
6070 struct icmp6_hdr iih;
6071
6072 if (!pf_pull_hdr(m, off2, &iih,
6073 sizeof(struct icmp6_hdr), NULL, reason, pd2.af)) {
6074 DPFPRINTF(PF_DEBUG_MISC,
6075 ("pf: ICMP error message too short "
6076 "(icmp6)\n"));
6077 return (PF_DROP);
6078 }
6079
6080 key.af = pd2.af;
6081 key.proto = IPPROTO_ICMPV6;
6082 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
6083 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
6084 key.port[0] = key.port[1] = iih.icmp6_id;
6085
6086 STATE_LOOKUP(kif, &key, direction, *state, pd);
6087
6088 /* translate source/destination address, if necessary */
6089 if ((*state)->key[PF_SK_WIRE] !=
6090 (*state)->key[PF_SK_STACK]) {
6091 struct pf_state_key *nk =
6092 (*state)->key[pd->didx];
6093
6094 if (PF_ANEQ(pd2.src,
6095 &nk->addr[pd2.sidx], pd2.af) ||
6096 nk->port[pd2.sidx] != iih.icmp6_id)
6097 pf_change_icmp(pd2.src, &iih.icmp6_id,
6098 daddr, &nk->addr[pd2.sidx],
6099 nk->port[pd2.sidx], NULL,
6100 pd2.ip_sum, icmpsum,
6101 pd->ip_sum, 0, AF_INET6);
6102
6103 if (PF_ANEQ(pd2.dst,
6104 &nk->addr[pd2.didx], pd2.af) ||
6105 nk->port[pd2.didx] != iih.icmp6_id)
6106 pf_change_icmp(pd2.dst, &iih.icmp6_id,
6107 saddr, &nk->addr[pd2.didx],
6108 nk->port[pd2.didx], NULL,
6109 pd2.ip_sum, icmpsum,
6110 pd->ip_sum, 0, AF_INET6);
6111
6112 m_copyback(m, off, sizeof(struct icmp6_hdr),
6113 (caddr_t)&pd->hdr.icmp6);
6114 m_copyback(m, ipoff2, sizeof(h2_6), (caddr_t)&h2_6);
6115 m_copyback(m, off2, sizeof(struct icmp6_hdr),
6116 (caddr_t)&iih);
6117 }
6118 return (PF_PASS);
6119 break;
6120 }
6121 #endif /* INET6 */
6122 default: {
6123 key.af = pd2.af;
6124 key.proto = pd2.proto;
6125 PF_ACPY(&key.addr[pd2.sidx], pd2.src, key.af);
6126 PF_ACPY(&key.addr[pd2.didx], pd2.dst, key.af);
6127 key.port[0] = key.port[1] = 0;
6128
6129 STATE_LOOKUP(kif, &key, direction, *state, pd);
6130
6131 /* translate source/destination address, if necessary */
6132 if ((*state)->key[PF_SK_WIRE] !=
6133 (*state)->key[PF_SK_STACK]) {
6134 struct pf_state_key *nk =
6135 (*state)->key[pd->didx];
6136
6137 if (PF_ANEQ(pd2.src,
6138 &nk->addr[pd2.sidx], pd2.af))
6139 pf_change_icmp(pd2.src, NULL, daddr,
6140 &nk->addr[pd2.sidx], 0, NULL,
6141 pd2.ip_sum, icmpsum,
6142 pd->ip_sum, 0, pd2.af);
6143
6144 if (PF_ANEQ(pd2.dst,
6145 &nk->addr[pd2.didx], pd2.af))
6146 pf_change_icmp(pd2.dst, NULL, saddr,
6147 &nk->addr[pd2.didx], 0, NULL,
6148 pd2.ip_sum, icmpsum,
6149 pd->ip_sum, 0, pd2.af);
6150
6151 switch (pd2.af) {
6152 #ifdef INET
6153 case AF_INET:
6154 m_copyback(m, off, ICMP_MINLEN,
6155 (caddr_t)&pd->hdr.icmp);
6156 m_copyback(m, ipoff2, sizeof(h2), (caddr_t)&h2);
6157 break;
6158 #endif /* INET */
6159 #ifdef INET6
6160 case AF_INET6:
6161 m_copyback(m, off,
6162 sizeof(struct icmp6_hdr),
6163 (caddr_t )&pd->hdr.icmp6);
6164 m_copyback(m, ipoff2, sizeof(h2_6),
6165 (caddr_t )&h2_6);
6166 break;
6167 #endif /* INET6 */
6168 }
6169 }
6170 return (PF_PASS);
6171 break;
6172 }
6173 }
6174 }
6175 }
6176
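/*
 * State tracking for protocols other than TCP, UDP and ICMP: a simple
 * single/multiple pseudo-connection state machine keyed on addresses only
 * (ports are always zero in the state key).
 */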
6177 static int
6178 pf_test_state_other(struct pf_kstate **state, int direction, struct pfi_kkif *kif,
6179 struct mbuf *m, struct pf_pdesc *pd)
6180 {
6181 struct pf_state_peer *src, *dst;
6182 struct pf_state_key_cmp key;
6183 uint8_t psrc, pdst;
6184
6185 bzero(&key, sizeof(key));
6186 key.af = pd->af;
6187 key.proto = pd->proto;
6188 if (direction == PF_IN) {
6189 PF_ACPY(&key.addr[0], pd->src, key.af);
6190 PF_ACPY(&key.addr[1], pd->dst, key.af);
6191 key.port[0] = key.port[1] = 0;
6192 } else {
6193 PF_ACPY(&key.addr[1], pd->src, key.af);
6194 PF_ACPY(&key.addr[0], pd->dst, key.af);
6195 key.port[1] = key.port[0] = 0;
6196 }
6197
6198 STATE_LOOKUP(kif, &key, direction, *state, pd);
6199
6200 if (direction == (*state)->direction) {
6201 src = &(*state)->src;
6202 dst = &(*state)->dst;
6203 psrc = PF_PEER_SRC;
6204 pdst = PF_PEER_DST;
6205 } else {
6206 src = &(*state)->dst;
6207 dst = &(*state)->src;
6208 psrc = PF_PEER_DST;
6209 pdst = PF_PEER_SRC;
6210 }
6211
6212 /* update states */
6213 if (src->state < PFOTHERS_SINGLE)
6214 pf_set_protostate(*state, psrc, PFOTHERS_SINGLE);
6215 if (dst->state == PFOTHERS_SINGLE)
6216 pf_set_protostate(*state, pdst, PFOTHERS_MULTIPLE);
6217
6218 /* update expire time */
6219 (*state)->expire = time_uptime;
6220 if (src->state == PFOTHERS_MULTIPLE && dst->state == PFOTHERS_MULTIPLE)
6221 (*state)->timeout = PFTM_OTHER_MULTIPLE;
6222 else
6223 (*state)->timeout = PFTM_OTHER_SINGLE;
6224
6225 /* translate source/destination address, if necessary */
6226 if ((*state)->key[PF_SK_WIRE] != (*state)->key[PF_SK_STACK]) {
6227 struct pf_state_key *nk = (*state)->key[pd->didx];
6228
6229 KASSERT(nk, ("%s: nk is null", __func__));
6230 KASSERT(pd, ("%s: pd is null", __func__));
6231 KASSERT(pd->src, ("%s: pd->src is null", __func__));
6232 KASSERT(pd->dst, ("%s: pd->dst is null", __func__));
6233 switch (pd->af) {
6234 #ifdef INET
6235 case AF_INET:
6236 if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET))
6237 pf_change_a(&pd->src->v4.s_addr,
6238 pd->ip_sum,
6239 nk->addr[pd->sidx].v4.s_addr,
6240 0);
6241
6242 if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET))
6243 pf_change_a(&pd->dst->v4.s_addr,
6244 pd->ip_sum,
6245 nk->addr[pd->didx].v4.s_addr,
6246 0);
6247
6248 break;
6249 #endif /* INET */
6250 #ifdef INET6
6251 case AF_INET6:
 6252 			if (PF_ANEQ(pd->src, &nk->addr[pd->sidx], AF_INET6))
6253 PF_ACPY(pd->src, &nk->addr[pd->sidx], pd->af);
6254
 6255 			if (PF_ANEQ(pd->dst, &nk->addr[pd->didx], AF_INET6))
6256 PF_ACPY(pd->dst, &nk->addr[pd->didx], pd->af);
6257 #endif /* INET6 */
6258 }
6259 }
6260 return (PF_PASS);
6261 }
6262
6263 /*
6264 * ipoff and off are measured from the start of the mbuf chain.
6265 * h must be at "ipoff" on the mbuf chain.
6266 */
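/*
 * On success the requested bytes are copied into *p and p is returned.
 * On failure NULL is returned and *actionp/*reasonp are set: a truncated
 * packet yields PF_DROP/PFRES_SHORT, while an IPv4 non-first fragment is
 * either passed or dropped with PFRES_FRAG depending on its offset.
 */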
6267 void *
6268 pf_pull_hdr(struct mbuf *m, int off, void *p, int len,
6269 u_short *actionp, u_short *reasonp, sa_family_t af)
6270 {
6271 switch (af) {
6272 #ifdef INET
6273 case AF_INET: {
6274 struct ip *h = mtod(m, struct ip *);
6275 u_int16_t fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
6276
6277 if (fragoff) {
6278 if (fragoff >= len)
6279 ACTION_SET(actionp, PF_PASS);
6280 else {
6281 ACTION_SET(actionp, PF_DROP);
6282 REASON_SET(reasonp, PFRES_FRAG);
6283 }
6284 return (NULL);
6285 }
6286 if (m->m_pkthdr.len < off + len ||
6287 ntohs(h->ip_len) < off + len) {
6288 ACTION_SET(actionp, PF_DROP);
6289 REASON_SET(reasonp, PFRES_SHORT);
6290 return (NULL);
6291 }
6292 break;
6293 }
6294 #endif /* INET */
6295 #ifdef INET6
6296 case AF_INET6: {
6297 struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
6298
6299 if (m->m_pkthdr.len < off + len ||
6300 (ntohs(h->ip6_plen) + sizeof(struct ip6_hdr)) <
6301 (unsigned)(off + len)) {
6302 ACTION_SET(actionp, PF_DROP);
6303 REASON_SET(reasonp, PFRES_SHORT);
6304 return (NULL);
6305 }
6306 break;
6307 }
6308 #endif /* INET6 */
6309 }
6310 m_copydata(m, off, len, p);
6311 return (p);
6312 }
6313
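/*
 * Reverse-path style check: return non-zero when addr is reachable in the
 * given routing table and, if kif is supplied, the check is performed
 * against kif's interface.  Scope-embedded IPv6 addresses and enc(4)
 * interfaces always pass.
 */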
6314 int
6315 pf_routable(struct pf_addr *addr, sa_family_t af, struct pfi_kkif *kif,
6316 int rtableid)
6317 {
6318 struct ifnet *ifp;
6319
6320 /*
6321 * Skip check for addresses with embedded interface scope,
6322 * as they would always match anyway.
6323 */
6324 if (af == AF_INET6 && IN6_IS_SCOPE_EMBED(&addr->v6))
6325 return (1);
6326
6327 if (af != AF_INET && af != AF_INET6)
6328 return (0);
6329
6330 /* Skip checks for ipsec interfaces */
6331 if (kif != NULL && kif->pfik_ifp->if_type == IFT_ENC)
6332 return (1);
6333
6334 ifp = (kif != NULL) ? kif->pfik_ifp : NULL;
6335
6336 switch (af) {
6337 #ifdef INET6
6338 case AF_INET6:
6339 return (fib6_check_urpf(rtableid, &addr->v6, 0, NHR_NONE,
6340 ifp));
6341 #endif
6342 #ifdef INET
6343 case AF_INET:
6344 return (fib4_check_urpf(rtableid, addr->v4, 0, NHR_NONE,
6345 ifp));
6346 #endif
6347 }
6348
6349 return (0);
6350 }
6351
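/*
 * Implement route-to/reply-to/dup-to for IPv4: pick the outgoing interface
 * and next hop from the state or the rule's address pool, re-run pf_test()
 * for packets redirected on input, finish delayed checksums and transmit,
 * fragmenting if necessary.
 */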
6352 #ifdef INET
6353 static void
6354 pf_route(struct mbuf **m, struct pf_krule *r, int dir, struct ifnet *oifp,
6355 struct pf_kstate *s, struct pf_pdesc *pd, struct inpcb *inp)
6356 {
6357 struct mbuf *m0, *m1, *md;
6358 struct sockaddr_in dst;
6359 struct ip *ip;
6360 struct ifnet *ifp = NULL;
6361 struct pf_addr naddr;
6362 struct pf_ksrc_node *sn = NULL;
6363 int error = 0;
6364 uint16_t ip_len, ip_off;
6365
6366 KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__));
6367 KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: invalid direction",
6368 __func__));
6369
6370 if ((pd->pf_mtag == NULL &&
6371 ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) ||
6372 pd->pf_mtag->routed++ > 3) {
6373 m0 = *m;
6374 *m = NULL;
6375 goto bad_locked;
6376 }
6377
6378 if (r->rt == PF_DUPTO) {
6379 if ((pd->pf_mtag->flags & PF_DUPLICATED)) {
6380 if (s == NULL) {
6381 ifp = r->rpool.cur->kif ?
6382 r->rpool.cur->kif->pfik_ifp : NULL;
6383 } else {
6384 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
6385 /* If pfsync'd */
6386 if (ifp == NULL)
6387 ifp = r->rpool.cur->kif ?
6388 r->rpool.cur->kif->pfik_ifp : NULL;
6389 PF_STATE_UNLOCK(s);
6390 }
6391 if (ifp == oifp) {
6392 /* When the 2nd interface is not skipped */
6393 return;
6394 } else {
6395 m0 = *m;
6396 *m = NULL;
6397 goto bad;
6398 }
6399 } else {
6400 pd->pf_mtag->flags |= PF_DUPLICATED;
6401 if (((m0 = m_dup(*m, M_NOWAIT)) == NULL)) {
6402 if (s)
6403 PF_STATE_UNLOCK(s);
6404 return;
6405 }
6406 }
6407 } else {
6408 if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
6409 pf_dummynet(pd, dir, s, r, m);
6410 if (s)
6411 PF_STATE_UNLOCK(s);
6412 return;
6413 }
6414 m0 = *m;
6415 }
6416
6417 ip = mtod(m0, struct ip *);
6418
6419 bzero(&dst, sizeof(dst));
6420 dst.sin_family = AF_INET;
6421 dst.sin_len = sizeof(dst);
6422 dst.sin_addr = ip->ip_dst;
6423
6424 bzero(&naddr, sizeof(naddr));
6425
6426 if (s == NULL) {
6427 if (TAILQ_EMPTY(&r->rpool.list)) {
6428 DPFPRINTF(PF_DEBUG_URGENT,
6429 ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__));
6430 goto bad_locked;
6431 }
6432 pf_map_addr(AF_INET, r, (struct pf_addr *)&ip->ip_src,
6433 &naddr, NULL, &sn);
6434 if (!PF_AZERO(&naddr, AF_INET))
6435 dst.sin_addr.s_addr = naddr.v4.s_addr;
6436 ifp = r->rpool.cur->kif ?
6437 r->rpool.cur->kif->pfik_ifp : NULL;
6438 } else {
6439 if (!PF_AZERO(&s->rt_addr, AF_INET))
6440 dst.sin_addr.s_addr =
6441 s->rt_addr.v4.s_addr;
6442 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
6443 PF_STATE_UNLOCK(s);
6444 }
6445 /* If pfsync'd */
6446 if (ifp == NULL)
6447 ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL;
6448 if (ifp == NULL)
6449 goto bad;
6450
6451 if (dir == PF_IN) {
6452 if (pf_test(PF_OUT, 0, ifp, &m0, inp) != PF_PASS)
6453 goto bad;
6454 else if (m0 == NULL)
6455 goto done;
6456 if (m0->m_len < sizeof(struct ip)) {
6457 DPFPRINTF(PF_DEBUG_URGENT,
6458 ("%s: m0->m_len < sizeof(struct ip)\n", __func__));
6459 goto bad;
6460 }
6461 ip = mtod(m0, struct ip *);
6462 }
6463
6464 if (ifp->if_flags & IFF_LOOPBACK)
6465 m0->m_flags |= M_SKIP_FIREWALL;
6466
6467 ip_len = ntohs(ip->ip_len);
6468 ip_off = ntohs(ip->ip_off);
6469
6470 /* Copied from FreeBSD 10.0-CURRENT ip_output. */
6471 m0->m_pkthdr.csum_flags |= CSUM_IP;
6472 if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA & ~ifp->if_hwassist) {
6473 in_delayed_cksum(m0);
6474 m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA;
6475 }
6476 #if defined(SCTP) || defined(SCTP_SUPPORT)
6477 if (m0->m_pkthdr.csum_flags & CSUM_SCTP & ~ifp->if_hwassist) {
6478 sctp_delayed_cksum(m0, (uint32_t)(ip->ip_hl << 2));
6479 m0->m_pkthdr.csum_flags &= ~CSUM_SCTP;
6480 }
6481 #endif
6482
6483 /*
6484 * If small enough for interface, or the interface will take
6485 * care of the fragmentation for us, we can just send directly.
6486 */
6487 if (ip_len <= ifp->if_mtu ||
6488 (m0->m_pkthdr.csum_flags & ifp->if_hwassist & CSUM_TSO) != 0) {
6489 ip->ip_sum = 0;
6490 if (m0->m_pkthdr.csum_flags & CSUM_IP & ~ifp->if_hwassist) {
6491 ip->ip_sum = in_cksum(m0, ip->ip_hl << 2);
6492 m0->m_pkthdr.csum_flags &= ~CSUM_IP;
6493 }
6494 m_clrprotoflags(m0); /* Avoid confusing lower layers. */
6495
6496 md = m0;
6497 error = pf_dummynet_route(pd, dir, s, r, ifp, sintosa(&dst), &md);
6498 if (md != NULL)
6499 error = (*ifp->if_output)(ifp, md, sintosa(&dst), NULL);
6500 goto done;
6501 }
6502
6503 /* Balk when DF bit is set or the interface didn't support TSO. */
6504 if ((ip_off & IP_DF) || (m0->m_pkthdr.csum_flags & CSUM_TSO)) {
6505 error = EMSGSIZE;
6506 KMOD_IPSTAT_INC(ips_cantfrag);
6507 if (r->rt != PF_DUPTO) {
6508 if (s && pd->nat_rule != NULL)
6509 PACKET_UNDO_NAT(m0, pd,
6510 (ip->ip_hl << 2) + (ip_off & IP_OFFMASK),
6511 s, dir);
6512
6513 icmp_error(m0, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0,
6514 ifp->if_mtu);
6515 goto done;
6516 } else
6517 goto bad;
6518 }
6519
6520 error = ip_fragment(ip, &m0, ifp->if_mtu, ifp->if_hwassist);
6521 if (error)
6522 goto bad;
6523
6524 for (; m0; m0 = m1) {
6525 m1 = m0->m_nextpkt;
6526 m0->m_nextpkt = NULL;
6527 if (error == 0) {
6528 m_clrprotoflags(m0);
6529 md = m0;
6530 error = pf_dummynet_route(pd, dir, s, r, ifp,
6531 sintosa(&dst), &md);
6532 if (md != NULL)
6533 error = (*ifp->if_output)(ifp, md,
6534 sintosa(&dst), NULL);
6535 } else
6536 m_freem(m0);
6537 }
6538
6539 if (error == 0)
6540 KMOD_IPSTAT_INC(ips_fragmented);
6541
6542 done:
6543 if (r->rt != PF_DUPTO)
6544 *m = NULL;
6545 return;
6546
6547 bad_locked:
6548 if (s)
6549 PF_STATE_UNLOCK(s);
6550 bad:
6551 m_freem(m0);
6552 goto done;
6553 }
6554 #endif /* INET */
6555
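/*
 * IPv6 counterpart of pf_route().  No fragmentation is done here;
 * oversized packets trigger an ICMP6 "packet too big" error instead.
 */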
6556 #ifdef INET6
6557 static void
6558 pf_route6(struct mbuf **m, struct pf_krule *r, int dir, struct ifnet *oifp,
6559 struct pf_kstate *s, struct pf_pdesc *pd, struct inpcb *inp)
6560 {
6561 struct mbuf *m0, *md;
6562 struct sockaddr_in6 dst;
6563 struct ip6_hdr *ip6;
6564 struct ifnet *ifp = NULL;
6565 struct pf_addr naddr;
6566 struct pf_ksrc_node *sn = NULL;
6567
6568 KASSERT(m && *m && r && oifp, ("%s: invalid parameters", __func__));
6569 KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: invalid direction",
6570 __func__));
6571
6572 if ((pd->pf_mtag == NULL &&
6573 ((pd->pf_mtag = pf_get_mtag(*m)) == NULL)) ||
6574 pd->pf_mtag->routed++ > 3) {
6575 m0 = *m;
6576 *m = NULL;
6577 goto bad_locked;
6578 }
6579
6580 if (r->rt == PF_DUPTO) {
6581 if ((pd->pf_mtag->flags & PF_DUPLICATED)) {
6582 if (s == NULL) {
6583 ifp = r->rpool.cur->kif ?
6584 r->rpool.cur->kif->pfik_ifp : NULL;
6585 } else {
6586 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
6587 /* If pfsync'd */
6588 if (ifp == NULL)
6589 ifp = r->rpool.cur->kif ?
6590 r->rpool.cur->kif->pfik_ifp : NULL;
6591 PF_STATE_UNLOCK(s);
6592 }
6593 if (ifp == oifp) {
6594 /* When the 2nd interface is not skipped */
6595 return;
6596 } else {
6597 m0 = *m;
6598 *m = NULL;
6599 goto bad;
6600 }
6601 } else {
6602 pd->pf_mtag->flags |= PF_DUPLICATED;
6603 if (((m0 = m_dup(*m, M_NOWAIT)) == NULL)) {
6604 if (s)
6605 PF_STATE_UNLOCK(s);
6606 return;
6607 }
6608 }
6609 } else {
6610 if ((r->rt == PF_REPLYTO) == (r->direction == dir)) {
6611 pf_dummynet(pd, dir, s, r, m);
6612 if (s)
6613 PF_STATE_UNLOCK(s);
6614 return;
6615 }
6616 m0 = *m;
6617 }
6618
6619 ip6 = mtod(m0, struct ip6_hdr *);
6620
6621 bzero(&dst, sizeof(dst));
6622 dst.sin6_family = AF_INET6;
6623 dst.sin6_len = sizeof(dst);
6624 dst.sin6_addr = ip6->ip6_dst;
6625
6626 bzero(&naddr, sizeof(naddr));
6627
6628 if (s == NULL) {
6629 if (TAILQ_EMPTY(&r->rpool.list)) {
6630 DPFPRINTF(PF_DEBUG_URGENT,
6631 ("%s: TAILQ_EMPTY(&r->rpool.list)\n", __func__));
6632 goto bad_locked;
6633 }
6634 pf_map_addr(AF_INET6, r, (struct pf_addr *)&ip6->ip6_src,
6635 &naddr, NULL, &sn);
6636 if (!PF_AZERO(&naddr, AF_INET6))
6637 PF_ACPY((struct pf_addr *)&dst.sin6_addr,
6638 &naddr, AF_INET6);
6639 ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL;
6640 } else {
6641 if (!PF_AZERO(&s->rt_addr, AF_INET6))
6642 PF_ACPY((struct pf_addr *)&dst.sin6_addr,
6643 &s->rt_addr, AF_INET6);
6644 ifp = s->rt_kif ? s->rt_kif->pfik_ifp : NULL;
6645 }
6646
6647 if (s)
6648 PF_STATE_UNLOCK(s);
6649
6650 /* If pfsync'd */
6651 if (ifp == NULL)
6652 ifp = r->rpool.cur->kif ? r->rpool.cur->kif->pfik_ifp : NULL;
6653 if (ifp == NULL)
6654 goto bad;
6655
6656 if (dir == PF_IN) {
6657 if (pf_test6(PF_OUT, 0, ifp, &m0, inp) != PF_PASS)
6658 goto bad;
6659 else if (m0 == NULL)
6660 goto done;
6661 if (m0->m_len < sizeof(struct ip6_hdr)) {
6662 DPFPRINTF(PF_DEBUG_URGENT,
6663 ("%s: m0->m_len < sizeof(struct ip6_hdr)\n",
6664 __func__));
6665 goto bad;
6666 }
6667 ip6 = mtod(m0, struct ip6_hdr *);
6668 }
6669
6670 if (ifp->if_flags & IFF_LOOPBACK)
6671 m0->m_flags |= M_SKIP_FIREWALL;
6672
6673 if (m0->m_pkthdr.csum_flags & CSUM_DELAY_DATA_IPV6 &
6674 ~ifp->if_hwassist) {
6675 uint32_t plen = m0->m_pkthdr.len - sizeof(*ip6);
6676 in6_delayed_cksum(m0, plen, sizeof(struct ip6_hdr));
6677 m0->m_pkthdr.csum_flags &= ~CSUM_DELAY_DATA_IPV6;
6678 }
6679
6680 /*
6681 * If the packet is too large for the outgoing interface,
6682 * send back an icmp6 error.
6683 */
6684 if (IN6_IS_SCOPE_EMBED(&dst.sin6_addr))
6685 dst.sin6_addr.s6_addr16[1] = htons(ifp->if_index);
6686 if ((u_long)m0->m_pkthdr.len <= ifp->if_mtu) {
6687 md = m0;
6688 pf_dummynet_route(pd, dir, s, r, ifp, sintosa(&dst), &md);
6689 if (md != NULL)
6690 nd6_output_ifp(ifp, ifp, md, &dst, NULL);
6691 }
6692 else {
6693 in6_ifstat_inc(ifp, ifs6_in_toobig);
6694 if (r->rt != PF_DUPTO) {
6695 if (s && pd->nat_rule != NULL)
6696 PACKET_UNDO_NAT(m0, pd,
6697 ((caddr_t)ip6 - m0->m_data) +
6698 sizeof(struct ip6_hdr), s, dir);
6699
6700 icmp6_error(m0, ICMP6_PACKET_TOO_BIG, 0, ifp->if_mtu);
6701 } else
6702 goto bad;
6703 }
6704
6705 done:
6706 if (r->rt != PF_DUPTO)
6707 *m = NULL;
6708 return;
6709
6710 bad_locked:
6711 if (s)
6712 PF_STATE_UNLOCK(s);
6713 bad:
6714 m_freem(m0);
6715 goto done;
6716 }
6717 #endif /* INET6 */
6718
6719 /*
6720 * FreeBSD supports cksum offloads for the following drivers.
6721 * em(4), fxp(4), lge(4), nge(4), re(4), ti(4), txp(4), xl(4)
6722 *
6723 * CSUM_DATA_VALID | CSUM_PSEUDO_HDR :
6724 * network driver performed cksum including pseudo header, need to verify
 6725  *  network driver performed cksum including pseudo header; we only need
 6726  *  to verify csum_data
 6727  * CSUM_DATA_VALID :
 6728  *  network driver performed cksum, but an additional pseudo header
 6729  *  cksum computation with the partial csum_data is needed (i.e. lack of H/W support for
6730 *
6731 * After validating the cksum of packet, set both flag CSUM_DATA_VALID and
 6732  * After validating the cksum of the packet, set both the CSUM_DATA_VALID and
 6733  * CSUM_PSEUDO_HDR flags in order to avoid recomputation of the cksum in the
6734 * Also, set csum_data to 0xffff to force cksum validation.
6735 */
6736 static int
6737 pf_check_proto_cksum(struct mbuf *m, int off, int len, u_int8_t p, sa_family_t af)
6738 {
6739 u_int16_t sum = 0;
6740 int hw_assist = 0;
6741 struct ip *ip;
6742
6743 if (off < sizeof(struct ip) || len < sizeof(struct udphdr))
6744 return (1);
6745 if (m->m_pkthdr.len < off + len)
6746 return (1);
6747
6748 switch (p) {
6749 case IPPROTO_TCP:
6750 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
6751 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
6752 sum = m->m_pkthdr.csum_data;
6753 } else {
6754 ip = mtod(m, struct ip *);
6755 sum = in_pseudo(ip->ip_src.s_addr,
6756 ip->ip_dst.s_addr, htonl((u_short)len +
6757 m->m_pkthdr.csum_data + IPPROTO_TCP));
6758 }
6759 sum ^= 0xffff;
6760 ++hw_assist;
6761 }
6762 break;
6763 case IPPROTO_UDP:
6764 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
6765 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) {
6766 sum = m->m_pkthdr.csum_data;
6767 } else {
6768 ip = mtod(m, struct ip *);
6769 sum = in_pseudo(ip->ip_src.s_addr,
6770 ip->ip_dst.s_addr, htonl((u_short)len +
6771 m->m_pkthdr.csum_data + IPPROTO_UDP));
6772 }
6773 sum ^= 0xffff;
6774 ++hw_assist;
6775 }
6776 break;
6777 case IPPROTO_ICMP:
6778 #ifdef INET6
6779 case IPPROTO_ICMPV6:
6780 #endif /* INET6 */
6781 break;
6782 default:
6783 return (1);
6784 }
6785
6786 if (!hw_assist) {
6787 switch (af) {
6788 case AF_INET:
6789 if (p == IPPROTO_ICMP) {
6790 if (m->m_len < off)
6791 return (1);
6792 m->m_data += off;
6793 m->m_len -= off;
6794 sum = in_cksum(m, len);
6795 m->m_data -= off;
6796 m->m_len += off;
6797 } else {
6798 if (m->m_len < sizeof(struct ip))
6799 return (1);
6800 sum = in4_cksum(m, p, off, len);
6801 }
6802 break;
6803 #ifdef INET6
6804 case AF_INET6:
6805 if (m->m_len < sizeof(struct ip6_hdr))
6806 return (1);
6807 sum = in6_cksum(m, p, off, len);
6808 break;
6809 #endif /* INET6 */
6810 default:
6811 return (1);
6812 }
6813 }
6814 if (sum) {
6815 switch (p) {
6816 case IPPROTO_TCP:
6817 {
6818 KMOD_TCPSTAT_INC(tcps_rcvbadsum);
6819 break;
6820 }
6821 case IPPROTO_UDP:
6822 {
6823 KMOD_UDPSTAT_INC(udps_badsum);
6824 break;
6825 }
6826 #ifdef INET
6827 case IPPROTO_ICMP:
6828 {
6829 KMOD_ICMPSTAT_INC(icps_checksum);
6830 break;
6831 }
6832 #endif
6833 #ifdef INET6
6834 case IPPROTO_ICMPV6:
6835 {
6836 KMOD_ICMP6STAT_INC(icp6s_checksum);
6837 break;
6838 }
6839 #endif /* INET6 */
6840 }
6841 return (1);
6842 } else {
6843 if (p == IPPROTO_TCP || p == IPPROTO_UDP) {
6844 m->m_pkthdr.csum_flags |=
6845 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
6846 m->m_pkthdr.csum_data = 0xffff;
6847 }
6848 }
6849 return (0);
6850 }
6851
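/*
 * Translate a pf packet description plus the matching rule/state into the
 * ip_fw_args flow that dummynet expects.  Returns false when no dummynet
 * pipe or queue applies to this packet and direction.
 */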
6852 static bool
6853 pf_pdesc_to_dnflow(int dir, const struct pf_pdesc *pd,
6854 const struct pf_krule *r, const struct pf_kstate *s,
6855 struct ip_fw_args *dnflow)
6856 {
6857 int dndir = r->direction;
6858
6859 if (s && dndir == PF_INOUT) {
6860 dndir = s->direction;
6861 } else if (dndir == PF_INOUT) {
6862 /* Assume primary direction. Happens when we've set dnpipe in
6863 * the ethernet level code. */
6864 dndir = dir;
6865 }
6866
6867 memset(dnflow, 0, sizeof(*dnflow));
6868
6869 if (pd->dport != NULL)
6870 dnflow->f_id.dst_port = ntohs(*pd->dport);
6871 if (pd->sport != NULL)
6872 dnflow->f_id.src_port = ntohs(*pd->sport);
6873
6874 if (dir == PF_IN)
6875 dnflow->flags |= IPFW_ARGS_IN;
6876 else
6877 dnflow->flags |= IPFW_ARGS_OUT;
6878
6879 if (dir != dndir && pd->act.dnrpipe) {
6880 dnflow->rule.info = pd->act.dnrpipe;
6881 }
6882 else if (dir == dndir && pd->act.dnpipe) {
6883 dnflow->rule.info = pd->act.dnpipe;
6884 }
6885 else {
6886 return (false);
6887 }
6888
6889 dnflow->rule.info |= IPFW_IS_DUMMYNET;
6890 if (r->free_flags & PFRULE_DN_IS_PIPE || pd->act.flags & PFRULE_DN_IS_PIPE)
6891 dnflow->rule.info |= IPFW_IS_PIPE;
6892
6893 dnflow->f_id.proto = pd->proto;
6894 dnflow->f_id.extra = dnflow->rule.info;
6895 switch (pd->af) {
6896 case AF_INET:
6897 dnflow->f_id.addr_type = 4;
6898 dnflow->f_id.src_ip = ntohl(pd->src->v4.s_addr);
6899 dnflow->f_id.dst_ip = ntohl(pd->dst->v4.s_addr);
6900 break;
6901 case AF_INET6:
6902 dnflow->flags |= IPFW_ARGS_IP6;
6903 dnflow->f_id.addr_type = 6;
6904 dnflow->f_id.src_ip6 = pd->src->v6;
6905 dnflow->f_id.dst_ip6 = pd->dst->v6;
6906 break;
6907 default:
6908 panic("Invalid AF");
6909 break;
6910 }
6911
6912 return (true);
6913 }
6914
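/*
 * Ethernet layer hook.  Only the stateless ethernet ruleset is evaluated
 * here; IP-level processing happens later in pf_test()/pf_test6().
 */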
6915 int
6916 pf_test_eth(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0,
6917 struct inpcb *inp)
6918 {
6919 struct pfi_kkif *kif;
6920 struct mbuf *m = *m0;
6921
6922 M_ASSERTPKTHDR(m);
6923 MPASS(ifp->if_vnet == curvnet);
6924 NET_EPOCH_ASSERT();
6925
6926 if (!V_pf_status.running)
6927 return (PF_PASS);
6928
6929 kif = (struct pfi_kkif *)ifp->if_pf_kif;
6930
6931 if (kif == NULL) {
6932 DPFPRINTF(PF_DEBUG_URGENT,
6933 ("pf_test: kif == NULL, if_xname %s\n", ifp->if_xname));
6934 return (PF_DROP);
6935 }
6936 if (kif->pfik_flags & PFI_IFLAG_SKIP)
6937 return (PF_PASS);
6938
6939 if (m->m_flags & M_SKIP_FIREWALL)
6940 return (PF_PASS);
6941
6942 /* Stateless! */
6943 return (pf_test_eth_rule(dir, kif, m0));
6944 }
6945
6946 static int
6947 pf_dummynet(struct pf_pdesc *pd, int dir, struct pf_kstate *s,
6948 struct pf_krule *r, struct mbuf **m0)
6949 {
6950 return (pf_dummynet_route(pd, dir, s, r, NULL, NULL, m0));
6951 }
6952
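/*
 * Hand a packet to dummynet.  When ifp/sa are given, the pending route-to
 * decision is stashed in the mbuf tag so the transmission can be completed
 * once dummynet re-injects the packet; *m0 is set to NULL while dummynet
 * owns the mbuf.
 */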
6953 static int
6954 pf_dummynet_route(struct pf_pdesc *pd, int dir, struct pf_kstate *s,
6955 struct pf_krule *r, struct ifnet *ifp, struct sockaddr *sa,
6956 struct mbuf **m0)
6957 {
6958 NET_EPOCH_ASSERT();
6959
6960 if (s && (s->dnpipe || s->dnrpipe)) {
6961 pd->act.dnpipe = s->dnpipe;
6962 pd->act.dnrpipe = s->dnrpipe;
6963 pd->act.flags = s->state_flags;
6964 } else if (r->dnpipe || r->dnrpipe) {
6965 pd->act.dnpipe = r->dnpipe;
6966 pd->act.dnrpipe = r->dnrpipe;
6967 pd->act.flags = r->free_flags;
6968 }
6969 if (pd->act.dnpipe || pd->act.dnrpipe) {
6970 struct ip_fw_args dnflow;
6971 if (ip_dn_io_ptr == NULL) {
6972 m_freem(*m0);
6973 *m0 = NULL;
6974 return (ENOMEM);
6975 }
6976
6977 if (pd->pf_mtag == NULL &&
6978 ((pd->pf_mtag = pf_get_mtag(*m0)) == NULL)) {
6979 m_freem(*m0);
6980 *m0 = NULL;
6981 return (ENOMEM);
6982 }
6983
6984 if (ifp != NULL) {
6985 pd->pf_mtag->flags |= PF_TAG_ROUTE_TO;
6986
6987 pd->pf_mtag->if_index = ifp->if_index;
6988 pd->pf_mtag->if_idxgen = ifp->if_idxgen;
6989
6990 MPASS(sa != NULL);
6991
6992 if (pd->af == AF_INET)
6993 memcpy(&pd->pf_mtag->dst, sa,
6994 sizeof(struct sockaddr_in));
6995 else
6996 memcpy(&pd->pf_mtag->dst, sa,
6997 sizeof(struct sockaddr_in6));
6998 }
6999
7000 if (pf_pdesc_to_dnflow(dir, pd, r, s, &dnflow)) {
7001 pd->pf_mtag->flags |= PF_TAG_DUMMYNET;
7002 ip_dn_io_ptr(m0, &dnflow);
7003 if (*m0 != NULL)
7004 pd->pf_mtag->flags &= ~PF_TAG_DUMMYNET;
7005 }
7006 }
7007
7008 return (0);
7009 }
7010
7011 #ifdef INET
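/*
 * Main IPv4 inspection routine, invoked from the pfil(9) hooks for every
 * inbound and outbound packet.  Returns PF_PASS or PF_DROP; *m0 may be
 * rewritten, replaced or consumed (set to NULL) along the way.
 */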
7012 int
7013 pf_test(int dir, int pflags, struct ifnet *ifp, struct mbuf **m0, struct inpcb *inp)
7014 {
7015 struct pfi_kkif *kif;
7016 u_short action, reason = 0, log = 0;
7017 struct mbuf *m = *m0;
7018 struct ip *h = NULL;
7019 struct m_tag *ipfwtag;
7020 struct pf_krule *a = NULL, *r = &V_pf_default_rule, *tr, *nr;
7021 struct pf_kstate *s = NULL;
7022 struct pf_kruleset *ruleset = NULL;
7023 struct pf_pdesc pd;
7024 int off, dirndx, pqid = 0;
7025
7026 PF_RULES_RLOCK_TRACKER;
7027 KASSERT(dir == PF_IN || dir == PF_OUT, ("%s: bad direction %d\n", __func__, dir));
7028 M_ASSERTPKTHDR(m);
7029
7030 if (!V_pf_status.running)
7031 return (PF_PASS);
7032
7033 PF_RULES_RLOCK();
7034
7035 kif = (struct pfi_kkif *)ifp->if_pf_kif;
7036
7037 if (__predict_false(kif == NULL)) {
7038 DPFPRINTF(PF_DEBUG_URGENT,
7039 ("pf_test: kif == NULL, if_xname %s\n", ifp->if_xname));
7040 PF_RULES_RUNLOCK();
7041 return (PF_DROP);
7042 }
7043 if (kif->pfik_flags & PFI_IFLAG_SKIP) {
7044 PF_RULES_RUNLOCK();
7045 return (PF_PASS);
7046 }
7047
7048 if (m->m_flags & M_SKIP_FIREWALL) {
7049 PF_RULES_RUNLOCK();
7050 return (PF_PASS);
7051 }
7052
7053 memset(&pd, 0, sizeof(pd));
7054 pd.pf_mtag = pf_find_mtag(m);
7055
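	/*
	 * A dummynet-delayed packet with a pending route-to decision recorded
	 * by pf_dummynet_route(): send it straight out the saved interface
	 * and skip any further processing.
	 */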
7056 if (pd.pf_mtag != NULL && (pd.pf_mtag->flags & PF_TAG_ROUTE_TO)) {
7057 pd.pf_mtag->flags &= ~PF_TAG_ROUTE_TO;
7058
7059 ifp = ifnet_byindexgen(pd.pf_mtag->if_index,
7060 pd.pf_mtag->if_idxgen);
7061 if (ifp == NULL || ifp->if_flags & IFF_DYING) {
7062 PF_RULES_RUNLOCK();
7063 m_freem(*m0);
7064 *m0 = NULL;
7065 return (PF_PASS);
7066 }
7067 PF_RULES_RUNLOCK();
7068 (ifp->if_output)(ifp, m, sintosa(&pd.pf_mtag->dst), NULL);
7069 *m0 = NULL;
7070 return (PF_PASS);
7071 }
7072
7073 if (pd.pf_mtag && pd.pf_mtag->dnpipe) {
7074 pd.act.dnpipe = pd.pf_mtag->dnpipe;
7075 pd.act.flags = pd.pf_mtag->dnflags;
7076 }
7077
7078 if (ip_dn_io_ptr != NULL && pd.pf_mtag != NULL &&
7079 pd.pf_mtag->flags & PF_TAG_DUMMYNET) {
7080 /* Dummynet re-injects packets after they've
7081 * completed their delay. We've already
7082 * processed them, so pass unconditionally. */
7083
7084 /* But only once. We may see the packet multiple times (e.g.
7085 * PFIL_IN/PFIL_OUT). */
7086 pd.pf_mtag->flags &= ~PF_TAG_DUMMYNET;
7087 PF_RULES_RUNLOCK();
7088
7089 return (PF_PASS);
7090 }
7091
7092 if (__predict_false(ip_divert_ptr != NULL) &&
7093 ((ipfwtag = m_tag_locate(m, MTAG_IPFW_RULE, 0, NULL)) != NULL)) {
7094 struct ipfw_rule_ref *rr = (struct ipfw_rule_ref *)(ipfwtag+1);
7095 if (rr->info & IPFW_IS_DIVERT && rr->rulenum == 0) {
7096 if (pd.pf_mtag == NULL &&
7097 ((pd.pf_mtag = pf_get_mtag(m)) == NULL)) {
7098 action = PF_DROP;
7099 goto done;
7100 }
7101 pd.pf_mtag->flags |= PF_PACKET_LOOPED;
7102 m_tag_delete(m, ipfwtag);
7103 }
7104 if (pd.pf_mtag && pd.pf_mtag->flags & PF_FASTFWD_OURS_PRESENT) {
7105 m->m_flags |= M_FASTFWD_OURS;
7106 pd.pf_mtag->flags &= ~PF_FASTFWD_OURS_PRESENT;
7107 }
7108 } else if (pf_normalize_ip(m0, dir, kif, &reason, &pd) != PF_PASS) {
7109 /* We do IP header normalization and packet reassembly here */
7110 action = PF_DROP;
7111 goto done;
7112 }
7113 m = *m0; /* pf_normalize messes with m0 */
7114 h = mtod(m, struct ip *);
7115
7116 off = h->ip_hl << 2;
7117 if (off < (int)sizeof(struct ip)) {
7118 action = PF_DROP;
7119 REASON_SET(&reason, PFRES_SHORT);
7120 log = 1;
7121 goto done;
7122 }
7123
7124 pd.src = (struct pf_addr *)&h->ip_src;
7125 pd.dst = (struct pf_addr *)&h->ip_dst;
7126 pd.sport = pd.dport = NULL;
7127 pd.ip_sum = &h->ip_sum;
7128 pd.proto_sum = NULL;
7129 pd.proto = h->ip_p;
7130 pd.dir = dir;
7131 pd.sidx = (dir == PF_IN) ? 0 : 1;
7132 pd.didx = (dir == PF_IN) ? 1 : 0;
7133 pd.af = AF_INET;
7134 pd.tos = h->ip_tos & ~IPTOS_ECN_MASK;
7135 pd.tot_len = ntohs(h->ip_len);
7136
7137 /* handle fragments that didn't get reassembled by normalization */
7138 if (h->ip_off & htons(IP_MF | IP_OFFMASK)) {
7139 action = pf_test_fragment(&r, dir, kif, m, h,
7140 &pd, &a, &ruleset);
7141 goto done;
7142 }
7143
7144 switch (h->ip_p) {
7145 case IPPROTO_TCP: {
7146 if (!pf_pull_hdr(m, off, &pd.hdr.tcp, sizeof(pd.hdr.tcp),
7147 &action, &reason, AF_INET)) {
7148 log = action != PF_PASS;
7149 goto done;
7150 }
7151 pd.p_len = pd.tot_len - off - (pd.hdr.tcp.th_off << 2);
7152
7153 pd.sport = &pd.hdr.tcp.th_sport;
7154 pd.dport = &pd.hdr.tcp.th_dport;
7155
7156 /* Respond to SYN with a syncookie. */
7157 if ((pd.hdr.tcp.th_flags & (TH_SYN|TH_ACK|TH_RST)) == TH_SYN &&
7158 pd.dir == PF_IN && pf_synflood_check(&pd)) {
7159 pf_syncookie_send(m, off, &pd);
7160 action = PF_DROP;
7161 break;
7162 }
7163
7164 if ((pd.hdr.tcp.th_flags & TH_ACK) && pd.p_len == 0)
7165 pqid = 1;
7166 action = pf_normalize_tcp(dir, kif, m, 0, off, h, &pd);
7167 if (action == PF_DROP)
7168 goto done;
7169 action = pf_test_state_tcp(&s, dir, kif, m, off, h, &pd,
7170 &reason);
7171 if (action == PF_PASS) {
7172 if (V_pfsync_update_state_ptr != NULL)
7173 V_pfsync_update_state_ptr(s);
7174 r = s->rule.ptr;
7175 a = s->anchor.ptr;
7176 log = s->log;
7177 } else if (s == NULL) {
7178 /* Validate remote SYN|ACK, re-create original SYN if
7179 * valid. */
7180 if ((pd.hdr.tcp.th_flags & (TH_SYN|TH_ACK|TH_RST)) ==
7181 TH_ACK && pf_syncookie_validate(&pd) &&
7182 pd.dir == PF_IN) {
7183 struct mbuf *msyn;
7184
7185 msyn = pf_syncookie_recreate_syn(h->ip_ttl,
7186 off,&pd);
7187 if (msyn == NULL) {
7188 action = PF_DROP;
7189 break;
7190 }
7191
7192 action = pf_test(dir, pflags, ifp, &msyn, inp);
7193 m_freem(msyn);
7194
7195 if (action == PF_PASS) {
7196 action = pf_test_state_tcp(&s, dir,
7197 kif, m, off, h, &pd, &reason);
7198 if (action != PF_PASS || s == NULL) {
7199 action = PF_DROP;
7200 break;
7201 }
7202
7203 s->src.seqhi = ntohl(pd.hdr.tcp.th_ack)
7204 - 1;
7205 s->src.seqlo = ntohl(pd.hdr.tcp.th_seq)
7206 - 1;
7207 pf_set_protostate(s, PF_PEER_SRC,
7208 PF_TCPS_PROXY_DST);
7209
7210 action = pf_synproxy(&pd, &s, &reason);
7211 if (action != PF_PASS)
7212 break;
7213 }
7214 break;
7215 }
7216 else {
7217 action = pf_test_rule(&r, &s, dir, kif, m, off,
7218 &pd, &a, &ruleset, inp);
7219 }
7220 }
7221 break;
7222 }
7223
7224 case IPPROTO_UDP: {
7225 if (!pf_pull_hdr(m, off, &pd.hdr.udp, sizeof(pd.hdr.udp),
7226 &action, &reason, AF_INET)) {
7227 log = action != PF_PASS;
7228 goto done;
7229 }
7230 pd.sport = &pd.hdr.udp.uh_sport;
7231 pd.dport = &pd.hdr.udp.uh_dport;
7232 if (pd.hdr.udp.uh_dport == 0 ||
7233 ntohs(pd.hdr.udp.uh_ulen) > m->m_pkthdr.len - off ||
7234 ntohs(pd.hdr.udp.uh_ulen) < sizeof(struct udphdr)) {
7235 action = PF_DROP;
7236 REASON_SET(&reason, PFRES_SHORT);
7237 goto done;
7238 }
7239 action = pf_test_state_udp(&s, dir, kif, m, off, h, &pd);
7240 if (action == PF_PASS) {
7241 if (V_pfsync_update_state_ptr != NULL)
7242 V_pfsync_update_state_ptr(s);
7243 r = s->rule.ptr;
7244 a = s->anchor.ptr;
7245 log = s->log;
7246 } else if (s == NULL)
7247 action = pf_test_rule(&r, &s, dir, kif, m, off, &pd,
7248 &a, &ruleset, inp);
7249 break;
7250 }
7251
7252 case IPPROTO_ICMP: {
7253 if (!pf_pull_hdr(m, off, &pd.hdr.icmp, ICMP_MINLEN,
|