Bug Summary

File: src/sbin/pfctl/pfctl_optimize.c
Warning: line 1318, column 5
Potential leak of memory pointed to by 'por'
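
The allocation the analyzer flags is the calloc() of 'por' in load_feedback_profile() (listing line 873); the reported leak site, line 1318, falls inside construct_superblocks(), which lies beyond the end of this excerpt. As a rough sketch only, and assuming construct_superblocks() can fail without consuming the entries still queued on 'queue', the same drain-the-queue cleanup that pfctl_optimize_ruleset() already performs on its error path (listing lines 333-338) could be applied before the early return at listing line 890:

	if (construct_superblocks(pf, &queue, &prof_superblocks)) {
		/* sketch only: free the copied rules so 'por' is not leaked */
		while ((por = TAILQ_FIRST(&queue)) != NULL) {
			TAILQ_REMOVE(&queue, por, por_entry);
			free(por);
		}
		return (1);
	}

Whether this is the correct fix depends on what construct_superblocks() does with the queue on failure, which is not visible in this excerpt.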

Annotated Source Code

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name pfctl_optimize.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model pic -pic-level 1 -pic-is-pie -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sbin/pfctl/obj -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sbin/pfctl -internal-isystem /usr/local/lib/clang/13.0.0/include -internal-externc-isystem /usr/include -O2 -Wno-uninitialized -fdebug-compilation-dir=/usr/src/sbin/pfctl/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/ben/Projects/vmm/scan-build/2022-01-12-194120-40624-1 -x c /usr/src/sbin/pfctl/pfctl_optimize.c
1/* $OpenBSD: pfctl_optimize.c,v 1.47 2020/07/21 14:10:51 henning Exp $ */
2
3/*
4 * Copyright (c) 2004 Mike Frantzen <frantzen@openbsd.org>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#include <sys/types.h>
20#include <sys/ioctl.h>
21#include <sys/socket.h>
22
23#include <netinet/in.h>
24#include <arpa/inet.h>
25#include <net/if.h>
26#include <net/pfvar.h>
27
28#include <assert.h>
29#include <ctype.h>
30#include <err.h>
31#include <errno.h>
32#include <stddef.h>
33#include <stdio.h>
34#include <stdlib.h>
35#include <string.h>
36
37#include "pfctl_parser.h"
38#include "pfctl.h"
39
40/* The size at which a table becomes faster than individual rules */
41#define TABLE_THRESHOLD6 6
42
43
44/* #define OPT_DEBUG 1 */
45#ifdef OPT_DEBUG
46# define DEBUG(str, v...)((void)0) \
47 printf("%s: " str "\n", __FUNCTION__ , ## v)
48#else
49# define DEBUG(str, v...)((void)0) ((void)0)
50#endif
51
52
53/*
54 * A container that lets us sort a superblock to optimize the skip step jumps
55 */
56struct pf_skip_step {
57 int ps_count; /* number of items */
58 TAILQ_HEAD( , pf_opt_rule)struct { struct pf_opt_rule *tqh_first; struct pf_opt_rule **
tqh_last; }
ps_rules;
59 TAILQ_ENTRY(pf_skip_step)struct { struct pf_skip_step *tqe_next; struct pf_skip_step *
*tqe_prev; }
ps_entry;
60};
61
62
63/*
64 * A superblock is a block of adjacent rules of similar action. If there
65 * are five PASS rules in a row, they all become members of a superblock.
66 * Once we have a superblock, we are free to re-order any rules within it
67 * in order to improve performance; if a packet is passed, it doesn't matter
68 * who passed it.
69 */
70struct superblock {
71 TAILQ_HEAD( , pf_opt_rule)struct { struct pf_opt_rule *tqh_first; struct pf_opt_rule **
tqh_last; }
sb_rules;
72 TAILQ_ENTRY(superblock)struct { struct superblock *tqe_next; struct superblock **tqe_prev
; }
sb_entry;
73 struct superblock *sb_profiled_block;
74 TAILQ_HEAD(skiplist, pf_skip_step)struct skiplist { struct pf_skip_step *tqh_first; struct pf_skip_step
**tqh_last; }
sb_skipsteps[PF_SKIP_COUNT9];
75};
76TAILQ_HEAD(superblocks, superblock)struct superblocks { struct superblock *tqh_first; struct superblock
**tqh_last; }
;
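/*
 * Illustrative example only (not part of pfctl_optimize.c): with a
 * hypothetical ruleset such as
 *
 *	pass  in on em0 proto tcp from 10.0.0.1
 *	pass  in on em0 proto tcp from 10.0.0.2
 *	pass  in on em0 proto tcp from 10.0.0.3
 *	block in on em0 proto udp to port 512
 *
 * the three "pass" rules agree on every BREAK field (action, quick, log,
 * tagname, ...) and so form one superblock, while the "block" rule starts
 * a new superblock because 'action' differs.  Rules inside the first
 * superblock can be reordered or combined without changing which packets
 * are passed.
 */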
77
78
79/*
80 * Description of the PF rule structure.
81 */
82enum {
83 BARRIER, /* the presence of the field puts the rule in its own block */
84 BREAK, /* the field may not differ between rules in a superblock */
85 NOMERGE, /* the field may not differ between rules when combined */
86 COMBINED, /* the field may itself be combined with other rules */
87 DC, /* we just don't care about the field */
88 NEVER}; /* we should never see this field set?!? */
89struct pf_rule_field {
90 const char *prf_name;
91 int prf_type;
92 size_t prf_offset;
93 size_t prf_size;
94} pf_rule_desc[] = {
95#define PF_RULE_FIELD(field, ty){"field", ty, __builtin_offsetof(struct pf_rule, field), sizeof
(((struct pf_rule *)0)->field)}
\
96 {#field, \
97 ty, \
98 offsetof(struct pf_rule, field)__builtin_offsetof(struct pf_rule, field), \
99 sizeof(((struct pf_rule *)0)->field)}
100
101
102 /*
103 * The presence of these fields in a rule puts the rule in its own
104 * superblock. Thus it will not be optimized. It also prevents the
105 * rule from being re-ordered at all.
106 */
107 PF_RULE_FIELD(label, BARRIER){"label", BARRIER, __builtin_offsetof(struct pf_rule, label),
sizeof(((struct pf_rule *)0)->label)}
,
108 PF_RULE_FIELD(prob, BARRIER){"prob", BARRIER, __builtin_offsetof(struct pf_rule, prob), sizeof
(((struct pf_rule *)0)->prob)}
,
109 PF_RULE_FIELD(max_states, BARRIER){"max_states", BARRIER, __builtin_offsetof(struct pf_rule, max_states
), sizeof(((struct pf_rule *)0)->max_states)}
,
110 PF_RULE_FIELD(max_src_nodes, BARRIER){"max_src_nodes", BARRIER, __builtin_offsetof(struct pf_rule,
max_src_nodes), sizeof(((struct pf_rule *)0)->max_src_nodes
)}
,
111 PF_RULE_FIELD(max_src_states, BARRIER){"max_src_states", BARRIER, __builtin_offsetof(struct pf_rule
, max_src_states), sizeof(((struct pf_rule *)0)->max_src_states
)}
,
112 PF_RULE_FIELD(max_src_conn, BARRIER){"max_src_conn", BARRIER, __builtin_offsetof(struct pf_rule, max_src_conn
), sizeof(((struct pf_rule *)0)->max_src_conn)}
,
113 PF_RULE_FIELD(max_src_conn_rate, BARRIER){"max_src_conn_rate", BARRIER, __builtin_offsetof(struct pf_rule
, max_src_conn_rate), sizeof(((struct pf_rule *)0)->max_src_conn_rate
)}
,
114 PF_RULE_FIELD(anchor, BARRIER){"anchor", BARRIER, __builtin_offsetof(struct pf_rule, anchor
), sizeof(((struct pf_rule *)0)->anchor)}
, /* for now */
115
116 /*
117 * These fields must be the same between all rules in the same superblock.
118 * These rules are allowed to be re-ordered but only among like rules.
119 * For instance we can re-order all 'tag "foo"' rules because they have the
120 * same tag. But we can not re-order between a 'tag "foo"' and a
121 * 'tag "bar"' since that would change the meaning of the ruleset.
122 */
123 PF_RULE_FIELD(tagname, BREAK){"tagname", BREAK, __builtin_offsetof(struct pf_rule, tagname
), sizeof(((struct pf_rule *)0)->tagname)}
,
124 PF_RULE_FIELD(keep_state, BREAK){"keep_state", BREAK, __builtin_offsetof(struct pf_rule, keep_state
), sizeof(((struct pf_rule *)0)->keep_state)}
,
125 PF_RULE_FIELD(qname, BREAK){"qname", BREAK, __builtin_offsetof(struct pf_rule, qname), sizeof
(((struct pf_rule *)0)->qname)}
,
126 PF_RULE_FIELD(pqname, BREAK){"pqname", BREAK, __builtin_offsetof(struct pf_rule, pqname),
sizeof(((struct pf_rule *)0)->pqname)}
,
127 PF_RULE_FIELD(rt, BREAK){"rt", BREAK, __builtin_offsetof(struct pf_rule, rt), sizeof(
((struct pf_rule *)0)->rt)}
,
128 PF_RULE_FIELD(allow_opts, BREAK){"allow_opts", BREAK, __builtin_offsetof(struct pf_rule, allow_opts
), sizeof(((struct pf_rule *)0)->allow_opts)}
,
129 PF_RULE_FIELD(rule_flag, BREAK){"rule_flag", BREAK, __builtin_offsetof(struct pf_rule, rule_flag
), sizeof(((struct pf_rule *)0)->rule_flag)}
,
130 PF_RULE_FIELD(action, BREAK){"action", BREAK, __builtin_offsetof(struct pf_rule, action),
sizeof(((struct pf_rule *)0)->action)}
,
131 PF_RULE_FIELD(log, BREAK){"log", BREAK, __builtin_offsetof(struct pf_rule, log), sizeof
(((struct pf_rule *)0)->log)}
,
132 PF_RULE_FIELD(quick, BREAK){"quick", BREAK, __builtin_offsetof(struct pf_rule, quick), sizeof
(((struct pf_rule *)0)->quick)}
,
133 PF_RULE_FIELD(return_ttl, BREAK){"return_ttl", BREAK, __builtin_offsetof(struct pf_rule, return_ttl
), sizeof(((struct pf_rule *)0)->return_ttl)}
,
134 PF_RULE_FIELD(overload_tblname, BREAK){"overload_tblname", BREAK, __builtin_offsetof(struct pf_rule
, overload_tblname), sizeof(((struct pf_rule *)0)->overload_tblname
)}
,
135 PF_RULE_FIELD(flush, BREAK){"flush", BREAK, __builtin_offsetof(struct pf_rule, flush), sizeof
(((struct pf_rule *)0)->flush)}
,
136 PF_RULE_FIELD(rdr, BREAK){"rdr", BREAK, __builtin_offsetof(struct pf_rule, rdr), sizeof
(((struct pf_rule *)0)->rdr)}
,
137 PF_RULE_FIELD(nat, BREAK){"nat", BREAK, __builtin_offsetof(struct pf_rule, nat), sizeof
(((struct pf_rule *)0)->nat)}
,
138 PF_RULE_FIELD(logif, BREAK){"logif", BREAK, __builtin_offsetof(struct pf_rule, logif), sizeof
(((struct pf_rule *)0)->logif)}
,
139 PF_RULE_FIELD(route, BREAK){"route", BREAK, __builtin_offsetof(struct pf_rule, route), sizeof
(((struct pf_rule *)0)->route)}
,
140 PF_RULE_FIELD(rtableid, BREAK){"rtableid", BREAK, __builtin_offsetof(struct pf_rule, rtableid
), sizeof(((struct pf_rule *)0)->rtableid)}
,
141
142 /*
143 * Any fields not listed in this structure act as BREAK fields
144 */
145
146
147 /*
148 * These fields must not differ when we merge two rules together but
149 * their difference isn't enough to put the rules in different superblocks.
150 * There are no problems re-ordering any rules with these fields.
151 */
152 PF_RULE_FIELD(af, NOMERGE){"af", NOMERGE, __builtin_offsetof(struct pf_rule, af), sizeof
(((struct pf_rule *)0)->af)}
,
153 PF_RULE_FIELD(ifnot, NOMERGE){"ifnot", NOMERGE, __builtin_offsetof(struct pf_rule, ifnot),
sizeof(((struct pf_rule *)0)->ifnot)}
,
154 PF_RULE_FIELD(ifname, NOMERGE){"ifname", NOMERGE, __builtin_offsetof(struct pf_rule, ifname
), sizeof(((struct pf_rule *)0)->ifname)}
, /* hack for IF groups */
155 PF_RULE_FIELD(match_tag_not, NOMERGE){"match_tag_not", NOMERGE, __builtin_offsetof(struct pf_rule,
match_tag_not), sizeof(((struct pf_rule *)0)->match_tag_not
)}
,
156 PF_RULE_FIELD(match_tagname, NOMERGE){"match_tagname", NOMERGE, __builtin_offsetof(struct pf_rule,
match_tagname), sizeof(((struct pf_rule *)0)->match_tagname
)}
,
157 PF_RULE_FIELD(os_fingerprint, NOMERGE){"os_fingerprint", NOMERGE, __builtin_offsetof(struct pf_rule
, os_fingerprint), sizeof(((struct pf_rule *)0)->os_fingerprint
)}
,
158 PF_RULE_FIELD(timeout, NOMERGE){"timeout", NOMERGE, __builtin_offsetof(struct pf_rule, timeout
), sizeof(((struct pf_rule *)0)->timeout)}
,
159 PF_RULE_FIELD(return_icmp, NOMERGE){"return_icmp", NOMERGE, __builtin_offsetof(struct pf_rule, return_icmp
), sizeof(((struct pf_rule *)0)->return_icmp)}
,
160 PF_RULE_FIELD(return_icmp6, NOMERGE){"return_icmp6", NOMERGE, __builtin_offsetof(struct pf_rule, return_icmp6
), sizeof(((struct pf_rule *)0)->return_icmp6)}
,
161 PF_RULE_FIELD(uid, NOMERGE){"uid", NOMERGE, __builtin_offsetof(struct pf_rule, uid), sizeof
(((struct pf_rule *)0)->uid)}
,
162 PF_RULE_FIELD(gid, NOMERGE){"gid", NOMERGE, __builtin_offsetof(struct pf_rule, gid), sizeof
(((struct pf_rule *)0)->gid)}
,
163 PF_RULE_FIELD(direction, NOMERGE){"direction", NOMERGE, __builtin_offsetof(struct pf_rule, direction
), sizeof(((struct pf_rule *)0)->direction)}
,
164 PF_RULE_FIELD(proto, NOMERGE){"proto", NOMERGE, __builtin_offsetof(struct pf_rule, proto),
sizeof(((struct pf_rule *)0)->proto)}
,
165 PF_RULE_FIELD(type, NOMERGE){"type", NOMERGE, __builtin_offsetof(struct pf_rule, type), sizeof
(((struct pf_rule *)0)->type)}
,
166 PF_RULE_FIELD(code, NOMERGE){"code", NOMERGE, __builtin_offsetof(struct pf_rule, code), sizeof
(((struct pf_rule *)0)->code)}
,
167 PF_RULE_FIELD(flags, NOMERGE){"flags", NOMERGE, __builtin_offsetof(struct pf_rule, flags),
sizeof(((struct pf_rule *)0)->flags)}
,
168 PF_RULE_FIELD(flagset, NOMERGE){"flagset", NOMERGE, __builtin_offsetof(struct pf_rule, flagset
), sizeof(((struct pf_rule *)0)->flagset)}
,
169 PF_RULE_FIELD(tos, NOMERGE){"tos", NOMERGE, __builtin_offsetof(struct pf_rule, tos), sizeof
(((struct pf_rule *)0)->tos)}
,
170 PF_RULE_FIELD(src.port, NOMERGE){"src.port", NOMERGE, __builtin_offsetof(struct pf_rule, src.
port), sizeof(((struct pf_rule *)0)->src.port)}
,
171 PF_RULE_FIELD(dst.port, NOMERGE){"dst.port", NOMERGE, __builtin_offsetof(struct pf_rule, dst.
port), sizeof(((struct pf_rule *)0)->dst.port)}
,
172 PF_RULE_FIELD(src.port_op, NOMERGE){"src.port_op", NOMERGE, __builtin_offsetof(struct pf_rule, src
.port_op), sizeof(((struct pf_rule *)0)->src.port_op)}
,
173 PF_RULE_FIELD(dst.port_op, NOMERGE){"dst.port_op", NOMERGE, __builtin_offsetof(struct pf_rule, dst
.port_op), sizeof(((struct pf_rule *)0)->dst.port_op)}
,
174 PF_RULE_FIELD(src.neg, NOMERGE){"src.neg", NOMERGE, __builtin_offsetof(struct pf_rule, src.neg
), sizeof(((struct pf_rule *)0)->src.neg)}
,
175 PF_RULE_FIELD(dst.neg, NOMERGE){"dst.neg", NOMERGE, __builtin_offsetof(struct pf_rule, dst.neg
), sizeof(((struct pf_rule *)0)->dst.neg)}
,
176 PF_RULE_FIELD(onrdomain, NOMERGE){"onrdomain", NOMERGE, __builtin_offsetof(struct pf_rule, onrdomain
), sizeof(((struct pf_rule *)0)->onrdomain)}
,
177 PF_RULE_FIELD(naf, NOMERGE){"naf", NOMERGE, __builtin_offsetof(struct pf_rule, naf), sizeof
(((struct pf_rule *)0)->naf)}
,
178
179 /* These fields can be merged */
180 PF_RULE_FIELD(src.addr, COMBINED){"src.addr", COMBINED, __builtin_offsetof(struct pf_rule, src
.addr), sizeof(((struct pf_rule *)0)->src.addr)}
,
181 PF_RULE_FIELD(dst.addr, COMBINED){"dst.addr", COMBINED, __builtin_offsetof(struct pf_rule, dst
.addr), sizeof(((struct pf_rule *)0)->dst.addr)}
,
182
183 /* We just don't care about these fields. They're set by the kernel */
184 PF_RULE_FIELD(skip, DC){"skip", DC, __builtin_offsetof(struct pf_rule, skip), sizeof
(((struct pf_rule *)0)->skip)}
,
185 PF_RULE_FIELD(evaluations, DC){"evaluations", DC, __builtin_offsetof(struct pf_rule, evaluations
), sizeof(((struct pf_rule *)0)->evaluations)}
,
186 PF_RULE_FIELD(packets, DC){"packets", DC, __builtin_offsetof(struct pf_rule, packets), sizeof
(((struct pf_rule *)0)->packets)}
,
187 PF_RULE_FIELD(bytes, DC){"bytes", DC, __builtin_offsetof(struct pf_rule, bytes), sizeof
(((struct pf_rule *)0)->bytes)}
,
188 PF_RULE_FIELD(kif, DC){"kif", DC, __builtin_offsetof(struct pf_rule, kif), sizeof((
(struct pf_rule *)0)->kif)}
,
189 PF_RULE_FIELD(states_cur, DC){"states_cur", DC, __builtin_offsetof(struct pf_rule, states_cur
), sizeof(((struct pf_rule *)0)->states_cur)}
,
190 PF_RULE_FIELD(states_tot, DC){"states_tot", DC, __builtin_offsetof(struct pf_rule, states_tot
), sizeof(((struct pf_rule *)0)->states_tot)}
,
191 PF_RULE_FIELD(src_nodes, DC){"src_nodes", DC, __builtin_offsetof(struct pf_rule, src_nodes
), sizeof(((struct pf_rule *)0)->src_nodes)}
,
192 PF_RULE_FIELD(nr, DC){"nr", DC, __builtin_offsetof(struct pf_rule, nr), sizeof(((struct
pf_rule *)0)->nr)}
,
193 PF_RULE_FIELD(entries, DC){"entries", DC, __builtin_offsetof(struct pf_rule, entries), sizeof
(((struct pf_rule *)0)->entries)}
,
194 PF_RULE_FIELD(qid, DC){"qid", DC, __builtin_offsetof(struct pf_rule, qid), sizeof((
(struct pf_rule *)0)->qid)}
,
195 PF_RULE_FIELD(pqid, DC){"pqid", DC, __builtin_offsetof(struct pf_rule, pqid), sizeof
(((struct pf_rule *)0)->pqid)}
,
196 PF_RULE_FIELD(anchor_relative, DC){"anchor_relative", DC, __builtin_offsetof(struct pf_rule, anchor_relative
), sizeof(((struct pf_rule *)0)->anchor_relative)}
,
197 PF_RULE_FIELD(anchor_wildcard, DC){"anchor_wildcard", DC, __builtin_offsetof(struct pf_rule, anchor_wildcard
), sizeof(((struct pf_rule *)0)->anchor_wildcard)}
,
198 PF_RULE_FIELD(tag, DC){"tag", DC, __builtin_offsetof(struct pf_rule, tag), sizeof((
(struct pf_rule *)0)->tag)}
,
199 PF_RULE_FIELD(match_tag, DC){"match_tag", DC, __builtin_offsetof(struct pf_rule, match_tag
), sizeof(((struct pf_rule *)0)->match_tag)}
,
200 PF_RULE_FIELD(overload_tbl, DC){"overload_tbl", DC, __builtin_offsetof(struct pf_rule, overload_tbl
), sizeof(((struct pf_rule *)0)->overload_tbl)}
,
201
202 /* These fields should never be set in a PASS/BLOCK rule XXX fix*/
203 PF_RULE_FIELD(max_mss, NEVER){"max_mss", NEVER, __builtin_offsetof(struct pf_rule, max_mss
), sizeof(((struct pf_rule *)0)->max_mss)}
,
204 PF_RULE_FIELD(min_ttl, NEVER){"min_ttl", NEVER, __builtin_offsetof(struct pf_rule, min_ttl
), sizeof(((struct pf_rule *)0)->min_ttl)}
,
205 PF_RULE_FIELD(set_tos, NEVER){"set_tos", NEVER, __builtin_offsetof(struct pf_rule, set_tos
), sizeof(((struct pf_rule *)0)->set_tos)}
,
206};
207
208
209
210int addrs_combineable(struct pf_rule_addr *, struct pf_rule_addr *);
211int addrs_equal(struct pf_rule_addr *, struct pf_rule_addr *);
212int block_feedback(struct pfctl *, struct superblock *);
213int combine_rules(struct pfctl *, struct superblock *);
214void comparable_rule(struct pf_rule *, const struct pf_rule *, int);
215int construct_superblocks(struct pfctl *, struct pf_opt_queue *,
216 struct superblocks *);
217void exclude_supersets(struct pf_rule *, struct pf_rule *);
218int interface_group(const char *);
219int load_feedback_profile(struct pfctl *, struct superblocks *);
220int optimize_superblock(struct pfctl *, struct superblock *);
221void remove_from_skipsteps(struct skiplist *, struct superblock *,
222 struct pf_opt_rule *, struct pf_skip_step *);
223int remove_identical_rules(struct pfctl *, struct superblock *);
224int reorder_rules(struct pfctl *, struct superblock *, int);
225int rules_combineable(struct pf_rule *, struct pf_rule *);
226void skip_append(struct superblock *, int, struct pf_skip_step *,
227 struct pf_opt_rule *);
228int skip_compare(int, struct pf_skip_step *, struct pf_opt_rule *);
229void skip_init(void);
230int skip_cmp_af(struct pf_rule *, struct pf_rule *);
231int skip_cmp_dir(struct pf_rule *, struct pf_rule *);
232int skip_cmp_rdom(struct pf_rule *, struct pf_rule *);
233int skip_cmp_dst_addr(struct pf_rule *, struct pf_rule *);
234int skip_cmp_dst_port(struct pf_rule *, struct pf_rule *);
235int skip_cmp_ifp(struct pf_rule *, struct pf_rule *);
236int skip_cmp_proto(struct pf_rule *, struct pf_rule *);
237int skip_cmp_src_addr(struct pf_rule *, struct pf_rule *);
238int skip_cmp_src_port(struct pf_rule *, struct pf_rule *);
239int superblock_inclusive(struct superblock *, struct pf_opt_rule *);
240void superblock_free(struct pfctl *, struct superblock *);
241struct pf_opt_tbl *pf_opt_table_ref(struct pf_opt_tbl *);
242void pf_opt_table_unref(struct pf_opt_tbl *);
243
244
245int (*skip_comparitors[PF_SKIP_COUNT9])(struct pf_rule *, struct pf_rule *);
246const char *skip_comparitors_names[PF_SKIP_COUNT9];
247#define PF_SKIP_COMPARITORS{ { "ifp", 0, skip_cmp_ifp }, { "dir", 1, skip_cmp_dir }, { "rdomain"
, 2, skip_cmp_rdom }, { "af", 3, skip_cmp_af }, { "proto", 4,
skip_cmp_proto }, { "saddr", 5, skip_cmp_src_addr }, { "daddr"
, 6, skip_cmp_dst_addr }, { "sport", 7, skip_cmp_src_port }, {
"dport", 8, skip_cmp_dst_port } }
{ \
248 { "ifp", PF_SKIP_IFP0, skip_cmp_ifp }, \
249 { "dir", PF_SKIP_DIR1, skip_cmp_dir }, \
250 { "rdomain", PF_SKIP_RDOM2, skip_cmp_rdom }, \
251 { "af", PF_SKIP_AF3, skip_cmp_af }, \
252 { "proto", PF_SKIP_PROTO4, skip_cmp_proto }, \
253 { "saddr", PF_SKIP_SRC_ADDR5, skip_cmp_src_addr }, \
254 { "daddr", PF_SKIP_DST_ADDR6, skip_cmp_dst_addr }, \
255 { "sport", PF_SKIP_SRC_PORT7, skip_cmp_src_port }, \
256 { "dport", PF_SKIP_DST_PORT8, skip_cmp_dst_port } \
257}
258
259struct pfr_buffer table_buffer;
260int table_identifier;
261
262
263int
264pfctl_optimize_ruleset(struct pfctl *pf, struct pf_ruleset *rs)
265{
266 struct superblocks superblocks;
267 struct pf_opt_queue opt_queue;
268 struct superblock *block;
269 struct pf_opt_rule *por;
270 struct pf_rule *r;
271 struct pf_rulequeue *old_rules;
272
273 if (TAILQ_EMPTY(rs->rules.active.ptr)(((rs->rules.active.ptr)->tqh_first) == ((void*)0)))
274 return (0);
275
276 DEBUG("optimizing ruleset \"%s\"", rs->anchor->path)((void)0);
277 memset(&table_buffer, 0, sizeof(table_buffer));
278 skip_init();
279 TAILQ_INIT(&opt_queue)do { (&opt_queue)->tqh_first = ((void*)0); (&opt_queue
)->tqh_last = &(&opt_queue)->tqh_first; } while
(0)
;
280
281 old_rules = rs->rules.active.ptr;
282 rs->rules.active.ptr = rs->rules.inactive.ptr;
283 rs->rules.inactive.ptr = old_rules;
284
285 /*
286 * XXX expanding the pf_opt_rule format throughout pfctl might allow
287 * us to avoid all this copying.
288 */
289 while ((r = TAILQ_FIRST(rs->rules.inactive.ptr)((rs->rules.inactive.ptr)->tqh_first)) != NULL((void*)0)) {
290 TAILQ_REMOVE(rs->rules.inactive.ptr, r, entries)do { if (((r)->entries.tqe_next) != ((void*)0)) (r)->entries
.tqe_next->entries.tqe_prev = (r)->entries.tqe_prev; else
(rs->rules.inactive.ptr)->tqh_last = (r)->entries.tqe_prev
; *(r)->entries.tqe_prev = (r)->entries.tqe_next; ; ; }
while (0)
;
291 if ((por = calloc(1, sizeof(*por))) == NULL((void*)0))
292 err(1, "calloc");
293 memcpy(&por->por_rule, r, sizeof(*r));
294
295 TAILQ_INSERT_TAIL(&opt_queue, por, por_entry)do { (por)->por_entry.tqe_next = ((void*)0); (por)->por_entry
.tqe_prev = (&opt_queue)->tqh_last; *(&opt_queue)->
tqh_last = (por); (&opt_queue)->tqh_last = &(por)->
por_entry.tqe_next; } while (0)
;
296 }
297
298 TAILQ_INIT(&superblocks)do { (&superblocks)->tqh_first = ((void*)0); (&superblocks
)->tqh_last = &(&superblocks)->tqh_first; } while
(0)
;
299 if (construct_superblocks(pf, &opt_queue, &superblocks))
300 goto error;
301
302 if (pf->optimize & PF_OPTIMIZE_PROFILE0x0002) {
303 if (load_feedback_profile(pf, &superblocks))
304 goto error;
305 }
306
307 TAILQ_FOREACH(block, &superblocks, sb_entry)for((block) = ((&superblocks)->tqh_first); (block) != (
(void*)0); (block) = ((block)->sb_entry.tqe_next))
{
308 if (optimize_superblock(pf, block))
309 goto error;
310 }
311
312 rs->anchor->refcnt = 0;
313 while ((block = TAILQ_FIRST(&superblocks)((&superblocks)->tqh_first))) {
314 TAILQ_REMOVE(&superblocks, block, sb_entry)do { if (((block)->sb_entry.tqe_next) != ((void*)0)) (block
)->sb_entry.tqe_next->sb_entry.tqe_prev = (block)->sb_entry
.tqe_prev; else (&superblocks)->tqh_last = (block)->
sb_entry.tqe_prev; *(block)->sb_entry.tqe_prev = (block)->
sb_entry.tqe_next; ; ; } while (0)
;
315
316 while ((por = TAILQ_FIRST(&block->sb_rules)((&block->sb_rules)->tqh_first))) {
317 TAILQ_REMOVE(&block->sb_rules, por, por_entry)do { if (((por)->por_entry.tqe_next) != ((void*)0)) (por)->
por_entry.tqe_next->por_entry.tqe_prev = (por)->por_entry
.tqe_prev; else (&block->sb_rules)->tqh_last = (por
)->por_entry.tqe_prev; *(por)->por_entry.tqe_prev = (por
)->por_entry.tqe_next; ; ; } while (0)
;
318 por->por_rule.nr = rs->anchor->refcnt++;
319 if ((r = calloc(1, sizeof(*r))) == NULL((void*)0))
320 err(1, "calloc");
321 memcpy(r, &por->por_rule, sizeof(*r));
322 TAILQ_INSERT_TAIL(rs->rules.active.ptr, r, entries)do { (r)->entries.tqe_next = ((void*)0); (r)->entries.tqe_prev
= (rs->rules.active.ptr)->tqh_last; *(rs->rules.active
.ptr)->tqh_last = (r); (rs->rules.active.ptr)->tqh_last
= &(r)->entries.tqe_next; } while (0)
;
323 pf_opt_table_unref(por->por_src_tbl);
324 pf_opt_table_unref(por->por_dst_tbl);
325 free(por);
326 }
327 superblock_free(pf, block);
328 }
329
330 return (0);
331
332error:
333 while ((por = TAILQ_FIRST(&opt_queue)((&opt_queue)->tqh_first))) {
334 TAILQ_REMOVE(&opt_queue, por, por_entry)do { if (((por)->por_entry.tqe_next) != ((void*)0)) (por)->
por_entry.tqe_next->por_entry.tqe_prev = (por)->por_entry
.tqe_prev; else (&opt_queue)->tqh_last = (por)->por_entry
.tqe_prev; *(por)->por_entry.tqe_prev = (por)->por_entry
.tqe_next; ; ; } while (0)
;
335 pf_opt_table_unref(por->por_src_tbl);
336 pf_opt_table_unref(por->por_dst_tbl);
337 free(por);
338 }
339 while ((block = TAILQ_FIRST(&superblocks)((&superblocks)->tqh_first))) {
340 TAILQ_REMOVE(&superblocks, block, sb_entry)do { if (((block)->sb_entry.tqe_next) != ((void*)0)) (block
)->sb_entry.tqe_next->sb_entry.tqe_prev = (block)->sb_entry
.tqe_prev; else (&superblocks)->tqh_last = (block)->
sb_entry.tqe_prev; *(block)->sb_entry.tqe_prev = (block)->
sb_entry.tqe_next; ; ; } while (0)
;
341 superblock_free(pf, block);
342 }
343 return (1);
344}
345
346
347/*
348 * Go ahead and optimize a superblock
349 */
350int
351optimize_superblock(struct pfctl *pf, struct superblock *block)
352{
353#ifdef OPT_DEBUG
354 struct pf_opt_rule *por;
355#endif /* OPT_DEBUG */
356
357 /* We have a few optimization passes:
358 * 1) remove duplicate rules or rules that are a subset of other
359 * rules
360 * 2) combine otherwise identical rules with different IP addresses
361 * into a single rule and put the addresses in a table.
362 * 3) re-order the rules to improve kernel skip steps
363 * 4) re-order the 'quick' rules based on feedback from the
364 * active ruleset statistics
365 *
366 * XXX combine_rules() doesn't combine v4 and v6 rules. would just
367 * have to keep af in the table container, make af 'COMBINE' and
368 * twiddle the af on the merged rule
369 * XXX maybe add a weighting to the metric on skipsteps when doing
370 * reordering. sometimes two sequential tables will be better
371 * than four consecutive interfaces.
372 * XXX need to adjust the skipstep count of everything after PROTO,
373 * since they aren't actually checked on a proto mismatch in
374 * pf_test_{tcp, udp, icmp}()
375 * XXX should I treat proto=0, af=0 or dir=0 special in skipstep
376 * calculation since they are a DC?
377 * XXX keep last skiplist of last superblock to influence this
378 * superblock. '5 inet6 log' should make '3 inet6' come before '4
379 * inet' in the next superblock.
380 * XXX would be useful to add tables for ports
381 * XXX we can also re-order some mutually exclusive superblocks to
382 * try merging superblocks before any of these optimization passes.
383 * for instance a single 'log in' rule in the middle of non-logging
384 * out rules.
385 */
386
387 /* shortcut. there will be a lot of 1-rule superblocks */
388 if (!TAILQ_NEXT(TAILQ_FIRST(&block->sb_rules), por_entry)((((&block->sb_rules)->tqh_first))->por_entry.tqe_next
)
)
389 return (0);
390
391#ifdef OPT_DEBUG
392 printf("--- Superblock ---\n");
393 TAILQ_FOREACH(por, &block->sb_rules, por_entry)for((por) = ((&block->sb_rules)->tqh_first); (por) !=
((void*)0); (por) = ((por)->por_entry.tqe_next))
{
394 printf(" ");
395 print_rule(&por->por_rule, por->por_rule.anchor ?
396 por->por_rule.anchor->name : "", PF_OPT_DEBUG0x00200);
397 }
398#endif /* OPT_DEBUG */
399
400
401 if (remove_identical_rules(pf, block))
402 return (1);
403 if (combine_rules(pf, block))
404 return (1);
405 if ((pf->optimize & PF_OPTIMIZE_PROFILE0x0002) &&
406 TAILQ_FIRST(&block->sb_rules)((&block->sb_rules)->tqh_first)->por_rule.quick &&
407 block->sb_profiled_block) {
408 if (block_feedback(pf, block))
409 return (1);
410 } else if (reorder_rules(pf, block, 0)) {
411 return (1);
412 }
413
414 /*
415 * Don't add any optimization passes below reorder_rules(). It will
416 * have divided superblocks into smaller blocks for further refinement
417 * and doesn't put them back together again. What once was a true
418 * superblock might have been split into multiple superblocks.
419 */
420
421#ifdef OPT_DEBUG
422 printf("--- END Superblock ---\n");
423#endif /* OPT_DEBUG */
424 return (0);
425}
426
427
428/*
429 * Optimization pass #1: remove identical rules
430 */
431int
432remove_identical_rules(struct pfctl *pf, struct superblock *block)
433{
434 struct pf_opt_rule *por1, *por2, *por_next, *por2_next;
435 struct pf_rule a, a2, b, b2;
436
437 for (por1 = TAILQ_FIRST(&block->sb_rules)((&block->sb_rules)->tqh_first); por1; por1 = por_next) {
438 por_next = TAILQ_NEXT(por1, por_entry)((por1)->por_entry.tqe_next);
439 for (por2 = por_next; por2; por2 = por2_next) {
440 por2_next = TAILQ_NEXT(por2, por_entry)((por2)->por_entry.tqe_next);
441 comparable_rule(&a, &por1->por_rule, DC);
442 comparable_rule(&b, &por2->por_rule, DC);
443 memcpy(&a2, &a, sizeof(a2));
444 memcpy(&b2, &b, sizeof(b2));
445
446 exclude_supersets(&a, &b);
447 exclude_supersets(&b2, &a2);
448 if (memcmp(&a, &b, sizeof(a)) == 0) {
449 DEBUG("removing identical rule nr%d = *nr%d*",((void)0)
450 por1->por_rule.nr, por2->por_rule.nr)((void)0);
451 TAILQ_REMOVE(&block->sb_rules, por2, por_entry)do { if (((por2)->por_entry.tqe_next) != ((void*)0)) (por2
)->por_entry.tqe_next->por_entry.tqe_prev = (por2)->
por_entry.tqe_prev; else (&block->sb_rules)->tqh_last
= (por2)->por_entry.tqe_prev; *(por2)->por_entry.tqe_prev
= (por2)->por_entry.tqe_next; ; ; } while (0)
;
452 if (por_next == por2)
453 por_next = TAILQ_NEXT(por1, por_entry)((por1)->por_entry.tqe_next);
454 free(por2);
455 } else if (memcmp(&a2, &b2, sizeof(a2)) == 0) {
456 DEBUG("removing identical rule *nr%d* = nr%d",((void)0)
457 por1->por_rule.nr, por2->por_rule.nr)((void)0);
458 TAILQ_REMOVE(&block->sb_rules, por1, por_entry)do { if (((por1)->por_entry.tqe_next) != ((void*)0)) (por1
)->por_entry.tqe_next->por_entry.tqe_prev = (por1)->
por_entry.tqe_prev; else (&block->sb_rules)->tqh_last
= (por1)->por_entry.tqe_prev; *(por1)->por_entry.tqe_prev
= (por1)->por_entry.tqe_next; ; ; } while (0)
;
459 free(por1);
460 break;
461 }
462 }
463 }
464
465 return (0);
466}
467
468
469/*
470 * Optimization pass #2: combine similar rules with different addresses
471 * into a single rule and a table
472 */
473int
474combine_rules(struct pfctl *pf, struct superblock *block)
475{
476 struct pf_opt_rule *p1, *p2, *por_next;
477 int src_eq, dst_eq;
478
479 /* First we make a pass to combine the rules. O(n log n) */
480 TAILQ_FOREACH(p1, &block->sb_rules, por_entry)for((p1) = ((&block->sb_rules)->tqh_first); (p1) !=
((void*)0); (p1) = ((p1)->por_entry.tqe_next))
{
481 for (p2 = TAILQ_NEXT(p1, por_entry)((p1)->por_entry.tqe_next); p2; p2 = por_next) {
482 por_next = TAILQ_NEXT(p2, por_entry)((p2)->por_entry.tqe_next);
483
484 src_eq = addrs_equal(&p1->por_rule.src,
485 &p2->por_rule.src);
486 dst_eq = addrs_equal(&p1->por_rule.dst,
487 &p2->por_rule.dst);
488
489 if (src_eq && !dst_eq && p1->por_src_tbl == NULL((void*)0) &&
490 p2->por_dst_tbl == NULL((void*)0) &&
491 p2->por_src_tbl == NULL((void*)0) &&
492 rules_combineable(&p1->por_rule, &p2->por_rule) &&
493 addrs_combineable(&p1->por_rule.dst,
494 &p2->por_rule.dst)) {
495 DEBUG("can combine rules nr%d = nr%d",((void)0)
496 p1->por_rule.nr, p2->por_rule.nr)((void)0);
497 if (p1->por_dst_tbl == NULL((void*)0) &&
498 add_opt_table(pf, &p1->por_dst_tbl,
499 p1->por_rule.af, &p1->por_rule.dst, NULL((void*)0)))
500 return (1);
501 if (add_opt_table(pf, &p1->por_dst_tbl,
502 p1->por_rule.af, &p2->por_rule.dst, NULL((void*)0)))
503 return (1);
504 if (p1->por_dst_tbl->pt_rulecount >=
505 TABLE_THRESHOLD6) {
506 TAILQ_REMOVE(&block->sb_rules, p2,do { if (((p2)->por_entry.tqe_next) != ((void*)0)) (p2)->
por_entry.tqe_next->por_entry.tqe_prev = (p2)->por_entry
.tqe_prev; else (&block->sb_rules)->tqh_last = (p2)
->por_entry.tqe_prev; *(p2)->por_entry.tqe_prev = (p2)->
por_entry.tqe_next; ; ; } while (0)
507 por_entry)do { if (((p2)->por_entry.tqe_next) != ((void*)0)) (p2)->
por_entry.tqe_next->por_entry.tqe_prev = (p2)->por_entry
.tqe_prev; else (&block->sb_rules)->tqh_last = (p2)
->por_entry.tqe_prev; *(p2)->por_entry.tqe_prev = (p2)->
por_entry.tqe_next; ; ; } while (0)
;
508 free(p2);
509 } else
510 p2->por_dst_tbl =
511 pf_opt_table_ref(p1->por_dst_tbl);
512 } else if (!src_eq && dst_eq && p1->por_dst_tbl == NULL((void*)0)
513 && p2->por_src_tbl == NULL((void*)0) &&
514 p2->por_dst_tbl == NULL((void*)0) &&
515 rules_combineable(&p1->por_rule, &p2->por_rule) &&
516 addrs_combineable(&p1->por_rule.src,
517 &p2->por_rule.src)) {
518 DEBUG("can combine rules nr%d = nr%d",((void)0)
519 p1->por_rule.nr, p2->por_rule.nr)((void)0);
520 if (p1->por_src_tbl == NULL((void*)0) &&
521 add_opt_table(pf, &p1->por_src_tbl,
522 p1->por_rule.af, &p1->por_rule.src, NULL((void*)0)))
523 return (1);
524 if (add_opt_table(pf, &p1->por_src_tbl,
525 p1->por_rule.af, &p2->por_rule.src, NULL((void*)0)))
526 return (1);
527 if (p1->por_src_tbl->pt_rulecount >=
528 TABLE_THRESHOLD6) {
529 TAILQ_REMOVE(&block->sb_rules, p2,do { if (((p2)->por_entry.tqe_next) != ((void*)0)) (p2)->
por_entry.tqe_next->por_entry.tqe_prev = (p2)->por_entry
.tqe_prev; else (&block->sb_rules)->tqh_last = (p2)
->por_entry.tqe_prev; *(p2)->por_entry.tqe_prev = (p2)->
por_entry.tqe_next; ; ; } while (0)
530 por_entry)do { if (((p2)->por_entry.tqe_next) != ((void*)0)) (p2)->
por_entry.tqe_next->por_entry.tqe_prev = (p2)->por_entry
.tqe_prev; else (&block->sb_rules)->tqh_last = (p2)
->por_entry.tqe_prev; *(p2)->por_entry.tqe_prev = (p2)->
por_entry.tqe_next; ; ; } while (0)
;
531 free(p2);
532 } else
533 p2->por_src_tbl =
534 pf_opt_table_ref(p1->por_src_tbl);
535 }
536 }
537 }
538
539
540 /*
541 * Then we make a final pass to create a valid table name and
542 * insert the name into the rules.
543 * Convert translation/routing mapping pools to tables as well.
544 */
545 for (p1 = TAILQ_FIRST(&block->sb_rules)((&block->sb_rules)->tqh_first); p1; p1 = por_next) {
546 por_next = TAILQ_NEXT(p1, por_entry)((p1)->por_entry.tqe_next);
547 assert(p1->por_src_tbl == NULL || p1->por_dst_tbl == NULL)((p1->por_src_tbl == ((void*)0) || p1->por_dst_tbl == (
(void*)0)) ? (void)0 : __assert2("/usr/src/sbin/pfctl/pfctl_optimize.c"
, 547, __func__, "p1->por_src_tbl == NULL || p1->por_dst_tbl == NULL"
))
;
548
549 if (p1->por_src_tbl && p1->por_src_tbl->pt_rulecount >=
550 TABLE_THRESHOLD6) {
551 if (p1->por_src_tbl->pt_generated) {
552 /* This rule is included in a table */
553 TAILQ_REMOVE(&block->sb_rules, p1, por_entry)do { if (((p1)->por_entry.tqe_next) != ((void*)0)) (p1)->
por_entry.tqe_next->por_entry.tqe_prev = (p1)->por_entry
.tqe_prev; else (&block->sb_rules)->tqh_last = (p1)
->por_entry.tqe_prev; *(p1)->por_entry.tqe_prev = (p1)->
por_entry.tqe_next; ; ; } while (0)
;
554 free(p1);
555 continue;
556 }
557 p1->por_src_tbl->pt_generated = 1;
558
559 if ((pf->opts & PF_OPT_NOACTION0x00008) == 0 &&
560 pf_opt_create_table(pf, p1->por_src_tbl))
561 return (1);
562
563 pf->tdirty = 1;
564
565 if (pf->opts & PF_OPT_VERBOSE0x00004)
566 print_tabledef(p1->por_src_tbl->pt_name,
567 PFR_TFLAG_CONST0x00000002, 1,
568 &p1->por_src_tbl->pt_nodes);
569
570 memset(&p1->por_rule.src.addr, 0,
571 sizeof(p1->por_rule.src.addr));
572 p1->por_rule.src.addr.type = PF_ADDR_TABLE;
573 strlcpy(p1->por_rule.src.addr.v.tblname,
574 p1->por_src_tbl->pt_name,
575 sizeof(p1->por_rule.src.addr.v.tblname));
576
577 pfr_buf_clear(p1->por_src_tbl->pt_buf);
578 free(p1->por_src_tbl->pt_buf);
579 p1->por_src_tbl->pt_buf = NULL((void*)0);
580 }
581 if (p1->por_dst_tbl && p1->por_dst_tbl->pt_rulecount >=
582 TABLE_THRESHOLD6) {
583 if (p1->por_dst_tbl->pt_generated) {
584 /* This rule is included in a table */
585 TAILQ_REMOVE(&block->sb_rules, p1, por_entry)do { if (((p1)->por_entry.tqe_next) != ((void*)0)) (p1)->
por_entry.tqe_next->por_entry.tqe_prev = (p1)->por_entry
.tqe_prev; else (&block->sb_rules)->tqh_last = (p1)
->por_entry.tqe_prev; *(p1)->por_entry.tqe_prev = (p1)->
por_entry.tqe_next; ; ; } while (0)
;
586 free(p1);
587 continue;
588 }
589 p1->por_dst_tbl->pt_generated = 1;
590
591 if ((pf->opts & PF_OPT_NOACTION0x00008) == 0 &&
592 pf_opt_create_table(pf, p1->por_dst_tbl))
593 return (1);
594 pf->tdirty = 1;
595
596 if (pf->opts & PF_OPT_VERBOSE0x00004)
597 print_tabledef(p1->por_dst_tbl->pt_name,
598 PFR_TFLAG_CONST0x00000002, 1,
599 &p1->por_dst_tbl->pt_nodes);
600
601 memset(&p1->por_rule.dst.addr, 0,
602 sizeof(p1->por_rule.dst.addr));
603 p1->por_rule.dst.addr.type = PF_ADDR_TABLE;
604 strlcpy(p1->por_rule.dst.addr.v.tblname,
605 p1->por_dst_tbl->pt_name,
606 sizeof(p1->por_rule.dst.addr.v.tblname));
607
608 pfr_buf_clear(p1->por_dst_tbl->pt_buf);
609 free(p1->por_dst_tbl->pt_buf);
610 p1->por_dst_tbl->pt_buf = NULL((void*)0);
611 }
612 }
613
614 return (0);
615}
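/*
 * Illustrative example only: if at least TABLE_THRESHOLD (6) rules in a
 * superblock differ solely in a COMBINED field such as src.addr, e.g.
 *
 *	pass in from 192.0.2.1
 *	...
 *	pass in from 192.0.2.6
 *
 * the pass above folds them into one rule whose source is a generated
 * const table (the table name below is hypothetical):
 *
 *	table <__automatic_0> const { 192.0.2.1, ..., 192.0.2.6 }
 *	pass in from <__automatic_0>
 */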
616
617
618/*
619 * Optimization pass #3: re-order rules to improve skip steps
620 */
621int
622reorder_rules(struct pfctl *pf, struct superblock *block, int depth)
623{
624 struct superblock *newblock;
625 struct pf_skip_step *skiplist;
626 struct pf_opt_rule *por;
627 int i, largest, largest_list, rule_count = 0;
628 TAILQ_HEAD( , pf_opt_rule)struct { struct pf_opt_rule *tqh_first; struct pf_opt_rule **
tqh_last; }
head;
629
630 /*
631 * Calculate the best-case skip steps. We put each rule in a list
632 * of other rules with common fields
633 */
634 for (i = 0; i < PF_SKIP_COUNT9; i++) {
635 TAILQ_FOREACH(por, &block->sb_rules, por_entry)for((por) = ((&block->sb_rules)->tqh_first); (por) !=
((void*)0); (por) = ((por)->por_entry.tqe_next))
{
636 TAILQ_FOREACH(skiplist, &block->sb_skipsteps[i],for((skiplist) = ((&block->sb_skipsteps[i])->tqh_first
); (skiplist) != ((void*)0); (skiplist) = ((skiplist)->ps_entry
.tqe_next))
637 ps_entry)for((skiplist) = ((&block->sb_skipsteps[i])->tqh_first
); (skiplist) != ((void*)0); (skiplist) = ((skiplist)->ps_entry
.tqe_next))
{
638 if (skip_compare(i, skiplist, por) == 0)
639 break;
640 }
641 if (skiplist == NULL((void*)0)) {
642 if ((skiplist = calloc(1, sizeof(*skiplist))) ==
643 NULL((void*)0))
644 err(1, "calloc");
645 TAILQ_INIT(&skiplist->ps_rules)do { (&skiplist->ps_rules)->tqh_first = ((void*)0);
(&skiplist->ps_rules)->tqh_last = &(&skiplist
->ps_rules)->tqh_first; } while (0)
;
646 TAILQ_INSERT_TAIL(&block->sb_skipsteps[i],do { (skiplist)->ps_entry.tqe_next = ((void*)0); (skiplist
)->ps_entry.tqe_prev = (&block->sb_skipsteps[i])->
tqh_last; *(&block->sb_skipsteps[i])->tqh_last = (skiplist
); (&block->sb_skipsteps[i])->tqh_last = &(skiplist
)->ps_entry.tqe_next; } while (0)
647 skiplist, ps_entry)do { (skiplist)->ps_entry.tqe_next = ((void*)0); (skiplist
)->ps_entry.tqe_prev = (&block->sb_skipsteps[i])->
tqh_last; *(&block->sb_skipsteps[i])->tqh_last = (skiplist
); (&block->sb_skipsteps[i])->tqh_last = &(skiplist
)->ps_entry.tqe_next; } while (0)
;
648 }
649 skip_append(block, i, skiplist, por);
650 }
651 }
652
653 TAILQ_FOREACH(por, &block->sb_rules, por_entry)for((por) = ((&block->sb_rules)->tqh_first); (por) !=
((void*)0); (por) = ((por)->por_entry.tqe_next))
654 rule_count++;
655
656 /*
657 * Now we're going to ignore any fields that are identical between
658 * all of the rules in the superblock and those fields which differ
659 * between every rule in the superblock.
660 */
661 largest = 0;
662 for (i = 0; i < PF_SKIP_COUNT9; i++) {
663 skiplist = TAILQ_FIRST(&block->sb_skipsteps[i])((&block->sb_skipsteps[i])->tqh_first);
664 if (skiplist->ps_count == rule_count) {
665 DEBUG("(%d) original skipstep '%s' is all rules",((void)0)
666 depth, skip_comparitors_names[i])((void)0);
667 skiplist->ps_count = 0;
668 } else if (skiplist->ps_count == 1) {
669 skiplist->ps_count = 0;
670 } else {
671 DEBUG("(%d) original skipstep '%s' largest jump is %d",((void)0)
672 depth, skip_comparitors_names[i],((void)0)
673 skiplist->ps_count)((void)0);
674 if (skiplist->ps_count > largest)
675 largest = skiplist->ps_count;
676 }
677 }
678 if (largest == 0) {
679 /* Ugh. There is NO commonality in the superblock on which
680 * to base the skipstep optimization.
681 */
682 goto done;
683 }
684
685 /*
686 * Now we're going to empty the superblock rule list and re-create
687 * it based on a more optimal skipstep order.
688 */
689 TAILQ_INIT(&head)do { (&head)->tqh_first = ((void*)0); (&head)->
tqh_last = &(&head)->tqh_first; } while (0)
;
690 TAILQ_CONCAT(&head, &block->sb_rules, por_entry)do { if (!(((&block->sb_rules)->tqh_first) == ((void
*)0))) { *(&head)->tqh_last = (&block->sb_rules
)->tqh_first; (&block->sb_rules)->tqh_first->
por_entry.tqe_prev = (&head)->tqh_last; (&head)->
tqh_last = (&block->sb_rules)->tqh_last; do { ((&
block->sb_rules))->tqh_first = ((void*)0); ((&block
->sb_rules))->tqh_last = &((&block->sb_rules
))->tqh_first; } while (0); } } while (0)
;
691
692 while (!TAILQ_EMPTY(&head)(((&head)->tqh_first) == ((void*)0))) {
693 largest = 1;
694
695 /*
696 * Find the most useful skip steps remaining
697 */
698 for (i = 0; i < PF_SKIP_COUNT9; i++) {
699 skiplist = TAILQ_FIRST(&block->sb_skipsteps[i])((&block->sb_skipsteps[i])->tqh_first);
700 if (skiplist->ps_count > largest) {
701 largest = skiplist->ps_count;
702 largest_list = i;
703 }
704 }
705
706 if (largest <= 1) {
707 /*
708 * Nothing useful left. Leave remaining rules in order.
709 */
710 DEBUG("(%d) no more commonality for skip steps", depth)((void)0);
711 TAILQ_CONCAT(&block->sb_rules, &head, por_entry)do { if (!(((&head)->tqh_first) == ((void*)0))) { *(&
block->sb_rules)->tqh_last = (&head)->tqh_first;
(&head)->tqh_first->por_entry.tqe_prev = (&block
->sb_rules)->tqh_last; (&block->sb_rules)->tqh_last
= (&head)->tqh_last; do { ((&head))->tqh_first
= ((void*)0); ((&head))->tqh_last = &((&head)
)->tqh_first; } while (0); } } while (0)
;
712 } else {
713 /*
714 * There is commonality. Extract those common rules
715 * and place them in the ruleset adjacent to each
716 * other.
717 */
718 skiplist = TAILQ_FIRST(&block->sb_skipsteps[((&block->sb_skipsteps[ largest_list])->tqh_first)
719 largest_list])((&block->sb_skipsteps[ largest_list])->tqh_first);
720 DEBUG("(%d) skipstep '%s' largest jump is %d @ #%d",((void)0)
721 depth, skip_comparitors_names[largest_list],((void)0)
722 largest, TAILQ_FIRST(&TAILQ_FIRST(&block->((void)0)
723 sb_skipsteps [largest_list])->ps_rules)->((void)0)
724 por_rule.nr)((void)0);
725 TAILQ_REMOVE(&block->sb_skipsteps[largest_list],do { if (((skiplist)->ps_entry.tqe_next) != ((void*)0)) (skiplist
)->ps_entry.tqe_next->ps_entry.tqe_prev = (skiplist)->
ps_entry.tqe_prev; else (&block->sb_skipsteps[largest_list
])->tqh_last = (skiplist)->ps_entry.tqe_prev; *(skiplist
)->ps_entry.tqe_prev = (skiplist)->ps_entry.tqe_next; ;
; } while (0)
726 skiplist, ps_entry)do { if (((skiplist)->ps_entry.tqe_next) != ((void*)0)) (skiplist
)->ps_entry.tqe_next->ps_entry.tqe_prev = (skiplist)->
ps_entry.tqe_prev; else (&block->sb_skipsteps[largest_list
])->tqh_last = (skiplist)->ps_entry.tqe_prev; *(skiplist
)->ps_entry.tqe_prev = (skiplist)->ps_entry.tqe_next; ;
; } while (0)
;
727
728
729 /*
730 * There may be further commonality inside these
731 * rules. So we'll split them off into their own
732 * superblock and pass it back into the optimizer.
733 */
734 if (skiplist->ps_count > 2) {
735 if ((newblock = calloc(1, sizeof(*newblock)))
736 == NULL((void*)0)) {
737 warn("calloc");
738 return (1);
739 }
740 TAILQ_INIT(&newblock->sb_rules)do { (&newblock->sb_rules)->tqh_first = ((void*)0);
(&newblock->sb_rules)->tqh_last = &(&newblock
->sb_rules)->tqh_first; } while (0)
;
741 for (i = 0; i < PF_SKIP_COUNT9; i++)
742 TAILQ_INIT(&newblock->sb_skipsteps[i])do { (&newblock->sb_skipsteps[i])->tqh_first = ((void
*)0); (&newblock->sb_skipsteps[i])->tqh_last = &
(&newblock->sb_skipsteps[i])->tqh_first; } while (0
)
;
743 TAILQ_INSERT_BEFORE(block, newblock, sb_entry)do { (newblock)->sb_entry.tqe_prev = (block)->sb_entry.
tqe_prev; (newblock)->sb_entry.tqe_next = (block); *(block
)->sb_entry.tqe_prev = (newblock); (block)->sb_entry.tqe_prev
= &(newblock)->sb_entry.tqe_next; } while (0)
;
744 DEBUG("(%d) splitting off %d rules from superblock @ #%d",((void)0)
745 depth, skiplist->ps_count,((void)0)
746 TAILQ_FIRST(&skiplist->ps_rules)->((void)0)
747 por_rule.nr)((void)0);
748 } else {
749 newblock = block;
750 }
751
752 while ((por = TAILQ_FIRST(&skiplist->ps_rules)((&skiplist->ps_rules)->tqh_first))) {
753 TAILQ_REMOVE(&head, por, por_entry)do { if (((por)->por_entry.tqe_next) != ((void*)0)) (por)->
por_entry.tqe_next->por_entry.tqe_prev = (por)->por_entry
.tqe_prev; else (&head)->tqh_last = (por)->por_entry
.tqe_prev; *(por)->por_entry.tqe_prev = (por)->por_entry
.tqe_next; ; ; } while (0)
;
754 TAILQ_REMOVE(&skiplist->ps_rules, por,do { if (((por)->por_skip_entry[largest_list].tqe_next) !=
((void*)0)) (por)->por_skip_entry[largest_list].tqe_next->
por_skip_entry[largest_list].tqe_prev = (por)->por_skip_entry
[largest_list].tqe_prev; else (&skiplist->ps_rules)->
tqh_last = (por)->por_skip_entry[largest_list].tqe_prev; *
(por)->por_skip_entry[largest_list].tqe_prev = (por)->por_skip_entry
[largest_list].tqe_next; ; ; } while (0)
755 por_skip_entry[largest_list])do { if (((por)->por_skip_entry[largest_list].tqe_next) !=
((void*)0)) (por)->por_skip_entry[largest_list].tqe_next->
por_skip_entry[largest_list].tqe_prev = (por)->por_skip_entry
[largest_list].tqe_prev; else (&skiplist->ps_rules)->
tqh_last = (por)->por_skip_entry[largest_list].tqe_prev; *
(por)->por_skip_entry[largest_list].tqe_prev = (por)->por_skip_entry
[largest_list].tqe_next; ; ; } while (0)
;
756 TAILQ_INSERT_TAIL(&newblock->sb_rules, por,do { (por)->por_entry.tqe_next = ((void*)0); (por)->por_entry
.tqe_prev = (&newblock->sb_rules)->tqh_last; *(&
newblock->sb_rules)->tqh_last = (por); (&newblock->
sb_rules)->tqh_last = &(por)->por_entry.tqe_next; }
while (0)
757 por_entry)do { (por)->por_entry.tqe_next = ((void*)0); (por)->por_entry
.tqe_prev = (&newblock->sb_rules)->tqh_last; *(&
newblock->sb_rules)->tqh_last = (por); (&newblock->
sb_rules)->tqh_last = &(por)->por_entry.tqe_next; }
while (0)
;
758
759 /* Remove this rule from all other skiplists */
760 remove_from_skipsteps(&block->sb_skipsteps[
761 largest_list], block, por, skiplist);
762 }
763 free(skiplist);
764 if (newblock != block)
765 if (reorder_rules(pf, newblock, depth + 1))
766 return (1);
767 }
768 }
769
770done:
771 for (i = 0; i < PF_SKIP_COUNT9; i++) {
772 while ((skiplist = TAILQ_FIRST(&block->sb_skipsteps[i])((&block->sb_skipsteps[i])->tqh_first))) {
773 TAILQ_REMOVE(&block->sb_skipsteps[i], skiplist,do { if (((skiplist)->ps_entry.tqe_next) != ((void*)0)) (skiplist
)->ps_entry.tqe_next->ps_entry.tqe_prev = (skiplist)->
ps_entry.tqe_prev; else (&block->sb_skipsteps[i])->
tqh_last = (skiplist)->ps_entry.tqe_prev; *(skiplist)->
ps_entry.tqe_prev = (skiplist)->ps_entry.tqe_next; ; ; } while
(0)
774 ps_entry)do { if (((skiplist)->ps_entry.tqe_next) != ((void*)0)) (skiplist
)->ps_entry.tqe_next->ps_entry.tqe_prev = (skiplist)->
ps_entry.tqe_prev; else (&block->sb_skipsteps[i])->
tqh_last = (skiplist)->ps_entry.tqe_prev; *(skiplist)->
ps_entry.tqe_prev = (skiplist)->ps_entry.tqe_next; ; ; } while
(0)
;
775 free(skiplist);
776 }
777 }
778
779 return (0);
780}
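/*
 * Illustrative note: pf's skip steps let the kernel jump over consecutive
 * rules that share a field value once that field fails to match.  If, say,
 * ten adjacent rules all specify "on em0", a packet arriving on another
 * interface skips all ten in one step.  The reordering above groups rules
 * by their most common field so that those jumps are as long as possible.
 */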
781
782
783/*
784 * Optimization pass #4: re-order 'quick' rules based on feedback from the
785 * currently running ruleset
786 */
787int
788block_feedback(struct pfctl *pf, struct superblock *block)
789{
790 TAILQ_HEAD( , pf_opt_rule)struct { struct pf_opt_rule *tqh_first; struct pf_opt_rule **
tqh_last; }
queue;
791 struct pf_opt_rule *por1, *por2;
792 u_int64_t total_count = 0;
793 struct pf_rule a, b;
794
795
796 /*
797 * Walk through all of the profiled superblock's rules and copy
798 * the counters onto our rules.
799 */
800 TAILQ_FOREACH(por1, &block->sb_profiled_block->sb_rules, por_entry)for((por1) = ((&block->sb_profiled_block->sb_rules)
->tqh_first); (por1) != ((void*)0); (por1) = ((por1)->por_entry
.tqe_next))
{
801 comparable_rule(&a, &por1->por_rule, DC);
802 total_count += por1->por_rule.packets[0] +
803 por1->por_rule.packets[1];
804 TAILQ_FOREACH(por2, &block->sb_rules, por_entry)for((por2) = ((&block->sb_rules)->tqh_first); (por2
) != ((void*)0); (por2) = ((por2)->por_entry.tqe_next))
{
805 if (por2->por_profile_count)
806 continue;
807 comparable_rule(&b, &por2->por_rule, DC);
808 if (memcmp(&a, &b, sizeof(a)) == 0) {
809 por2->por_profile_count =
810 por1->por_rule.packets[0] +
811 por1->por_rule.packets[1];
812 break;
813 }
814 }
815 }
816 superblock_free(pf, block->sb_profiled_block);
817 block->sb_profiled_block = NULL((void*)0);
818
819 /*
820 * Now we pull all of the rules off the superblock and re-insert them
821 * in sorted order.
822 */
823
824 TAILQ_INIT(&queue)do { (&queue)->tqh_first = ((void*)0); (&queue)->
tqh_last = &(&queue)->tqh_first; } while (0)
;
825 TAILQ_CONCAT(&queue, &block->sb_rules, por_entry)do { if (!(((&block->sb_rules)->tqh_first) == ((void
*)0))) { *(&queue)->tqh_last = (&block->sb_rules
)->tqh_first; (&block->sb_rules)->tqh_first->
por_entry.tqe_prev = (&queue)->tqh_last; (&queue)->
tqh_last = (&block->sb_rules)->tqh_last; do { ((&
block->sb_rules))->tqh_first = ((void*)0); ((&block
->sb_rules))->tqh_last = &((&block->sb_rules
))->tqh_first; } while (0); } } while (0)
;
826
827 while ((por1 = TAILQ_FIRST(&queue)((&queue)->tqh_first)) != NULL((void*)0)) {
828 TAILQ_REMOVE(&queue, por1, por_entry)do { if (((por1)->por_entry.tqe_next) != ((void*)0)) (por1
)->por_entry.tqe_next->por_entry.tqe_prev = (por1)->
por_entry.tqe_prev; else (&queue)->tqh_last = (por1)->
por_entry.tqe_prev; *(por1)->por_entry.tqe_prev = (por1)->
por_entry.tqe_next; ; ; } while (0)
;
829/* XXX I should sort all of the unused rules based on skip steps */
830 TAILQ_FOREACH(por2, &block->sb_rules, por_entry)for((por2) = ((&block->sb_rules)->tqh_first); (por2
) != ((void*)0); (por2) = ((por2)->por_entry.tqe_next))
{
831 if (por1->por_profile_count > por2->por_profile_count) {
832 TAILQ_INSERT_BEFORE(por2, por1, por_entry)do { (por1)->por_entry.tqe_prev = (por2)->por_entry.tqe_prev
; (por1)->por_entry.tqe_next = (por2); *(por2)->por_entry
.tqe_prev = (por1); (por2)->por_entry.tqe_prev = &(por1
)->por_entry.tqe_next; } while (0)
;
833 break;
834 }
835 }
836 if (por2 == NULL((void*)0))
837 TAILQ_INSERT_TAIL(&block->sb_rules, por1, por_entry)do { (por1)->por_entry.tqe_next = ((void*)0); (por1)->por_entry
.tqe_prev = (&block->sb_rules)->tqh_last; *(&block
->sb_rules)->tqh_last = (por1); (&block->sb_rules
)->tqh_last = &(por1)->por_entry.tqe_next; } while (
0)
;
838 }
839
840 return (0);
841}
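/*
 * Illustrative note: only 'quick' rules are reordered by hit count (see
 * optimize_superblock()), because a matching quick rule terminates
 * evaluation.  Within a superblock, where action and the other BREAK
 * fields are identical, putting the most frequently matched quick rules
 * first cannot change the verdict, only how soon it is reached.
 */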
842
843
844/*
845 * Load the current ruleset from the kernel and try to associate them with
846 * the ruleset we're optimizing.
847 */
848int
849load_feedback_profile(struct pfctl *pf, struct superblocks *superblocks)
850{
851 struct superblock *block, *blockcur;
852 struct superblocks prof_superblocks;
853 struct pf_opt_rule *por;
854 struct pf_opt_queue queue;
855 struct pfioc_rule pr;
856 struct pf_rule a, b;
857 int nr, mnr;
858
859 TAILQ_INIT(&queue)do { (&queue)->tqh_first = ((void*)0); (&queue)->
tqh_last = &(&queue)->tqh_first; } while (0)
;
  [1] Loop condition is false. Exiting loop
860 TAILQ_INIT(&prof_superblocks)do { (&prof_superblocks)->tqh_first = ((void*)0); (&
prof_superblocks)->tqh_last = &(&prof_superblocks)
->tqh_first; } while (0)
;
  [2] Loop condition is false. Exiting loop
861
862 memset(&pr, 0, sizeof(pr));
863 pr.rule.action = PF_PASS;
864 if (ioctl(pf->dev, DIOCGETRULES(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_rule) & 0x1fff) << 16) | ((('D')) <<
8) | ((6)))
, &pr) == -1
) {
  [3] Assuming the condition is false
  [4] Taking false branch
865 warnx("%s", pf_strerror(errno(*__errno())));
866 return (1);
867 }
868 mnr = pr.nr;
869
870 DEBUG("Loading %d active rules for a feedback profile", mnr)((void)0);
871 for (nr = 0; nr < mnr; ++nr) {
  [5] Assuming 'nr' is < 'mnr'
  [6] Loop condition is true. Entering loop body
  [13] Assuming 'nr' is >= 'mnr'
  [14] Loop condition is false. Execution continues on line 889
872 struct pf_ruleset *rs;
873 if ((por = calloc(1, sizeof(*por))) == NULL((void*)0)) {
  [7] Memory is allocated
  [8] Assuming the condition is false
  [9] Taking false branch
874 warn("calloc");
875 return (1);
876 }
877 pr.nr = nr;
878 if (ioctl(pf->dev, DIOCGETRULE(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct pfioc_rule) & 0x1fff) << 16) | ((('D')) <<
8) | ((7)))
, &pr) == -1
) {
  [10] Assuming the condition is false
  [11] Taking false branch
879 warnx("%s", pf_strerror(errno(*__errno())));
880 free(por);
881 return (1);
882 }
883 memcpy(&por->por_rule, &pr.rule, sizeof(por->por_rule));
884 rs = pf_find_or_create_ruleset(pr.anchor_call);
885 por->por_rule.anchor = rs->anchor;
886 TAILQ_INSERT_TAIL(&queue, por, por_entry)do { (por)->por_entry.tqe_next = ((void*)0); (por)->por_entry
.tqe_prev = (&queue)->tqh_last; *(&queue)->tqh_last
= (por); (&queue)->tqh_last = &(por)->por_entry
.tqe_next; } while (0)
;
  [12] Loop condition is false. Exiting loop
887 }
888
889 if (construct_superblocks(pf, &queue, &prof_superblocks))
  [15] Calling 'construct_superblocks'
890 return (1);
891
892
893 /*
894 * Now we try to associate the active ruleset's superblocks with
895 * the superblocks we're compiling.
896 */
897 block = TAILQ_FIRST(superblocks)((superblocks)->tqh_first);
898 blockcur = TAILQ_FIRST(&prof_superblocks)((&prof_superblocks)->tqh_first);
899 while (block && blockcur) {
900 comparable_rule(&a, &TAILQ_FIRST(&block->sb_rules)((&block->sb_rules)->tqh_first)->por_rule,
901 BREAK);
902 comparable_rule(&b, &TAILQ_FIRST(&blockcur->sb_rules)((&blockcur->sb_rules)->tqh_first)->por_rule,
903 BREAK);
904 if (memcmp(&a, &b, sizeof(a)) == 0) {
905 /* The two superblocks lined up */
906 block->sb_profiled_block = blockcur;
907 } else {
908 DEBUG("superblocks don't line up between #%d and #%d",((void)0)
909 TAILQ_FIRST(&block->sb_rules)->por_rule.nr,((void)0)
910 TAILQ_FIRST(&blockcur->sb_rules)->por_rule.nr)((void)0);
911 break;
912 }
913 block = TAILQ_NEXT(block, sb_entry)((block)->sb_entry.tqe_next);
914 blockcur = TAILQ_NEXT(blockcur, sb_entry)((blockcur)->sb_entry.tqe_next);
915 }
916
917
918
919 /* Free any superblocks we couldn't link */
920 while (blockcur) {
921 block = TAILQ_NEXT(blockcur, sb_entry)((blockcur)->sb_entry.tqe_next);
922 superblock_free(pf, blockcur);
923 blockcur = block;
924 }
925 return (0);
926}
927
928
929/*
930 * Compare a rule to a skiplist to see if the rule is a member
931 */
932int
933skip_compare(int skipnum, struct pf_skip_step *skiplist,
934 struct pf_opt_rule *por)
935{
936 struct pf_rule *a, *b;
937 if (skipnum >= PF_SKIP_COUNT9 || skipnum < 0)
938 errx(1, "skip_compare() out of bounds");
939 a = &por->por_rule;
940 b = &TAILQ_FIRST(&skiplist->ps_rules)((&skiplist->ps_rules)->tqh_first)->por_rule;
941
942 return ((skip_comparitors[skipnum])(a, b));
943}
944
945
946/*
947 * Add a rule to a skiplist
948 */
949void
950skip_append(struct superblock *superblock, int skipnum,
951 struct pf_skip_step *skiplist, struct pf_opt_rule *por)
952{
953 struct pf_skip_step *prev;
954
955 skiplist->ps_count++;
956 TAILQ_INSERT_TAIL(&skiplist->ps_rules, por, por_skip_entry[skipnum])do { (por)->por_skip_entry[skipnum].tqe_next = ((void*)0);
(por)->por_skip_entry[skipnum].tqe_prev = (&skiplist->
ps_rules)->tqh_last; *(&skiplist->ps_rules)->tqh_last
= (por); (&skiplist->ps_rules)->tqh_last = &(por
)->por_skip_entry[skipnum].tqe_next; } while (0)
;
957
958 /* Keep the list of skiplists sorted by whichever is larger */
959 while ((prev = TAILQ_PREV(skiplist, skiplist, ps_entry)(*(((struct skiplist *)((skiplist)->ps_entry.tqe_prev))->
tqh_last))
) &&
960 prev->ps_count < skiplist->ps_count) {
961 TAILQ_REMOVE(&superblock->sb_skipsteps[skipnum],do { if (((skiplist)->ps_entry.tqe_next) != ((void*)0)) (skiplist
)->ps_entry.tqe_next->ps_entry.tqe_prev = (skiplist)->
ps_entry.tqe_prev; else (&superblock->sb_skipsteps[skipnum
])->tqh_last = (skiplist)->ps_entry.tqe_prev; *(skiplist
)->ps_entry.tqe_prev = (skiplist)->ps_entry.tqe_next; ;
; } while (0)
962 skiplist, ps_entry)do { if (((skiplist)->ps_entry.tqe_next) != ((void*)0)) (skiplist
)->ps_entry.tqe_next->ps_entry.tqe_prev = (skiplist)->
ps_entry.tqe_prev; else (&superblock->sb_skipsteps[skipnum
])->tqh_last = (skiplist)->ps_entry.tqe_prev; *(skiplist
)->ps_entry.tqe_prev = (skiplist)->ps_entry.tqe_next; ;
; } while (0)
;
963 TAILQ_INSERT_BEFORE(prev, skiplist, ps_entry)do { (skiplist)->ps_entry.tqe_prev = (prev)->ps_entry.tqe_prev
; (skiplist)->ps_entry.tqe_next = (prev); *(prev)->ps_entry
.tqe_prev = (skiplist); (prev)->ps_entry.tqe_prev = &(
skiplist)->ps_entry.tqe_next; } while (0)
;
964 }
965}
966
967
968/*
969 * Remove a rule from the other skiplist calculations.
970 */
971void
972remove_from_skipsteps(struct skiplist *head, struct superblock *block,
973 struct pf_opt_rule *por, struct pf_skip_step *active_list)
974{
975 struct pf_skip_step *sk, *next;
976 struct pf_opt_rule *p2;
977 int i, found;
978
979 for (i = 0; i < PF_SKIP_COUNT; i++) {
980 sk = TAILQ_FIRST(&block->sb_skipsteps[i]);
981 if (sk == NULL || sk == active_list || sk->ps_count <= 1)
982 continue;
983 found = 0;
984 do {
985 TAILQ_FOREACH(p2, &sk->ps_rules, por_skip_entry[i])
986 if (p2 == por) {
987 TAILQ_REMOVE(&sk->ps_rules, p2,
988 por_skip_entry[i]);
989 found = 1;
990 sk->ps_count--;
991 break;
992 }
993 } while (!found && (sk = TAILQ_NEXT(sk, ps_entry)));
994 if (found && sk) {
995 /* Does this change the sorting order? */
996 while ((next = TAILQ_NEXT(sk, ps_entry)) &&
997 next->ps_count > sk->ps_count) {
998 TAILQ_REMOVE(head, sk, ps_entry);
999 TAILQ_INSERT_AFTER(head, next, sk, ps_entry);
1000 }
1001#ifdef OPT_DEBUG
1002 next = TAILQ_NEXT(sk, ps_entry);
1003 assert(next == NULL || next->ps_count <= sk->ps_count);
1004#endif /* OPT_DEBUG */
1005 }
1006 }
1007}
1008
1009
1010 /* Compare two rules' AF fields for skiplist construction */
1011int
1012skip_cmp_af(struct pf_rule *a, struct pf_rule *b)
1013{
1014 if (a->af != b->af || a->af == 0)
1015 return (1);
1016 return (0);
1017}
1018
1019 /* Compare two rules' DIRECTION fields for skiplist construction */
1020int
1021skip_cmp_dir(struct pf_rule *a, struct pf_rule *b)
1022{
1023 if (a->direction == 0 || a->direction != b->direction)
1024 return (1);
1025 return (0);
1026}
1027
1028 /* Compare two rules' ON RDOMAIN fields for skiplist construction */
1029int
1030skip_cmp_rdom(struct pf_rule *a, struct pf_rule *b)
1031{
1032 if (a->onrdomain == -1 || a->onrdomain != b->onrdomain)
1033 return (1);
1034 return (a->ifnot != b->ifnot);
1035}
1036
1037 /* Compare two rules' DST address fields for skiplist construction */
1038int
1039skip_cmp_dst_addr(struct pf_rule *a, struct pf_rule *b)
1040{
1041 if (a->dst.neg != b->dst.neg ||
1042 a->dst.addr.type != b->dst.addr.type)
1043 return (1);
1044 /* XXX if (a->proto != b->proto && a->proto != 0 && b->proto != 0
1045 * && (a->proto == IPPROTO_TCP || a->proto == IPPROTO_UDP ||
1046 * a->proto == IPPROTO_ICMP
1047 * return (1);
1048 */
1049 switch (a->dst.addr.type) {
1050 case PF_ADDR_ADDRMASK:
1051 if (memcmp(&a->dst.addr.v.a.addr, &b->dst.addr.v.a.addr,
1052 sizeof(a->dst.addr.v.a.addr)) ||
1053 memcmp(&a->dst.addr.v.a.mask, &b->dst.addr.v.a.mask,
1054 sizeof(a->dst.addr.v.a.mask)) ||
1055 (a->dst.addr.v.a.addr.addr32[0] == 0 &&
1056 a->dst.addr.v.a.addr.addr32[1] == 0 &&
1057 a->dst.addr.v.a.addr.addr32[2] == 0 &&
1058 a->dst.addr.v.a.addr.addr32[3] == 0))
1059 return (1);
1060 return (0);
1061 case PF_ADDR_DYNIFTL:
1062 if (strcmp(a->dst.addr.v.ifname, b->dst.addr.v.ifname) != 0 ||
1063 a->dst.addr.iflags != b->dst.addr.iflags ||
1064 memcmp(&a->dst.addr.v.a.mask, &b->dst.addr.v.a.mask,
1065 sizeof(a->dst.addr.v.a.mask)))
1066 return (1);
1067 return (0);
1068 case PF_ADDR_NOROUTE:
1069 case PF_ADDR_URPFFAILED:
1070 return (0);
1071 case PF_ADDR_TABLE:
1072 return (strcmp(a->dst.addr.v.tblname, b->dst.addr.v.tblname));
1073 }
1074 return (1);
1075}
1076
1077 /* Compare two rules' DST port fields for skiplist construction */
1078int
1079skip_cmp_dst_port(struct pf_rule *a, struct pf_rule *b)
1080{
1081 /* XXX if (a->proto != b->proto && a->proto != 0 && b->proto != 0
1082 * && (a->proto == IPPROTO_TCP || a->proto == IPPROTO_UDP ||
1083 * a->proto == IPPROTO_ICMP
1084 * return (1);
1085 */
1086 if (a->dst.port_op == PF_OP_NONE || a->dst.port_op != b->dst.port_op ||
1087 a->dst.port[0] != b->dst.port[0] ||
1088 a->dst.port[1] != b->dst.port[1])
1089 return (1);
1090 return (0);
1091}
1092
1093 /* Compare two rules' IFP fields for skiplist construction */
1094int
1095skip_cmp_ifp(struct pf_rule *a, struct pf_rule *b)
1096{
1097 if (strcmp(a->ifname, b->ifname) || a->ifname[0] == '\0')
1098 return (1);
1099 return (a->ifnot != b->ifnot);
1100}
1101
1102 /* Compare two rules' PROTO fields for skiplist construction */
1103int
1104skip_cmp_proto(struct pf_rule *a, struct pf_rule *b)
1105{
1106 return (a->proto != b->proto || a->proto == 0);
1107}
1108
1109 /* Compare two rules' SRC addr fields for skiplist construction */
1110int
1111skip_cmp_src_addr(struct pf_rule *a, struct pf_rule *b)
1112{
1113 if (a->src.neg != b->src.neg ||
1114 a->src.addr.type != b->src.addr.type)
1115 return (1);
1116 /* XXX if (a->proto != b->proto && a->proto != 0 && b->proto != 0
1117 * && (a->proto == IPPROTO_TCP || a->proto == IPPROTO_UDP ||
1118 * a->proto == IPPROTO_ICMP
1119 * return (1);
1120 */
1121 switch (a->src.addr.type) {
1122 case PF_ADDR_ADDRMASK:
1123 if (memcmp(&a->src.addr.v.a.addr, &b->src.addr.v.a.addr,
1124 sizeof(a->src.addr.v.a.addr)) ||
1125 memcmp(&a->src.addr.v.a.mask, &b->src.addr.v.a.mask,
1126 sizeof(a->src.addr.v.a.mask)) ||
1127 (a->src.addr.v.a.addr.addr32[0] == 0 &&
1128 a->src.addr.v.a.addr.addr32[1] == 0 &&
1129 a->src.addr.v.a.addr.addr32[2] == 0 &&
1130 a->src.addr.v.a.addr.addr32[3] == 0))
1131 return (1);
1132 return (0);
1133 case PF_ADDR_DYNIFTL:
1134 if (strcmp(a->src.addr.v.ifname, b->src.addr.v.ifname) != 0 ||
1135 a->src.addr.iflags != b->src.addr.iflags ||
1136 memcmp(&a->src.addr.v.a.mask, &b->src.addr.v.a.mask,
1137 sizeof(a->src.addr.v.a.mask)))
1138 return (1);
1139 return (0);
1140 case PF_ADDR_NOROUTE:
1141 case PF_ADDR_URPFFAILED:
1142 return (0);
1143 case PF_ADDR_TABLE:
1144 return (strcmp(a->src.addr.v.tblname, b->src.addr.v.tblname));
1145 }
1146 return (1);
1147}
1148
1149 /* Compare two rules' SRC port fields for skiplist construction */
1150int
1151skip_cmp_src_port(struct pf_rule *a, struct pf_rule *b)
1152{
1153 if (a->src.port_op == PF_OP_NONE || a->src.port_op != b->src.port_op ||
1154 a->src.port[0] != b->src.port[0] ||
1155 a->src.port[1] != b->src.port[1])
1156 return (1);
1157 /* XXX if (a->proto != b->proto && a->proto != 0 && b->proto != 0
1158 * && (a->proto == IPPROTO_TCP || a->proto == IPPROTO_UDP ||
1159 * a->proto == IPPROTO_ICMP
1160 * return (1);
1161 */
1162 return (0);
1163}
1164
1165
1166void
1167skip_init(void)
1168{
1169 struct {
1170 char *name;
1171 int skipnum;
1172 int (*func)(struct pf_rule *, struct pf_rule *);
1173 } comps[] = PF_SKIP_COMPARITORS;
1174 int skipnum, i;
1175
1176 for (skipnum = 0; skipnum < PF_SKIP_COUNT; skipnum++) {
1177 for (i = 0; i < sizeof(comps)/sizeof(*comps); i++)
1178 if (comps[i].skipnum == skipnum) {
1179 skip_comparitors[skipnum] = comps[i].func;
1180 skip_comparitors_names[skipnum] = comps[i].name;
1181 }
1182 }
1183 for (skipnum = 0; skipnum < PF_SKIP_COUNT; skipnum++)
1184 if (skip_comparitors[skipnum] == NULL)
1185 errx(1, "Need to add skip step comparitor to pfctl?!");
1186}
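
Once skip_init() has filled skip_comparitors[], each slot answers the question "do these two rules differ in this skip-step field?" (the registered functions return 0 for a match and non-zero otherwise). The helper below is only an illustrative sketch of how such a table is typically consumed; the function name is invented and is not part of pfctl:

/*
 * Illustrative sketch (hypothetical helper; assumes it lives in this file
 * so skip_comparitors[] and PF_SKIP_COUNT are in scope): returns 1 when
 * rules 'a' and 'b' agree on skip step 'i', i.e. a single skip-step jump
 * could cover both rules.
 */
static int
rules_share_skip_step(struct pf_rule *a, struct pf_rule *b, int i)
{
    if (i < 0 || i >= PF_SKIP_COUNT)
        return (0);
    return (skip_comparitors[i](a, b) == 0);
}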
1187
1188/*
1189 * Add a host/netmask to a table
1190 */
1191int
1192add_opt_table(struct pfctl *pf, struct pf_opt_tbl **tbl, sa_family_t af,
1193 struct pf_rule_addr *addr, char *ifname)
1194{
1195#ifdef OPT_DEBUG
1196 char buf[128];
1197#endif /* OPT_DEBUG */
1198 static int tablenum = 0;
1199 struct node_host node_host;
1200
1201 if (*tbl == NULL) {
1202 if ((*tbl = calloc(1, sizeof(**tbl))) == NULL ||
1203 ((*tbl)->pt_buf = calloc(1, sizeof(*(*tbl)->pt_buf))) ==
1204 NULL)
1205 err(1, "calloc");
1206 (*tbl)->pt_refcnt = 1;
1207 (*tbl)->pt_buf->pfrb_type = PFRB_ADDRS;
1208 SIMPLEQ_INIT(&(*tbl)->pt_nodes);
1209
1210 /* This is just a temporary table name */
1211 snprintf((*tbl)->pt_name, sizeof((*tbl)->pt_name), "%s%d",
1212 PF_OPTIMIZER_TABLE_PFX, tablenum++);
1213 DEBUG("creating table <%s>", (*tbl)->pt_name);
1214 }
1215
1216 memset(&node_host, 0, sizeof(node_host));
1217 node_host.af = af;
1218 node_host.addr = addr->addr;
1219 node_host.ifname = ifname;
1220 node_host.weight = addr->weight;
1221
1222 DEBUG("<%s> adding %s/%d", (*tbl)->pt_name, inet_ntop(af,
1223 &node_host.addr.v.a.addr, buf, sizeof(buf)),
1224 unmask(&node_host.addr.v.a.mask));
1225
1226 if (append_addr_host((*tbl)->pt_buf, &node_host, 0, 0)) {
1227 warn("failed to add host");
1228 return (1);
1229 }
1230 if (pf->opts & PF_OPT_VERBOSE) {
1231 struct node_tinit *ti;
1232
1233 if ((ti = calloc(1, sizeof(*ti))) == NULL)
1234 err(1, "malloc");
1235 if ((ti->host = malloc(sizeof(*ti->host))) == NULL)
1236 err(1, "malloc");
1237 memcpy(ti->host, &node_host, sizeof(*ti->host));
1238 SIMPLEQ_INSERT_TAIL(&(*tbl)->pt_nodes, ti, entries);
1239 }
1240
1241 (*tbl)->pt_rulecount++;
1242 if ((*tbl)->pt_rulecount == TABLE_THRESHOLD)
1243 DEBUG("table <%s> now faster than skip steps", (*tbl)->pt_name);
1244
1245 return (0);
1246}
1247
1248
1249/*
1250 * Do the dirty work of choosing an unused table name and creating it.
1251 * (be careful with the table name, it might already be used in another anchor)
1252 */
1253int
1254pf_opt_create_table(struct pfctl *pf, struct pf_opt_tbl *tbl)
1255{
1256 static int tablenum;
1257 struct pfr_table *t;
1258
1259 if (table_buffer.pfrb_type == 0) {
1260 /* Initialize the list of tables */
1261 table_buffer.pfrb_type = PFRB_TABLES;
1262 for (;;) {
1263 pfr_buf_grow(&table_buffer, table_buffer.pfrb_size);
1264 table_buffer.pfrb_size = table_buffer.pfrb_msize;
1265 if (pfr_get_tables(NULL, table_buffer.pfrb_caddr,
1266 &table_buffer.pfrb_size, PFR_FLAG_ALLRSETS))
1267 err(1, "pfr_get_tables");
1268 if (table_buffer.pfrb_size <= table_buffer.pfrb_msize)
1269 break;
1270 }
1271 table_identifier = arc4random();
1272 }
1273
1274 /* XXX would be *really* nice to avoid duplicating identical tables */
1275
1276 /* Now we have to pick a table name that isn't used */
1277again:
1278 DEBUG("translating temporary table <%s> to <%s%x_%d>", tbl->pt_name,
1279 PF_OPTIMIZER_TABLE_PFX, table_identifier, tablenum);
1280 snprintf(tbl->pt_name, sizeof(tbl->pt_name), "%s%x_%d",
1281 PF_OPTIMIZER_TABLE_PFX, table_identifier, tablenum);
1282 PFRB_FOREACH(t, &table_buffer) {
1283 if (strcasecmp(t->pfrt_name, tbl->pt_name) == 0) {
1284 /* Collision. Try again */
1285 DEBUG("wow, table <%s> in use. trying again",((void)0)
1286 tbl->pt_name)((void)0);
1287 table_identifier = arc4random();
1288 goto again;
1289 }
1290 }
1291 tablenum++;
1292
1293 if (pfctl_define_table(tbl->pt_name, PFR_TFLAG_CONST | tbl->pt_flags, 1,
1294 pf->astack[0]->path, tbl->pt_buf, pf->astack[0]->ruleset.tticket)) {
1295 warn("failed to create table %s in %s",
1296 tbl->pt_name, pf->astack[0]->name);
1297 return (1);
1298 }
1299 return (0);
1300}
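
For concreteness, the snprintf() above builds names from the "__automatic_" prefix, the random identifier and a per-run counter. A hypothetical run (the identifier value below is invented for illustration) would produce:

/*
 * Example only, assuming table_identifier == 0x5e1fca7d:
 *   first generated table  -> "__automatic_5e1fca7d_0"
 *   second generated table -> "__automatic_5e1fca7d_1"
 * A strcasecmp() hit against an existing table picks a fresh
 * arc4random() identifier and retries from the "again:" label.
 */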
1301
1302/*
1303 * Partition the flat ruleset into a list of distinct superblocks
1304 */
1305int
1306construct_superblocks(struct pfctl *pf, struct pf_opt_queue *opt_queue,
1307 struct superblocks *superblocks)
1308{
1309 struct superblock *block = NULL;
1310 struct pf_opt_rule *por;
1311 int i;
1312
1313 while (!TAILQ_EMPTY(opt_queue)) {
16: Loop condition is true. Entering loop body
1314 por = TAILQ_FIRST(opt_queue);
1315 TAILQ_REMOVE(opt_queue, por, por_entry);
17: Taking false branch (inside the TAILQ_REMOVE expansion)
18: Loop condition is false. Exiting loop (the expansion's do-while)
1316 if (block == NULL || !superblock_inclusive(block, por)) {
18.1: 'block' is equal to NULL
1317 if ((block = calloc(1, sizeof(*block))) == NULL) {
19: Assuming the condition is true
20: Taking true branch
1318 warn("calloc");
21: Potential leak of memory pointed to by 'por'
1319 return (1);
1320 }
1321 TAILQ_INIT(&block->sb_rules);
1322 for (i = 0; i < PF_SKIP_COUNT; i++)
1323 TAILQ_INIT(&block->sb_skipsteps[i]);
1324 TAILQ_INSERT_TAIL(superblocks, block, sb_entry);
1325 }
1326 TAILQ_INSERT_TAIL(&block->sb_rules, por, por_entry);
1327 }
1328
1329 return (0);
1330}
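
The report above boils down to this: once the calloc() on line 1317 fails, 'por' has already been unlinked from opt_queue on line 1315, but it is neither freed nor re-queued before the early return on line 1319, so the rule and its table references are lost. One possible way to plug the leak, shown purely as a sketch (it is not the upstream fix, and it assumes the caller has no further use for the dequeued rule after a failure), is to drop the rule's table references and free it before bailing out:

if ((block = calloc(1, sizeof(*block))) == NULL) {
    warn("calloc");
    /* illustrative sketch: 'por' was already removed from
     * opt_queue above, so release it before the early return */
    pf_opt_table_unref(por->por_src_tbl);
    pf_opt_table_unref(por->por_dst_tbl);
    free(por);
    return (1);
}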
1331
1332
1333/*
1334 * Compare two rule addresses
1335 */
1336int
1337addrs_equal(struct pf_rule_addr *a, struct pf_rule_addr *b)
1338{
1339 if (a->neg != b->neg)
1340 return (0);
1341 return (memcmp(&a->addr, &b->addr, sizeof(a->addr)) == 0);
1342}
1343
1344
1345/*
1346 * The addresses are not equal, but can we combine them into one table?
1347 */
1348int
1349addrs_combineable(struct pf_rule_addr *a, struct pf_rule_addr *b)
1350{
1351 if (a->addr.type != PF_ADDR_ADDRMASK ||
1352 b->addr.type != PF_ADDR_ADDRMASK)
1353 return (0);
1354 if (a->neg != b->neg || a->port_op != b->port_op ||
1355 a->port[0] != b->port[0] || a->port[1] != b->port[1])
1356 return (0);
1357 return (1);
1358}
1359
1360
1361/*
1362 * Are we allowed to combine these two rules
1363 */
1364int
1365rules_combineable(struct pf_rule *p1, struct pf_rule *p2)
1366{
1367 struct pf_rule a, b;
1368
1369 comparable_rule(&a, p1, COMBINED);
1370 comparable_rule(&b, p2, COMBINED);
1371 return (memcmp(&a, &b, sizeof(a)) == 0);
1372}
1373
1374
1375/*
1376 * Can a rule be included inside a superblock
1377 */
1378int
1379superblock_inclusive(struct superblock *block, struct pf_opt_rule *por)
1380{
1381 struct pf_rule a, b;
1382 int i, j;
1383
1384 /* First check for hard breaks */
1385 for (i = 0; i < sizeof(pf_rule_desc)/sizeof(*pf_rule_desc); i++) {
1386 if (pf_rule_desc[i].prf_type == BARRIER) {
1387 for (j = 0; j < pf_rule_desc[i].prf_size; j++)
1388 if (((char *)&por->por_rule)[j +
1389 pf_rule_desc[i].prf_offset] != 0)
1390 return (0);
1391 }
1392 }
1393
1394 /* per-rule src-track is also a hard break */
1395 if (por->por_rule.rule_flag & PFRULE_RULESRCTRACK)
1396 return (0);
1397
1398 /*
1399 * Have to handle interface groups separately. Consider the following
1400 * rules:
1401 * block on EXTIFS to any port 22
1402 * pass on em0 to any port 22
1403 * (where EXTIFS is an arbitrary interface group)
1404 * The optimizer may decide to re-order the pass rule in front of the
1405 * block rule. But what if EXTIFS includes em0??? Such a reordering
1406 * would change the meaning of the ruleset.
1407 * We can't just lookup the EXTIFS group and check if em0 is a member
1408 * because the user is allowed to add interfaces to a group during
1409 * runtime.
1410 * Ergo interface groups become a de facto superblock break :-(
1411 */
1412 if (interface_group(por->por_rule.ifname) ||
1413 interface_group(TAILQ_FIRST(&block->sb_rules)->por_rule.ifname)) {
1414 if (strcasecmp(por->por_rule.ifname,
1415 TAILQ_FIRST(&block->sb_rules)->por_rule.ifname) != 0)
1416 return (0);
1417 }
1418
1419 comparable_rule(&a, &TAILQ_FIRST(&block->sb_rules)->por_rule, NOMERGE);
1420 comparable_rule(&b, &por->por_rule, NOMERGE);
1421 if (memcmp(&a, &b, sizeof(a)) == 0)
1422 return (1);
1423
1424#ifdef OPT_DEBUG
1425 for (i = 0; i < sizeof(por->por_rule); i++) {
1426 int closest = -1;
1427 if (((u_int8_t *)&a)[i] != ((u_int8_t *)&b)[i]) {
1428 for (j = 0; j < sizeof(pf_rule_desc) /
1429 sizeof(*pf_rule_desc); j++) {
1430 if (i >= pf_rule_desc[j].prf_offset &&
1431 i < pf_rule_desc[j].prf_offset +
1432 pf_rule_desc[j].prf_size) {
1433 DEBUG("superblock break @ %d due to %s",
1434 por->por_rule.nr,
1435 pf_rule_desc[j].prf_name);
1436 return (0);
1437 }
1438 if (i > pf_rule_desc[j].prf_offset) {
1439 if (closest == -1 ||
1440 i-pf_rule_desc[j].prf_offset <
1441 i-pf_rule_desc[closest].prf_offset)
1442 closest = j;
1443 }
1444 }
1445
1446 if (closest >= 0)
1447 DEBUG("superblock break @ %d on %s+%lxh",
1448 por->por_rule.nr,
1449 pf_rule_desc[closest].prf_name,
1450 i - pf_rule_desc[closest].prf_offset -
1451 pf_rule_desc[closest].prf_size);
1452 else
1453 DEBUG("superblock break @ %d on field @ %d",
1454 por->por_rule.nr, i);
1455 return (0);
1456 }
1457 }
1458#endif /* OPT_DEBUG */
1459
1460 return (0);
1461}
1462
1463
1464/*
1465 * Figure out if an interface name is an actual interface or actually a
1466 * group of interfaces.
1467 */
1468int
1469interface_group(const char *ifname)
1470{
1471 if (ifname == NULL || !ifname[0])
1472 return (0);
1473
1474 /* Real interfaces must end in a number, interface groups do not */
1475 if (isdigit((unsigned char)ifname[strlen(ifname) - 1]))
1476 return (0);
1477 else
1478 return (1);
1479}
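
The trailing-digit heuristic gives, for example (illustrative values, not taken from a real run):

/*
 * interface_group("em0")    -> 0   ends in a digit: a real interface
 * interface_group("egress") -> 1   no trailing digit: treated as a group
 * interface_group("")       -> 0   empty names are never groups
 */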
1480
1481
1482/*
1483 * Make a rule that can be compared directly with memcmp()
1484 */
1485void
1486comparable_rule(struct pf_rule *dst, const struct pf_rule *src, int type)
1487{
1488 int i;
1489 /*
1490 * To simplify the comparison, we just zero out the fields that are
1491 * allowed to be different and then do a simple memcmp()
1492 */
1493 memcpy(dst, src, sizeof(*dst));
1494 for (i = 0; i < sizeof(pf_rule_desc)/sizeof(*pf_rule_desc); i++)
1495 if (pf_rule_desc[i].prf_type >= type) {
1496#ifdef OPT_DEBUG
1497 assert(pf_rule_desc[i].prf_type != NEVER ||
1498 *(((char *)dst) + pf_rule_desc[i].prf_offset) == 0);
1499#endif /* OPT_DEBUG */
1500 memset(((char *)dst) + pf_rule_desc[i].prf_offset, 0,
1501 pf_rule_desc[i].prf_size);
1502 }
1503}
1504
1505
1506/*
1507 * Remove superset information from two rules so we can directly compare them
1508 * with memcmp()
1509 */
1510void
1511exclude_supersets(struct pf_rule *super, struct pf_rule *sub)
1512{
1513 if (super->ifname[0] == '\0')
1514 memset(sub->ifname, 0, sizeof(sub->ifname));
1515 if (super->direction == PF_INOUT)
1516 sub->direction = PF_INOUT;
1517 if ((super->proto == 0 || super->proto == sub->proto) &&
1518 super->flags == 0 && super->flagset == 0 && (sub->flags ||
1519 sub->flagset)) {
1520 sub->flags = super->flags;
1521 sub->flagset = super->flagset;
1522 }
1523 if (super->proto == 0)
1524 sub->proto = 0;
1525
1526 if (super->src.port_op == 0) {
1527 sub->src.port_op = 0;
1528 sub->src.port[0] = 0;
1529 sub->src.port[1] = 0;
1530 }
1531 if (super->dst.port_op == 0) {
1532 sub->dst.port_op = 0;
1533 sub->dst.port[0] = 0;
1534 sub->dst.port[1] = 0;
1535 }
1536
1537 if (super->src.addr.type == PF_ADDR_ADDRMASK && !super->src.neg &&
1538 !sub->src.neg && super->src.addr.v.a.mask.addr32[0] == 0 &&
1539 super->src.addr.v.a.mask.addr32[1] == 0 &&
1540 super->src.addr.v.a.mask.addr32[2] == 0 &&
1541 super->src.addr.v.a.mask.addr32[3] == 0)
1542 memset(&sub->src.addr, 0, sizeof(sub->src.addr));
1543 else if (super->src.addr.type == PF_ADDR_ADDRMASK &&
1544 sub->src.addr.type == PF_ADDR_ADDRMASK &&
1545 super->src.neg == sub->src.neg &&
1546 super->af == sub->af &&
1547 unmask(&super->src.addr.v.a.mask) <
1548 unmask(&sub->src.addr.v.a.mask) &&
1549 super->src.addr.v.a.addr.addr32[0] ==
1550 (sub->src.addr.v.a.addr.addr32[0] &
1551 super->src.addr.v.a.mask.addr32[0]) &&
1552 super->src.addr.v.a.addr.addr32[1] ==
1553 (sub->src.addr.v.a.addr.addr32[1] &
1554 super->src.addr.v.a.mask.addr32[1]) &&
1555 super->src.addr.v.a.addr.addr32[2] ==
1556 (sub->src.addr.v.a.addr.addr32[2] &
1557 super->src.addr.v.a.mask.addr32[2]) &&
1558 super->src.addr.v.a.addr.addr32[3] ==
1559 (sub->src.addr.v.a.addr.addr32[3] &
1560 super->src.addr.v.a.mask.addr32[3])) {
1561 /* sub->src.addr is a subset of super->src.addr/mask */
1562 memcpy(&sub->src.addr, &super->src.addr, sizeof(sub->src.addr));
1563 }
1564
1565 if (super->dst.addr.type == PF_ADDR_ADDRMASK && !super->dst.neg &&
1566 !sub->dst.neg && super->dst.addr.v.a.mask.addr32[0] == 0 &&
1567 super->dst.addr.v.a.mask.addr32[1] == 0 &&
1568 super->dst.addr.v.a.mask.addr32[2] == 0 &&
1569 super->dst.addr.v.a.mask.addr32[3] == 0)
1570 memset(&sub->dst.addr, 0, sizeof(sub->dst.addr));
1571 else if (super->dst.addr.type == PF_ADDR_ADDRMASK &&
1572 sub->dst.addr.type == PF_ADDR_ADDRMASK &&
1573 super->dst.neg == sub->dst.neg &&
1574 super->af == sub->af &&
1575 unmask(&super->dst.addr.v.a.mask) <
1576 unmask(&sub->dst.addr.v.a.mask) &&
1577 super->dst.addr.v.a.addr.addr32[0] ==
1578 (sub->dst.addr.v.a.addr.addr32[0] &
1579 super->dst.addr.v.a.mask.addr32[0]) &&
1580 super->dst.addr.v.a.addr.addr32[1] ==
1581 (sub->dst.addr.v.a.addr.addr32[1] &
1582 super->dst.addr.v.a.mask.addr32[1]) &&
1583 super->dst.addr.v.a.addr.addr32[2] ==
1584 (sub->dst.addr.v.a.addr.addr32[2] &
1585 super->dst.addr.v.a.mask.addr32[2]) &&
1586 super->dst.addr.v.a.addr.addr32[3] ==
1587 (sub->dst.addr.v.a.addr.addr32[3] &
1588 super->dst.addr.v.a.mask.addr32[3])) {
1589 /* sub->dst.addr is a subset of super->dst.addr/mask */
1590 memcpy(&sub->dst.addr, &super->dst.addr, sizeof(sub->dst.addr));
1591 }
1592
1593 if (super->af == 0)
1594 sub->af = 0;
1595}
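
A small usage sketch of the field-zeroing behaviour above (hypothetical, not taken from the source; it only exercises the proto and source-port branches):

/* 'super' matches any proto and any source port; 'sub' is narrower */
struct pf_rule super, sub;

memset(&super, 0, sizeof(super));
memset(&sub, 0, sizeof(sub));
sub.proto = IPPROTO_TCP;
sub.src.port_op = PF_OP_EQ;
sub.src.port[0] = htons(22);

exclude_supersets(&super, &sub);
/* super.proto == 0 clears sub.proto, and super.src.port_op == 0 clears
 * sub.src.port_op and sub.src.port[], so the two rules now compare
 * equal on those fields in the later memcmp()-based checks. */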
1596
1597
1598void
1599superblock_free(struct pfctl *pf, struct superblock *block)
1600{
1601 struct pf_opt_rule *por;
1602 while ((por = TAILQ_FIRST(&block->sb_rules))) {
1603 TAILQ_REMOVE(&block->sb_rules, por, por_entry);
1604 pf_opt_table_unref(por->por_src_tbl);
1605 pf_opt_table_unref(por->por_dst_tbl);
1606 free(por);
1607 }
1608 if (block->sb_profiled_block)
1609 superblock_free(pf, block->sb_profiled_block);
1610 free(block);
1611}
1612
1613struct pf_opt_tbl *
1614pf_opt_table_ref(struct pf_opt_tbl *pt)
1615{
1616 /* parser does not run concurrently, we don't need atomic ops. */
1617 if (pt != NULL)
1618 pt->pt_refcnt++;
1619
1620 return (pt);
1621}
1622
1623void
1624pf_opt_table_unref(struct pf_opt_tbl *pt)
1625{
1626 if ((pt != NULL) && ((--pt->pt_refcnt) == 0)) {
1627 if (pt->pt_buf != NULL) {
1628 pfr_buf_clear(pt->pt_buf);
1629 free(pt->pt_buf);
1630 }
1631 free(pt);
1632 }
1633}
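
pf_opt_table_ref() and pf_opt_table_unref() implement plain, non-atomic reference counting for optimizer tables shared between rules. A hypothetical usage sketch (por1/por2 are invented placeholders, not variables from this file):

/* share one optimizer table between two rules */
por2->por_dst_tbl = pf_opt_table_ref(por1->por_dst_tbl);   /* pt_refcnt: 1 -> 2 */

/* later, as each rule is torn down */
pf_opt_table_unref(por1->por_dst_tbl);  /* pt_refcnt: 2 -> 1 */
pf_opt_table_unref(por2->por_dst_tbl);  /* pt_refcnt: 1 -> 0: pfr_buf_clear() + free() */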