jcs's OpenBSD hacks
OpenBSD source: sys/net/pf_table.c (pf table management)
1/* $OpenBSD: pf_table.c,v 1.147 2025/11/11 04:06:20 dlg Exp $ */
2
3/*
4 * Copyright (c) 2002 Cedric Berger
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 *
11 * - Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * - Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
21 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
22 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
28 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 */
32
33#include <sys/param.h>
34#include <sys/systm.h>
35#include <sys/socket.h>
36#include <sys/mbuf.h>
37#include <sys/pool.h>
38#include <sys/syslog.h>
39#include <sys/proc.h>
40
41#include <net/if.h>
42
43#include <netinet/in.h>
44#include <netinet/ip.h>
45#include <netinet/ip_ipsp.h>
46#include <netinet/ip_icmp.h>
47#include <netinet/tcp.h>
48#include <netinet/udp.h>
49
50#ifdef INET6
51#include <netinet/icmp6.h>
52#endif /* INET6 */
53
54#include <net/pfvar.h>
55#include <net/pfvar_priv.h>
56
57#define ACCEPT_FLAGS(flags, oklist) \
58 do { \
59 if ((flags & ~(oklist)) & \
60 PFR_FLAG_ALLMASK) \
61 return (EINVAL); \
62 } while (0)
63
64#define COPYIN(from, to, size, flags) \
65 ((flags & PFR_FLAG_USERIOCTL) ? \
66 copyin((from), (to), (size)) : \
67 (bcopy((from), (to), (size)), 0))
68
69#define COPYOUT(from, to, size, flags) \
70 ((flags & PFR_FLAG_USERIOCTL) ? \
71 copyout((from), (to), (size)) : \
72 (bcopy((from), (to), (size)), 0))
73
74#define YIELD(ok) \
75 do { \
76 if (ok) \
77 sched_pause(preempt); \
78 } while (0)
79
80#define FILLIN_SIN(sin, addr) \
81 do { \
82 (sin).sin_len = sizeof(sin); \
83 (sin).sin_family = AF_INET; \
84 (sin).sin_addr = (addr); \
85 } while (0)
86
87#define FILLIN_SIN6(sin6, addr) \
88 do { \
89 (sin6).sin6_len = sizeof(sin6); \
90 (sin6).sin6_family = AF_INET6; \
91 (sin6).sin6_addr = (addr); \
92 } while (0)
93
94#define SWAP(type, a1, a2) \
95 do { \
96 type tmp = a1; \
97 a1 = a2; \
98 a2 = tmp; \
99 } while (0)
100
101#define SUNION2PF(su, af) (((af)==AF_INET) ? \
102 (struct pf_addr *)&(su)->sin.sin_addr : \
103 (struct pf_addr *)&(su)->sin6.sin6_addr)
104
105#define AF_BITS(af) (((af)==AF_INET)?32:128)
106#define ADDR_NETWORK(ad) ((ad)->pfra_net < AF_BITS((ad)->pfra_af))
107#define KENTRY_NETWORK(ke) ((ke)->pfrke_net < AF_BITS((ke)->pfrke_af))
108
109#define NO_ADDRESSES (-1)
110#define ENQUEUE_UNMARKED_ONLY (1)
111#define INVERT_NEG_FLAG (1)
112
/*
 * Argument block handed to pfr_walktree() while walking a table's
 * radix trees: pfrw_op selects the operation, pfrw_1 carries the
 * operation-specific pointer, pfrw_free/pfrw_cnt is the counter.
 */
struct pfr_walktree {
	enum pfrw_op {
		PFRW_MARK,
		PFRW_SWEEP,
		PFRW_ENQUEUE,
		PFRW_GET_ADDRS,
		PFRW_GET_ASTATS,
		PFRW_POOL_GET,
		PFRW_DYNADDR_UPDATE
	} pfrw_op;
	union {
		struct pfr_addr		*pfrw1_addr;
		struct pfr_astats	*pfrw1_astats;
		struct pfr_kentryworkq	*pfrw1_workq;
		struct pfr_kentry	*pfrw1_kentry;
		struct pfi_dynaddr	*pfrw1_dyn;
	} pfrw_1;
	int	 pfrw_free;	/* get ops: slots left in the copyout buffer */
	int	 pfrw_flags;
};
/* convenience accessors for the pfrw_1 union and the counter alias */
#define pfrw_addr	pfrw_1.pfrw1_addr
#define pfrw_astats	pfrw_1.pfrw1_astats
#define pfrw_workq	pfrw_1.pfrw1_workq
#define pfrw_kentry	pfrw_1.pfrw1_kentry
#define pfrw_dyn	pfrw_1.pfrw1_dyn
#define pfrw_cnt	pfrw_free
139
140#define senderr(e) do { rv = (e); goto _bad; } while (0)
141
142struct pool pfr_ktable_pl;
143struct pool pfr_kentry_pl[PFRKE_MAX];
144struct pool pfr_kcounters_pl;
145union sockaddr_union pfr_mask;
146struct pf_addr pfr_ffaddr;
147
148int pfr_gcd(int, int);
149void pfr_copyout_addr(struct pfr_addr *,
150 struct pfr_kentry *ke);
151int pfr_validate_addr(struct pfr_addr *);
152void pfr_enqueue_addrs(struct pfr_ktable *,
153 struct pfr_kentryworkq *, int *, int);
154void pfr_mark_addrs(struct pfr_ktable *);
155struct pfr_kentry *pfr_lookup_addr(struct pfr_ktable *,
156 struct pfr_addr *, int);
157struct pfr_kentry *pfr_lookup_kentry(struct pfr_ktable *,
158 struct pfr_kentry *, int);
159struct pfr_kentry *pfr_create_kentry(struct pfr_addr *);
160struct pfr_kentry *pfr_create_kentry_unlocked(struct pfr_addr *, int);
161void pfr_kentry_kif_ref(struct pfr_kentry *);
162void pfr_destroy_kentries(struct pfr_kentryworkq *);
163void pfr_destroy_ioq(struct pfr_kentryworkq *, int);
164void pfr_destroy_kentry(struct pfr_kentry *);
165void pfr_insert_kentries(struct pfr_ktable *,
166 struct pfr_kentryworkq *, time_t);
167void pfr_remove_kentries(struct pfr_ktable *,
168 struct pfr_kentryworkq *);
169void pfr_clstats_kentries(struct pfr_kentryworkq *, time_t,
170 int);
171void pfr_reset_feedback(struct pfr_addr *, int, int);
172void pfr_prepare_network(union sockaddr_union *, int, int);
173int pfr_route_kentry(struct pfr_ktable *,
174 struct pfr_kentry *);
175int pfr_unroute_kentry(struct pfr_ktable *,
176 struct pfr_kentry *);
177int pfr_walktree(struct radix_node *, void *, u_int);
178int pfr_validate_table(struct pfr_table *, int, int);
179int pfr_fix_anchor(char *);
180void pfr_commit_ktable(struct pfr_ktable *, time_t);
181void pfr_insert_ktables(struct pfr_ktableworkq *);
182void pfr_insert_ktable(struct pfr_ktable *);
183void pfr_setflags_ktables(struct pfr_ktableworkq *);
184void pfr_setflags_ktable(struct pfr_ktable *, int);
185void pfr_clstats_ktables(struct pfr_ktableworkq *, time_t,
186 int);
187void pfr_clstats_ktable(struct pfr_ktable *, time_t, int);
188struct pfr_ktable *pfr_create_ktable(struct pfr_table *, time_t, int,
189 int);
190void pfr_destroy_ktables(struct pfr_ktableworkq *, int);
191void pfr_destroy_ktables_aux(struct pfr_ktableworkq *);
192void pfr_destroy_ktable(struct pfr_ktable *, int);
193int pfr_ktable_compare(struct pfr_ktable *,
194 struct pfr_ktable *);
195void pfr_ktable_winfo_update(struct pfr_ktable *,
196 struct pfr_kentry *);
197struct pfr_ktable *pfr_lookup_table(struct pfr_table *);
198void pfr_clean_node_mask(struct pfr_ktable *,
199 struct pfr_kentryworkq *);
200int pfr_table_count(struct pfr_table *, int);
201int pfr_skip_table(struct pfr_table *,
202 struct pfr_ktable *, int);
203struct pfr_kentry *pfr_kentry_byidx(struct pfr_ktable *, int, int);
204int pfr_islinklocal(sa_family_t, struct pf_addr *);
205
206RB_PROTOTYPE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
207RB_GENERATE(pfr_ktablehead, pfr_ktable, pfrkt_tree, pfr_ktable_compare);
208
209struct pfr_ktablehead pfr_ktables;
210struct pfr_table pfr_nulltable;
211int pfr_ktable_cnt;
212
/*
 * Euclid's algorithm: greatest common divisor of m and n.
 * pfr_gcd(0, n) yields n; used for weight bookkeeping on tables.
 */
int
pfr_gcd(int m, int n)
{
	int r;

	while (m > 0) {
		r = n % m;
		n = m;
		m = r;
	}

	return (n);
}
225
/*
 * One-time setup: initialize the radix code and the pools backing
 * tables, the three table-entry types, and per-entry counters.
 */
void
pfr_initialize(void)
{
	/* sockaddr_in6 is the largest key the radix trees must hold */
	rn_init(sizeof(struct sockaddr_in6));

	pool_init(&pfr_ktable_pl, sizeof(struct pfr_ktable),
	    0, IPL_SOFTNET, 0, "pfrktable", NULL);
	pool_init(&pfr_kentry_pl[PFRKE_PLAIN], sizeof(struct pfr_kentry),
	    0, IPL_SOFTNET, 0, "pfrke_plain", NULL);
	pool_init(&pfr_kentry_pl[PFRKE_ROUTE], sizeof(struct pfr_kentry_route),
	    0, IPL_SOFTNET, 0, "pfrke_route", NULL);
	pool_init(&pfr_kentry_pl[PFRKE_COST], sizeof(struct pfr_kentry_cost),
	    0, IPL_SOFTNET, 0, "pfrke_cost", NULL);
	pool_init(&pfr_kcounters_pl, sizeof(struct pfr_kcounters),
	    0, IPL_SOFTNET, 0, "pfrkcounters", NULL);

	/* all-ones address, used as an exact-host mask */
	memset(&pfr_ffaddr, 0xff, sizeof(pfr_ffaddr));
}
244
/*
 * Flush every address from table tbl; *ndel receives the number of
 * entries removed.  With PFR_FLAG_DUMMY only the count is reported.
 * Returns EINVAL on a bad table spec, ESRCH if the table is missing
 * or inactive, EPERM for const tables.
 */
int
pfr_clr_addrs(struct pfr_table *tbl, int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	pfr_enqueue_addrs(kt, &workq, ndel, 0);

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_remove_kentries(kt, &workq);
		/* removing every enqueued entry should leave cnt at zero */
		if (kt->pfrkt_cnt) {
			DPFPRINTF(LOG_NOTICE,
			    "pfr_clr_addrs: corruption detected (%d).",
			    kt->pfrkt_cnt);
			kt->pfrkt_cnt = 0;
		}
	}
	return (0);
}
272
/*
 * Translate a kernel table entry into the pfr_addr copied back to
 * userland as feedback after pfr_add_addrs().
 */
void
pfr_fill_feedback(struct pfr_kentry_all *ke, struct pfr_addr *ad)
{
	ad->pfra_type = ke->pfrke_type;

	switch (ke->pfrke_type) {
	case PFRKE_PLAIN:
		break;
	case PFRKE_COST:
		/*
		 * NOTE(review): this copies the weight from the feedback
		 * address into the kentry, the opposite direction of every
		 * other assignment in this function -- confirm intentional.
		 */
		((struct pfr_kentry_cost *)ke)->weight = ad->pfra_weight;
		/* FALLTHROUGH */
	case PFRKE_ROUTE:
		if (ke->pfrke_rifname[0])
			strlcpy(ad->pfra_ifname, ke->pfrke_rifname, IFNAMSIZ);
		break;
	}

	switch (ke->pfrke_af) {
	case AF_INET:
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
		break;
#ifdef INET6
	case AF_INET6:
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
		break;
#endif /* INET6 */
	default:
		unhandled_af(ke->pfrke_af);
	}
	ad->pfra_weight = ((struct pfr_kentry_cost *)ke)->weight;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	if (ke->pfrke_flags & PFRKE_FLAG_NOT)
		ad->pfra_not = 1;
	ad->pfra_fback = ke->pfrke_fb;
}
309
/*
 * Add addr[0..size-1] to table tbl.  Entries are allocated and copied
 * in *before* NET_LOCK/PF_LOCK are taken (allocation may sleep for
 * user ioctls); a throwaway table tmpkt is used to catch duplicates
 * within the input batch itself.  With PFR_FLAG_FEEDBACK each input
 * address is copied back annotated with a PFR_FB_* result code.
 * *nadd receives the number of entries actually inserted; with
 * PFR_FLAG_DUMMY nothing is committed to the real table.
 */
int
pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 workq, ioq;
	struct pfr_kentry	*p, *q, *ke;
	struct pfr_addr		 ad;
	int			 i, rv, xadd = 0;
	time_t			 tzero = gettime();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0,
	    (flags & PFR_FLAG_USERIOCTL? PR_WAITOK : PR_NOWAIT));
	if (tmpkt == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	SLIST_INIT(&ioq);
	/* unlocked phase: validate input and preallocate all entries */
	for (i = 0; i < size; i++) {
		YIELD(flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);

		ke = pfr_create_kentry_unlocked(&ad, flags);
		if (ke == NULL)
			senderr(ENOMEM);
		ke->pfrke_fb = PFR_FB_NONE;
		SLIST_INSERT_HEAD(&ioq, ke, pfrke_ioq);
	}

	/* locked phase: classify each entry and stage additions in workq */
	NET_LOCK();
	PF_LOCK();
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
		PF_UNLOCK();
		NET_UNLOCK();
		senderr(ESRCH);
	}
	if (kt->pfrkt_flags & PFR_TFLAG_CONST) {
		PF_UNLOCK();
		NET_UNLOCK();
		senderr(EPERM);
	}
	SLIST_FOREACH(ke, &ioq, pfrke_ioq) {
		pfr_kentry_kif_ref(ke);
		p = pfr_lookup_kentry(kt, ke, 1);
		q = pfr_lookup_kentry(tmpkt, ke, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (q != NULL)
				ke->pfrke_fb = PFR_FB_DUPLICATE;
			else if (p == NULL)
				ke->pfrke_fb = PFR_FB_ADDED;
			else if ((p->pfrke_flags & PFRKE_FLAG_NOT) !=
			    (ke->pfrke_flags & PFRKE_FLAG_NOT))
				ke->pfrke_fb = PFR_FB_CONFLICT;
			else
				ke->pfrke_fb = PFR_FB_NONE;
		}
		if (p == NULL && q == NULL) {
			if (pfr_route_kentry(tmpkt, ke)) {
				/* defer destroy after feedback is processed */
				ke->pfrke_fb = PFR_FB_NONE;
			} else {
				/*
				 * mark entry as added to table, so we won't
				 * kill it with rest of the ioq
				 */
				ke->pfrke_fb = PFR_FB_ADDED;
				SLIST_INSERT_HEAD(&workq, ke, pfrke_workq);
				xadd++;
			}
		}
	}
	/* remove entries, which we will insert from tmpkt */
	pfr_clean_node_mask(tmpkt, &workq);
	if (!(flags & PFR_FLAG_DUMMY))
		pfr_insert_kentries(kt, &workq, tzero);

	PF_UNLOCK();
	NET_UNLOCK();

	if (flags & PFR_FLAG_FEEDBACK) {
		i = 0;
		while ((ke = SLIST_FIRST(&ioq)) != NULL) {
			YIELD(flags & PFR_FLAG_USERIOCTL);
			pfr_fill_feedback((struct pfr_kentry_all *)ke, &ad);
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
			i++;
			SLIST_REMOVE_HEAD(&ioq, pfrke_ioq);
			/* entries that made it into kt must survive */
			switch (ke->pfrke_fb) {
			case PFR_FB_CONFLICT:
			case PFR_FB_DUPLICATE:
			case PFR_FB_NONE:
				pfr_destroy_kentry(ke);
				break;
			case PFR_FB_ADDED:
				if (flags & PFR_FLAG_DUMMY)
					pfr_destroy_kentry(ke);
			}
		}
	} else
		pfr_destroy_ioq(&ioq, flags);

	if (nadd != NULL)
		*nadd = xadd;

	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_destroy_ioq(&ioq, flags);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}
430
/*
 * Delete addr[0..size-1] from table tbl.  Candidates are selected via
 * PFRKE_FLAG_MARK: the first pass clears the mark on entries named in
 * the input (or on all entries, whichever pass is cheaper), the second
 * pass sets the mark as it collects entries into workq, so an address
 * repeated in the input is reported as PFR_FB_DUPLICATE instead of
 * being removed twice.  *ndel receives the number removed.
 */
int
pfr_del_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *ndel, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, xdel = 0, log = 1;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	/*
	 * there are two algorithms to choose from here.
	 * with:
	 *   n: number of addresses to delete
	 *   N: number of addresses in the table
	 *
	 * one is O(N) and is better for large 'n'
	 * one is O(n*LOG(N)) and is better for small 'n'
	 *
	 * following code try to decide which one is best.
	 */
	for (i = kt->pfrkt_cnt; i > 0; i >>= 1)
		log++;
	if (size > kt->pfrkt_cnt/log) {
		/* full table scan */
		pfr_mark_addrs(kt);
	} else {
		/* iterate over addresses to delete */
		for (i = 0; i < size; i++) {
			YIELD(flags & PFR_FLAG_USERIOCTL);
			if (COPYIN(addr+i, &ad, sizeof(ad), flags))
				return (EFAULT);
			if (pfr_validate_addr(&ad))
				return (EINVAL);
			p = pfr_lookup_addr(kt, &ad, 1);
			if (p != NULL)
				p->pfrke_flags &= ~PFRKE_FLAG_MARK;
		}
	}
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		YIELD(flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			if (p == NULL)
				ad.pfra_fback = PFR_FB_NONE;
			else if ((p->pfrke_flags & PFRKE_FLAG_NOT) !=
			    ad.pfra_not)
				ad.pfra_fback = PFR_FB_CONFLICT;
			else if (p->pfrke_flags & PFRKE_FLAG_MARK)
				ad.pfra_fback = PFR_FB_DUPLICATE;
			else
				ad.pfra_fback = PFR_FB_DELETED;
		}
		if (p != NULL &&
		    (p->pfrke_flags & PFRKE_FLAG_NOT) == ad.pfra_not &&
		    !(p->pfrke_flags & PFRKE_FLAG_MARK)) {
			p->pfrke_flags |= PFRKE_FLAG_MARK;
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xdel++;
		}
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_remove_kentries(kt, &workq);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}
519
/*
 * Replace the contents of table tbl with addr[0..size-1]: addresses
 * not yet present are added, existing entries absent from the input
 * are deleted, and entries whose negation flag differs are queued for
 * a stats-inverting change.  A throwaway table tmpkt catches
 * duplicates within the input.  With PFR_FLAG_FEEDBACK the deleted
 * entries are appended after the input in the copyout buffer, whose
 * capacity is *size2 (updated to the required size on return).
 */
int
pfr_set_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *size2, int *nadd, int *ndel, int *nchange, int flags,
    u_int32_t ignore_pfrt_flags)
{
	struct pfr_ktable	*kt, *tmpkt;
	struct pfr_kentryworkq	 addq, delq, changeq;
	struct pfr_kentry	*p, *q;
	struct pfr_addr		 ad;
	int			 i, rv, xadd = 0, xdel = 0, xchange = 0;
	time_t			 tzero = gettime();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, ignore_pfrt_flags, flags &
	    PFR_FLAG_USERIOCTL))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_flags & PFR_TFLAG_CONST)
		return (EPERM);
	tmpkt = pfr_create_ktable(&pfr_nulltable, 0, 0,
	    (flags & PFR_FLAG_USERIOCTL? PR_WAITOK : PR_NOWAIT));
	if (tmpkt == NULL)
		return (ENOMEM);
	/* mark all; entries still marked afterwards get deleted */
	pfr_mark_addrs(kt);
	SLIST_INIT(&addq);
	SLIST_INIT(&delq);
	SLIST_INIT(&changeq);
	for (i = 0; i < size; i++) {
		YIELD(flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		ad.pfra_fback = PFR_FB_NONE;
		p = pfr_lookup_addr(kt, &ad, 1);
		if (p != NULL) {
			if (p->pfrke_flags & PFRKE_FLAG_MARK) {
				/* mark already cleared: seen twice in input */
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p->pfrke_flags |= PFRKE_FLAG_MARK;
			if ((p->pfrke_flags & PFRKE_FLAG_NOT) != ad.pfra_not) {
				SLIST_INSERT_HEAD(&changeq, p, pfrke_workq);
				ad.pfra_fback = PFR_FB_CHANGED;
				xchange++;
			}
		} else {
			q = pfr_lookup_addr(tmpkt, &ad, 1);
			if (q != NULL) {
				ad.pfra_fback = PFR_FB_DUPLICATE;
				goto _skip;
			}
			p = pfr_create_kentry(&ad);
			if (p == NULL)
				senderr(ENOMEM);
			if (pfr_route_kentry(tmpkt, p)) {
				pfr_destroy_kentry(p);
				ad.pfra_fback = PFR_FB_NONE;
				goto _skip;
			}
			SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			ad.pfra_fback = PFR_FB_ADDED;
			xadd++;
			if (p->pfrke_type == PFRKE_COST)
				kt->pfrkt_refcntcost++;
			pfr_ktable_winfo_update(kt, p);
		}
_skip:
		if (flags & PFR_FLAG_FEEDBACK)
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
	}
	pfr_enqueue_addrs(kt, &delq, &xdel, ENQUEUE_UNMARKED_ONLY);
	if ((flags & PFR_FLAG_FEEDBACK) && *size2) {
		if (*size2 < size+xdel) {
			/* buffer too small: report needed size, no change */
			*size2 = size+xdel;
			senderr(0);
		}
		i = 0;
		SLIST_FOREACH(p, &delq, pfrke_workq) {
			pfr_copyout_addr(&ad, p);
			ad.pfra_fback = PFR_FB_DELETED;
			if (COPYOUT(&ad, addr+size+i, sizeof(ad), flags))
				senderr(EFAULT);
			i++;
		}
	}
	pfr_clean_node_mask(tmpkt, &addq);
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
	} else
		pfr_destroy_kentries(&addq);
	if (nadd != NULL)
		*nadd = xadd;
	if (ndel != NULL)
		*ndel = xdel;
	if (nchange != NULL)
		*nchange = xchange;
	if ((flags & PFR_FLAG_FEEDBACK) && size2)
		*size2 = size+xdel;
	pfr_destroy_ktable(tmpkt, 0);
	return (0);
_bad:
	pfr_clean_node_mask(tmpkt, &addq);
	pfr_destroy_kentries(&addq);
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	pfr_destroy_ktable(tmpkt, 0);
	return (rv);
}
634
/*
 * Test addr[0..size-1] for membership in table tbl.  Each address is
 * annotated with PFR_FB_MATCH / PFR_FB_NOTMATCH / PFR_FB_NONE and
 * copied back; *nmatch receives the count of non-negated matches.
 * Only host addresses (full prefix) may be tested.  With
 * PFR_FLAG_REPLACE the matching entry overwrites the input address.
 */
int
pfr_tst_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nmatch, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, xmatch = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_REPLACE);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);

	for (i = 0; i < size; i++) {
		YIELD(flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			return (EFAULT);
		if (pfr_validate_addr(&ad))
			return (EINVAL);
		if (ADDR_NETWORK(&ad))
			return (EINVAL);
		p = pfr_lookup_addr(kt, &ad, 0);
		if (flags & PFR_FLAG_REPLACE)
			pfr_copyout_addr(&ad, p);
		ad.pfra_fback = (p == NULL) ? PFR_FB_NONE :
		    ((p->pfrke_flags & PFRKE_FLAG_NOT) ?
		    PFR_FB_NOTMATCH : PFR_FB_MATCH);
		if (p != NULL && !(p->pfrke_flags & PFRKE_FLAG_NOT))
			xmatch++;
		if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
			return (EFAULT);
	}
	if (nmatch != NULL)
		*nmatch = xmatch;
	return (0);
}
674
/*
 * Copy all addresses of table tbl into addr.  On entry *size is the
 * buffer capacity in elements; if the table has more entries, only
 * the required size is reported and nothing is copied.  On success
 * *size is set to the number of entries written.
 */
int
pfr_get_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	int			 rv;

	ACCEPT_FLAGS(flags, 0);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ADDRS;
	w.pfrw_addr = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (rv)
		return (rv);

	/* the walk must have consumed exactly pfrkt_cnt slots */
	if (w.pfrw_free) {
		DPFPRINTF(LOG_ERR,
		    "pfr_get_addrs: corruption detected (%d)", w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}
713
/*
 * Like pfr_get_addrs() but copies out pfr_astats (address + counters).
 * With PFR_FLAG_CLSTATS the per-entry statistics are cleared after a
 * successful walk.
 *
 * NOTE(review): unlike its siblings this function never calls
 * ACCEPT_FLAGS to reject unknown flags -- confirm whether that is
 * intentional.
 */
int
pfr_get_astats(struct pfr_table *tbl, struct pfr_astats *addr, int *size,
    int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_walktree	 w;
	struct pfr_kentryworkq	 workq;
	int			 rv;
	time_t			 tzero = gettime();

	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	if (kt->pfrkt_cnt > *size) {
		*size = kt->pfrkt_cnt;
		return (0);
	}

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_GET_ASTATS;
	w.pfrw_astats = addr;
	w.pfrw_free = kt->pfrkt_cnt;
	w.pfrw_flags = flags;
	rv = rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
	if (!rv)
		rv = rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
	if (!rv && (flags & PFR_FLAG_CLSTATS)) {
		pfr_enqueue_addrs(kt, &workq, NULL, 0);
		pfr_clstats_kentries(&workq, tzero, 0);
	}
	if (rv)
		return (rv);

	/* the walk must have consumed exactly pfrkt_cnt slots */
	if (w.pfrw_free) {
		DPFPRINTF(LOG_ERR,
		    "pfr_get_astats: corruption detected (%d)", w.pfrw_free);
		return (ENOTTY);
	}
	*size = kt->pfrkt_cnt;
	return (0);
}
757
/*
 * Clear the statistics of addr[0..size-1] in table tbl; *nzero
 * receives the number of entries whose stats were cleared.  With
 * PFR_FLAG_FEEDBACK each address is annotated PFR_FB_CLEARED or
 * PFR_FB_NONE and copied back; with PFR_FLAG_DUMMY nothing is
 * modified.
 */
int
pfr_clr_astats(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nzero, int flags)
{
	struct pfr_ktable	*kt;
	struct pfr_kentryworkq	 workq;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	int			 i, rv, xzero = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_FEEDBACK);
	if (pfr_validate_table(tbl, 0, 0))
		return (EINVAL);
	kt = pfr_lookup_table(tbl);
	if (kt == NULL || !(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
		return (ESRCH);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		YIELD(flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		p = pfr_lookup_addr(kt, &ad, 1);
		if (flags & PFR_FLAG_FEEDBACK) {
			ad.pfra_fback = (p != NULL) ?
			    PFR_FB_CLEARED : PFR_FB_NONE;
			if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
				senderr(EFAULT);
		}
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
			xzero++;
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_clstats_kentries(&workq, gettime(), 0);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
_bad:
	if (flags & PFR_FLAG_FEEDBACK)
		pfr_reset_feedback(addr, size, flags);
	return (rv);
}
805
/*
 * Sanity-check a pfr_addr coming from userland: supported address
 * family, prefix length within bounds, no stray bits set past the
 * prefix, and clean not/fback/type fields.  Returns 0 if valid,
 * -1 otherwise.
 */
int
pfr_validate_addr(struct pfr_addr *ad)
{
	int i;

	switch (ad->pfra_af) {
	case AF_INET:
		if (ad->pfra_net > 32)
			return (-1);
		break;
#ifdef INET6
	case AF_INET6:
		if (ad->pfra_net > 128)
			return (-1);
		break;
#endif /* INET6 */
	default:
		return (-1);
	}
	/*
	 * The byte scans below index the pfr_addr itself and rely on
	 * pfra_u (the address bytes) being its first member.
	 */
	if (ad->pfra_net < 128 &&
	    (((caddr_t)ad)[ad->pfra_net/8] & (0xFF >> (ad->pfra_net%8))))
		return (-1);
	/* every byte wholly past the prefix must be zero */
	for (i = (ad->pfra_net+7)/8; i < sizeof(ad->pfra_u); i++)
		if (((caddr_t)ad)[i])
			return (-1);
	if (ad->pfra_not && ad->pfra_not != 1)
		return (-1);
	if (ad->pfra_fback != PFR_FB_NONE)
		return (-1);
	if (ad->pfra_type >= PFRKE_MAX)
		return (-1);
	return (0);
}
839
/*
 * Collect entries of table kt into workq by walking both radix trees
 * (PFRW_SWEEP when sweep is set, PFRW_ENQUEUE otherwise).  If naddr
 * is non-NULL it receives the number of entries collected.
 */
void
pfr_enqueue_addrs(struct pfr_ktable *kt, struct pfr_kentryworkq *workq,
    int *naddr, int sweep)
{
	struct pfr_walktree	w;

	SLIST_INIT(workq);
	bzero(&w, sizeof(w));
	w.pfrw_op = sweep ? PFRW_SWEEP : PFRW_ENQUEUE;
	w.pfrw_workq = workq;
	if (kt->pfrkt_ip4 != NULL)
		if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
			DPFPRINTF(LOG_ERR,
			    "pfr_enqueue_addrs: IPv4 walktree failed.");
	if (kt->pfrkt_ip6 != NULL)
		if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
			DPFPRINTF(LOG_ERR,
			    "pfr_enqueue_addrs: IPv6 walktree failed.");
	if (naddr != NULL)
		*naddr = w.pfrw_cnt;
}
861
/*
 * Run the PFRW_MARK walker over both radix trees of table kt,
 * resetting the per-entry mark state used by the del/set operations.
 */
void
pfr_mark_addrs(struct pfr_ktable *kt)
{
	struct pfr_walktree	w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_MARK;
	if (rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w))
		DPFPRINTF(LOG_ERR,
		    "pfr_mark_addrs: IPv4 walktree failed.");
	if (rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w))
		DPFPRINTF(LOG_ERR,
		    "pfr_mark_addrs: IPv6 walktree failed.");
}
876
877
/*
 * Look up ad in table kt.  Network addresses use an exact radix
 * lookup with the prefix mask; host addresses use best-match, and
 * with 'exact' set a host query will not accept a network entry.
 * Returns the matching kentry or NULL.
 */
struct pfr_kentry *
pfr_lookup_addr(struct pfr_ktable *kt, struct pfr_addr *ad, int exact)
{
	union sockaddr_union	 sa, mask;
	struct radix_node_head	*head;
	struct pfr_kentry	*ke;

	bzero(&sa, sizeof(sa));
	switch (ad->pfra_af) {
	case AF_INET:
		FILLIN_SIN(sa.sin, ad->pfra_ip4addr);
		head = kt->pfrkt_ip4;
		break;
#ifdef INET6
	case AF_INET6:
		FILLIN_SIN6(sa.sin6, ad->pfra_ip6addr);
		head = kt->pfrkt_ip6;
		break;
#endif /* INET6 */
	default:
		unhandled_af(ad->pfra_af);
	}
	if (ADDR_NETWORK(ad)) {
		pfr_prepare_network(&mask, ad->pfra_af, ad->pfra_net);
		ke = (struct pfr_kentry *)rn_lookup(&sa, &mask, head);
	} else {
		ke = (struct pfr_kentry *)rn_match(&sa, head);
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}
910
/*
 * Like pfr_lookup_addr() but keyed by an existing kentry instead of a
 * pfr_addr, avoiding the sockaddr conversion.  Returns the matching
 * entry in kt or NULL.
 */
struct pfr_kentry *
pfr_lookup_kentry(struct pfr_ktable *kt, struct pfr_kentry *key, int exact)
{
	union sockaddr_union	 mask;
	struct radix_node_head	*head;
	struct pfr_kentry	*ke;

	switch (key->pfrke_af) {
	case AF_INET:
		head = kt->pfrkt_ip4;
		break;
#ifdef INET6
	case AF_INET6:
		head = kt->pfrkt_ip6;
		break;
#endif /* INET6 */
	default:
		unhandled_af(key->pfrke_af);
	}
	if (KENTRY_NETWORK(key)) {
		pfr_prepare_network(&mask, key->pfrke_af, key->pfrke_net);
		ke = (struct pfr_kentry *)rn_lookup(&key->pfrke_sa, &mask,
		    head);
	} else {
		ke = (struct pfr_kentry *)rn_match(&key->pfrke_sa, head);
		if (exact && ke && KENTRY_NETWORK(ke))
			ke = NULL;
	}
	return (ke);
}
941
/*
 * Allocate and initialize a table entry from a validated pfr_addr.
 * Allocates PR_NOWAIT (caller may hold locks); interface references
 * for route/cost entries are taken immediately via pfi_kif_get().
 * Returns NULL on allocation failure.
 */
struct pfr_kentry *
pfr_create_kentry(struct pfr_addr *ad)
{
	struct pfr_kentry_all	*ke;

	if (ad->pfra_type >= PFRKE_MAX)
		panic("unknown pfra_type %d", ad->pfra_type);

	ke = pool_get(&pfr_kentry_pl[ad->pfra_type], PR_NOWAIT | PR_ZERO);
	if (ke == NULL)
		return (NULL);

	ke->pfrke_type = ad->pfra_type;

	/* set weight allowing implicit weights */
	if (ad->pfra_weight == 0)
		ad->pfra_weight = 1;

	switch (ke->pfrke_type) {
	case PFRKE_PLAIN:
		break;
	case PFRKE_COST:
		((struct pfr_kentry_cost *)ke)->weight = ad->pfra_weight;
		/* FALLTHROUGH */
	case PFRKE_ROUTE:
		if (ad->pfra_ifname[0])
			ke->pfrke_rkif = pfi_kif_get(ad->pfra_ifname, NULL);
		if (ke->pfrke_rkif)
			pfi_kif_ref(ke->pfrke_rkif, PFI_KIF_REF_ROUTE);
		break;
	}

	switch (ad->pfra_af) {
	case AF_INET:
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
		break;
#ifdef INET6
	case AF_INET6:
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
		break;
#endif /* INET6 */
	default:
		unhandled_af(ad->pfra_af);
	}
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	if (ad->pfra_not)
		ke->pfrke_flags |= PFRKE_FLAG_NOT;
	return ((struct pfr_kentry *)ke);
}
992
/*
 * Variant of pfr_create_kentry() for use before the kernel locks are
 * taken: may sleep for user ioctls (PR_WAITOK), and only records the
 * route interface *name* -- the kif reference is taken later, under
 * the net lock, by pfr_kentry_kif_ref().  Returns NULL on allocation
 * failure.
 */
struct pfr_kentry *
pfr_create_kentry_unlocked(struct pfr_addr *ad, int flags)
{
	struct pfr_kentry_all	*ke;
	int			 mflags = PR_ZERO;

	if (ad->pfra_type >= PFRKE_MAX)
		panic("unknown pfra_type %d", ad->pfra_type);

	if (flags & PFR_FLAG_USERIOCTL)
		mflags |= PR_WAITOK;
	else
		mflags |= PR_NOWAIT;

	ke = pool_get(&pfr_kentry_pl[ad->pfra_type], mflags);
	if (ke == NULL)
		return (NULL);

	ke->pfrke_type = ad->pfra_type;

	/* set weight allowing implicit weights */
	if (ad->pfra_weight == 0)
		ad->pfra_weight = 1;

	switch (ke->pfrke_type) {
	case PFRKE_PLAIN:
		break;
	case PFRKE_COST:
		((struct pfr_kentry_cost *)ke)->weight = ad->pfra_weight;
		/* FALLTHROUGH */
	case PFRKE_ROUTE:
		if (ad->pfra_ifname[0])
			(void) strlcpy(ke->pfrke_rifname, ad->pfra_ifname,
			    IFNAMSIZ);
		break;
	}

	switch (ad->pfra_af) {
	case AF_INET:
		FILLIN_SIN(ke->pfrke_sa.sin, ad->pfra_ip4addr);
		break;
#ifdef INET6
	case AF_INET6:
		FILLIN_SIN6(ke->pfrke_sa.sin6, ad->pfra_ip6addr);
		break;
#endif /* INET6 */
	default:
		unhandled_af(ad->pfra_af);
	}
	ke->pfrke_af = ad->pfra_af;
	ke->pfrke_net = ad->pfra_net;
	if (ad->pfra_not)
		ke->pfrke_flags |= PFRKE_FLAG_NOT;
	return ((struct pfr_kentry *)ke);
}
1048
/*
 * Resolve and reference the route interface recorded by name in an
 * entry built by pfr_create_kentry_unlocked().  Must run with the
 * net lock held (pfi_kif_get/pfi_kif_ref requirement).
 */
void
pfr_kentry_kif_ref(struct pfr_kentry *ke_all)
{
	struct pfr_kentry_all	*ke = (struct pfr_kentry_all *)ke_all;

	NET_ASSERT_LOCKED();
	switch (ke->pfrke_type) {
	case PFRKE_PLAIN:
		break;
	case PFRKE_COST:
	case PFRKE_ROUTE:
		if (ke->pfrke_rifname[0])
			ke->pfrke_rkif = pfi_kif_get(ke->pfrke_rifname, NULL);
		if (ke->pfrke_rkif)
			pfi_kif_ref(ke->pfrke_rkif, PFI_KIF_REF_ROUTE);
		break;
	}
}
1067
1068void
1069pfr_destroy_kentries(struct pfr_kentryworkq *workq)
1070{
1071 struct pfr_kentry *p;
1072
1073 while ((p = SLIST_FIRST(workq)) != NULL) {
1074 YIELD(1);
1075 SLIST_REMOVE_HEAD(workq, pfrke_workq);
1076 pfr_destroy_kentry(p);
1077 }
1078}
1079
/*
 * Drain the add-path I/O queue.  Entries that were committed to the
 * real table (PFR_FB_ADDED, unless this was a dummy run) are kept;
 * everything else is destroyed.
 */
void
pfr_destroy_ioq(struct pfr_kentryworkq *ioq, int flags)
{
	struct pfr_kentry	*p;

	while ((p = SLIST_FIRST(ioq)) != NULL) {
		YIELD(flags & PFR_FLAG_USERIOCTL);
		SLIST_REMOVE_HEAD(ioq, pfrke_ioq);
		/*
		 * we destroy only those entries, which did not make it to
		 * table
		 */
		if ((p->pfrke_fb != PFR_FB_ADDED) || (flags & PFR_FLAG_DUMMY))
			pfr_destroy_kentry(p);
	}
}
1096
/*
 * Free a single table entry: release its counters, drop the route
 * interface reference for route/cost entries, then return the entry
 * to its type-specific pool.
 */
void
pfr_destroy_kentry(struct pfr_kentry *ke)
{
	if (ke->pfrke_counters)
		pool_put(&pfr_kcounters_pl, ke->pfrke_counters);
	if (ke->pfrke_type == PFRKE_COST || ke->pfrke_type == PFRKE_ROUTE)
		pfi_kif_unref(((struct pfr_kentry_all *)ke)->pfrke_rkif,
		    PFI_KIF_REF_ROUTE);
	pool_put(&pfr_kentry_pl[ke->pfrke_type], ke);
}
1107
/*
 * Route every entry on workq into table kt, stamping tzero and
 * updating the table count and weight info.  Stops at the first
 * routing failure (remaining entries are left unrouted).
 */
void
pfr_insert_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq, time_t tzero)
{
	struct pfr_kentry	*p;
	int			 rv, n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		rv = pfr_route_kentry(kt, p);
		if (rv) {
			DPFPRINTF(LOG_ERR,
			    "pfr_insert_kentries: cannot route entry "
			    "(code=%d).", rv);
			break;
		}
		p->pfrke_tzero = tzero;
		++n;
		if (p->pfrke_type == PFRKE_COST)
			kt->pfrkt_refcntcost++;
		pfr_ktable_winfo_update(kt, p);
		YIELD(1);
	}
	/* only the successfully routed entries count */
	kt->pfrkt_cnt += n;
}
1132
1133int
1134pfr_insert_kentry(struct pfr_ktable *kt, struct pfr_addr *ad, time_t tzero)
1135{
1136 struct pfr_kentry *p;
1137 int rv;
1138
1139 p = pfr_lookup_addr(kt, ad, 1);
1140 if (p != NULL)
1141 return (0);
1142 p = pfr_create_kentry(ad);
1143 if (p == NULL)
1144 return (EINVAL);
1145
1146 rv = pfr_route_kentry(kt, p);
1147 if (rv)
1148 return (rv);
1149
1150 p->pfrke_tzero = tzero;
1151 if (p->pfrke_type == PFRKE_COST)
1152 kt->pfrkt_refcntcost++;
1153 kt->pfrkt_cnt++;
1154 pfr_ktable_winfo_update(kt, p);
1155
1156 return (0);
1157}
1158
1159int
1160pfr_remove_kentry(struct pfr_ktable *kt, struct pfr_addr *ad)
1161{
1162 struct pfr_kentryworkq workq = SLIST_HEAD_INITIALIZER(workq);
1163 struct pfr_kentry *p;
1164
1165 p = pfr_lookup_addr(kt, ad, 1);
1166 if (p == NULL || ISSET(p->pfrke_flags, PFRKE_FLAG_NOT))
1167 return (ESRCH);
1168
1169 if (ISSET(p->pfrke_flags, PFRKE_FLAG_MARK))
1170 return (0);
1171
1172 SET(p->pfrke_flags, PFRKE_FLAG_MARK);
1173 SLIST_INSERT_HEAD(&workq, p, pfrke_workq);
1174 pfr_remove_kentries(kt, &workq);
1175
1176 return (0);
1177}
1178
/*
 * Unroute and destroy every entry on workq, updating the table's
 * address count and, if cost entries were removed, recomputing the
 * load-balancing weight info from the remaining addresses.
 */
void
pfr_remove_kentries(struct pfr_ktable *kt,
    struct pfr_kentryworkq *workq)
{
	struct pfr_kentry	*p;
	struct pfr_kentryworkq	 addrq;
	int			 n = 0;

	SLIST_FOREACH(p, workq, pfrke_workq) {
		pfr_unroute_kentry(kt, p);
		++n;
		YIELD(1);
		if (p->pfrke_type == PFRKE_COST)
			kt->pfrkt_refcntcost--;
	}
	/* adjust the count once, after all entries are unrouted */
	kt->pfrkt_cnt -= n;
	pfr_destroy_kentries(workq);

	/* update maxweight and gcd for load balancing */
	if (kt->pfrkt_refcntcost > 0) {
		kt->pfrkt_gcdweight = 0;
		kt->pfrkt_maxweight = 1;
		/* addrq is filled (and initialized) by pfr_enqueue_addrs() */
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		SLIST_FOREACH(p, &addrq, pfrke_workq)
			pfr_ktable_winfo_update(kt, p);
	}
}
1206
1207void
1208pfr_clean_node_mask(struct pfr_ktable *kt,
1209 struct pfr_kentryworkq *workq)
1210{
1211 struct pfr_kentry *p;
1212
1213 SLIST_FOREACH(p, workq, pfrke_workq) {
1214 pfr_unroute_kentry(kt, p);
1215 }
1216}
1217
1218void
1219pfr_clstats_kentries(struct pfr_kentryworkq *workq, time_t tzero, int negchange)
1220{
1221 struct pfr_kentry *p;
1222
1223 SLIST_FOREACH(p, workq, pfrke_workq) {
1224 if (negchange)
1225 p->pfrke_flags ^= PFRKE_FLAG_NOT;
1226 if (p->pfrke_counters) {
1227 pool_put(&pfr_kcounters_pl, p->pfrke_counters);
1228 p->pfrke_counters = NULL;
1229 }
1230 p->pfrke_tzero = tzero;
1231 }
1232}
1233
1234void
1235pfr_reset_feedback(struct pfr_addr *addr, int size, int flags)
1236{
1237 struct pfr_addr ad;
1238 int i;
1239
1240 for (i = 0; i < size; i++) {
1241 YIELD(flags & PFR_FLAG_USERIOCTL);
1242 if (COPYIN(addr+i, &ad, sizeof(ad), flags))
1243 break;
1244 ad.pfra_fback = PFR_FB_NONE;
1245 if (COPYOUT(&ad, addr+i, sizeof(ad), flags))
1246 break;
1247 }
1248}
1249
1250void
1251pfr_prepare_network(union sockaddr_union *sa, int af, int net)
1252{
1253#ifdef INET6
1254 int i;
1255#endif /* INET6 */
1256
1257 bzero(sa, sizeof(*sa));
1258 switch (af) {
1259 case AF_INET:
1260 sa->sin.sin_len = sizeof(sa->sin);
1261 sa->sin.sin_family = AF_INET;
1262 sa->sin.sin_addr.s_addr = net ? htonl(-1 << (32-net)) : 0;
1263 break;
1264#ifdef INET6
1265 case AF_INET6:
1266 sa->sin6.sin6_len = sizeof(sa->sin6);
1267 sa->sin6.sin6_family = AF_INET6;
1268 for (i = 0; i < 4; i++) {
1269 if (net <= 32) {
1270 sa->sin6.sin6_addr.s6_addr32[i] =
1271 net ? htonl(-1 << (32-net)) : 0;
1272 break;
1273 }
1274 sa->sin6.sin6_addr.s6_addr32[i] = 0xFFFFFFFF;
1275 net -= 32;
1276 }
1277 break;
1278#endif /* INET6 */
1279 default:
1280 unhandled_af(af);
1281 }
1282}
1283
/*
 * Link entry ke into the radix tree of table kt matching the entry's
 * address family.  Returns 0 on success, -1 if rn_addroute() failed
 * (e.g. a duplicate node).  The caller owns ke on failure.
 */
int
pfr_route_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
{
	union sockaddr_union	 mask;
	struct radix_node	*rn;
	struct radix_node_head	*head;

	bzero(ke->pfrke_node, sizeof(ke->pfrke_node));
	switch (ke->pfrke_af) {
	case AF_INET:
		head = kt->pfrkt_ip4;
		break;
#ifdef INET6
	case AF_INET6:
		head = kt->pfrkt_ip6;
		break;
#endif /* INET6 */
	default:
		/* unhandled_af() does not return, so head is always set */
		unhandled_af(ke->pfrke_af);
	}

	if (KENTRY_NETWORK(ke)) {
		/* network entry: insert with an explicit netmask */
		pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
		rn = rn_addroute(&ke->pfrke_sa, &mask, head, ke->pfrke_node, 0);
	} else
		rn = rn_addroute(&ke->pfrke_sa, NULL, head, ke->pfrke_node, 0);

	return (rn == NULL ? -1 : 0);
}
1313
1314int
1315pfr_unroute_kentry(struct pfr_ktable *kt, struct pfr_kentry *ke)
1316{
1317 union sockaddr_union mask;
1318 struct radix_node *rn;
1319 struct radix_node_head *head;
1320
1321 switch (ke->pfrke_af) {
1322 case AF_INET:
1323 head = kt->pfrkt_ip4;
1324 break;
1325#ifdef INET6
1326 case AF_INET6:
1327 head = kt->pfrkt_ip6;
1328 break;
1329#endif /* INET6 */
1330 default:
1331 unhandled_af(ke->pfrke_af);
1332 }
1333
1334 if (KENTRY_NETWORK(ke)) {
1335 pfr_prepare_network(&mask, ke->pfrke_af, ke->pfrke_net);
1336 rn = rn_delete(&ke->pfrke_sa, &mask, head, NULL);
1337 } else
1338 rn = rn_delete(&ke->pfrke_sa, NULL, head, NULL);
1339
1340 if (rn == NULL) {
1341 DPFPRINTF(LOG_ERR, "pfr_unroute_kentry: delete failed.\n");
1342 return (-1);
1343 }
1344 return (0);
1345}
1346
/*
 * Convert a kernel table entry into the user-visible pfr_addr
 * representation.  A NULL entry yields an all-zero pfr_addr.
 */
void
pfr_copyout_addr(struct pfr_addr *ad, struct pfr_kentry *ke)
{
	bzero(ad, sizeof(*ad));
	if (ke == NULL)
		return;
	ad->pfra_af = ke->pfrke_af;
	ad->pfra_net = ke->pfrke_net;
	ad->pfra_type = ke->pfrke_type;
	if (ke->pfrke_flags & PFRKE_FLAG_NOT)
		ad->pfra_not = 1;

	switch (ad->pfra_af) {
	case AF_INET:
		ad->pfra_ip4addr = ke->pfrke_sa.sin.sin_addr;
		break;
#ifdef INET6
	case AF_INET6:
		ad->pfra_ip6addr = ke->pfrke_sa.sin6.sin6_addr;
		break;
#endif /* INET6 */
	default:
		unhandled_af(ad->pfra_af);
	}
	if (ke->pfrke_counters != NULL)
		ad->pfra_states = ke->pfrke_counters->states;
	switch (ke->pfrke_type) {
	case PFRKE_COST:
		/* cost entries carry a weight in addition to the kif */
		ad->pfra_weight = ((struct pfr_kentry_cost *)ke)->weight;
		/* FALLTHROUGH */
	case PFRKE_ROUTE:
		/* both cost and route entries may reference an interface */
		if (((struct pfr_kentry_route *)ke)->kif != NULL)
			strlcpy(ad->pfra_ifname,
			    ((struct pfr_kentry_route *)ke)->kif->pfik_name,
			    IFNAMSIZ);
		break;
	default:
		break;
	}
}
1387
/*
 * Radix-tree walker callback.  The operation is selected by
 * w->pfrw_op:
 *   PFRW_MARK           clear the per-entry mark flag
 *   PFRW_SWEEP          enqueue entries left unmarked since PFRW_MARK
 *   PFRW_ENQUEUE        enqueue every entry
 *   PFRW_GET_ADDRS      copy addresses out to the user buffer
 *   PFRW_GET_ASTATS     copy addresses plus statistics out
 *   PFRW_POOL_GET       pick the pfrw_cnt'th non-negated entry
 *   PFRW_DYNADDR_UPDATE refresh a dynamic-address (ifaddr) snapshot
 * Returns non-zero to abort the walk (copy error or pool hit).
 */
int
pfr_walktree(struct radix_node *rn, void *arg, u_int id)
{
	struct pfr_kentry	*ke = (struct pfr_kentry *)rn;
	struct pfr_walktree	*w = arg;
	union sockaddr_union	 mask;
	int			 flags = w->pfrw_flags;

	switch (w->pfrw_op) {
	case PFRW_MARK:
		ke->pfrke_flags &= ~PFRKE_FLAG_MARK;
		break;
	case PFRW_SWEEP:
		/* marked entries survived; only sweep the rest */
		if (ke->pfrke_flags & PFRKE_FLAG_MARK)
			break;
		/* FALLTHROUGH */
	case PFRW_ENQUEUE:
		SLIST_INSERT_HEAD(w->pfrw_workq, ke, pfrke_workq);
		w->pfrw_cnt++;
		break;
	case PFRW_GET_ADDRS:
		/* copy out while user-supplied space remains */
		if (w->pfrw_free-- > 0) {
			struct pfr_addr ad;

			pfr_copyout_addr(&ad, ke);
			if (copyout(&ad, w->pfrw_addr, sizeof(ad)))
				return (EFAULT);
			w->pfrw_addr++;
		}
		break;
	case PFRW_GET_ASTATS:
		if (w->pfrw_free-- > 0) {
			struct pfr_astats as;

			pfr_copyout_addr(&as.pfras_a, ke);

			if (ke->pfrke_counters) {
				bcopy(ke->pfrke_counters->pfrkc_packets,
				    as.pfras_packets, sizeof(as.pfras_packets));
				bcopy(ke->pfrke_counters->pfrkc_bytes,
				    as.pfras_bytes, sizeof(as.pfras_bytes));
			} else {
				/* no counter block: report zeros + NOCOUNT */
				bzero(as.pfras_packets,
				    sizeof(as.pfras_packets));
				bzero(as.pfras_bytes, sizeof(as.pfras_bytes));
				as.pfras_a.pfra_fback = PFR_FB_NOCOUNT;
			}
			as.pfras_tzero = ke->pfrke_tzero;

			if (COPYOUT(&as, w->pfrw_astats, sizeof(as), flags))
				return (EFAULT);
			w->pfrw_astats++;
		}
		break;
	case PFRW_POOL_GET:
		if (ke->pfrke_flags & PFRKE_FLAG_NOT)
			break; /* negative entries are ignored */
		if (!w->pfrw_cnt--) {
			w->pfrw_kentry = ke;
			return (1); /* finish search */
		}
		break;
	case PFRW_DYNADDR_UPDATE:
		switch (ke->pfrke_af) {
		case AF_INET:
			/* only the first v4 entry defines addr/mask */
			if (w->pfrw_dyn->pfid_acnt4++ > 0)
				break;
			pfr_prepare_network(&mask, AF_INET, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr4 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET);
			w->pfrw_dyn->pfid_mask4 = *SUNION2PF(
			    &mask, AF_INET);
			break;
#ifdef INET6
		case AF_INET6:
			/* only the first v6 entry defines addr/mask */
			if (w->pfrw_dyn->pfid_acnt6++ > 0)
				break;
			pfr_prepare_network(&mask, AF_INET6, ke->pfrke_net);
			w->pfrw_dyn->pfid_addr6 = *SUNION2PF(
			    &ke->pfrke_sa, AF_INET6);
			w->pfrw_dyn->pfid_mask6 = *SUNION2PF(
			    &mask, AF_INET6);
			break;
#endif /* INET6 */
		default:
			unhandled_af(ke->pfrke_af);
		}
		break;
	}
	return (0);
}
1479
1480int
1481pfr_clr_tables(struct pfr_table *filter, int *ndel, int flags)
1482{
1483 struct pfr_ktableworkq workq;
1484 struct pfr_ktable *p;
1485 int xdel = 0;
1486
1487 ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ALLRSETS);
1488 if (pfr_fix_anchor(filter->pfrt_anchor))
1489 return (EINVAL);
1490 if (pfr_table_count(filter, flags) < 0)
1491 return (ENOENT);
1492
1493 SLIST_INIT(&workq);
1494 RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
1495 if (pfr_skip_table(filter, p, flags))
1496 continue;
1497 if (!strcmp(p->pfrkt_anchor, PF_RESERVED_ANCHOR))
1498 continue;
1499 if (!(p->pfrkt_flags & PFR_TFLAG_ACTIVE))
1500 continue;
1501 p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
1502 SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
1503 xdel++;
1504 }
1505 if (!(flags & PFR_FLAG_DUMMY)) {
1506 pfr_setflags_ktables(&workq);
1507 }
1508 if (ndel != NULL)
1509 *ndel = xdel;
1510 return (0);
1511}
1512
/*
 * DIOCRADDTABLES: create the `size' tables described by tbl[].
 * All memory is pre-allocated on `auxq' before NET_LOCK/PF_LOCK are
 * taken; under the locks, tables are moved to `addq' (new) or cause
 * an update on `changeq' (existing but inactive); leftovers on auxq
 * are freed after the locks are dropped.  Reports the number of
 * tables added through nadd.
 */
int
pfr_add_tables(struct pfr_table *tbl, int size, int *nadd, int flags)
{
	struct pfr_ktableworkq	 addq, changeq, auxq;
	struct pfr_ktable	*p, *q, *r, *n, *w, key;
	int			 i, rv, xadd = 0;
	time_t			 tzero = gettime();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	SLIST_INIT(&addq);
	SLIST_INIT(&changeq);
	SLIST_INIT(&auxq);
	/* pre-allocate all memory outside of locks */
	for (i = 0; i < size; i++) {
		YIELD(flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			senderr(EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, PFR_TFLAG_USRMASK,
		    flags & PFR_FLAG_USERIOCTL))
			senderr(EINVAL);
		key.pfrkt_flags |= PFR_TFLAG_ACTIVE;
		p = pfr_create_ktable(&key.pfrkt_t, tzero, 0,
		    (flags & PFR_FLAG_USERIOCTL? PR_WAITOK : PR_NOWAIT));
		if (p == NULL)
			senderr(ENOMEM);

		/*
		 * Note: we also pre-allocate a root table here. We keep it
		 * at ->pfrkt_root, which we must not forget about.
		 */
		key.pfrkt_flags = 0;
		memset(key.pfrkt_anchor, 0, sizeof(key.pfrkt_anchor));
		p->pfrkt_root = pfr_create_ktable(&key.pfrkt_t, 0, 0,
		    (flags & PFR_FLAG_USERIOCTL? PR_WAITOK : PR_NOWAIT));
		if (p->pfrkt_root == NULL) {
			pfr_destroy_ktable(p, 0);
			senderr(ENOMEM);
		}

		/* drop duplicates within the request itself */
		SLIST_FOREACH(q, &auxq, pfrkt_workq) {
			if (!pfr_ktable_compare(p, q)) {
				/*
				 * We need no lock here, because `p` is empty,
				 * there are no rules or shadow tables
				 * attached.
				 */
				pfr_destroy_ktable(p->pfrkt_root, 0);
				p->pfrkt_root = NULL;
				pfr_destroy_ktable(p, 0);
				p = NULL;
				break;
			}
		}
		if (q != NULL)
			continue;

		SLIST_INSERT_HEAD(&auxq, p, pfrkt_workq);
	}

	/*
	 * auxq contains freshly allocated tables with no dups.
	 * also note there are no rulesets attached, because
	 * the attach operation requires PF_LOCK().
	 */
	NET_LOCK();
	PF_LOCK();
	SLIST_FOREACH_SAFE(n, &auxq, pfrkt_workq, w) {
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, n);
		if (p == NULL) {
			/* brand-new table: move from auxq to addq */
			SLIST_REMOVE(&auxq, n, pfr_ktable, pfrkt_workq);
			SLIST_INSERT_HEAD(&addq, n, pfrkt_workq);
			xadd++;
		} else if (!(flags & PFR_FLAG_DUMMY) &&
		    !(p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			/* existing inactive table: reactivate with new
			 * user flags; the pre-allocated copy stays on auxq */
			p->pfrkt_nflags =
			    (p->pfrkt_flags & ~PFR_TFLAG_USRMASK) |
			    (n->pfrkt_flags & PFR_TFLAG_USRMASK) |
			    PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&changeq, p, pfrkt_workq);
		}
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		/*
		 * addq contains tables we have to insert and attach rules to
		 * them
		 *
		 * changeq contains tables we need to update
		 *
		 * auxq contains pre-allocated tables, we won't use and we must
		 * free them
		 */
		SLIST_FOREACH_SAFE(p, &addq, pfrkt_workq, w) {
			p->pfrkt_rs = pf_find_or_create_ruleset(
			    p->pfrkt_anchor);
			if (p->pfrkt_rs == NULL) {
				/* no ruleset: cannot insert, recycle */
				xadd--;
				SLIST_REMOVE(&addq, p, pfr_ktable, pfrkt_workq);
				SLIST_INSERT_HEAD(&auxq, p, pfrkt_workq);
				continue;
			}
			p->pfrkt_rs->tables++;

			if (!p->pfrkt_anchor[0]) {
				/* main-anchor table needs no root table */
				q = p->pfrkt_root;
				p->pfrkt_root = NULL;
				SLIST_INSERT_HEAD(&auxq, q, pfrkt_workq);
				continue;
			}

			/* use pre-allocated root table as a key */
			q = p->pfrkt_root;
			p->pfrkt_root = NULL;
			r = RB_FIND(pfr_ktablehead, &pfr_ktables, q);
			if (r != NULL) {
				p->pfrkt_root = r;
				SLIST_INSERT_HEAD(&auxq, q, pfrkt_workq);
				continue;
			}
			/*
			 * there is a chance we could create root table in
			 * earlier iteration. such table may exist in addq only
			 * then.
			 */
			SLIST_FOREACH(r, &addq, pfrkt_workq) {
				if (!pfr_ktable_compare(r, q)) {
					/*
					 * `r` is our root table we've found
					 * earlier, `q` can get dropped.
					 */
					p->pfrkt_root = r;
					SLIST_INSERT_HEAD(&auxq, q,
					    pfrkt_workq);
					break;
				}
			}
			if (r != NULL)
				continue;

			q->pfrkt_rs = pf_find_or_create_ruleset(q->pfrkt_anchor);
			/*
			 * root tables are attached to main ruleset,
			 * because ->pfrkt_anchor[0] == '\0'
			 */
			KASSERT(q->pfrkt_rs == &pf_main_ruleset);
			q->pfrkt_rs->tables++;
			p->pfrkt_root = q;
			SLIST_INSERT_HEAD(&addq, q, pfrkt_workq);
		}

		pfr_insert_ktables(&addq);
		pfr_setflags_ktables(&changeq);
	}
	PF_UNLOCK();
	NET_UNLOCK();

	/* free unused pre-allocations; on dummy runs also free addq */
	pfr_destroy_ktables_aux(&auxq);
	if (flags & PFR_FLAG_DUMMY)
		pfr_destroy_ktables_aux(&addq);

	if (nadd != NULL)
		*nadd = xadd;
	return (0);
_bad:
	pfr_destroy_ktables_aux(&auxq);
	return (rv);
}
1680
/*
 * DIOCRDELTABLES: deactivate the `size' active tables described by
 * tbl[], skipping duplicates within the request.  Reports the number
 * of tables marked for deletion through ndel.
 */
int
pfr_del_tables(struct pfr_table *tbl, int size, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		YIELD(flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			/* skip tables already queued by this request */
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_ACTIVE;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xdel++;
		}
_skip:
	;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1717
/*
 * DIOCRGETTABLES: copy the tables matching filter out to tbl[].
 * If the caller's buffer is too small, only the required size is
 * reported back through *size and nothing is copied.
 */
int
pfr_get_tables(struct pfr_table *filter, struct pfr_table *tbl, int *size,
    int flags)
{
	struct pfr_ktable	*p;
	int			 n, nn;

	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		/* buffer too small: report required size only */
		*size = n;
		return (0);
	}
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_t, tbl++, sizeof(*tbl), flags))
			return (EFAULT);
	}
	/* n must hit exactly zero, else count and tree disagree */
	if (n) {
		DPFPRINTF(LOG_ERR,
		    "pfr_get_tables: corruption detected (%d).", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}
1751
/*
 * DIOCRGETTSTATS: copy the statistics of tables matching filter out
 * to tbl[], optionally clearing them (PFR_FLAG_CLSTATS) afterwards.
 * If the buffer is too small, only the required size is reported.
 */
int
pfr_get_tstats(struct pfr_table *filter, struct pfr_tstats *tbl, int *size,
    int flags)
{
	struct pfr_ktable	*p;
	struct pfr_ktableworkq	 workq;
	int			 n, nn;
	time_t			 tzero = gettime();

	/* XXX PFR_FLAG_CLSTATS disabled */
	ACCEPT_FLAGS(flags, PFR_FLAG_ALLRSETS);
	if (pfr_fix_anchor(filter->pfrt_anchor))
		return (EINVAL);
	n = nn = pfr_table_count(filter, flags);
	if (n < 0)
		return (ENOENT);
	if (n > *size) {
		/* buffer too small: report required size only */
		*size = n;
		return (0);
	}
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (pfr_skip_table(filter, p, flags))
			continue;
		if (n-- <= 0)
			continue;
		if (COPYOUT(&p->pfrkt_ts, tbl++, sizeof(*tbl), flags))
			return (EFAULT);
		/* remember copied tables in case stats are to be cleared */
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
	}
	if (flags & PFR_FLAG_CLSTATS)
		pfr_clstats_ktables(&workq, tzero,
		    flags & PFR_FLAG_ADDRSTOO);
	/* n must hit exactly zero, else count and tree disagree */
	if (n) {
		DPFPRINTF(LOG_ERR,
		    "pfr_get_tstats: corruption detected (%d).", n);
		return (ENOTTY);
	}
	*size = nn;
	return (0);
}
1793
/*
 * DIOCRCLRTSTATS: clear the statistics of the `size' tables given in
 * tbl[], optionally including their addresses (PFR_FLAG_ADDRSTOO).
 * Reports the number of tables cleared through nzero.
 */
int
pfr_clr_tstats(struct pfr_table *tbl, int size, int *nzero, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, key;
	int			 i, xzero = 0;
	time_t			 tzero = gettime();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		YIELD(flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0, 0))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL) {
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			xzero++;
		}
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_clstats_ktables(&workq, tzero, flags & PFR_FLAG_ADDRSTOO);
	}
	if (nzero != NULL)
		*nzero = xzero;
	return (0);
}
1823
/*
 * DIOCRSETTFLAGS: set/clear user flags on the `size' tables given in
 * tbl[].  Clearing PERSIST on an otherwise unreferenced table deletes
 * it; counts are reported through nchange and ndel.
 */
int
pfr_set_tflags(struct pfr_table *tbl, int size, int setflag, int clrflag,
    int *nchange, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p, *q, key;
	int			 i, xchange = 0, xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	/* only user-settable flags, and set/clear must not overlap */
	if ((setflag & ~PFR_TFLAG_USRMASK) ||
	    (clrflag & ~PFR_TFLAG_USRMASK) ||
	    (setflag & clrflag))
		return (EINVAL);
	SLIST_INIT(&workq);
	for (i = 0; i < size; i++) {
		YIELD(flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(tbl+i, &key.pfrkt_t, sizeof(key.pfrkt_t), flags))
			return (EFAULT);
		if (pfr_validate_table(&key.pfrkt_t, 0,
		    flags & PFR_FLAG_USERIOCTL))
			return (EINVAL);
		p = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (p != NULL && (p->pfrkt_flags & PFR_TFLAG_ACTIVE)) {
			p->pfrkt_nflags = (p->pfrkt_flags | setflag) &
			    ~clrflag;
			/* no-op change? */
			if (p->pfrkt_nflags == p->pfrkt_flags)
				goto _skip;
			/* already queued by this request? */
			SLIST_FOREACH(q, &workq, pfrkt_workq)
				if (!pfr_ktable_compare(p, q))
					goto _skip;
			SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
			/* dropping PERSIST from an unreferenced table
			 * will destroy it in pfr_setflags_ktable() */
			if ((p->pfrkt_flags & PFR_TFLAG_PERSIST) &&
			    (clrflag & PFR_TFLAG_PERSIST) &&
			    !(p->pfrkt_flags & PFR_TFLAG_REFERENCED))
				xdel++;
			else
				xchange++;
		}
_skip:
	;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
	}
	if (nchange != NULL)
		*nchange = xchange;
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1874
/*
 * DIOCXBEGIN helper: open a table transaction on the given anchor.
 * Any stale inactive tables from a previous transaction are flushed,
 * and a fresh ticket is issued through *ticket.
 */
int
pfr_ina_begin(struct pfr_table *trs, u_int32_t *ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_or_create_ruleset(trs->pfrt_anchor);
	if (rs == NULL)
		return (ENOMEM);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		/* flush leftover inactive tables in this anchor */
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		if (ticket != NULL)
			*ticket = ++rs->tticket;
		rs->topen = 1;
	} else
		/* dummy run: drop the ruleset if we just created it */
		pf_remove_if_empty_ruleset(rs);
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
1907
/*
 * DIOCRINADEFINE: define the inactive (shadow) contents of a table
 * inside an open transaction.  Creates the table (and, for anchored
 * tables, its root table) if necessary, builds a shadow table from
 * addr[0..size-1], and attaches it to the table for a later commit.
 * Reports created tables through nadd and addresses through naddr.
 */
int
pfr_ina_define(struct pfr_table *tbl, struct pfr_addr *addr, int size,
    int *nadd, int *naddr, u_int32_t ticket, int flags)
{
	struct pfr_ktableworkq	 tableq;
	struct pfr_kentryworkq	 addrq;
	struct pfr_ktable	*kt, *rt, *shadow, key;
	struct pfr_kentry	*p;
	struct pfr_addr		 ad;
	struct pf_ruleset	*rs;
	int			 i, rv, xadd = 0, xaddr = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY | PFR_FLAG_ADDRSTOO);
	if (size && !(flags & PFR_FLAG_ADDRSTOO))
		return (EINVAL);
	if (pfr_validate_table(tbl, PFR_TFLAG_USRMASK,
	    flags & PFR_FLAG_USERIOCTL))
		return (EINVAL);
	/* a transaction must be open and the ticket must match */
	rs = pf_find_ruleset(tbl->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);
	tbl->pfrt_flags |= PFR_TFLAG_INACTIVE;
	SLIST_INIT(&tableq);
	kt = RB_FIND(pfr_ktablehead, &pfr_ktables, (struct pfr_ktable *)tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(tbl, 0, 1,
		    (flags & PFR_FLAG_USERIOCTL? PR_WAITOK : PR_NOWAIT));
		if (kt == NULL)
			return (ENOMEM);
		SLIST_INSERT_HEAD(&tableq, kt, pfrkt_workq);
		xadd++;
		if (!tbl->pfrt_anchor[0])
			goto _skip;

		/* find or create root table */
		bzero(&key, sizeof(key));
		strlcpy(key.pfrkt_name, tbl->pfrt_name, sizeof(key.pfrkt_name));
		rt = RB_FIND(pfr_ktablehead, &pfr_ktables, &key);
		if (rt != NULL) {
			kt->pfrkt_root = rt;
			goto _skip;
		}
		rt = pfr_create_ktable(&key.pfrkt_t, 0, 1,
		    (flags & PFR_FLAG_USERIOCTL? PR_WAITOK : PR_NOWAIT));
		if (rt == NULL) {
			pfr_destroy_ktables(&tableq, 0);
			return (ENOMEM);
		}
		SLIST_INSERT_HEAD(&tableq, rt, pfrkt_workq);
		kt->pfrkt_root = rt;
	} else if (!(kt->pfrkt_flags & PFR_TFLAG_INACTIVE))
		/* table existed but was not yet part of the transaction */
		xadd++;
_skip:
	shadow = pfr_create_ktable(tbl, 0, 0,
	    (flags & PFR_FLAG_USERIOCTL? PR_WAITOK : PR_NOWAIT));
	if (shadow == NULL) {
		pfr_destroy_ktables(&tableq, 0);
		return (ENOMEM);
	}
	SLIST_INIT(&addrq);
	for (i = 0; i < size; i++) {
		YIELD(flags & PFR_FLAG_USERIOCTL);
		if (COPYIN(addr+i, &ad, sizeof(ad), flags))
			senderr(EFAULT);
		if (pfr_validate_addr(&ad))
			senderr(EINVAL);
		/* silently skip duplicates within the request */
		if (pfr_lookup_addr(shadow, &ad, 1) != NULL)
			continue;
		p = pfr_create_kentry(&ad);
		if (p == NULL)
			senderr(ENOMEM);
		if (pfr_route_kentry(shadow, p)) {
			pfr_destroy_kentry(p);
			continue;
		}
		SLIST_INSERT_HEAD(&addrq, p, pfrke_workq);
		xaddr++;
		if (p->pfrke_type == PFRKE_COST)
			kt->pfrkt_refcntcost++;
		pfr_ktable_winfo_update(kt, p);
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		/* replace a previously defined shadow, if any */
		if (kt->pfrkt_shadow != NULL)
			pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_flags |= PFR_TFLAG_INACTIVE;
		pfr_insert_ktables(&tableq);
		shadow->pfrkt_cnt = (flags & PFR_FLAG_ADDRSTOO) ?
		    xaddr : NO_ADDRESSES;
		kt->pfrkt_shadow = shadow;
	} else {
		/* dummy run: tear everything down again */
		pfr_clean_node_mask(shadow, &addrq);
		pfr_destroy_ktable(shadow, 0);
		pfr_destroy_ktables(&tableq, 0);
		pfr_destroy_kentries(&addrq);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (naddr != NULL)
		*naddr = xaddr;
	return (0);
_bad:
	pfr_destroy_ktable(shadow, 0);
	pfr_destroy_ktables(&tableq, 0);
	pfr_destroy_kentries(&addrq);
	return (rv);
}
2014
/*
 * DIOCXROLLBACK helper: abort an open table transaction on the given
 * anchor, dropping all inactive tables.  A stale ticket is not an
 * error (returns 0) since there is nothing to roll back.
 */
int
pfr_ina_rollback(struct pfr_table *trs, u_int32_t ticket, int *ndel, int flags)
{
	struct pfr_ktableworkq	 workq;
	struct pfr_ktable	*p;
	struct pf_ruleset	*rs;
	int			 xdel = 0;

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (0);
	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		p->pfrkt_nflags = p->pfrkt_flags & ~PFR_TFLAG_INACTIVE;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		xdel++;
	}
	if (!(flags & PFR_FLAG_DUMMY)) {
		pfr_setflags_ktables(&workq);
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (ndel != NULL)
		*ndel = xdel;
	return (0);
}
2045
/*
 * DIOCXCOMMIT helper: commit an open table transaction on the given
 * anchor, activating every inactive table via pfr_commit_ktable().
 * Reports new tables through nadd and updated ones through nchange.
 */
int
pfr_ina_commit(struct pfr_table *trs, u_int32_t ticket, int *nadd,
    int *nchange, int flags)
{
	struct pfr_ktable	*p, *q;
	struct pfr_ktableworkq	 workq;
	struct pf_ruleset	*rs;
	int			 xadd = 0, xchange = 0;
	time_t			 tzero = gettime();

	ACCEPT_FLAGS(flags, PFR_FLAG_DUMMY);
	rs = pf_find_ruleset(trs->pfrt_anchor);
	if (rs == NULL || !rs->topen || ticket != rs->tticket)
		return (EBUSY);

	SLIST_INIT(&workq);
	RB_FOREACH(p, pfr_ktablehead, &pfr_ktables) {
		if (!(p->pfrkt_flags & PFR_TFLAG_INACTIVE) ||
		    pfr_skip_table(trs, p, 0))
			continue;
		SLIST_INSERT_HEAD(&workq, p, pfrkt_workq);
		/* active tables are updated, others are newly added */
		if (p->pfrkt_flags & PFR_TFLAG_ACTIVE)
			xchange++;
		else
			xadd++;
	}

	if (!(flags & PFR_FLAG_DUMMY)) {
		/* SAFE variant: commit may re-link the table's workq entry */
		SLIST_FOREACH_SAFE(p, &workq, pfrkt_workq, q) {
			pfr_commit_ktable(p, tzero);
		}
		rs->topen = 0;
		pf_remove_if_empty_ruleset(rs);
	}
	if (nadd != NULL)
		*nadd = xadd;
	if (nchange != NULL)
		*nchange = xchange;

	return (0);
}
2087
/*
 * Commit a table's shadow contents.  Three cases:
 *  - shadow carries no addresses: just (re)activate and clear stats;
 *  - kt is active: merge shadow addresses into kt (add new, delete
 *    unmatched, flip changed negations), preserving stats of kept
 *    entries;
 *  - kt is inactive: swap the radix trees wholesale.
 * The shadow table is destroyed and kt's flags are updated last.
 */
void
pfr_commit_ktable(struct pfr_ktable *kt, time_t tzero)
{
	struct pfr_ktable	*shadow = kt->pfrkt_shadow;
	int			 nflags;

	if (shadow->pfrkt_cnt == NO_ADDRESSES) {
		if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
			pfr_clstats_ktable(kt, tzero, 1);
	} else if (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) {
		/* kt might contain addresses */
		struct pfr_kentryworkq	 addrq, addq, changeq, delq, garbageq;
		struct pfr_kentry	*p, *q;
		struct pfr_addr		 ad;

		pfr_enqueue_addrs(shadow, &addrq, NULL, 0);
		/* mark kt's entries; matches below re-mark the keepers */
		pfr_mark_addrs(kt);
		SLIST_INIT(&addq);
		SLIST_INIT(&changeq);
		SLIST_INIT(&delq);
		SLIST_INIT(&garbageq);
		pfr_clean_node_mask(shadow, &addrq);
		while ((p = SLIST_FIRST(&addrq)) != NULL) {
			SLIST_REMOVE_HEAD(&addrq, pfrke_workq);
			pfr_copyout_addr(&ad, p);
			q = pfr_lookup_addr(kt, &ad, 1);
			if (q != NULL) {
				/* kept entry; queue a negation flip if the
				 * shadow disagrees on the NOT flag */
				if ((q->pfrke_flags & PFRKE_FLAG_NOT) !=
				    (p->pfrke_flags & PFRKE_FLAG_NOT))
					SLIST_INSERT_HEAD(&changeq, q,
					    pfrke_workq);
				q->pfrke_flags |= PFRKE_FLAG_MARK;
				SLIST_INSERT_HEAD(&garbageq, p, pfrke_workq);
			} else {
				/* new entry: move from shadow into kt */
				p->pfrke_tzero = tzero;
				SLIST_INSERT_HEAD(&addq, p, pfrke_workq);
			}
		}
		/* entries of kt that stayed unmarked are obsolete */
		pfr_enqueue_addrs(kt, &delq, NULL, ENQUEUE_UNMARKED_ONLY);
		pfr_insert_kentries(kt, &addq, tzero);
		pfr_remove_kentries(kt, &delq);
		pfr_clstats_kentries(&changeq, tzero, INVERT_NEG_FLAG);
		pfr_destroy_kentries(&garbageq);
	} else {
		/* kt cannot contain addresses */
		SWAP(struct radix_node_head *, kt->pfrkt_ip4,
		    shadow->pfrkt_ip4);
		SWAP(struct radix_node_head *, kt->pfrkt_ip6,
		    shadow->pfrkt_ip6);
		SWAP(int, kt->pfrkt_cnt, shadow->pfrkt_cnt);
		pfr_clstats_ktable(kt, tzero, 1);
	}
	nflags = ((shadow->pfrkt_flags & PFR_TFLAG_USRMASK) |
	    (kt->pfrkt_flags & PFR_TFLAG_SETMASK) | PFR_TFLAG_ACTIVE)
	    & ~PFR_TFLAG_INACTIVE;
	pfr_destroy_ktable(shadow, 0);
	kt->pfrkt_shadow = NULL;
	pfr_setflags_ktable(kt, nflags);
}
2147
2148int
2149pfr_validate_table(struct pfr_table *tbl, int allowedflags, int no_reserved)
2150{
2151 int i;
2152
2153 if (!tbl->pfrt_name[0])
2154 return (-1);
2155 if (no_reserved && !strcmp(tbl->pfrt_anchor, PF_RESERVED_ANCHOR))
2156 return (-1);
2157 if (tbl->pfrt_name[PF_TABLE_NAME_SIZE-1])
2158 return (-1);
2159 for (i = strlen(tbl->pfrt_name); i < PF_TABLE_NAME_SIZE; i++)
2160 if (tbl->pfrt_name[i])
2161 return (-1);
2162 if (pfr_fix_anchor(tbl->pfrt_anchor))
2163 return (-1);
2164 if (tbl->pfrt_flags & ~allowedflags)
2165 return (-1);
2166 return (0);
2167}
2168
2169/*
2170 * Rewrite anchors referenced by tables to remove slashes
2171 * and check for validity.
2172 */
int
pfr_fix_anchor(char *anchor)
{
	size_t	 siz = MAXPATHLEN;
	size_t	 i;

	/*
	 * Strip leading slashes by shifting the string left; the
	 * regions overlap, so use memmove (bcopy is obsolete).
	 */
	if (anchor[0] == '/') {
		char	*path;
		size_t	 off;

		path = anchor;
		off = 1;
		while (*++path == '/')
			off++;
		memmove(anchor, path, siz - off);
		memset(anchor + siz - off, 0, off);
	}
	/* must be NUL-terminated within the buffer ... */
	if (anchor[siz - 1])
		return (-1);
	/* ... and zero-padded after the terminator */
	for (i = strlen(anchor); i < siz; i++)
		if (anchor[i])
			return (-1);
	return (0);
}
2197
/*
 * Number of tables visible through filter: all of them with
 * PFR_FLAG_ALLRSETS, otherwise those of the filter's anchor (or the
 * main ruleset for an empty anchor).  Returns -1 for an unknown
 * anchor.
 */
int
pfr_table_count(struct pfr_table *filter, int flags)
{
	struct pf_ruleset *rs;

	if (flags & PFR_FLAG_ALLRSETS)
		return (pfr_ktable_cnt);
	if (filter->pfrt_anchor[0]) {
		rs = pf_find_ruleset(filter->pfrt_anchor);
		return ((rs != NULL) ? rs->tables : -1);
	}
	return (pf_main_ruleset.tables);
}
2211
2212int
2213pfr_skip_table(struct pfr_table *filter, struct pfr_ktable *kt, int flags)
2214{
2215 if (flags & PFR_FLAG_ALLRSETS)
2216 return (0);
2217 if (strcmp(filter->pfrt_anchor, kt->pfrkt_anchor))
2218 return (1);
2219 return (0);
2220}
2221
2222void
2223pfr_insert_ktables(struct pfr_ktableworkq *workq)
2224{
2225 struct pfr_ktable *p;
2226
2227 SLIST_FOREACH(p, workq, pfrkt_workq)
2228 pfr_insert_ktable(p);
2229}
2230
/*
 * Insert kt into the global table tree and bump the table count.
 * The first anchored reference to a root table also marks that root
 * as anchor-referenced.
 */
void
pfr_insert_ktable(struct pfr_ktable *kt)
{
	RB_INSERT(pfr_ktablehead, &pfr_ktables, kt);
	pfr_ktable_cnt++;
	if (kt->pfrkt_root != NULL)
		/* 0 -> 1 transition sets REFDANCHOR on the root */
		if (!kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR]++)
			pfr_setflags_ktable(kt->pfrkt_root,
			    kt->pfrkt_root->pfrkt_flags|PFR_TFLAG_REFDANCHOR);
}
2241
2242void
2243pfr_setflags_ktables(struct pfr_ktableworkq *workq)
2244{
2245 struct pfr_ktable *p, *q;
2246
2247 SLIST_FOREACH_SAFE(p, workq, pfrkt_workq, q) {
2248 pfr_setflags_ktable(p, p->pfrkt_nflags);
2249 }
2250}
2251
/*
 * Apply flag set newf to table kt.  A table that loses all of its
 * reference/persist reasons also loses ACTIVE; a table that ends up
 * with no SETMASK bits at all is removed from the tree and destroyed
 * (kt must not be used afterwards in that case).  Deactivation
 * flushes addresses, and leaving INACTIVE drops the shadow table.
 */
void
pfr_setflags_ktable(struct pfr_ktable *kt, int newf)
{
	struct pfr_kentryworkq	 addrq;

	/* no references and not persistent -> cannot stay active */
	if (!(newf & PFR_TFLAG_REFERENCED) &&
	    !(newf & PFR_TFLAG_REFDANCHOR) &&
	    !(newf & PFR_TFLAG_PERSIST))
		newf &= ~PFR_TFLAG_ACTIVE;
	if (!(newf & PFR_TFLAG_ACTIVE))
		newf &= ~PFR_TFLAG_USRMASK;
	if (!(newf & PFR_TFLAG_SETMASK)) {
		/* table is dead: unlink, release our root reference */
		RB_REMOVE(pfr_ktablehead, &pfr_ktables, kt);
		if (kt->pfrkt_root != NULL)
			/* 1 -> 0 transition clears REFDANCHOR on the root,
			 * which may in turn destroy it (recursion) */
			if (!--kt->pfrkt_root->pfrkt_refcnt[PFR_REFCNT_ANCHOR])
				pfr_setflags_ktable(kt->pfrkt_root,
				    kt->pfrkt_root->pfrkt_flags &
					~PFR_TFLAG_REFDANCHOR);
		pfr_destroy_ktable(kt, 1);
		pfr_ktable_cnt--;
		return;
	}
	if (!(newf & PFR_TFLAG_ACTIVE) && kt->pfrkt_cnt) {
		/* deactivated: flush all addresses */
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_remove_kentries(kt, &addrq);
	}
	if (!(newf & PFR_TFLAG_INACTIVE) && kt->pfrkt_shadow != NULL) {
		pfr_destroy_ktable(kt->pfrkt_shadow, 1);
		kt->pfrkt_shadow = NULL;
	}
	kt->pfrkt_flags = newf;
}
2284
2285void
2286pfr_clstats_ktables(struct pfr_ktableworkq *workq, time_t tzero, int recurse)
2287{
2288 struct pfr_ktable *p;
2289
2290 SLIST_FOREACH(p, workq, pfrkt_workq)
2291 pfr_clstats_ktable(p, tzero, recurse);
2292}
2293
/*
 * Clear the statistics of table kt and restart its timestamp.  With
 * recurse set, the statistics of all its addresses are cleared too.
 */
void
pfr_clstats_ktable(struct pfr_ktable *kt, time_t tzero, int recurse)
{
	struct pfr_kentryworkq	 addrq;

	if (recurse) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clstats_kentries(&addrq, tzero, 0);
	}
	bzero(kt->pfrkt_packets, sizeof(kt->pfrkt_packets));
	bzero(kt->pfrkt_bytes, sizeof(kt->pfrkt_bytes));
	kt->pfrkt_match = kt->pfrkt_nomatch = 0;
	kt->pfrkt_tzero = tzero;
}
2308
/*
 * Allocate and initialize a table from the pfr_ktable pool; `wait'
 * is PR_WAITOK or PR_NOWAIT.  With attachruleset, the table is bound
 * to its anchor's ruleset (requires PF_LOCK).  Returns NULL on
 * allocation or ruleset failure.
 */
struct pfr_ktable *
pfr_create_ktable(struct pfr_table *tbl, time_t tzero, int attachruleset,
    int wait)
{
	struct pfr_ktable	*kt;
	struct pf_ruleset	*rs;

	kt = pool_get(&pfr_ktable_pl, wait|PR_ZERO|PR_LIMITFAIL);
	if (kt == NULL)
		return (NULL);
	kt->pfrkt_t = *tbl;

	if (attachruleset) {
		PF_ASSERT_LOCKED();
		rs = pf_find_or_create_ruleset(tbl->pfrt_anchor);
		if (!rs) {
			pfr_destroy_ktable(kt, 0);
			return (NULL);
		}
		kt->pfrkt_rs = rs;
		rs->tables++;
	}

	/* one radix tree per address family */
	if (!rn_inithead((void **)&kt->pfrkt_ip4,
	    offsetof(struct sockaddr_in, sin_addr)) ||
	    !rn_inithead((void **)&kt->pfrkt_ip6,
	    offsetof(struct sockaddr_in6, sin6_addr))) {
		pfr_destroy_ktable(kt, 0);
		return (NULL);
	}
	kt->pfrkt_tzero = tzero;
	kt->pfrkt_refcntcost = 0;
	kt->pfrkt_gcdweight = 0;
	kt->pfrkt_maxweight = 1;

	return (kt);
}
2346
2347void
2348pfr_destroy_ktables(struct pfr_ktableworkq *workq, int flushaddr)
2349{
2350 struct pfr_ktable *p;
2351
2352 while ((p = SLIST_FIRST(workq)) != NULL) {
2353 SLIST_REMOVE_HEAD(workq, pfrkt_workq);
2354 pfr_destroy_ktable(p, flushaddr);
2355 }
2356}
2357
/*
 * Destroy preallocated, never-inserted tables from an auxiliary
 * queue.  These hold nothing but zeroed memory, so no lock is
 * required; the KASSERTs verify that assumption.
 */
void
pfr_destroy_ktables_aux(struct pfr_ktableworkq *auxq)
{
	struct pfr_ktable *p;

	while ((p = SLIST_FIRST(auxq)) != NULL) {
		SLIST_REMOVE_HEAD(auxq, pfrkt_workq);
		/*
		 * There must be no extra data (rules, shadow tables, ...)
		 * attached, because auxq holds just empty memory to be
		 * initialized. Therefore we can also be called with no lock.
		 */
		if (p->pfrkt_root != NULL) {
			KASSERT(p->pfrkt_root->pfrkt_rs == NULL);
			KASSERT(p->pfrkt_root->pfrkt_shadow == NULL);
			KASSERT(p->pfrkt_root->pfrkt_root == NULL);
			pfr_destroy_ktable(p->pfrkt_root, 0);
			p->pfrkt_root = NULL;
		}
		KASSERT(p->pfrkt_rs == NULL);
		KASSERT(p->pfrkt_shadow == NULL);
		pfr_destroy_ktable(p, 0);
	}
}
2382
/*
 * Free a table and everything hanging off it: optionally its
 * addresses (`flushaddr'), both radix heads, a shadow table, and
 * its ruleset accounting.
 */
void
pfr_destroy_ktable(struct pfr_ktable *kt, int flushaddr)
{
	struct pfr_kentryworkq addrq;

	if (flushaddr) {
		pfr_enqueue_addrs(kt, &addrq, NULL, 0);
		pfr_clean_node_mask(kt, &addrq);
		pfr_destroy_kentries(&addrq);
	}
	if (kt->pfrkt_ip4 != NULL)
		free(kt->pfrkt_ip4, M_RTABLE, sizeof(*kt->pfrkt_ip4));
	if (kt->pfrkt_ip6 != NULL)
		free(kt->pfrkt_ip6, M_RTABLE, sizeof(*kt->pfrkt_ip6));
	if (kt->pfrkt_shadow != NULL)
		pfr_destroy_ktable(kt->pfrkt_shadow, flushaddr);
	if (kt->pfrkt_rs != NULL) {
		/* drop the table count; may free an empty ruleset */
		kt->pfrkt_rs->tables--;
		pf_remove_if_empty_ruleset(kt->pfrkt_rs);
	}
	pool_put(&pfr_ktable_pl, kt);
}
2405
2406int
2407pfr_ktable_compare(struct pfr_ktable *p, struct pfr_ktable *q)
2408{
2409 int d;
2410
2411 if ((d = strncmp(p->pfrkt_name, q->pfrkt_name, PF_TABLE_NAME_SIZE)))
2412 return (d);
2413 return (strcmp(p->pfrkt_anchor, q->pfrkt_anchor));
2414}
2415
/*
 * Find the kernel table matching the name and anchor in `tbl',
 * or NULL if it does not exist.
 */
struct pfr_ktable *
pfr_lookup_table(struct pfr_table *tbl)
{
	/* struct pfr_ktable start like a struct pfr_table */
	return (RB_FIND(pfr_ktablehead, &pfr_ktables,
	    (struct pfr_ktable *)tbl));
}
2423
2424int
2425pfr_match_addr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af)
2426{
2427 struct pfr_kentry *ke = NULL;
2428 int match;
2429
2430 ke = pfr_kentry_byaddr(kt, a, af, 0);
2431
2432 match = (ke && !(ke->pfrke_flags & PFRKE_FLAG_NOT));
2433 if (match)
2434 kt->pfrkt_match++;
2435 else
2436 kt->pfrkt_nomatch++;
2437
2438 return (match);
2439}
2440
/*
 * Look up the table entry covering address `a'.  With `exact' only
 * a host (non-network) entry is returned.  Falls back to the active
 * root table; returns NULL when the table is inactive or nothing
 * matches.
 */
struct pfr_kentry *
pfr_kentry_byaddr(struct pfr_ktable *kt, struct pf_addr *a, sa_family_t af,
    int exact)
{
	struct pfr_kentry *ke = NULL;
	struct sockaddr_in tmp4;
#ifdef INET6
	struct sockaddr_in6 tmp6;
#endif /* INET6 */

	kt = pfr_ktable_select_active(kt);
	if (kt == NULL)
		return (0);

	/* build an on-stack sockaddr for the radix lookup */
	switch (af) {
	case AF_INET:
		bzero(&tmp4, sizeof(tmp4));
		tmp4.sin_len = sizeof(tmp4);
		tmp4.sin_family = AF_INET;
		tmp4.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&tmp4, kt->pfrkt_ip4);
		break;
#ifdef INET6
	case AF_INET6:
		bzero(&tmp6, sizeof(tmp6));
		tmp6.sin6_len = sizeof(tmp6);
		tmp6.sin6_family = AF_INET6;
		bcopy(a, &tmp6.sin6_addr, sizeof(tmp6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&tmp6, kt->pfrkt_ip6);
		break;
#endif /* INET6 */
	default:
		unhandled_af(af);
	}
	/* exact lookups must not return network entries */
	if (exact && ke && KENTRY_NETWORK(ke))
		ke = NULL;

	return (ke);
}
2480
/*
 * Account a packet of pd->tot_len bytes against table `kt': the
 * per-table packet/byte counters indexed by direction and operation,
 * and, if the table has PFR_TFLAG_COUNTERS, the per-entry counters
 * as well.  `notrule' reflects the rule's negated-table flag; when
 * the lookup result disagrees with it, the packet is counted as
 * XPASS instead.
 */
void
pfr_update_stats(struct pfr_ktable *kt, struct pf_addr *a, struct pf_pdesc *pd,
    int op, int notrule)
{
	struct pfr_kentry *ke = NULL;
	struct sockaddr_in tmp4;
#ifdef INET6
	struct sockaddr_in6 tmp6;
#endif /* INET6 */
	sa_family_t af = pd->af;
	u_int64_t len = pd->tot_len;
	int dir_idx = (pd->dir == PF_OUT);
	int op_idx;

	kt = pfr_ktable_select_active(kt);
	if (kt == NULL)
		return;

	/* build an on-stack sockaddr for the radix lookup */
	switch (af) {
	case AF_INET:
		bzero(&tmp4, sizeof(tmp4));
		tmp4.sin_len = sizeof(tmp4);
		tmp4.sin_family = AF_INET;
		tmp4.sin_addr.s_addr = a->addr32[0];
		ke = (struct pfr_kentry *)rn_match(&tmp4, kt->pfrkt_ip4);
		break;
#ifdef INET6
	case AF_INET6:
		bzero(&tmp6, sizeof(tmp6));
		tmp6.sin6_len = sizeof(tmp6);
		tmp6.sin6_family = AF_INET6;
		bcopy(a, &tmp6.sin6_addr, sizeof(tmp6.sin6_addr));
		ke = (struct pfr_kentry *)rn_match(&tmp6, kt->pfrkt_ip6);
		break;
#endif /* INET6 */
	default:
		unhandled_af(af);
	}

	/* map the pf verdict onto a table operation index */
	switch (op) {
	case PF_PASS:
		op_idx = PFR_OP_PASS;
		break;
	case PF_MATCH:
		op_idx = PFR_OP_MATCH;
		break;
	case PF_DROP:
		op_idx = PFR_OP_BLOCK;
		break;
	default:
		panic("unhandled op");
	}

	/* lookup result contradicts the rule's match -> count as XPASS */
	if ((ke == NULL || (ke->pfrke_flags & PFRKE_FLAG_NOT)) != notrule) {
		if (op_idx != PFR_OP_PASS)
			DPFPRINTF(LOG_DEBUG,
			    "pfr_update_stats: assertion failed.");
		op_idx = PFR_OP_XPASS;
	}
	kt->pfrkt_packets[dir_idx][op_idx]++;
	kt->pfrkt_bytes[dir_idx][op_idx] += len;
	if (ke != NULL && op_idx != PFR_OP_XPASS &&
	    (kt->pfrkt_flags & PFR_TFLAG_COUNTERS)) {
		/* per-entry counters are allocated lazily; failure is OK */
		if (ke->pfrke_counters == NULL)
			ke->pfrke_counters = pool_get(&pfr_kcounters_pl,
			    PR_NOWAIT | PR_ZERO);
		if (ke->pfrke_counters != NULL) {
			ke->pfrke_counters->pfrkc_packets[dir_idx][op_idx]++;
			ke->pfrke_counters->pfrkc_bytes[dir_idx][op_idx] += len;
		}
	}
}
2553
/*
 * Look up (or create and insert) the table `name' for use by a rule
 * in ruleset `rs'.  A table inside an anchor additionally gets a
 * root table of the same name in the main ruleset.  The rule
 * reference count is bumped; the first reference marks the table
 * REFERENCED.  Returns NULL on allocation failure.
 */
struct pfr_ktable *
pfr_attach_table(struct pf_ruleset *rs, char *name, int wait)
{
	struct pfr_ktable *kt, *rt;
	struct pfr_table tbl;
	struct pf_anchor *ac = rs->anchor;

	bzero(&tbl, sizeof(tbl));
	strlcpy(tbl.pfrt_name, name, sizeof(tbl.pfrt_name));
	if (ac != NULL)
		strlcpy(tbl.pfrt_anchor, ac->path, sizeof(tbl.pfrt_anchor));
	kt = pfr_lookup_table(&tbl);
	if (kt == NULL) {
		kt = pfr_create_ktable(&tbl, gettime(), 1, wait);
		if (kt == NULL)
			return (NULL);
		if (ac != NULL) {
			/* root table lives in the main ruleset (no anchor) */
			bzero(tbl.pfrt_anchor, sizeof(tbl.pfrt_anchor));
			rt = pfr_lookup_table(&tbl);
			if (rt == NULL) {
				rt = pfr_create_ktable(&tbl, 0, 1, wait);
				if (rt == NULL) {
					pfr_destroy_ktable(kt, 0);
					return (NULL);
				}
				pfr_insert_ktable(rt);
			}
			kt->pfrkt_root = rt;
		}
		pfr_insert_ktable(kt);
	}
	if (!kt->pfrkt_refcnt[PFR_REFCNT_RULE]++)
		pfr_setflags_ktable(kt, kt->pfrkt_flags|PFR_TFLAG_REFERENCED);
	return (kt);
}
2589
2590void
2591pfr_detach_table(struct pfr_ktable *kt)
2592{
2593 if (kt->pfrkt_refcnt[PFR_REFCNT_RULE] <= 0)
2594 DPFPRINTF(LOG_NOTICE, "pfr_detach_table: refcount = %d.",
2595 kt->pfrkt_refcnt[PFR_REFCNT_RULE]);
2596 else if (!--kt->pfrkt_refcnt[PFR_REFCNT_RULE])
2597 pfr_setflags_ktable(kt, kt->pfrkt_flags&~PFR_TFLAG_REFERENCED);
2598}
2599
2600int
2601pfr_islinklocal(sa_family_t af, struct pf_addr *addr)
2602{
2603#ifdef INET6
2604 if (af == AF_INET6 && IN6_IS_ADDR_LINKLOCAL(&addr->v6))
2605 return (1);
2606#endif /* INET6 */
2607 return (0);
2608}
2609
2610int
2611pfr_pool_get(struct pf_pool *rpool, struct pf_addr **raddr,
2612 struct pf_addr **rmask, sa_family_t af)
2613{
2614 struct pfr_ktable *kt;
2615 struct pfr_kentry *ke, *ke2;
2616 struct pf_addr *addr, *counter;
2617 union sockaddr_union mask;
2618 struct sockaddr_in tmp4;
2619#ifdef INET6
2620 struct sockaddr_in6 tmp6;
2621#endif
2622 int startidx, idx = -1, loop = 0, use_counter = 0;
2623
2624 switch (af) {
2625 case AF_INET:
2626 bzero(&tmp4, sizeof(tmp4));
2627 tmp4.sin_len = sizeof(tmp4);
2628 tmp4.sin_family = AF_INET;
2629 addr = (struct pf_addr *)&tmp4.sin_addr;
2630 break;
2631#ifdef INET6
2632 case AF_INET6:
2633 bzero(&tmp6, sizeof(tmp6));
2634 tmp6.sin6_len = sizeof(tmp6);
2635 tmp6.sin6_family = AF_INET6;
2636 addr = (struct pf_addr *)&tmp6.sin6_addr;
2637 break;
2638#endif /* INET6 */
2639 default:
2640 unhandled_af(af);
2641 }
2642
2643 if (rpool->addr.type == PF_ADDR_TABLE)
2644 kt = rpool->addr.p.tbl;
2645 else if (rpool->addr.type == PF_ADDR_DYNIFTL)
2646 kt = rpool->addr.p.dyn->pfid_kt;
2647 else
2648 return (-1);
2649 kt = pfr_ktable_select_active(kt);
2650 if (kt == NULL)
2651 return (-1);
2652
2653 counter = &rpool->counter;
2654 idx = rpool->tblidx;
2655 if (idx < 0 || idx >= kt->pfrkt_cnt)
2656 idx = 0;
2657 else
2658 use_counter = 1;
2659 startidx = idx;
2660
2661 _next_block:
2662 if (loop && startidx == idx) {
2663 kt->pfrkt_nomatch++;
2664 return (1);
2665 }
2666
2667 ke = pfr_kentry_byidx(kt, idx, af);
2668 if (ke == NULL) {
2669 /* we don't have this idx, try looping */
2670 if (loop || (ke = pfr_kentry_byidx(kt, 0, af)) == NULL) {
2671 kt->pfrkt_nomatch++;
2672 return (1);
2673 }
2674 idx = 0;
2675 loop++;
2676 }
2677
2678 /* Get current weight for weighted round-robin */
2679 if (idx == 0 && use_counter == 1 && kt->pfrkt_refcntcost > 0) {
2680 rpool->curweight = rpool->curweight - kt->pfrkt_gcdweight;
2681
2682 if (rpool->curweight < 1)
2683 rpool->curweight = kt->pfrkt_maxweight;
2684 }
2685
2686 pfr_prepare_network(&pfr_mask, af, ke->pfrke_net);
2687 *raddr = SUNION2PF(&ke->pfrke_sa, af);
2688 *rmask = SUNION2PF(&pfr_mask, af);
2689
2690 if (use_counter && !PF_AZERO(counter, af)) {
2691 /* is supplied address within block? */
2692 if (!pf_match_addr(0, *raddr, *rmask, counter, af)) {
2693 /* no, go to next block in table */
2694 idx++;
2695 use_counter = 0;
2696 goto _next_block;
2697 }
2698 pf_addrcpy(addr, counter, af);
2699 } else {
2700 /* use first address of block */
2701 pf_addrcpy(addr, *raddr, af);
2702 }
2703
2704 if (!KENTRY_NETWORK(ke)) {
2705 /* this is a single IP address - no possible nested block */
2706 if (rpool->addr.type == PF_ADDR_DYNIFTL &&
2707 pfr_islinklocal(af, addr)) {
2708 idx++;
2709 goto _next_block;
2710 }
2711 pf_addrcpy(counter, addr, af);
2712 rpool->tblidx = idx;
2713 kt->pfrkt_match++;
2714 rpool->states = 0;
2715 if (ke->pfrke_counters != NULL)
2716 rpool->states = ke->pfrke_counters->states;
2717 switch (ke->pfrke_type) {
2718 case PFRKE_COST:
2719 rpool->weight = ((struct pfr_kentry_cost *)ke)->weight;
2720 /* FALLTHROUGH */
2721 case PFRKE_ROUTE:
2722 rpool->kif = ((struct pfr_kentry_route *)ke)->kif;
2723 break;
2724 default:
2725 rpool->weight = 1;
2726 break;
2727 }
2728 return (0);
2729 }
2730 for (;;) {
2731 /* we don't want to use a nested block */
2732 switch (af) {
2733 case AF_INET:
2734 ke2 = (struct pfr_kentry *)rn_match(&tmp4,
2735 kt->pfrkt_ip4);
2736 break;
2737#ifdef INET6
2738 case AF_INET6:
2739 ke2 = (struct pfr_kentry *)rn_match(&tmp6,
2740 kt->pfrkt_ip6);
2741 break;
2742#endif /* INET6 */
2743 default:
2744 unhandled_af(af);
2745 }
2746 if (ke2 == ke) {
2747 /* lookup return the same block - perfect */
2748 if (rpool->addr.type == PF_ADDR_DYNIFTL &&
2749 pfr_islinklocal(af, addr))
2750 goto _next_entry;
2751 pf_addrcpy(counter, addr, af);
2752 rpool->tblidx = idx;
2753 kt->pfrkt_match++;
2754 rpool->states = 0;
2755 if (ke->pfrke_counters != NULL)
2756 rpool->states = ke->pfrke_counters->states;
2757 switch (ke->pfrke_type) {
2758 case PFRKE_COST:
2759 rpool->weight =
2760 ((struct pfr_kentry_cost *)ke)->weight;
2761 /* FALLTHROUGH */
2762 case PFRKE_ROUTE:
2763 rpool->kif = ((struct pfr_kentry_route *)ke)->kif;
2764 break;
2765 default:
2766 rpool->weight = 1;
2767 break;
2768 }
2769 return (0);
2770 }
2771_next_entry:
2772 /* we need to increase the counter past the nested block */
2773 pfr_prepare_network(&mask, AF_INET, ke2->pfrke_net);
2774 pf_poolmask(addr, addr, SUNION2PF(&mask, af), &pfr_ffaddr, af);
2775 pf_addr_inc(addr, af);
2776 if (!pf_match_addr(0, *raddr, *rmask, addr, af)) {
2777 /* ok, we reached the end of our main block */
2778 /* go to next block in table */
2779 idx++;
2780 use_counter = 0;
2781 goto _next_block;
2782 }
2783 }
2784}
2785
2786struct pfr_kentry *
2787pfr_kentry_byidx(struct pfr_ktable *kt, int idx, int af)
2788{
2789 struct pfr_walktree w;
2790
2791 bzero(&w, sizeof(w));
2792 w.pfrw_op = PFRW_POOL_GET;
2793 w.pfrw_cnt = idx;
2794
2795 switch (af) {
2796 case AF_INET:
2797 rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
2798 return (w.pfrw_kentry);
2799#ifdef INET6
2800 case AF_INET6:
2801 rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
2802 return (w.pfrw_kentry);
2803#endif /* INET6 */
2804 default:
2805 return (NULL);
2806 }
2807}
2808
2809/* Added for load balancing state counter use. */
2810int
2811pfr_states_increase(struct pfr_ktable *kt, struct pf_addr *addr, int af)
2812{
2813 struct pfr_kentry *ke;
2814
2815 ke = pfr_kentry_byaddr(kt, addr, af, 1);
2816 if (ke == NULL)
2817 return (-1);
2818
2819 if (ke->pfrke_counters == NULL)
2820 ke->pfrke_counters = pool_get(&pfr_kcounters_pl,
2821 PR_NOWAIT | PR_ZERO);
2822 if (ke->pfrke_counters == NULL)
2823 return (-1);
2824
2825 ke->pfrke_counters->states++;
2826 return ke->pfrke_counters->states;
2827}
2828
2829/* Added for load balancing state counter use. */
2830int
2831pfr_states_decrease(struct pfr_ktable *kt, struct pf_addr *addr, int af)
2832{
2833 struct pfr_kentry *ke;
2834
2835 ke = pfr_kentry_byaddr(kt, addr, af, 1);
2836 if (ke == NULL)
2837 return (-1);
2838
2839 if (ke->pfrke_counters == NULL)
2840 ke->pfrke_counters = pool_get(&pfr_kcounters_pl,
2841 PR_NOWAIT | PR_ZERO);
2842 if (ke->pfrke_counters == NULL)
2843 return (-1);
2844
2845 if (ke->pfrke_counters->states > 0)
2846 ke->pfrke_counters->states--;
2847 else
2848 DPFPRINTF(LOG_DEBUG,
2849 "pfr_states_decrease: states-- when states <= 0");
2850
2851 return ke->pfrke_counters->states;
2852}
2853
/*
 * Recount the addresses of table `kt' into the dynamic-address state
 * `dyn', walking the radix tree(s) matching its address family.
 * AF_UNSPEC walks both trees (the v6 head exists even without INET6,
 * so the unconditional walk is safe).
 */
void
pfr_dynaddr_update(struct pfr_ktable *kt, struct pfi_dynaddr *dyn)
{
	struct pfr_walktree w;

	bzero(&w, sizeof(w));
	w.pfrw_op = PFRW_DYNADDR_UPDATE;
	w.pfrw_dyn = dyn;

	/* the walk re-derives the counts from scratch */
	dyn->pfid_acnt4 = 0;
	dyn->pfid_acnt6 = 0;
	switch (dyn->pfid_af) {
	case AF_UNSPEC:	/* look up all both addresses IPv4 + IPv6 */
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
		break;
	case AF_INET:
		rn_walktree(kt->pfrkt_ip4, pfr_walktree, &w);
		break;
#ifdef INET6
	case AF_INET6:
		rn_walktree(kt->pfrkt_ip6, pfr_walktree, &w);
		break;
#endif /* INET6 */
	default:
		unhandled_af(dyn->pfid_af);
	}
}
2882
2883void
2884pfr_ktable_winfo_update(struct pfr_ktable *kt, struct pfr_kentry *p) {
2885 /*
2886 * If cost flag is set,
2887 * gcdweight is needed for round-robin.
2888 */
2889 if (kt->pfrkt_refcntcost > 0) {
2890 u_int16_t weight;
2891
2892 weight = (p->pfrke_type == PFRKE_COST) ?
2893 ((struct pfr_kentry_cost *)p)->weight : 1;
2894
2895 if (kt->pfrkt_gcdweight == 0)
2896 kt->pfrkt_gcdweight = weight;
2897
2898 kt->pfrkt_gcdweight =
2899 pfr_gcd(weight, kt->pfrkt_gcdweight);
2900
2901 if (kt->pfrkt_maxweight < weight)
2902 kt->pfrkt_maxweight = weight;
2903 }
2904}
2905
2906struct pfr_ktable *
2907pfr_ktable_select_active(struct pfr_ktable *kt)
2908{
2909 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
2910 kt = kt->pfrkt_root;
2911 if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE))
2912 return (NULL);
2913
2914 return (kt);
2915}