Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Tags: kernel, os, linux

netfilter: ipset: move functions to ip_set_core.c.

Several inline functions in ip_set.h are only called in ip_set_core.c:
move them and remove inline function specifier.

Signed-off-by: Jeremy Sowden <jeremy@azazel.net>
Acked-by: Jozsef Kadlecsik <kadlec@netfilter.org>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>

Authored by Jeremy Sowden and committed by Pablo Neira Ayuso.
Abbreviated commit IDs (as captured from the page): 2398a976, 94177f6e

Overall diffstat: +102 −102
include/linux/netfilter/ipset/ip_set.h: −102 lines
··· 508 508 *timeout = t; 509 509 } 510 510 511 - static inline u32 512 - ip_set_timeout_get(const unsigned long *timeout) 513 - { 514 - u32 t; 515 - 516 - if (*timeout == IPSET_ELEM_PERMANENT) 517 - return 0; 518 - 519 - t = jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC; 520 - /* Zero value in userspace means no timeout */ 521 - return t == 0 ? 1 : t; 522 - } 523 - 524 511 void ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment, 525 512 const struct ip_set_ext *ext); 526 - 527 - static inline void 528 - ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter) 529 - { 530 - atomic64_add((long long)bytes, &(counter)->bytes); 531 - } 532 - 533 - static inline void 534 - ip_set_add_packets(u64 packets, struct ip_set_counter *counter) 535 - { 536 - atomic64_add((long long)packets, &(counter)->packets); 537 - } 538 - 539 - static inline u64 540 - ip_set_get_bytes(const struct ip_set_counter *counter) 541 - { 542 - return (u64)atomic64_read(&(counter)->bytes); 543 - } 544 - 545 - static inline u64 546 - ip_set_get_packets(const struct ip_set_counter *counter) 547 - { 548 - return (u64)atomic64_read(&(counter)->packets); 549 - } 550 - 551 - static inline bool 552 - ip_set_match_counter(u64 counter, u64 match, u8 op) 553 - { 554 - switch (op) { 555 - case IPSET_COUNTER_NONE: 556 - return true; 557 - case IPSET_COUNTER_EQ: 558 - return counter == match; 559 - case IPSET_COUNTER_NE: 560 - return counter != match; 561 - case IPSET_COUNTER_LT: 562 - return counter < match; 563 - case IPSET_COUNTER_GT: 564 - return counter > match; 565 - } 566 - return false; 567 - } 568 - 569 - static inline void 570 - ip_set_update_counter(struct ip_set_counter *counter, 571 - const struct ip_set_ext *ext, u32 flags) 572 - { 573 - if (ext->packets != ULLONG_MAX && 574 - !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) { 575 - ip_set_add_bytes(ext->bytes, counter); 576 - ip_set_add_packets(ext->packets, counter); 577 - } 578 - } 579 - 580 - static inline bool 581 - 
ip_set_put_counter(struct sk_buff *skb, const struct ip_set_counter *counter) 582 - { 583 - return nla_put_net64(skb, IPSET_ATTR_BYTES, 584 - cpu_to_be64(ip_set_get_bytes(counter)), 585 - IPSET_ATTR_PAD) || 586 - nla_put_net64(skb, IPSET_ATTR_PACKETS, 587 - cpu_to_be64(ip_set_get_packets(counter)), 588 - IPSET_ATTR_PAD); 589 - } 590 513 591 514 static inline void 592 515 ip_set_init_counter(struct ip_set_counter *counter, ··· 519 596 atomic64_set(&(counter)->bytes, (long long)(ext->bytes)); 520 597 if (ext->packets != ULLONG_MAX) 521 598 atomic64_set(&(counter)->packets, (long long)(ext->packets)); 522 - } 523 - 524 - static inline void 525 - ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo, 526 - const struct ip_set_ext *ext, 527 - struct ip_set_ext *mext, u32 flags) 528 - { 529 - mext->skbinfo = *skbinfo; 530 - } 531 - 532 - static inline bool 533 - ip_set_put_skbinfo(struct sk_buff *skb, const struct ip_set_skbinfo *skbinfo) 534 - { 535 - /* Send nonzero parameters only */ 536 - return ((skbinfo->skbmark || skbinfo->skbmarkmask) && 537 - nla_put_net64(skb, IPSET_ATTR_SKBMARK, 538 - cpu_to_be64((u64)skbinfo->skbmark << 32 | 539 - skbinfo->skbmarkmask), 540 - IPSET_ATTR_PAD)) || 541 - (skbinfo->skbprio && 542 - nla_put_net32(skb, IPSET_ATTR_SKBPRIO, 543 - cpu_to_be32(skbinfo->skbprio))) || 544 - (skbinfo->skbqueue && 545 - nla_put_net16(skb, IPSET_ATTR_SKBQUEUE, 546 - cpu_to_be16(skbinfo->skbqueue))); 547 599 } 548 600 549 601 static inline void
net/netfilter/ipset/ip_set_core.c: +102 lines
··· 325 325 } 326 326 EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6); 327 327 328 + static u32 329 + ip_set_timeout_get(const unsigned long *timeout) 330 + { 331 + u32 t; 332 + 333 + if (*timeout == IPSET_ELEM_PERMANENT) 334 + return 0; 335 + 336 + t = jiffies_to_msecs(*timeout - jiffies) / MSEC_PER_SEC; 337 + /* Zero value in userspace means no timeout */ 338 + return t == 0 ? 1 : t; 339 + } 340 + 328 341 static char * 329 342 ip_set_comment_uget(struct nlattr *tb) 330 343 { ··· 523 510 } 524 511 EXPORT_SYMBOL_GPL(ip_set_get_extensions); 525 512 513 + static u64 514 + ip_set_get_bytes(const struct ip_set_counter *counter) 515 + { 516 + return (u64)atomic64_read(&(counter)->bytes); 517 + } 518 + 519 + static u64 520 + ip_set_get_packets(const struct ip_set_counter *counter) 521 + { 522 + return (u64)atomic64_read(&(counter)->packets); 523 + } 524 + 525 + static bool 526 + ip_set_put_counter(struct sk_buff *skb, const struct ip_set_counter *counter) 527 + { 528 + return nla_put_net64(skb, IPSET_ATTR_BYTES, 529 + cpu_to_be64(ip_set_get_bytes(counter)), 530 + IPSET_ATTR_PAD) || 531 + nla_put_net64(skb, IPSET_ATTR_PACKETS, 532 + cpu_to_be64(ip_set_get_packets(counter)), 533 + IPSET_ATTR_PAD); 534 + } 535 + 536 + static bool 537 + ip_set_put_skbinfo(struct sk_buff *skb, const struct ip_set_skbinfo *skbinfo) 538 + { 539 + /* Send nonzero parameters only */ 540 + return ((skbinfo->skbmark || skbinfo->skbmarkmask) && 541 + nla_put_net64(skb, IPSET_ATTR_SKBMARK, 542 + cpu_to_be64((u64)skbinfo->skbmark << 32 | 543 + skbinfo->skbmarkmask), 544 + IPSET_ATTR_PAD)) || 545 + (skbinfo->skbprio && 546 + nla_put_net32(skb, IPSET_ATTR_SKBPRIO, 547 + cpu_to_be32(skbinfo->skbprio))) || 548 + (skbinfo->skbqueue && 549 + nla_put_net16(skb, IPSET_ATTR_SKBQUEUE, 550 + cpu_to_be16(skbinfo->skbqueue))); 551 + } 552 + 526 553 int 527 554 ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set, 528 555 const void *e, bool active) ··· 587 534 return 0; 588 535 } 589 536 
EXPORT_SYMBOL_GPL(ip_set_put_extensions); 537 + 538 + static bool 539 + ip_set_match_counter(u64 counter, u64 match, u8 op) 540 + { 541 + switch (op) { 542 + case IPSET_COUNTER_NONE: 543 + return true; 544 + case IPSET_COUNTER_EQ: 545 + return counter == match; 546 + case IPSET_COUNTER_NE: 547 + return counter != match; 548 + case IPSET_COUNTER_LT: 549 + return counter < match; 550 + case IPSET_COUNTER_GT: 551 + return counter > match; 552 + } 553 + return false; 554 + } 555 + 556 + static void 557 + ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter) 558 + { 559 + atomic64_add((long long)bytes, &(counter)->bytes); 560 + } 561 + 562 + static void 563 + ip_set_add_packets(u64 packets, struct ip_set_counter *counter) 564 + { 565 + atomic64_add((long long)packets, &(counter)->packets); 566 + } 567 + 568 + static void 569 + ip_set_update_counter(struct ip_set_counter *counter, 570 + const struct ip_set_ext *ext, u32 flags) 571 + { 572 + if (ext->packets != ULLONG_MAX && 573 + !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) { 574 + ip_set_add_bytes(ext->bytes, counter); 575 + ip_set_add_packets(ext->packets, counter); 576 + } 577 + } 578 + 579 + static void 580 + ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo, 581 + const struct ip_set_ext *ext, 582 + struct ip_set_ext *mext, u32 flags) 583 + { 584 + mext->skbinfo = *skbinfo; 585 + } 590 586 591 587 bool 592 588 ip_set_match_extensions(struct ip_set *set, const struct ip_set_ext *ext,