Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Revert "netfilter: xt_quota: fix the behavior of xt_quota module"

This reverts commit e9837e55b0200da544a095a1fca36efd7fd3ba30.

When talking to Maze and Chenbo, we agreed to hold this back for now
due to problems in the ruleset listing path with 32-bit arches.

Signed-off-by: Maciej Żenczykowski <maze@google.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>

+35 -26
+3 -5
include/uapi/linux/netfilter/xt_quota.h
··· 15 15 __u32 flags; 16 16 __u32 pad; 17 17 __aligned_u64 quota; 18 - #ifdef __KERNEL__ 19 - atomic64_t counter; 20 - #else 21 - __aligned_u64 remain; 22 - #endif 18 + 19 + /* Used internally by the kernel */ 20 + struct xt_quota_priv *master; 23 21 }; 24 22 25 23 #endif /* _XT_QUOTA_H */
+32 -21
net/netfilter/xt_quota.c
··· 11 11 #include <linux/netfilter/xt_quota.h> 12 12 #include <linux/module.h> 13 13 14 + struct xt_quota_priv { 15 + spinlock_t lock; 16 + uint64_t quota; 17 + }; 18 + 14 19 MODULE_LICENSE("GPL"); 15 20 MODULE_AUTHOR("Sam Johnston <samj@samj.net>"); 16 21 MODULE_DESCRIPTION("Xtables: countdown quota match"); ··· 26 21 quota_mt(const struct sk_buff *skb, struct xt_action_param *par) 27 22 { 28 23 struct xt_quota_info *q = (void *)par->matchinfo; 29 - u64 current_count = atomic64_read(&q->counter); 24 + struct xt_quota_priv *priv = q->master; 30 25 bool ret = q->flags & XT_QUOTA_INVERT; 31 - u64 old_count, new_count; 32 26 33 - do { 34 - if (current_count == 1) 35 - return ret; 36 - if (current_count <= skb->len) { 37 - atomic64_set(&q->counter, 1); 38 - return ret; 39 - } 40 - old_count = current_count; 41 - new_count = current_count - skb->len; 42 - current_count = atomic64_cmpxchg(&q->counter, old_count, 43 - new_count); 44 - } while (current_count != old_count); 45 - return !ret; 27 + spin_lock_bh(&priv->lock); 28 + if (priv->quota >= skb->len) { 29 + priv->quota -= skb->len; 30 + ret = !ret; 31 + } else { 32 + /* we do not allow even small packets from now on */ 33 + priv->quota = 0; 34 + } 35 + spin_unlock_bh(&priv->lock); 36 + 37 + return ret; 46 38 } 47 39 48 40 static int quota_mt_check(const struct xt_mtchk_param *par) 49 41 { 50 42 struct xt_quota_info *q = par->matchinfo; 51 43 52 - BUILD_BUG_ON(sizeof(atomic64_t) != sizeof(__u64)); 53 - 54 44 if (q->flags & ~XT_QUOTA_MASK) 55 45 return -EINVAL; 56 - if (atomic64_read(&q->counter) > q->quota + 1) 57 - return -ERANGE; 58 46 59 - if (atomic64_read(&q->counter) == 0) 60 - atomic64_set(&q->counter, q->quota + 1); 47 + q->master = kmalloc(sizeof(*q->master), GFP_KERNEL); 48 + if (q->master == NULL) 49 + return -ENOMEM; 50 + 51 + spin_lock_init(&q->master->lock); 52 + q->master->quota = q->quota; 61 53 return 0; 54 + } 55 + 56 + static void quota_mt_destroy(const struct xt_mtdtor_param *par) 57 + { 58 + const struct xt_quota_info *q = par->matchinfo; 59 + 60 + kfree(q->master); 62 61 }