Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PKT_SCHED]: Kill pkt_act.h inlining.

This was simply making templates of functions and mostly causing a lot
of code duplication in the classifier action modules.

We solve this more cleanly by having a common "struct tcf_common" that
the hash worker functions, implemented once in act_api.c, can work with.

Callers work with real action objects that have the common struct
plus their module specific struct members. You go from a common
object to the higher level one using a "to_foo()" macro which makes
use of container_of() to do the dirty work.

This also kills off act_generic.h, which was only used by act_simple.c;
keeping it around was more work than it was worth.

Signed-off-by: David S. Miller <davem@davemloft.net>

+1058 -1144
+88 -48
include/net/act_api.h
··· 8 8 #include <net/sch_generic.h> 9 9 #include <net/pkt_sched.h> 10 10 11 - #define tca_gen(name) \ 12 - struct tcf_##name *next; \ 13 - u32 index; \ 14 - int refcnt; \ 15 - int bindcnt; \ 16 - u32 capab; \ 17 - int action; \ 18 - struct tcf_t tm; \ 19 - struct gnet_stats_basic bstats; \ 20 - struct gnet_stats_queue qstats; \ 21 - struct gnet_stats_rate_est rate_est; \ 22 - spinlock_t *stats_lock; \ 23 - spinlock_t lock 24 - 25 - struct tcf_police 26 - { 27 - tca_gen(police); 28 - int result; 29 - u32 ewma_rate; 30 - u32 burst; 31 - u32 mtu; 32 - u32 toks; 33 - u32 ptoks; 34 - psched_time_t t_c; 35 - struct qdisc_rate_table *R_tab; 36 - struct qdisc_rate_table *P_tab; 11 + struct tcf_common { 12 + struct tcf_common *tcfc_next; 13 + u32 tcfc_index; 14 + int tcfc_refcnt; 15 + int tcfc_bindcnt; 16 + u32 tcfc_capab; 17 + int tcfc_action; 18 + struct tcf_t tcfc_tm; 19 + struct gnet_stats_basic tcfc_bstats; 20 + struct gnet_stats_queue tcfc_qstats; 21 + struct gnet_stats_rate_est tcfc_rate_est; 22 + spinlock_t *tcfc_stats_lock; 23 + spinlock_t tcfc_lock; 37 24 }; 25 + #define tcf_next common.tcfc_next 26 + #define tcf_index common.tcfc_index 27 + #define tcf_refcnt common.tcfc_refcnt 28 + #define tcf_bindcnt common.tcfc_bindcnt 29 + #define tcf_capab common.tcfc_capab 30 + #define tcf_action common.tcfc_action 31 + #define tcf_tm common.tcfc_tm 32 + #define tcf_bstats common.tcfc_bstats 33 + #define tcf_qstats common.tcfc_qstats 34 + #define tcf_rate_est common.tcfc_rate_est 35 + #define tcf_stats_lock common.tcfc_stats_lock 36 + #define tcf_lock common.tcfc_lock 37 + 38 + struct tcf_police { 39 + struct tcf_common common; 40 + int tcfp_result; 41 + u32 tcfp_ewma_rate; 42 + u32 tcfp_burst; 43 + u32 tcfp_mtu; 44 + u32 tcfp_toks; 45 + u32 tcfp_ptoks; 46 + psched_time_t tcfp_t_c; 47 + struct qdisc_rate_table *tcfp_R_tab; 48 + struct qdisc_rate_table *tcfp_P_tab; 49 + }; 50 + #define to_police(pc) \ 51 + container_of(pc, struct tcf_police, common) 52 + 53 + struct 
tcf_hashinfo { 54 + struct tcf_common **htab; 55 + unsigned int hmask; 56 + rwlock_t *lock; 57 + }; 58 + 59 + static inline unsigned int tcf_hash(u32 index, unsigned int hmask) 60 + { 61 + return index & hmask; 62 + } 38 63 39 64 #ifdef CONFIG_NET_CLS_ACT 40 65 41 66 #define ACT_P_CREATED 1 42 67 #define ACT_P_DELETED 1 43 68 44 - struct tcf_act_hdr 45 - { 46 - tca_gen(act_hdr); 69 + struct tcf_act_hdr { 70 + struct tcf_common common; 47 71 }; 48 72 49 - struct tc_action 50 - { 51 - void *priv; 52 - struct tc_action_ops *ops; 53 - __u32 type; /* for backward compat(TCA_OLD_COMPAT) */ 54 - __u32 order; 55 - struct tc_action *next; 73 + struct tc_action { 74 + void *priv; 75 + struct tc_action_ops *ops; 76 + __u32 type; /* for backward compat(TCA_OLD_COMPAT) */ 77 + __u32 order; 78 + struct tc_action *next; 56 79 }; 57 80 58 81 #define TCA_CAP_NONE 0 59 - struct tc_action_ops 60 - { 82 + struct tc_action_ops { 61 83 struct tc_action_ops *next; 84 + struct tcf_hashinfo *hinfo; 62 85 char kind[IFNAMSIZ]; 63 86 __u32 type; /* TBD to match kind */ 64 87 __u32 capab; /* capabilities includes 4 bit version */ 65 88 struct module *owner; 66 89 int (*act)(struct sk_buff *, struct tc_action *, struct tcf_result *); 67 90 int (*get_stats)(struct sk_buff *, struct tc_action *); 68 - int (*dump)(struct sk_buff *, struct tc_action *,int , int); 91 + int (*dump)(struct sk_buff *, struct tc_action *, int, int); 69 92 int (*cleanup)(struct tc_action *, int bind); 70 - int (*lookup)(struct tc_action *, u32 ); 71 - int (*init)(struct rtattr *,struct rtattr *,struct tc_action *, int , int ); 72 - int (*walk)(struct sk_buff *, struct netlink_callback *, int , struct tc_action *); 93 + int (*lookup)(struct tc_action *, u32); 94 + int (*init)(struct rtattr *, struct rtattr *, struct tc_action *, int , int); 95 + int (*walk)(struct sk_buff *, struct netlink_callback *, int, struct tc_action *); 73 96 }; 97 + 98 + extern struct tcf_common *tcf_hash_lookup(u32 index, 99 + struct tcf_hashinfo 
*hinfo); 100 + extern void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo); 101 + extern int tcf_hash_release(struct tcf_common *p, int bind, 102 + struct tcf_hashinfo *hinfo); 103 + extern int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb, 104 + int type, struct tc_action *a); 105 + extern u32 tcf_hash_new_index(u32 *idx_gen, struct tcf_hashinfo *hinfo); 106 + extern int tcf_hash_search(struct tc_action *a, u32 index); 107 + extern struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a, 108 + int bind, struct tcf_hashinfo *hinfo); 109 + extern struct tcf_common *tcf_hash_create(u32 index, struct rtattr *est, 110 + struct tc_action *a, int size, 111 + int bind, u32 *idx_gen, 112 + struct tcf_hashinfo *hinfo); 113 + extern void tcf_hash_insert(struct tcf_common *p, struct tcf_hashinfo *hinfo); 74 114 75 115 extern int tcf_register_action(struct tc_action_ops *a); 76 116 extern int tcf_unregister_action(struct tc_action_ops *a); ··· 136 96 int ret = 0; 137 97 #ifdef CONFIG_NET_CLS_ACT 138 98 if (p) { 139 - if (bind) { 140 - p->bindcnt--; 141 - } 142 - p->refcnt--; 143 - if (p->refcnt <= 0 && !p->bindcnt) { 99 + if (bind) 100 + p->tcf_bindcnt--; 101 + 102 + p->tcf_refcnt--; 103 + if (p->tcf_refcnt <= 0 && !p->tcf_bindcnt) { 144 104 tcf_police_destroy(p); 145 105 ret = 1; 146 106 } 147 107 } 148 108 #else 149 - if (p && --p->refcnt == 0) 109 + if (p && --p->tcf_refcnt == 0) 150 110 tcf_police_destroy(p); 151 111 152 112 #endif /* CONFIG_NET_CLS_ACT */
-142
include/net/act_generic.h
··· 1 - /* 2 - * include/net/act_generic.h 3 - * 4 - */ 5 - #ifndef _NET_ACT_GENERIC_H 6 - #define _NET_ACT_GENERIC_H 7 - static inline int tcf_defact_release(struct tcf_defact *p, int bind) 8 - { 9 - int ret = 0; 10 - if (p) { 11 - if (bind) { 12 - p->bindcnt--; 13 - } 14 - p->refcnt--; 15 - if (p->bindcnt <= 0 && p->refcnt <= 0) { 16 - kfree(p->defdata); 17 - tcf_hash_destroy(p); 18 - ret = 1; 19 - } 20 - } 21 - return ret; 22 - } 23 - 24 - static inline int 25 - alloc_defdata(struct tcf_defact *p, u32 datalen, void *defdata) 26 - { 27 - p->defdata = kmalloc(datalen, GFP_KERNEL); 28 - if (p->defdata == NULL) 29 - return -ENOMEM; 30 - p->datalen = datalen; 31 - memcpy(p->defdata, defdata, datalen); 32 - return 0; 33 - } 34 - 35 - static inline int 36 - realloc_defdata(struct tcf_defact *p, u32 datalen, void *defdata) 37 - { 38 - /* safer to be just brute force for now */ 39 - kfree(p->defdata); 40 - return alloc_defdata(p, datalen, defdata); 41 - } 42 - 43 - static inline int 44 - tcf_defact_init(struct rtattr *rta, struct rtattr *est, 45 - struct tc_action *a, int ovr, int bind) 46 - { 47 - struct rtattr *tb[TCA_DEF_MAX]; 48 - struct tc_defact *parm; 49 - struct tcf_defact *p; 50 - void *defdata; 51 - u32 datalen = 0; 52 - int ret = 0; 53 - 54 - if (rta == NULL || rtattr_parse_nested(tb, TCA_DEF_MAX, rta) < 0) 55 - return -EINVAL; 56 - 57 - if (tb[TCA_DEF_PARMS - 1] == NULL || 58 - RTA_PAYLOAD(tb[TCA_DEF_PARMS - 1]) < sizeof(*parm)) 59 - return -EINVAL; 60 - 61 - parm = RTA_DATA(tb[TCA_DEF_PARMS - 1]); 62 - defdata = RTA_DATA(tb[TCA_DEF_DATA - 1]); 63 - if (defdata == NULL) 64 - return -EINVAL; 65 - 66 - datalen = RTA_PAYLOAD(tb[TCA_DEF_DATA - 1]); 67 - if (datalen <= 0) 68 - return -EINVAL; 69 - 70 - p = tcf_hash_check(parm->index, a, ovr, bind); 71 - if (p == NULL) { 72 - p = tcf_hash_create(parm->index, est, a, sizeof(*p), ovr, bind); 73 - if (p == NULL) 74 - return -ENOMEM; 75 - 76 - ret = alloc_defdata(p, datalen, defdata); 77 - if (ret < 0) { 78 - kfree(p); 
79 - return ret; 80 - } 81 - ret = ACT_P_CREATED; 82 - } else { 83 - if (!ovr) { 84 - tcf_defact_release(p, bind); 85 - return -EEXIST; 86 - } 87 - realloc_defdata(p, datalen, defdata); 88 - } 89 - 90 - spin_lock_bh(&p->lock); 91 - p->action = parm->action; 92 - spin_unlock_bh(&p->lock); 93 - if (ret == ACT_P_CREATED) 94 - tcf_hash_insert(p); 95 - return ret; 96 - } 97 - 98 - static inline int tcf_defact_cleanup(struct tc_action *a, int bind) 99 - { 100 - struct tcf_defact *p = PRIV(a, defact); 101 - 102 - if (p != NULL) 103 - return tcf_defact_release(p, bind); 104 - return 0; 105 - } 106 - 107 - static inline int 108 - tcf_defact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) 109 - { 110 - unsigned char *b = skb->tail; 111 - struct tc_defact opt; 112 - struct tcf_defact *p = PRIV(a, defact); 113 - struct tcf_t t; 114 - 115 - opt.index = p->index; 116 - opt.refcnt = p->refcnt - ref; 117 - opt.bindcnt = p->bindcnt - bind; 118 - opt.action = p->action; 119 - RTA_PUT(skb, TCA_DEF_PARMS, sizeof(opt), &opt); 120 - RTA_PUT(skb, TCA_DEF_DATA, p->datalen, p->defdata); 121 - t.install = jiffies_to_clock_t(jiffies - p->tm.install); 122 - t.lastuse = jiffies_to_clock_t(jiffies - p->tm.lastuse); 123 - t.expires = jiffies_to_clock_t(p->tm.expires); 124 - RTA_PUT(skb, TCA_DEF_TM, sizeof(t), &t); 125 - return skb->len; 126 - 127 - rtattr_failure: 128 - skb_trim(skb, b - skb->data); 129 - return -1; 130 - } 131 - 132 - #define tca_use_default_ops \ 133 - .dump = tcf_defact_dump, \ 134 - .cleanup = tcf_defact_cleanup, \ 135 - .init = tcf_defact_init, \ 136 - .walk = tcf_generic_walker, \ 137 - 138 - #define tca_use_default_defines(name) \ 139 - static u32 idx_gen; \ 140 - static struct tcf_defact *tcf_##name_ht[MY_TAB_SIZE]; \ 141 - static DEFINE_RWLOCK(##name_lock); 142 - #endif /* _NET_ACT_GENERIC_H */
-273
include/net/pkt_act.h
··· 1 - #ifndef __NET_PKT_ACT_H 2 - #define __NET_PKT_ACT_H 3 - 4 - #include <asm/uaccess.h> 5 - #include <asm/system.h> 6 - #include <linux/bitops.h> 7 - #include <linux/types.h> 8 - #include <linux/kernel.h> 9 - #include <linux/sched.h> 10 - #include <linux/string.h> 11 - #include <linux/mm.h> 12 - #include <linux/socket.h> 13 - #include <linux/sockios.h> 14 - #include <linux/in.h> 15 - #include <linux/errno.h> 16 - #include <linux/interrupt.h> 17 - #include <linux/skbuff.h> 18 - #include <linux/rtnetlink.h> 19 - #include <linux/module.h> 20 - #include <linux/init.h> 21 - #include <linux/proc_fs.h> 22 - #include <net/sock.h> 23 - #include <net/pkt_sched.h> 24 - 25 - #define tca_st(val) (struct tcf_##val *) 26 - #define PRIV(a,name) ( tca_st(name) (a)->priv) 27 - 28 - #if 0 /* control */ 29 - #define DPRINTK(format,args...) printk(KERN_DEBUG format,##args) 30 - #else 31 - #define DPRINTK(format,args...) 32 - #endif 33 - 34 - #if 0 /* data */ 35 - #define D2PRINTK(format,args...) printk(KERN_DEBUG format,##args) 36 - #else 37 - #define D2PRINTK(format,args...) 
38 - #endif 39 - 40 - static __inline__ unsigned 41 - tcf_hash(u32 index) 42 - { 43 - return index & MY_TAB_MASK; 44 - } 45 - 46 - /* probably move this from being inline 47 - * and put into act_generic 48 - */ 49 - static inline void 50 - tcf_hash_destroy(struct tcf_st *p) 51 - { 52 - unsigned h = tcf_hash(p->index); 53 - struct tcf_st **p1p; 54 - 55 - for (p1p = &tcf_ht[h]; *p1p; p1p = &(*p1p)->next) { 56 - if (*p1p == p) { 57 - write_lock_bh(&tcf_t_lock); 58 - *p1p = p->next; 59 - write_unlock_bh(&tcf_t_lock); 60 - #ifdef CONFIG_NET_ESTIMATOR 61 - gen_kill_estimator(&p->bstats, &p->rate_est); 62 - #endif 63 - kfree(p); 64 - return; 65 - } 66 - } 67 - BUG_TRAP(0); 68 - } 69 - 70 - static inline int 71 - tcf_hash_release(struct tcf_st *p, int bind ) 72 - { 73 - int ret = 0; 74 - if (p) { 75 - if (bind) { 76 - p->bindcnt--; 77 - } 78 - p->refcnt--; 79 - if(p->bindcnt <=0 && p->refcnt <= 0) { 80 - tcf_hash_destroy(p); 81 - ret = 1; 82 - } 83 - } 84 - return ret; 85 - } 86 - 87 - static __inline__ int 88 - tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb, 89 - struct tc_action *a) 90 - { 91 - struct tcf_st *p; 92 - int err =0, index = -1,i= 0, s_i = 0, n_i = 0; 93 - struct rtattr *r ; 94 - 95 - read_lock(&tcf_t_lock); 96 - 97 - s_i = cb->args[0]; 98 - 99 - for (i = 0; i < MY_TAB_SIZE; i++) { 100 - p = tcf_ht[tcf_hash(i)]; 101 - 102 - for (; p; p = p->next) { 103 - index++; 104 - if (index < s_i) 105 - continue; 106 - a->priv = p; 107 - a->order = n_i; 108 - r = (struct rtattr*) skb->tail; 109 - RTA_PUT(skb, a->order, 0, NULL); 110 - err = tcf_action_dump_1(skb, a, 0, 0); 111 - if (0 > err) { 112 - index--; 113 - skb_trim(skb, (u8*)r - skb->data); 114 - goto done; 115 - } 116 - r->rta_len = skb->tail - (u8*)r; 117 - n_i++; 118 - if (n_i >= TCA_ACT_MAX_PRIO) { 119 - goto done; 120 - } 121 - } 122 - } 123 - done: 124 - read_unlock(&tcf_t_lock); 125 - if (n_i) 126 - cb->args[0] += n_i; 127 - return n_i; 128 - 129 - rtattr_failure: 130 - skb_trim(skb, 
(u8*)r - skb->data); 131 - goto done; 132 - } 133 - 134 - static __inline__ int 135 - tcf_del_walker(struct sk_buff *skb, struct tc_action *a) 136 - { 137 - struct tcf_st *p, *s_p; 138 - struct rtattr *r ; 139 - int i= 0, n_i = 0; 140 - 141 - r = (struct rtattr*) skb->tail; 142 - RTA_PUT(skb, a->order, 0, NULL); 143 - RTA_PUT(skb, TCA_KIND, IFNAMSIZ, a->ops->kind); 144 - for (i = 0; i < MY_TAB_SIZE; i++) { 145 - p = tcf_ht[tcf_hash(i)]; 146 - 147 - while (p != NULL) { 148 - s_p = p->next; 149 - if (ACT_P_DELETED == tcf_hash_release(p, 0)) { 150 - module_put(a->ops->owner); 151 - } 152 - n_i++; 153 - p = s_p; 154 - } 155 - } 156 - RTA_PUT(skb, TCA_FCNT, 4, &n_i); 157 - r->rta_len = skb->tail - (u8*)r; 158 - 159 - return n_i; 160 - rtattr_failure: 161 - skb_trim(skb, (u8*)r - skb->data); 162 - return -EINVAL; 163 - } 164 - 165 - static __inline__ int 166 - tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb, int type, 167 - struct tc_action *a) 168 - { 169 - if (type == RTM_DELACTION) { 170 - return tcf_del_walker(skb,a); 171 - } else if (type == RTM_GETACTION) { 172 - return tcf_dump_walker(skb,cb,a); 173 - } else { 174 - printk("tcf_generic_walker: unknown action %d\n",type); 175 - return -EINVAL; 176 - } 177 - } 178 - 179 - static __inline__ struct tcf_st * 180 - tcf_hash_lookup(u32 index) 181 - { 182 - struct tcf_st *p; 183 - 184 - read_lock(&tcf_t_lock); 185 - for (p = tcf_ht[tcf_hash(index)]; p; p = p->next) { 186 - if (p->index == index) 187 - break; 188 - } 189 - read_unlock(&tcf_t_lock); 190 - return p; 191 - } 192 - 193 - static __inline__ u32 194 - tcf_hash_new_index(void) 195 - { 196 - do { 197 - if (++idx_gen == 0) 198 - idx_gen = 1; 199 - } while (tcf_hash_lookup(idx_gen)); 200 - 201 - return idx_gen; 202 - } 203 - 204 - 205 - static inline int 206 - tcf_hash_search(struct tc_action *a, u32 index) 207 - { 208 - struct tcf_st *p = tcf_hash_lookup(index); 209 - 210 - if (p != NULL) { 211 - a->priv = p; 212 - return 1; 213 - } 214 - return 
0; 215 - } 216 - 217 - #ifdef CONFIG_NET_ACT_INIT 218 - static inline struct tcf_st * 219 - tcf_hash_check(u32 index, struct tc_action *a, int ovr, int bind) 220 - { 221 - struct tcf_st *p = NULL; 222 - if (index && (p = tcf_hash_lookup(index)) != NULL) { 223 - if (bind) { 224 - p->bindcnt++; 225 - p->refcnt++; 226 - } 227 - a->priv = p; 228 - } 229 - return p; 230 - } 231 - 232 - static inline struct tcf_st * 233 - tcf_hash_create(u32 index, struct rtattr *est, struct tc_action *a, int size, int ovr, int bind) 234 - { 235 - struct tcf_st *p = NULL; 236 - 237 - p = kmalloc(size, GFP_KERNEL); 238 - if (p == NULL) 239 - return p; 240 - 241 - memset(p, 0, size); 242 - p->refcnt = 1; 243 - 244 - if (bind) { 245 - p->bindcnt = 1; 246 - } 247 - 248 - spin_lock_init(&p->lock); 249 - p->stats_lock = &p->lock; 250 - p->index = index ? : tcf_hash_new_index(); 251 - p->tm.install = jiffies; 252 - p->tm.lastuse = jiffies; 253 - #ifdef CONFIG_NET_ESTIMATOR 254 - if (est) 255 - gen_new_estimator(&p->bstats, &p->rate_est, p->stats_lock, est); 256 - #endif 257 - a->priv = (void *) p; 258 - return p; 259 - } 260 - 261 - static inline void tcf_hash_insert(struct tcf_st *p) 262 - { 263 - unsigned h = tcf_hash(p->index); 264 - 265 - write_lock_bh(&tcf_t_lock); 266 - p->next = tcf_ht[h]; 267 - tcf_ht[h] = p; 268 - write_unlock_bh(&tcf_t_lock); 269 - } 270 - 271 - #endif 272 - 273 - #endif
+7 -6
include/net/tc_act/tc_defact.h
··· 3 3 4 4 #include <net/act_api.h> 5 5 6 - struct tcf_defact 7 - { 8 - tca_gen(defact); 9 - u32 datalen; 10 - void *defdata; 6 + struct tcf_defact { 7 + struct tcf_common common; 8 + u32 tcfd_datalen; 9 + void *tcfd_defdata; 11 10 }; 11 + #define to_defact(pc) \ 12 + container_of(pc, struct tcf_defact, common) 12 13 13 - #endif 14 + #endif /* __NET_TC_DEF_H */
+9 -9
include/net/tc_act/tc_gact.h
··· 3 3 4 4 #include <net/act_api.h> 5 5 6 - struct tcf_gact 7 - { 8 - tca_gen(gact); 6 + struct tcf_gact { 7 + struct tcf_common common; 9 8 #ifdef CONFIG_GACT_PROB 10 - u16 ptype; 11 - u16 pval; 12 - int paction; 9 + u16 tcfg_ptype; 10 + u16 tcfg_pval; 11 + int tcfg_paction; 13 12 #endif 14 - 15 13 }; 16 - 17 - #endif 14 + #define to_gact(pc) \ 15 + container_of(pc, struct tcf_gact, common) 16 + 17 + #endif /* __NET_TC_GACT_H */
+8 -7
include/net/tc_act/tc_ipt.h
··· 5 5 6 6 struct xt_entry_target; 7 7 8 - struct tcf_ipt 9 - { 10 - tca_gen(ipt); 11 - u32 hook; 12 - char *tname; 13 - struct xt_entry_target *t; 8 + struct tcf_ipt { 9 + struct tcf_common common; 10 + u32 tcfi_hook; 11 + char *tcfi_tname; 12 + struct xt_entry_target *tcfi_t; 14 13 }; 14 + #define to_ipt(pc) \ 15 + container_of(pc, struct tcf_ipt, common) 15 16 16 - #endif 17 + #endif /* __NET_TC_IPT_H */
+9 -8
include/net/tc_act/tc_mirred.h
··· 3 3 4 4 #include <net/act_api.h> 5 5 6 - struct tcf_mirred 7 - { 8 - tca_gen(mirred); 9 - int eaction; 10 - int ifindex; 11 - int ok_push; 12 - struct net_device *dev; 6 + struct tcf_mirred { 7 + struct tcf_common common; 8 + int tcfm_eaction; 9 + int tcfm_ifindex; 10 + int tcfm_ok_push; 11 + struct net_device *tcfm_dev; 13 12 }; 13 + #define to_mirred(pc) \ 14 + container_of(pc, struct tcf_mirred, common) 14 15 15 - #endif 16 + #endif /* __NET_TC_MIR_H */
+8 -7
include/net/tc_act/tc_pedit.h
··· 3 3 4 4 #include <net/act_api.h> 5 5 6 - struct tcf_pedit 7 - { 8 - tca_gen(pedit); 9 - unsigned char nkeys; 10 - unsigned char flags; 11 - struct tc_pedit_key *keys; 6 + struct tcf_pedit { 7 + struct tcf_common common; 8 + unsigned char tcfp_nkeys; 9 + unsigned char tcfp_flags; 10 + struct tc_pedit_key *tcfp_keys; 12 11 }; 12 + #define to_pedit(pc) \ 13 + container_of(pc, struct tcf_pedit, common) 13 14 14 - #endif 15 + #endif /* __NET_TC_PED_H */
+227 -19
net/sched/act_api.c
··· 33 33 #include <net/sch_generic.h> 34 34 #include <net/act_api.h> 35 35 36 - #if 0 /* control */ 37 - #define DPRINTK(format, args...) printk(KERN_DEBUG format, ##args) 38 - #else 39 - #define DPRINTK(format, args...) 36 + void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo) 37 + { 38 + unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask); 39 + struct tcf_common **p1p; 40 + 41 + for (p1p = &hinfo->htab[h]; *p1p; p1p = &(*p1p)->tcfc_next) { 42 + if (*p1p == p) { 43 + write_lock_bh(hinfo->lock); 44 + *p1p = p->tcfc_next; 45 + write_unlock_bh(hinfo->lock); 46 + #ifdef CONFIG_NET_ESTIMATOR 47 + gen_kill_estimator(&p->tcfc_bstats, 48 + &p->tcfc_rate_est); 40 49 #endif 41 - #if 0 /* data */ 42 - #define D2PRINTK(format, args...) printk(KERN_DEBUG format, ##args) 43 - #else 44 - #define D2PRINTK(format, args...) 50 + kfree(p); 51 + return; 52 + } 53 + } 54 + BUG_TRAP(0); 55 + } 56 + EXPORT_SYMBOL(tcf_hash_destroy); 57 + 58 + int tcf_hash_release(struct tcf_common *p, int bind, 59 + struct tcf_hashinfo *hinfo) 60 + { 61 + int ret = 0; 62 + 63 + if (p) { 64 + if (bind) 65 + p->tcfc_bindcnt--; 66 + 67 + p->tcfc_refcnt--; 68 + if (p->tcfc_bindcnt <= 0 && p->tcfc_refcnt <= 0) { 69 + tcf_hash_destroy(p, hinfo); 70 + ret = 1; 71 + } 72 + } 73 + return ret; 74 + } 75 + EXPORT_SYMBOL(tcf_hash_release); 76 + 77 + static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb, 78 + struct tc_action *a, struct tcf_hashinfo *hinfo) 79 + { 80 + struct tcf_common *p; 81 + int err = 0, index = -1,i = 0, s_i = 0, n_i = 0; 82 + struct rtattr *r ; 83 + 84 + read_lock(hinfo->lock); 85 + 86 + s_i = cb->args[0]; 87 + 88 + for (i = 0; i < (hinfo->hmask + 1); i++) { 89 + p = hinfo->htab[tcf_hash(i, hinfo->hmask)]; 90 + 91 + for (; p; p = p->tcfc_next) { 92 + index++; 93 + if (index < s_i) 94 + continue; 95 + a->priv = p; 96 + a->order = n_i; 97 + r = (struct rtattr*) skb->tail; 98 + RTA_PUT(skb, a->order, 0, NULL); 99 + err = tcf_action_dump_1(skb, a, 0, 0); 
100 + if (err < 0) { 101 + index--; 102 + skb_trim(skb, (u8*)r - skb->data); 103 + goto done; 104 + } 105 + r->rta_len = skb->tail - (u8*)r; 106 + n_i++; 107 + if (n_i >= TCA_ACT_MAX_PRIO) 108 + goto done; 109 + } 110 + } 111 + done: 112 + read_unlock(hinfo->lock); 113 + if (n_i) 114 + cb->args[0] += n_i; 115 + return n_i; 116 + 117 + rtattr_failure: 118 + skb_trim(skb, (u8*)r - skb->data); 119 + goto done; 120 + } 121 + 122 + static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a, 123 + struct tcf_hashinfo *hinfo) 124 + { 125 + struct tcf_common *p, *s_p; 126 + struct rtattr *r ; 127 + int i= 0, n_i = 0; 128 + 129 + r = (struct rtattr*) skb->tail; 130 + RTA_PUT(skb, a->order, 0, NULL); 131 + RTA_PUT(skb, TCA_KIND, IFNAMSIZ, a->ops->kind); 132 + for (i = 0; i < (hinfo->hmask + 1); i++) { 133 + p = hinfo->htab[tcf_hash(i, hinfo->hmask)]; 134 + 135 + while (p != NULL) { 136 + s_p = p->tcfc_next; 137 + if (ACT_P_DELETED == tcf_hash_release(p, 0, hinfo)) 138 + module_put(a->ops->owner); 139 + n_i++; 140 + p = s_p; 141 + } 142 + } 143 + RTA_PUT(skb, TCA_FCNT, 4, &n_i); 144 + r->rta_len = skb->tail - (u8*)r; 145 + 146 + return n_i; 147 + rtattr_failure: 148 + skb_trim(skb, (u8*)r - skb->data); 149 + return -EINVAL; 150 + } 151 + 152 + int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb, 153 + int type, struct tc_action *a) 154 + { 155 + struct tcf_hashinfo *hinfo = a->ops->hinfo; 156 + 157 + if (type == RTM_DELACTION) { 158 + return tcf_del_walker(skb, a, hinfo); 159 + } else if (type == RTM_GETACTION) { 160 + return tcf_dump_walker(skb, cb, a, hinfo); 161 + } else { 162 + printk("tcf_generic_walker: unknown action %d\n", type); 163 + return -EINVAL; 164 + } 165 + } 166 + EXPORT_SYMBOL(tcf_generic_walker); 167 + 168 + struct tcf_common *tcf_hash_lookup(u32 index, struct tcf_hashinfo *hinfo) 169 + { 170 + struct tcf_common *p; 171 + 172 + read_lock(hinfo->lock); 173 + for (p = hinfo->htab[tcf_hash(index, hinfo->hmask)]; p; 174 + p = 
p->tcfc_next) { 175 + if (p->tcfc_index == index) 176 + break; 177 + } 178 + read_unlock(hinfo->lock); 179 + 180 + return p; 181 + } 182 + EXPORT_SYMBOL(tcf_hash_lookup); 183 + 184 + u32 tcf_hash_new_index(u32 *idx_gen, struct tcf_hashinfo *hinfo) 185 + { 186 + u32 val = *idx_gen; 187 + 188 + do { 189 + if (++val == 0) 190 + val = 1; 191 + } while (tcf_hash_lookup(val, hinfo)); 192 + 193 + return (*idx_gen = val); 194 + } 195 + EXPORT_SYMBOL(tcf_hash_new_index); 196 + 197 + int tcf_hash_search(struct tc_action *a, u32 index) 198 + { 199 + struct tcf_hashinfo *hinfo = a->ops->hinfo; 200 + struct tcf_common *p = tcf_hash_lookup(index, hinfo); 201 + 202 + if (p) { 203 + a->priv = p; 204 + return 1; 205 + } 206 + return 0; 207 + } 208 + EXPORT_SYMBOL(tcf_hash_search); 209 + 210 + struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a, int bind, 211 + struct tcf_hashinfo *hinfo) 212 + { 213 + struct tcf_common *p = NULL; 214 + if (index && (p = tcf_hash_lookup(index, hinfo)) != NULL) { 215 + if (bind) { 216 + p->tcfc_bindcnt++; 217 + p->tcfc_refcnt++; 218 + } 219 + a->priv = p; 220 + } 221 + return p; 222 + } 223 + EXPORT_SYMBOL(tcf_hash_check); 224 + 225 + struct tcf_common *tcf_hash_create(u32 index, struct rtattr *est, struct tc_action *a, int size, int bind, u32 *idx_gen, struct tcf_hashinfo *hinfo) 226 + { 227 + struct tcf_common *p = kzalloc(size, GFP_KERNEL); 228 + 229 + if (unlikely(!p)) 230 + return p; 231 + p->tcfc_refcnt = 1; 232 + if (bind) 233 + p->tcfc_bindcnt = 1; 234 + 235 + spin_lock_init(&p->tcfc_lock); 236 + p->tcfc_stats_lock = &p->tcfc_lock; 237 + p->tcfc_index = index ? 
index : tcf_hash_new_index(idx_gen, hinfo); 238 + p->tcfc_tm.install = jiffies; 239 + p->tcfc_tm.lastuse = jiffies; 240 + #ifdef CONFIG_NET_ESTIMATOR 241 + if (est) 242 + gen_new_estimator(&p->tcfc_bstats, &p->tcfc_rate_est, 243 + p->tcfc_stats_lock, est); 45 244 #endif 245 + a->priv = (void *) p; 246 + return p; 247 + } 248 + EXPORT_SYMBOL(tcf_hash_create); 249 + 250 + void tcf_hash_insert(struct tcf_common *p, struct tcf_hashinfo *hinfo) 251 + { 252 + unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask); 253 + 254 + write_lock_bh(hinfo->lock); 255 + p->tcfc_next = hinfo->htab[h]; 256 + hinfo->htab[h] = p; 257 + write_unlock_bh(hinfo->lock); 258 + } 259 + EXPORT_SYMBOL(tcf_hash_insert); 46 260 47 261 static struct tc_action_ops *act_base = NULL; 48 262 static DEFINE_RWLOCK(act_mod_lock); ··· 369 155 370 156 if (skb->tc_verd & TC_NCLS) { 371 157 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); 372 - D2PRINTK("(%p)tcf_action_exec: cleared TC_NCLS in %s out %s\n", 373 - skb, skb->input_dev ? skb->input_dev->name : "xxx", 374 - skb->dev->name); 375 158 ret = TC_ACT_OK; 376 159 goto exec_done; 377 160 } ··· 398 187 399 188 for (a = act; a; a = act) { 400 189 if (a->ops && a->ops->cleanup) { 401 - DPRINTK("tcf_action_destroy destroying %p next %p\n", 402 - a, a->next); 403 190 if (a->ops->cleanup(a, bind) == ACT_P_DELETED) 404 191 module_put(a->ops->owner); 405 192 act = act->next; ··· 540 331 if (*err != ACT_P_CREATED) 541 332 module_put(a_o->owner); 542 333 a->ops = a_o; 543 - DPRINTK("tcf_action_init_1: successfull %s\n", act_name); 544 334 545 335 *err = 0; 546 336 return a; ··· 600 392 if (compat_mode) { 601 393 if (a->type == TCA_OLD_COMPAT) 602 394 err = gnet_stats_start_copy_compat(skb, 0, 603 - TCA_STATS, TCA_XSTATS, h->stats_lock, &d); 395 + TCA_STATS, TCA_XSTATS, h->tcf_stats_lock, &d); 604 396 else 605 397 return 0; 606 398 } else 607 399 err = gnet_stats_start_copy(skb, TCA_ACT_STATS, 608 - h->stats_lock, &d); 400 + h->tcf_stats_lock, &d); 609 401 610 402 if 
(err < 0) 611 403 goto errout; ··· 614 406 if (a->ops->get_stats(skb, a) < 0) 615 407 goto errout; 616 408 617 - if (gnet_stats_copy_basic(&d, &h->bstats) < 0 || 409 + if (gnet_stats_copy_basic(&d, &h->tcf_bstats) < 0 || 618 410 #ifdef CONFIG_NET_ESTIMATOR 619 - gnet_stats_copy_rate_est(&d, &h->rate_est) < 0 || 411 + gnet_stats_copy_rate_est(&d, &h->tcf_rate_est) < 0 || 620 412 #endif 621 - gnet_stats_copy_queue(&d, &h->qstats) < 0) 413 + gnet_stats_copy_queue(&d, &h->tcf_qstats) < 0) 622 414 goto errout; 623 415 624 416 if (gnet_stats_finish_copy(&d) < 0)
+68 -74
net/sched/act_gact.c
··· 34 34 #include <linux/tc_act/tc_gact.h> 35 35 #include <net/tc_act/tc_gact.h> 36 36 37 - /* use generic hash table */ 38 - #define MY_TAB_SIZE 16 39 - #define MY_TAB_MASK 15 40 - 41 - static u32 idx_gen; 42 - static struct tcf_gact *tcf_gact_ht[MY_TAB_SIZE]; 37 + #define GACT_TAB_MASK 15 38 + static struct tcf_common *tcf_gact_ht[GACT_TAB_MASK + 1]; 39 + static u32 gact_idx_gen; 43 40 static DEFINE_RWLOCK(gact_lock); 44 41 45 - /* ovewrride the defaults */ 46 - #define tcf_st tcf_gact 47 - #define tc_st tc_gact 48 - #define tcf_t_lock gact_lock 49 - #define tcf_ht tcf_gact_ht 50 - 51 - #define CONFIG_NET_ACT_INIT 1 52 - #include <net/pkt_act.h> 42 + static struct tcf_hashinfo gact_hash_info = { 43 + .htab = tcf_gact_ht, 44 + .hmask = GACT_TAB_MASK, 45 + .lock = &gact_lock, 46 + }; 53 47 54 48 #ifdef CONFIG_GACT_PROB 55 - static int gact_net_rand(struct tcf_gact *p) 49 + static int gact_net_rand(struct tcf_gact *gact) 56 50 { 57 - if (net_random()%p->pval) 58 - return p->action; 59 - return p->paction; 51 + if (net_random() % gact->tcfg_pval) 52 + return gact->tcf_action; 53 + return gact->tcfg_paction; 60 54 } 61 55 62 - static int gact_determ(struct tcf_gact *p) 56 + static int gact_determ(struct tcf_gact *gact) 63 57 { 64 - if (p->bstats.packets%p->pval) 65 - return p->action; 66 - return p->paction; 58 + if (gact->tcf_bstats.packets % gact->tcfg_pval) 59 + return gact->tcf_action; 60 + return gact->tcfg_paction; 67 61 } 68 62 69 - typedef int (*g_rand)(struct tcf_gact *p); 63 + typedef int (*g_rand)(struct tcf_gact *gact); 70 64 static g_rand gact_rand[MAX_RAND]= { NULL, gact_net_rand, gact_determ }; 71 - #endif 65 + #endif /* CONFIG_GACT_PROB */ 72 66 73 67 static int tcf_gact_init(struct rtattr *rta, struct rtattr *est, 74 68 struct tc_action *a, int ovr, int bind) 75 69 { 76 70 struct rtattr *tb[TCA_GACT_MAX]; 77 71 struct tc_gact *parm; 78 - struct tcf_gact *p; 72 + struct tcf_gact *gact; 73 + struct tcf_common *pc; 79 74 int ret = 0; 80 75 81 76 if (rta 
== NULL || rtattr_parse_nested(tb, TCA_GACT_MAX, rta) < 0) ··· 89 94 return -EOPNOTSUPP; 90 95 #endif 91 96 92 - p = tcf_hash_check(parm->index, a, ovr, bind); 93 - if (p == NULL) { 94 - p = tcf_hash_create(parm->index, est, a, sizeof(*p), ovr, bind); 95 - if (p == NULL) 97 + pc = tcf_hash_check(parm->index, a, bind, &gact_hash_info); 98 + if (!pc) { 99 + pc = tcf_hash_create(parm->index, est, a, sizeof(*gact), 100 + bind, &gact_idx_gen, &gact_hash_info); 101 + if (unlikely(!pc)) 96 102 return -ENOMEM; 97 103 ret = ACT_P_CREATED; 98 104 } else { 99 105 if (!ovr) { 100 - tcf_hash_release(p, bind); 106 + tcf_hash_release(pc, bind, &gact_hash_info); 101 107 return -EEXIST; 102 108 } 103 109 } 104 110 105 - spin_lock_bh(&p->lock); 106 - p->action = parm->action; 111 + gact = to_gact(pc); 112 + 113 + spin_lock_bh(&gact->tcf_lock); 114 + gact->tcf_action = parm->action; 107 115 #ifdef CONFIG_GACT_PROB 108 116 if (tb[TCA_GACT_PROB-1] != NULL) { 109 117 struct tc_gact_p *p_parm = RTA_DATA(tb[TCA_GACT_PROB-1]); 110 - p->paction = p_parm->paction; 111 - p->pval = p_parm->pval; 112 - p->ptype = p_parm->ptype; 118 + gact->tcfg_paction = p_parm->paction; 119 + gact->tcfg_pval = p_parm->pval; 120 + gact->tcfg_ptype = p_parm->ptype; 113 121 } 114 122 #endif 115 - spin_unlock_bh(&p->lock); 123 + spin_unlock_bh(&gact->tcf_lock); 116 124 if (ret == ACT_P_CREATED) 117 - tcf_hash_insert(p); 125 + tcf_hash_insert(pc, &gact_hash_info); 118 126 return ret; 119 127 } 120 128 121 - static int 122 - tcf_gact_cleanup(struct tc_action *a, int bind) 129 + static int tcf_gact_cleanup(struct tc_action *a, int bind) 123 130 { 124 - struct tcf_gact *p = PRIV(a, gact); 131 + struct tcf_gact *gact = a->priv; 125 132 126 - if (p != NULL) 127 - return tcf_hash_release(p, bind); 133 + if (gact) 134 + return tcf_hash_release(&gact->common, bind, &gact_hash_info); 128 135 return 0; 129 136 } 130 137 131 - static int 132 - tcf_gact(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res) 138 + 
static int tcf_gact(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res) 133 139 { 134 - struct tcf_gact *p = PRIV(a, gact); 140 + struct tcf_gact *gact = a->priv; 135 141 int action = TC_ACT_SHOT; 136 142 137 - spin_lock(&p->lock); 143 + spin_lock(&gact->tcf_lock); 138 144 #ifdef CONFIG_GACT_PROB 139 - if (p->ptype && gact_rand[p->ptype] != NULL) 140 - action = gact_rand[p->ptype](p); 145 + if (gact->tcfg_ptype && gact_rand[gact->tcfg_ptype] != NULL) 146 + action = gact_rand[gact->tcfg_ptype](gact); 141 147 else 142 - action = p->action; 148 + action = gact->tcf_action; 143 149 #else 144 - action = p->action; 150 + action = gact->tcf_action; 145 151 #endif 146 - p->bstats.bytes += skb->len; 147 - p->bstats.packets++; 152 + gact->tcf_bstats.bytes += skb->len; 153 + gact->tcf_bstats.packets++; 148 154 if (action == TC_ACT_SHOT) 149 - p->qstats.drops++; 150 - p->tm.lastuse = jiffies; 151 - spin_unlock(&p->lock); 155 + gact->tcf_qstats.drops++; 156 + gact->tcf_tm.lastuse = jiffies; 157 + spin_unlock(&gact->tcf_lock); 152 158 153 159 return action; 154 160 } 155 161 156 - static int 157 - tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) 162 + static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) 158 163 { 159 164 unsigned char *b = skb->tail; 160 165 struct tc_gact opt; 161 - struct tcf_gact *p = PRIV(a, gact); 166 + struct tcf_gact *gact = a->priv; 162 167 struct tcf_t t; 163 168 164 - opt.index = p->index; 165 - opt.refcnt = p->refcnt - ref; 166 - opt.bindcnt = p->bindcnt - bind; 167 - opt.action = p->action; 169 + opt.index = gact->tcf_index; 170 + opt.refcnt = gact->tcf_refcnt - ref; 171 + opt.bindcnt = gact->tcf_bindcnt - bind; 172 + opt.action = gact->tcf_action; 168 173 RTA_PUT(skb, TCA_GACT_PARMS, sizeof(opt), &opt); 169 174 #ifdef CONFIG_GACT_PROB 170 - if (p->ptype) { 175 + if (gact->tcfg_ptype) { 171 176 struct tc_gact_p p_opt; 172 - p_opt.paction = p->paction; 173 - p_opt.pval = p->pval; 
174 - p_opt.ptype = p->ptype; 177 + p_opt.paction = gact->tcfg_paction; 178 + p_opt.pval = gact->tcfg_pval; 179 + p_opt.ptype = gact->tcfg_ptype; 175 180 RTA_PUT(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt); 176 181 } 177 182 #endif 178 - t.install = jiffies_to_clock_t(jiffies - p->tm.install); 179 - t.lastuse = jiffies_to_clock_t(jiffies - p->tm.lastuse); 180 - t.expires = jiffies_to_clock_t(p->tm.expires); 183 + t.install = jiffies_to_clock_t(jiffies - gact->tcf_tm.install); 184 + t.lastuse = jiffies_to_clock_t(jiffies - gact->tcf_tm.lastuse); 185 + t.expires = jiffies_to_clock_t(gact->tcf_tm.expires); 181 186 RTA_PUT(skb, TCA_GACT_TM, sizeof(t), &t); 182 187 return skb->len; 183 188 184 - rtattr_failure: 189 + rtattr_failure: 185 190 skb_trim(skb, b - skb->data); 186 191 return -1; 187 192 } 188 193 189 194 static struct tc_action_ops act_gact_ops = { 190 195 .kind = "gact", 196 + .hinfo = &gact_hash_info, 191 197 .type = TCA_ACT_GACT, 192 198 .capab = TCA_CAP_NONE, 193 199 .owner = THIS_MODULE, ··· 204 208 MODULE_DESCRIPTION("Generic Classifier actions"); 205 209 MODULE_LICENSE("GPL"); 206 210 207 - static int __init 208 - gact_init_module(void) 211 + static int __init gact_init_module(void) 209 212 { 210 213 #ifdef CONFIG_GACT_PROB 211 214 printk("GACT probability on\n"); ··· 214 219 return tcf_register_action(&act_gact_ops); 215 220 } 216 221 217 - static void __exit 218 - gact_cleanup_module(void) 222 + static void __exit gact_cleanup_module(void) 219 223 { 220 224 tcf_unregister_action(&act_gact_ops); 221 225 }
+78 -95
net/sched/act_ipt.c
··· 38 38 39 39 #include <linux/netfilter_ipv4/ip_tables.h> 40 40 41 - /* use generic hash table */ 42 - #define MY_TAB_SIZE 16 43 - #define MY_TAB_MASK 15 44 41 45 - static u32 idx_gen; 46 - static struct tcf_ipt *tcf_ipt_ht[MY_TAB_SIZE]; 47 - /* ipt hash table lock */ 42 + #define IPT_TAB_MASK 15 43 + static struct tcf_common *tcf_ipt_ht[IPT_TAB_MASK + 1]; 44 + static u32 ipt_idx_gen; 48 45 static DEFINE_RWLOCK(ipt_lock); 49 46 50 - /* ovewrride the defaults */ 51 - #define tcf_st tcf_ipt 52 - #define tcf_t_lock ipt_lock 53 - #define tcf_ht tcf_ipt_ht 47 + static struct tcf_hashinfo ipt_hash_info = { 48 + .htab = tcf_ipt_ht, 49 + .hmask = IPT_TAB_MASK, 50 + .lock = &ipt_lock, 51 + }; 54 52 55 - #define CONFIG_NET_ACT_INIT 56 - #include <net/pkt_act.h> 57 - 58 - static int 59 - ipt_init_target(struct ipt_entry_target *t, char *table, unsigned int hook) 53 + static int ipt_init_target(struct ipt_entry_target *t, char *table, unsigned int hook) 60 54 { 61 55 struct ipt_target *target; 62 56 int ret = 0; ··· 59 65 if (!target) 60 66 return -ENOENT; 61 67 62 - DPRINTK("ipt_init_target: found %s\n", target->name); 63 68 t->u.kernel.target = target; 64 69 65 70 ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t), ··· 71 78 t->u.kernel.target, t->data, 72 79 t->u.target_size - sizeof(*t), 73 80 hook)) { 74 - DPRINTK("ipt_init_target: check failed for `%s'.\n", 75 - t->u.kernel.target->name); 76 81 module_put(t->u.kernel.target->me); 77 82 ret = -EINVAL; 78 83 } ··· 78 87 return ret; 79 88 } 80 89 81 - static void 82 - ipt_destroy_target(struct ipt_entry_target *t) 90 + static void ipt_destroy_target(struct ipt_entry_target *t) 83 91 { 84 92 if (t->u.kernel.target->destroy) 85 93 t->u.kernel.target->destroy(t->u.kernel.target, t->data, ··· 86 96 module_put(t->u.kernel.target->me); 87 97 } 88 98 89 - static int 90 - tcf_ipt_release(struct tcf_ipt *p, int bind) 99 + static int tcf_ipt_release(struct tcf_ipt *ipt, int bind) 91 100 { 92 101 int ret = 0; 93 - 
if (p) { 102 + if (ipt) { 94 103 if (bind) 95 - p->bindcnt--; 96 - p->refcnt--; 97 - if (p->bindcnt <= 0 && p->refcnt <= 0) { 98 - ipt_destroy_target(p->t); 99 - kfree(p->tname); 100 - kfree(p->t); 101 - tcf_hash_destroy(p); 104 + ipt->tcf_bindcnt--; 105 + ipt->tcf_refcnt--; 106 + if (ipt->tcf_bindcnt <= 0 && ipt->tcf_refcnt <= 0) { 107 + ipt_destroy_target(ipt->tcfi_t); 108 + kfree(ipt->tcfi_tname); 109 + kfree(ipt->tcfi_t); 110 + tcf_hash_destroy(&ipt->common, &ipt_hash_info); 102 111 ret = ACT_P_DELETED; 103 112 } 104 113 } 105 114 return ret; 106 115 } 107 116 108 - static int 109 - tcf_ipt_init(struct rtattr *rta, struct rtattr *est, struct tc_action *a, 110 - int ovr, int bind) 117 + static int tcf_ipt_init(struct rtattr *rta, struct rtattr *est, 118 + struct tc_action *a, int ovr, int bind) 111 119 { 112 120 struct rtattr *tb[TCA_IPT_MAX]; 113 - struct tcf_ipt *p; 121 + struct tcf_ipt *ipt; 122 + struct tcf_common *pc; 114 123 struct ipt_entry_target *td, *t; 115 124 char *tname; 116 125 int ret = 0, err; ··· 133 144 RTA_PAYLOAD(tb[TCA_IPT_INDEX-1]) >= sizeof(u32)) 134 145 index = *(u32 *)RTA_DATA(tb[TCA_IPT_INDEX-1]); 135 146 136 - p = tcf_hash_check(index, a, ovr, bind); 137 - if (p == NULL) { 138 - p = tcf_hash_create(index, est, a, sizeof(*p), ovr, bind); 139 - if (p == NULL) 147 + pc = tcf_hash_check(index, a, bind, &ipt_hash_info); 148 + if (!pc) { 149 + pc = tcf_hash_create(index, est, a, sizeof(*ipt), bind, 150 + &ipt_idx_gen, &ipt_hash_info); 151 + if (unlikely(!pc)) 140 152 return -ENOMEM; 141 153 ret = ACT_P_CREATED; 142 154 } else { 143 155 if (!ovr) { 144 - tcf_ipt_release(p, bind); 156 + tcf_ipt_release(to_ipt(pc), bind); 145 157 return -EEXIST; 146 158 } 147 159 } 160 + ipt = to_ipt(pc); 148 161 149 162 hook = *(u32 *)RTA_DATA(tb[TCA_IPT_HOOK-1]); 150 163 151 164 err = -ENOMEM; 152 165 tname = kmalloc(IFNAMSIZ, GFP_KERNEL); 153 - if (tname == NULL) 166 + if (unlikely(!tname)) 154 167 goto err1; 155 168 if (tb[TCA_IPT_TABLE - 1] == NULL || 156 
169 rtattr_strlcpy(tname, tb[TCA_IPT_TABLE-1], IFNAMSIZ) >= IFNAMSIZ) 157 170 strcpy(tname, "mangle"); 158 171 159 172 t = kmalloc(td->u.target_size, GFP_KERNEL); 160 - if (t == NULL) 173 + if (unlikely(!t)) 161 174 goto err2; 162 175 memcpy(t, td, td->u.target_size); 163 176 164 177 if ((err = ipt_init_target(t, tname, hook)) < 0) 165 178 goto err3; 166 179 167 - spin_lock_bh(&p->lock); 180 + spin_lock_bh(&ipt->tcf_lock); 168 181 if (ret != ACT_P_CREATED) { 169 - ipt_destroy_target(p->t); 170 - kfree(p->tname); 171 - kfree(p->t); 182 + ipt_destroy_target(ipt->tcfi_t); 183 + kfree(ipt->tcfi_tname); 184 + kfree(ipt->tcfi_t); 172 185 } 173 - p->tname = tname; 174 - p->t = t; 175 - p->hook = hook; 176 - spin_unlock_bh(&p->lock); 186 + ipt->tcfi_tname = tname; 187 + ipt->tcfi_t = t; 188 + ipt->tcfi_hook = hook; 189 + spin_unlock_bh(&ipt->tcf_lock); 177 190 if (ret == ACT_P_CREATED) 178 - tcf_hash_insert(p); 191 + tcf_hash_insert(pc, &ipt_hash_info); 179 192 return ret; 180 193 181 194 err3: ··· 185 194 err2: 186 195 kfree(tname); 187 196 err1: 188 - kfree(p); 197 + kfree(pc); 189 198 return err; 190 199 } 191 200 192 - static int 193 - tcf_ipt_cleanup(struct tc_action *a, int bind) 201 + static int tcf_ipt_cleanup(struct tc_action *a, int bind) 194 202 { 195 - struct tcf_ipt *p = PRIV(a, ipt); 196 - return tcf_ipt_release(p, bind); 203 + struct tcf_ipt *ipt = a->priv; 204 + return tcf_ipt_release(ipt, bind); 197 205 } 198 206 199 - static int 200 - tcf_ipt(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res) 207 + static int tcf_ipt(struct sk_buff *skb, struct tc_action *a, 208 + struct tcf_result *res) 201 209 { 202 210 int ret = 0, result = 0; 203 - struct tcf_ipt *p = PRIV(a, ipt); 211 + struct tcf_ipt *ipt = a->priv; 204 212 205 213 if (skb_cloned(skb)) { 206 214 if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) 207 215 return TC_ACT_UNSPEC; 208 216 } 209 217 210 - spin_lock(&p->lock); 218 + spin_lock(&ipt->tcf_lock); 211 219 212 - p->tm.lastuse = jiffies; 
213 - p->bstats.bytes += skb->len; 214 - p->bstats.packets++; 220 + ipt->tcf_tm.lastuse = jiffies; 221 + ipt->tcf_bstats.bytes += skb->len; 222 + ipt->tcf_bstats.packets++; 215 223 216 224 /* yes, we have to worry about both in and out dev 217 225 worry later - danger - this API seems to have changed ··· 219 229 /* iptables targets take a double skb pointer in case the skb 220 230 * needs to be replaced. We don't own the skb, so this must not 221 231 * happen. The pskb_expand_head above should make sure of this */ 222 - ret = p->t->u.kernel.target->target(&skb, skb->dev, NULL, p->hook, 223 - p->t->u.kernel.target, p->t->data, 224 - NULL); 232 + ret = ipt->tcfi_t->u.kernel.target->target(&skb, skb->dev, NULL, 233 + ipt->tcfi_hook, 234 + ipt->tcfi_t->u.kernel.target, 235 + ipt->tcfi_t->data, NULL); 225 236 switch (ret) { 226 237 case NF_ACCEPT: 227 238 result = TC_ACT_OK; 228 239 break; 229 240 case NF_DROP: 230 241 result = TC_ACT_SHOT; 231 - p->qstats.drops++; 242 + ipt->tcf_qstats.drops++; 232 243 break; 233 244 case IPT_CONTINUE: 234 245 result = TC_ACT_PIPE; ··· 240 249 result = TC_POLICE_OK; 241 250 break; 242 251 } 243 - spin_unlock(&p->lock); 252 + spin_unlock(&ipt->tcf_lock); 244 253 return result; 245 254 246 255 } 247 256 248 - static int 249 - tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) 257 + static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) 250 258 { 259 + unsigned char *b = skb->tail; 260 + struct tcf_ipt *ipt = a->priv; 251 261 struct ipt_entry_target *t; 252 262 struct tcf_t tm; 253 263 struct tc_cnt c; 254 - unsigned char *b = skb->tail; 255 - struct tcf_ipt *p = PRIV(a, ipt); 256 264 257 265 /* for simple targets kernel size == user size 258 266 ** user name = target name 259 267 ** for foolproof you need to not assume this 260 268 */ 261 269 262 - t = kmalloc(p->t->u.user.target_size, GFP_ATOMIC); 263 - if (t == NULL) 270 + t = kmalloc(ipt->tcfi_t->u.user.target_size, GFP_ATOMIC); 271 
+ if (unlikely(!t)) 264 272 goto rtattr_failure; 265 273 266 - c.bindcnt = p->bindcnt - bind; 267 - c.refcnt = p->refcnt - ref; 268 - memcpy(t, p->t, p->t->u.user.target_size); 269 - strcpy(t->u.user.name, p->t->u.kernel.target->name); 274 + c.bindcnt = ipt->tcf_bindcnt - bind; 275 + c.refcnt = ipt->tcf_refcnt - ref; 276 + memcpy(t, ipt->tcfi_t, ipt->tcfi_t->u.user.target_size); 277 + strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name); 270 278 271 - DPRINTK("\ttcf_ipt_dump tablename %s length %d\n", p->tname, 272 - strlen(p->tname)); 273 - DPRINTK("\tdump target name %s size %d size user %d " 274 - "data[0] %x data[1] %x\n", p->t->u.kernel.target->name, 275 - p->t->u.target_size, p->t->u.user.target_size, 276 - p->t->data[0], p->t->data[1]); 277 - RTA_PUT(skb, TCA_IPT_TARG, p->t->u.user.target_size, t); 278 - RTA_PUT(skb, TCA_IPT_INDEX, 4, &p->index); 279 - RTA_PUT(skb, TCA_IPT_HOOK, 4, &p->hook); 279 + RTA_PUT(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t); 280 + RTA_PUT(skb, TCA_IPT_INDEX, 4, &ipt->tcf_index); 281 + RTA_PUT(skb, TCA_IPT_HOOK, 4, &ipt->tcfi_hook); 280 282 RTA_PUT(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c); 281 - RTA_PUT(skb, TCA_IPT_TABLE, IFNAMSIZ, p->tname); 282 - tm.install = jiffies_to_clock_t(jiffies - p->tm.install); 283 - tm.lastuse = jiffies_to_clock_t(jiffies - p->tm.lastuse); 284 - tm.expires = jiffies_to_clock_t(p->tm.expires); 283 + RTA_PUT(skb, TCA_IPT_TABLE, IFNAMSIZ, ipt->tcfi_tname); 284 + tm.install = jiffies_to_clock_t(jiffies - ipt->tcf_tm.install); 285 + tm.lastuse = jiffies_to_clock_t(jiffies - ipt->tcf_tm.lastuse); 286 + tm.expires = jiffies_to_clock_t(ipt->tcf_tm.expires); 285 287 RTA_PUT(skb, TCA_IPT_TM, sizeof (tm), &tm); 286 288 kfree(t); 287 289 return skb->len; 288 290 289 - rtattr_failure: 291 + rtattr_failure: 290 292 skb_trim(skb, b - skb->data); 291 293 kfree(t); 292 294 return -1; ··· 287 303 288 304 static struct tc_action_ops act_ipt_ops = { 289 305 .kind = "ipt", 306 + .hinfo = &ipt_hash_info, 
290 307 .type = TCA_ACT_IPT, 291 308 .capab = TCA_CAP_NONE, 292 309 .owner = THIS_MODULE, ··· 303 318 MODULE_DESCRIPTION("Iptables target actions"); 304 319 MODULE_LICENSE("GPL"); 305 320 306 - static int __init 307 - ipt_init_module(void) 321 + static int __init ipt_init_module(void) 308 322 { 309 323 return tcf_register_action(&act_ipt_ops); 310 324 } 311 325 312 - static void __exit 313 - ipt_cleanup_module(void) 326 + static void __exit ipt_cleanup_module(void) 314 327 { 315 328 tcf_unregister_action(&act_ipt_ops); 316 329 }
+74 -85
net/sched/act_mirred.c
··· 39 39 #include <linux/etherdevice.h> 40 40 #include <linux/if_arp.h> 41 41 42 - 43 - /* use generic hash table */ 44 - #define MY_TAB_SIZE 8 45 - #define MY_TAB_MASK (MY_TAB_SIZE - 1) 46 - static u32 idx_gen; 47 - static struct tcf_mirred *tcf_mirred_ht[MY_TAB_SIZE]; 42 + #define MIRRED_TAB_MASK 7 43 + static struct tcf_common *tcf_mirred_ht[MIRRED_TAB_MASK + 1]; 44 + static u32 mirred_idx_gen; 48 45 static DEFINE_RWLOCK(mirred_lock); 49 46 50 - /* ovewrride the defaults */ 51 - #define tcf_st tcf_mirred 52 - #define tc_st tc_mirred 53 - #define tcf_t_lock mirred_lock 54 - #define tcf_ht tcf_mirred_ht 47 + static struct tcf_hashinfo mirred_hash_info = { 48 + .htab = tcf_mirred_ht, 49 + .hmask = MIRRED_TAB_MASK, 50 + .lock = &mirred_lock, 51 + }; 55 52 56 - #define CONFIG_NET_ACT_INIT 1 57 - #include <net/pkt_act.h> 58 - 59 - static inline int 60 - tcf_mirred_release(struct tcf_mirred *p, int bind) 53 + static inline int tcf_mirred_release(struct tcf_mirred *m, int bind) 61 54 { 62 - if (p) { 55 + if (m) { 63 56 if (bind) 64 - p->bindcnt--; 65 - p->refcnt--; 66 - if(!p->bindcnt && p->refcnt <= 0) { 67 - dev_put(p->dev); 68 - tcf_hash_destroy(p); 57 + m->tcf_bindcnt--; 58 + m->tcf_refcnt--; 59 + if(!m->tcf_bindcnt && m->tcf_refcnt <= 0) { 60 + dev_put(m->tcfm_dev); 61 + tcf_hash_destroy(&m->common, &mirred_hash_info); 69 62 return 1; 70 63 } 71 64 } 72 65 return 0; 73 66 } 74 67 75 - static int 76 - tcf_mirred_init(struct rtattr *rta, struct rtattr *est, struct tc_action *a, 77 - int ovr, int bind) 68 + static int tcf_mirred_init(struct rtattr *rta, struct rtattr *est, 69 + struct tc_action *a, int ovr, int bind) 78 70 { 79 71 struct rtattr *tb[TCA_MIRRED_MAX]; 80 72 struct tc_mirred *parm; 81 - struct tcf_mirred *p; 73 + struct tcf_mirred *m; 74 + struct tcf_common *pc; 82 75 struct net_device *dev = NULL; 83 76 int ret = 0; 84 77 int ok_push = 0; ··· 103 110 } 104 111 } 105 112 106 - p = tcf_hash_check(parm->index, a, ovr, bind); 107 - if (p == NULL) { 113 + pc 
= tcf_hash_check(parm->index, a, bind, &mirred_hash_info); 114 + if (!pc) { 108 115 if (!parm->ifindex) 109 116 return -EINVAL; 110 - p = tcf_hash_create(parm->index, est, a, sizeof(*p), ovr, bind); 111 - if (p == NULL) 117 + pc = tcf_hash_create(parm->index, est, a, sizeof(*m), bind, 118 + &mirred_idx_gen, &mirred_hash_info); 119 + if (unlikely(!pc)) 112 120 return -ENOMEM; 113 121 ret = ACT_P_CREATED; 114 122 } else { 115 123 if (!ovr) { 116 - tcf_mirred_release(p, bind); 124 + tcf_mirred_release(to_mirred(pc), bind); 117 125 return -EEXIST; 118 126 } 119 127 } 128 + m = to_mirred(pc); 120 129 121 - spin_lock_bh(&p->lock); 122 - p->action = parm->action; 123 - p->eaction = parm->eaction; 130 + spin_lock_bh(&m->tcf_lock); 131 + m->tcf_action = parm->action; 132 + m->tcfm_eaction = parm->eaction; 124 133 if (parm->ifindex) { 125 - p->ifindex = parm->ifindex; 134 + m->tcfm_ifindex = parm->ifindex; 126 135 if (ret != ACT_P_CREATED) 127 - dev_put(p->dev); 128 - p->dev = dev; 136 + dev_put(m->tcfm_dev); 137 + m->tcfm_dev = dev; 129 138 dev_hold(dev); 130 - p->ok_push = ok_push; 139 + m->tcfm_ok_push = ok_push; 131 140 } 132 - spin_unlock_bh(&p->lock); 141 + spin_unlock_bh(&m->tcf_lock); 133 142 if (ret == ACT_P_CREATED) 134 - tcf_hash_insert(p); 143 + tcf_hash_insert(pc, &mirred_hash_info); 135 144 136 - DPRINTK("tcf_mirred_init index %d action %d eaction %d device %s " 137 - "ifindex %d\n", parm->index, parm->action, parm->eaction, 138 - dev->name, parm->ifindex); 139 145 return ret; 140 146 } 141 147 142 - static int 143 - tcf_mirred_cleanup(struct tc_action *a, int bind) 148 + static int tcf_mirred_cleanup(struct tc_action *a, int bind) 144 149 { 145 - struct tcf_mirred *p = PRIV(a, mirred); 150 + struct tcf_mirred *m = a->priv; 146 151 147 - if (p != NULL) 148 - return tcf_mirred_release(p, bind); 152 + if (m) 153 + return tcf_mirred_release(m, bind); 149 154 return 0; 150 155 } 151 156 152 - static int 153 - tcf_mirred(struct sk_buff *skb, struct tc_action *a, 
struct tcf_result *res) 157 + static int tcf_mirred(struct sk_buff *skb, struct tc_action *a, 158 + struct tcf_result *res) 154 159 { 155 - struct tcf_mirred *p = PRIV(a, mirred); 160 + struct tcf_mirred *m = a->priv; 156 161 struct net_device *dev; 157 162 struct sk_buff *skb2 = NULL; 158 163 u32 at = G_TC_AT(skb->tc_verd); 159 164 160 - spin_lock(&p->lock); 165 + spin_lock(&m->tcf_lock); 161 166 162 - dev = p->dev; 163 - p->tm.lastuse = jiffies; 167 + dev = m->tcfm_dev; 168 + m->tcf_tm.lastuse = jiffies; 164 169 165 170 if (!(dev->flags&IFF_UP) ) { 166 171 if (net_ratelimit()) ··· 167 176 bad_mirred: 168 177 if (skb2 != NULL) 169 178 kfree_skb(skb2); 170 - p->qstats.overlimits++; 171 - p->bstats.bytes += skb->len; 172 - p->bstats.packets++; 173 - spin_unlock(&p->lock); 179 + m->tcf_qstats.overlimits++; 180 + m->tcf_bstats.bytes += skb->len; 181 + m->tcf_bstats.packets++; 182 + spin_unlock(&m->tcf_lock); 174 183 /* should we be asking for packet to be dropped? 175 184 * may make sense for redirect case only 176 185 */ ··· 180 189 skb2 = skb_clone(skb, GFP_ATOMIC); 181 190 if (skb2 == NULL) 182 191 goto bad_mirred; 183 - if (p->eaction != TCA_EGRESS_MIRROR && p->eaction != TCA_EGRESS_REDIR) { 192 + if (m->tcfm_eaction != TCA_EGRESS_MIRROR && 193 + m->tcfm_eaction != TCA_EGRESS_REDIR) { 184 194 if (net_ratelimit()) 185 - printk("tcf_mirred unknown action %d\n", p->eaction); 195 + printk("tcf_mirred unknown action %d\n", 196 + m->tcfm_eaction); 186 197 goto bad_mirred; 187 198 } 188 199 189 - p->bstats.bytes += skb2->len; 190 - p->bstats.packets++; 200 + m->tcf_bstats.bytes += skb2->len; 201 + m->tcf_bstats.packets++; 191 202 if (!(at & AT_EGRESS)) 192 - if (p->ok_push) 203 + if (m->tcfm_ok_push) 193 204 skb_push(skb2, skb2->dev->hard_header_len); 194 205 195 206 /* mirror is always swallowed */ 196 - if (p->eaction != TCA_EGRESS_MIRROR) 207 + if (m->tcfm_eaction != TCA_EGRESS_MIRROR) 197 208 skb2->tc_verd = SET_TC_FROM(skb2->tc_verd, at); 198 209 199 210 skb2->dev = 
dev; 200 211 skb2->input_dev = skb->dev; 201 212 dev_queue_xmit(skb2); 202 - spin_unlock(&p->lock); 203 - return p->action; 213 + spin_unlock(&m->tcf_lock); 214 + return m->tcf_action; 204 215 } 205 216 206 - static int 207 - tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) 217 + static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) 208 218 { 209 219 unsigned char *b = skb->tail; 220 + struct tcf_mirred *m = a->priv; 210 221 struct tc_mirred opt; 211 - struct tcf_mirred *p = PRIV(a, mirred); 212 222 struct tcf_t t; 213 223 214 - opt.index = p->index; 215 - opt.action = p->action; 216 - opt.refcnt = p->refcnt - ref; 217 - opt.bindcnt = p->bindcnt - bind; 218 - opt.eaction = p->eaction; 219 - opt.ifindex = p->ifindex; 220 - DPRINTK("tcf_mirred_dump index %d action %d eaction %d ifindex %d\n", 221 - p->index, p->action, p->eaction, p->ifindex); 224 + opt.index = m->tcf_index; 225 + opt.action = m->tcf_action; 226 + opt.refcnt = m->tcf_refcnt - ref; 227 + opt.bindcnt = m->tcf_bindcnt - bind; 228 + opt.eaction = m->tcfm_eaction; 229 + opt.ifindex = m->tcfm_ifindex; 222 230 RTA_PUT(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt); 223 - t.install = jiffies_to_clock_t(jiffies - p->tm.install); 224 - t.lastuse = jiffies_to_clock_t(jiffies - p->tm.lastuse); 225 - t.expires = jiffies_to_clock_t(p->tm.expires); 231 + t.install = jiffies_to_clock_t(jiffies - m->tcf_tm.install); 232 + t.lastuse = jiffies_to_clock_t(jiffies - m->tcf_tm.lastuse); 233 + t.expires = jiffies_to_clock_t(m->tcf_tm.expires); 226 234 RTA_PUT(skb, TCA_MIRRED_TM, sizeof(t), &t); 227 235 return skb->len; 228 236 229 - rtattr_failure: 237 + rtattr_failure: 230 238 skb_trim(skb, b - skb->data); 231 239 return -1; 232 240 } 233 241 234 242 static struct tc_action_ops act_mirred_ops = { 235 243 .kind = "mirred", 244 + .hinfo = &mirred_hash_info, 236 245 .type = TCA_ACT_MIRRED, 237 246 .capab = TCA_CAP_NONE, 238 247 .owner = THIS_MODULE, ··· 248 257 
MODULE_DESCRIPTION("Device Mirror/redirect actions"); 249 258 MODULE_LICENSE("GPL"); 250 259 251 - static int __init 252 - mirred_init_module(void) 260 + static int __init mirred_init_module(void) 253 261 { 254 262 printk("Mirror/redirect action on\n"); 255 263 return tcf_register_action(&act_mirred_ops); 256 264 } 257 265 258 - static void __exit 259 - mirred_cleanup_module(void) 266 + static void __exit mirred_cleanup_module(void) 260 267 { 261 268 tcf_unregister_action(&act_mirred_ops); 262 269 }
+72 -94
net/sched/act_pedit.c
··· 33 33 #include <linux/tc_act/tc_pedit.h> 34 34 #include <net/tc_act/tc_pedit.h> 35 35 36 - 37 - #define PEDIT_DEB 1 38 - 39 - /* use generic hash table */ 40 - #define MY_TAB_SIZE 16 41 - #define MY_TAB_MASK 15 42 - static u32 idx_gen; 43 - static struct tcf_pedit *tcf_pedit_ht[MY_TAB_SIZE]; 36 + #define PEDIT_TAB_MASK 15 37 + static struct tcf_common *tcf_pedit_ht[PEDIT_TAB_MASK + 1]; 38 + static u32 pedit_idx_gen; 44 39 static DEFINE_RWLOCK(pedit_lock); 45 40 46 - #define tcf_st tcf_pedit 47 - #define tc_st tc_pedit 48 - #define tcf_t_lock pedit_lock 49 - #define tcf_ht tcf_pedit_ht 41 + static struct tcf_hashinfo pedit_hash_info = { 42 + .htab = tcf_pedit_ht, 43 + .hmask = PEDIT_TAB_MASK, 44 + .lock = &pedit_lock, 45 + }; 50 46 51 - #define CONFIG_NET_ACT_INIT 1 52 - #include <net/pkt_act.h> 53 - 54 - static int 55 - tcf_pedit_init(struct rtattr *rta, struct rtattr *est, struct tc_action *a, 56 - int ovr, int bind) 47 + static int tcf_pedit_init(struct rtattr *rta, struct rtattr *est, 48 + struct tc_action *a, int ovr, int bind) 57 49 { 58 50 struct rtattr *tb[TCA_PEDIT_MAX]; 59 51 struct tc_pedit *parm; 60 52 int ret = 0; 61 53 struct tcf_pedit *p; 54 + struct tcf_common *pc; 62 55 struct tc_pedit_key *keys = NULL; 63 56 int ksize; 64 57 ··· 66 73 if (RTA_PAYLOAD(tb[TCA_PEDIT_PARMS-1]) < sizeof(*parm) + ksize) 67 74 return -EINVAL; 68 75 69 - p = tcf_hash_check(parm->index, a, ovr, bind); 70 - if (p == NULL) { 76 + pc = tcf_hash_check(parm->index, a, bind, &pedit_hash_info); 77 + if (!pc) { 71 78 if (!parm->nkeys) 72 79 return -EINVAL; 73 - p = tcf_hash_create(parm->index, est, a, sizeof(*p), ovr, bind); 74 - if (p == NULL) 80 + pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind, 81 + &pedit_idx_gen, &pedit_hash_info); 82 + if (unlikely(!pc)) 75 83 return -ENOMEM; 84 + p = to_pedit(pc); 76 85 keys = kmalloc(ksize, GFP_KERNEL); 77 86 if (keys == NULL) { 78 - kfree(p); 87 + kfree(pc); 79 88 return -ENOMEM; 80 89 } 81 90 ret = ACT_P_CREATED; 82 91 } 
else { 92 + p = to_pedit(pc); 83 93 if (!ovr) { 84 - tcf_hash_release(p, bind); 94 + tcf_hash_release(pc, bind, &pedit_hash_info); 85 95 return -EEXIST; 86 96 } 87 - if (p->nkeys && p->nkeys != parm->nkeys) { 97 + if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) { 88 98 keys = kmalloc(ksize, GFP_KERNEL); 89 99 if (keys == NULL) 90 100 return -ENOMEM; 91 101 } 92 102 } 93 103 94 - spin_lock_bh(&p->lock); 95 - p->flags = parm->flags; 96 - p->action = parm->action; 104 + spin_lock_bh(&p->tcf_lock); 105 + p->tcfp_flags = parm->flags; 106 + p->tcf_action = parm->action; 97 107 if (keys) { 98 - kfree(p->keys); 99 - p->keys = keys; 100 - p->nkeys = parm->nkeys; 108 + kfree(p->tcfp_keys); 109 + p->tcfp_keys = keys; 110 + p->tcfp_nkeys = parm->nkeys; 101 111 } 102 - memcpy(p->keys, parm->keys, ksize); 103 - spin_unlock_bh(&p->lock); 112 + memcpy(p->tcfp_keys, parm->keys, ksize); 113 + spin_unlock_bh(&p->tcf_lock); 104 114 if (ret == ACT_P_CREATED) 105 - tcf_hash_insert(p); 115 + tcf_hash_insert(pc, &pedit_hash_info); 106 116 return ret; 107 117 } 108 118 109 - static int 110 - tcf_pedit_cleanup(struct tc_action *a, int bind) 119 + static int tcf_pedit_cleanup(struct tc_action *a, int bind) 111 120 { 112 - struct tcf_pedit *p = PRIV(a, pedit); 121 + struct tcf_pedit *p = a->priv; 113 122 114 - if (p != NULL) { 115 - struct tc_pedit_key *keys = p->keys; 116 - if (tcf_hash_release(p, bind)) { 123 + if (p) { 124 + struct tc_pedit_key *keys = p->tcfp_keys; 125 + if (tcf_hash_release(&p->common, bind, &pedit_hash_info)) { 117 126 kfree(keys); 118 127 return 1; 119 128 } ··· 123 128 return 0; 124 129 } 125 130 126 - static int 127 - tcf_pedit(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res) 131 + static int tcf_pedit(struct sk_buff *skb, struct tc_action *a, 132 + struct tcf_result *res) 128 133 { 129 - struct tcf_pedit *p = PRIV(a, pedit); 134 + struct tcf_pedit *p = a->priv; 130 135 int i, munged = 0; 131 136 u8 *pptr; 132 137 133 138 if (!(skb->tc_verd & 
TC_OK2MUNGE)) { 134 139 /* should we set skb->cloned? */ 135 140 if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { 136 - return p->action; 141 + return p->tcf_action; 137 142 } 138 143 } 139 144 140 145 pptr = skb->nh.raw; 141 146 142 - spin_lock(&p->lock); 147 + spin_lock(&p->tcf_lock); 143 148 144 - p->tm.lastuse = jiffies; 149 + p->tcf_tm.lastuse = jiffies; 145 150 146 - if (p->nkeys > 0) { 147 - struct tc_pedit_key *tkey = p->keys; 151 + if (p->tcfp_nkeys > 0) { 152 + struct tc_pedit_key *tkey = p->tcfp_keys; 148 153 149 - for (i = p->nkeys; i > 0; i--, tkey++) { 154 + for (i = p->tcfp_nkeys; i > 0; i--, tkey++) { 150 155 u32 *ptr; 151 156 int offset = tkey->off; 152 157 ··· 164 169 printk("offset must be on 32 bit boundaries\n"); 165 170 goto bad; 166 171 } 167 - if (skb->len < 0 || (offset > 0 && offset > skb->len)) { 172 + if (skb->len < 0 || 173 + (offset > 0 && offset > skb->len)) { 168 174 printk("offset %d cant exceed pkt length %d\n", 169 175 offset, skb->len); 170 176 goto bad; ··· 181 185 skb->tc_verd = SET_TC_MUNGED(skb->tc_verd); 182 186 goto done; 183 187 } else { 184 - printk("pedit BUG: index %d\n",p->index); 188 + printk("pedit BUG: index %d\n", p->tcf_index); 185 189 } 186 190 187 191 bad: 188 - p->qstats.overlimits++; 192 + p->tcf_qstats.overlimits++; 189 193 done: 190 - p->bstats.bytes += skb->len; 191 - p->bstats.packets++; 192 - spin_unlock(&p->lock); 193 - return p->action; 194 + p->tcf_bstats.bytes += skb->len; 195 + p->tcf_bstats.packets++; 196 + spin_unlock(&p->tcf_lock); 197 + return p->tcf_action; 194 198 } 195 199 196 - static int 197 - tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,int bind, int ref) 200 + static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a, 201 + int bind, int ref) 198 202 { 199 203 unsigned char *b = skb->tail; 204 + struct tcf_pedit *p = a->priv; 200 205 struct tc_pedit *opt; 201 - struct tcf_pedit *p = PRIV(a, pedit); 202 206 struct tcf_t t; 203 207 int s; 204 208 205 - s = sizeof(*opt) + 
p->nkeys * sizeof(struct tc_pedit_key); 209 + s = sizeof(*opt) + p->tcfp_nkeys * sizeof(struct tc_pedit_key); 206 210 207 211 /* netlink spinlocks held above us - must use ATOMIC */ 208 212 opt = kzalloc(s, GFP_ATOMIC); 209 - if (opt == NULL) 213 + if (unlikely(!opt)) 210 214 return -ENOBUFS; 211 215 212 - memcpy(opt->keys, p->keys, p->nkeys * sizeof(struct tc_pedit_key)); 213 - opt->index = p->index; 214 - opt->nkeys = p->nkeys; 215 - opt->flags = p->flags; 216 - opt->action = p->action; 217 - opt->refcnt = p->refcnt - ref; 218 - opt->bindcnt = p->bindcnt - bind; 219 - 220 - 221 - #ifdef PEDIT_DEB 222 - { 223 - /* Debug - get rid of later */ 224 - int i; 225 - struct tc_pedit_key *key = opt->keys; 226 - 227 - for (i=0; i<opt->nkeys; i++, key++) { 228 - printk( "\n key #%d",i); 229 - printk( " at %d: val %08x mask %08x", 230 - (unsigned int)key->off, 231 - (unsigned int)key->val, 232 - (unsigned int)key->mask); 233 - } 234 - } 235 - #endif 216 + memcpy(opt->keys, p->tcfp_keys, 217 + p->tcfp_nkeys * sizeof(struct tc_pedit_key)); 218 + opt->index = p->tcf_index; 219 + opt->nkeys = p->tcfp_nkeys; 220 + opt->flags = p->tcfp_flags; 221 + opt->action = p->tcf_action; 222 + opt->refcnt = p->tcf_refcnt - ref; 223 + opt->bindcnt = p->tcf_bindcnt - bind; 236 224 237 225 RTA_PUT(skb, TCA_PEDIT_PARMS, s, opt); 238 - t.install = jiffies_to_clock_t(jiffies - p->tm.install); 239 - t.lastuse = jiffies_to_clock_t(jiffies - p->tm.lastuse); 240 - t.expires = jiffies_to_clock_t(p->tm.expires); 226 + t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install); 227 + t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse); 228 + t.expires = jiffies_to_clock_t(p->tcf_tm.expires); 241 229 RTA_PUT(skb, TCA_PEDIT_TM, sizeof(t), &t); 242 230 kfree(opt); 243 231 return skb->len; ··· 232 252 return -1; 233 253 } 234 254 235 - static 236 - struct tc_action_ops act_pedit_ops = { 255 + static struct tc_action_ops act_pedit_ops = { 237 256 .kind = "pedit", 257 + .hinfo = &pedit_hash_info, 238 
258 .type = TCA_ACT_PEDIT, 239 259 .capab = TCA_CAP_NONE, 240 260 .owner = THIS_MODULE, ··· 250 270 MODULE_DESCRIPTION("Generic Packet Editor actions"); 251 271 MODULE_LICENSE("GPL"); 252 272 253 - static int __init 254 - pedit_init_module(void) 273 + static int __init pedit_init_module(void) 255 274 { 256 275 return tcf_register_action(&act_pedit_ops); 257 276 } 258 277 259 - static void __exit 260 - pedit_cleanup_module(void) 278 + static void __exit pedit_cleanup_module(void) 261 279 { 262 280 tcf_unregister_action(&act_pedit_ops); 263 281 }
+258 -246
net/sched/act_police.c
··· 32 32 #include <net/sock.h> 33 33 #include <net/act_api.h> 34 34 35 - #define L2T(p,L) ((p)->R_tab->data[(L)>>(p)->R_tab->rate.cell_log]) 36 - #define L2T_P(p,L) ((p)->P_tab->data[(L)>>(p)->P_tab->rate.cell_log]) 37 - #define PRIV(a) ((struct tcf_police *) (a)->priv) 35 + #define L2T(p,L) ((p)->tcfp_R_tab->data[(L)>>(p)->tcfp_R_tab->rate.cell_log]) 36 + #define L2T_P(p,L) ((p)->tcfp_P_tab->data[(L)>>(p)->tcfp_P_tab->rate.cell_log]) 38 37 39 - /* use generic hash table */ 40 - #define MY_TAB_SIZE 16 41 - #define MY_TAB_MASK 15 42 - static u32 idx_gen; 43 - static struct tcf_police *tcf_police_ht[MY_TAB_SIZE]; 44 - /* Policer hash table lock */ 38 + #define POL_TAB_MASK 15 39 + static struct tcf_common *tcf_police_ht[POL_TAB_MASK + 1]; 40 + static u32 police_idx_gen; 45 41 static DEFINE_RWLOCK(police_lock); 46 42 43 + static struct tcf_hashinfo police_hash_info = { 44 + .htab = tcf_police_ht, 45 + .hmask = POL_TAB_MASK, 46 + .lock = &police_lock, 47 + }; 48 + 47 49 /* Each policer is serialized by its individual spinlock */ 48 - 49 - static __inline__ unsigned tcf_police_hash(u32 index) 50 - { 51 - return index&0xF; 52 - } 53 - 54 - static __inline__ struct tcf_police * tcf_police_lookup(u32 index) 55 - { 56 - struct tcf_police *p; 57 - 58 - read_lock(&police_lock); 59 - for (p = tcf_police_ht[tcf_police_hash(index)]; p; p = p->next) { 60 - if (p->index == index) 61 - break; 62 - } 63 - read_unlock(&police_lock); 64 - return p; 65 - } 66 50 67 51 #ifdef CONFIG_NET_CLS_ACT 68 52 static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *cb, 69 53 int type, struct tc_action *a) 70 54 { 71 - struct tcf_police *p; 55 + struct tcf_common *p; 72 56 int err = 0, index = -1, i = 0, s_i = 0, n_i = 0; 73 57 struct rtattr *r; 74 58 ··· 60 76 61 77 s_i = cb->args[0]; 62 78 63 - for (i = 0; i < MY_TAB_SIZE; i++) { 64 - p = tcf_police_ht[tcf_police_hash(i)]; 79 + for (i = 0; i < (POL_TAB_MASK + 1); i++) { 80 + p = tcf_police_ht[tcf_hash(i, POL_TAB_MASK)]; 
65 81 66 - for (; p; p = p->next) { 82 + for (; p; p = p->tcfc_next) { 67 83 index++; 68 84 if (index < s_i) 69 85 continue; ··· 94 110 skb_trim(skb, (u8*)r - skb->data); 95 111 goto done; 96 112 } 97 - 98 - static inline int 99 - tcf_act_police_hash_search(struct tc_action *a, u32 index) 100 - { 101 - struct tcf_police *p = tcf_police_lookup(index); 102 - 103 - if (p != NULL) { 104 - a->priv = p; 105 - return 1; 106 - } else { 107 - return 0; 108 - } 109 - } 110 113 #endif 111 - 112 - static inline u32 tcf_police_new_index(void) 113 - { 114 - do { 115 - if (++idx_gen == 0) 116 - idx_gen = 1; 117 - } while (tcf_police_lookup(idx_gen)); 118 - 119 - return idx_gen; 120 - } 121 114 122 115 void tcf_police_destroy(struct tcf_police *p) 123 116 { 124 - unsigned h = tcf_police_hash(p->index); 125 - struct tcf_police **p1p; 117 + unsigned int h = tcf_hash(p->tcf_index, POL_TAB_MASK); 118 + struct tcf_common **p1p; 126 119 127 - for (p1p = &tcf_police_ht[h]; *p1p; p1p = &(*p1p)->next) { 128 - if (*p1p == p) { 120 + for (p1p = &tcf_police_ht[h]; *p1p; p1p = &(*p1p)->tcfc_next) { 121 + if (*p1p == &p->common) { 129 122 write_lock_bh(&police_lock); 130 - *p1p = p->next; 123 + *p1p = p->tcf_next; 131 124 write_unlock_bh(&police_lock); 132 125 #ifdef CONFIG_NET_ESTIMATOR 133 - gen_kill_estimator(&p->bstats, &p->rate_est); 126 + gen_kill_estimator(&p->tcf_bstats, 127 + &p->tcf_rate_est); 134 128 #endif 135 - if (p->R_tab) 136 - qdisc_put_rtab(p->R_tab); 137 - if (p->P_tab) 138 - qdisc_put_rtab(p->P_tab); 129 + if (p->tcfp_R_tab) 130 + qdisc_put_rtab(p->tcfp_R_tab); 131 + if (p->tcfp_P_tab) 132 + qdisc_put_rtab(p->tcfp_P_tab); 139 133 kfree(p); 140 134 return; 141 135 } ··· 129 167 int ret = 0, err; 130 168 struct rtattr *tb[TCA_POLICE_MAX]; 131 169 struct tc_police *parm; 132 - struct tcf_police *p; 170 + struct tcf_police *police; 133 171 struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL; 134 172 135 173 if (rta == NULL || rtattr_parse_nested(tb, TCA_POLICE_MAX, rta) < 0) 
··· 147 185 RTA_PAYLOAD(tb[TCA_POLICE_RESULT-1]) != sizeof(u32)) 148 186 return -EINVAL; 149 187 150 - if (parm->index && (p = tcf_police_lookup(parm->index)) != NULL) { 151 - a->priv = p; 152 - if (bind) { 153 - p->bindcnt += 1; 154 - p->refcnt += 1; 188 + if (parm->index) { 189 + struct tcf_common *pc; 190 + 191 + pc = tcf_hash_lookup(parm->index, &police_hash_info); 192 + if (pc != NULL) { 193 + a->priv = pc; 194 + police = to_police(pc); 195 + if (bind) { 196 + police->tcf_bindcnt += 1; 197 + police->tcf_refcnt += 1; 198 + } 199 + if (ovr) 200 + goto override; 201 + return ret; 155 202 } 156 - if (ovr) 157 - goto override; 158 - return ret; 159 203 } 160 204 161 - p = kzalloc(sizeof(*p), GFP_KERNEL); 162 - if (p == NULL) 205 + police = kzalloc(sizeof(*police), GFP_KERNEL); 206 + if (police == NULL) 163 207 return -ENOMEM; 164 - 165 208 ret = ACT_P_CREATED; 166 - p->refcnt = 1; 167 - spin_lock_init(&p->lock); 168 - p->stats_lock = &p->lock; 209 + police->tcf_refcnt = 1; 210 + spin_lock_init(&police->tcf_lock); 211 + police->tcf_stats_lock = &police->tcf_lock; 169 212 if (bind) 170 - p->bindcnt = 1; 213 + police->tcf_bindcnt = 1; 171 214 override: 172 215 if (parm->rate.rate) { 173 216 err = -ENOMEM; ··· 182 215 if (parm->peakrate.rate) { 183 216 P_tab = qdisc_get_rtab(&parm->peakrate, 184 217 tb[TCA_POLICE_PEAKRATE-1]); 185 - if (p->P_tab == NULL) { 218 + if (P_tab == NULL) { 186 219 qdisc_put_rtab(R_tab); 187 220 goto failure; 188 221 } 189 222 } 190 223 } 191 224 /* No failure allowed after this point */ 192 - spin_lock_bh(&p->lock); 225 + spin_lock_bh(&police->tcf_lock); 193 226 if (R_tab != NULL) { 194 - qdisc_put_rtab(p->R_tab); 195 - p->R_tab = R_tab; 227 + qdisc_put_rtab(police->tcfp_R_tab); 228 + police->tcfp_R_tab = R_tab; 196 229 } 197 230 if (P_tab != NULL) { 198 - qdisc_put_rtab(p->P_tab); 199 - p->P_tab = P_tab; 231 + qdisc_put_rtab(police->tcfp_P_tab); 232 + police->tcfp_P_tab = P_tab; 200 233 } 201 234 202 235 if (tb[TCA_POLICE_RESULT-1]) 203 - 
p->result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]); 204 - p->toks = p->burst = parm->burst; 205 - p->mtu = parm->mtu; 206 - if (p->mtu == 0) { 207 - p->mtu = ~0; 208 - if (p->R_tab) 209 - p->mtu = 255<<p->R_tab->rate.cell_log; 236 + police->tcfp_result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]); 237 + police->tcfp_toks = police->tcfp_burst = parm->burst; 238 + police->tcfp_mtu = parm->mtu; 239 + if (police->tcfp_mtu == 0) { 240 + police->tcfp_mtu = ~0; 241 + if (police->tcfp_R_tab) 242 + police->tcfp_mtu = 255<<police->tcfp_R_tab->rate.cell_log; 210 243 } 211 - if (p->P_tab) 212 - p->ptoks = L2T_P(p, p->mtu); 213 - p->action = parm->action; 244 + if (police->tcfp_P_tab) 245 + police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu); 246 + police->tcf_action = parm->action; 214 247 215 248 #ifdef CONFIG_NET_ESTIMATOR 216 249 if (tb[TCA_POLICE_AVRATE-1]) 217 - p->ewma_rate = *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]); 250 + police->tcfp_ewma_rate = 251 + *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]); 218 252 if (est) 219 - gen_replace_estimator(&p->bstats, &p->rate_est, p->stats_lock, est); 253 + gen_replace_estimator(&police->tcf_bstats, 254 + &police->tcf_rate_est, 255 + police->tcf_stats_lock, est); 220 256 #endif 221 257 222 - spin_unlock_bh(&p->lock); 258 + spin_unlock_bh(&police->tcf_lock); 223 259 if (ret != ACT_P_CREATED) 224 260 return ret; 225 261 226 - PSCHED_GET_TIME(p->t_c); 227 - p->index = parm->index ? : tcf_police_new_index(); 228 - h = tcf_police_hash(p->index); 262 + PSCHED_GET_TIME(police->tcfp_t_c); 263 + police->tcf_index = parm->index ? 
parm->index : 264 + tcf_hash_new_index(&police_idx_gen, &police_hash_info); 265 + h = tcf_hash(police->tcf_index, POL_TAB_MASK); 229 266 write_lock_bh(&police_lock); 230 - p->next = tcf_police_ht[h]; 231 - tcf_police_ht[h] = p; 267 + police->tcf_next = tcf_police_ht[h]; 268 + tcf_police_ht[h] = &police->common; 232 269 write_unlock_bh(&police_lock); 233 270 234 - a->priv = p; 271 + a->priv = police; 235 272 return ret; 236 273 237 274 failure: 238 275 if (ret == ACT_P_CREATED) 239 - kfree(p); 276 + kfree(police); 240 277 return err; 241 278 } 242 279 243 280 static int tcf_act_police_cleanup(struct tc_action *a, int bind) 244 281 { 245 - struct tcf_police *p = PRIV(a); 282 + struct tcf_police *p = a->priv; 246 283 247 284 if (p != NULL) 248 285 return tcf_police_release(p, bind); ··· 256 285 static int tcf_act_police(struct sk_buff *skb, struct tc_action *a, 257 286 struct tcf_result *res) 258 287 { 288 + struct tcf_police *police = a->priv; 259 289 psched_time_t now; 260 - struct tcf_police *p = PRIV(a); 261 290 long toks; 262 291 long ptoks = 0; 263 292 264 - spin_lock(&p->lock); 293 + spin_lock(&police->tcf_lock); 265 294 266 - p->bstats.bytes += skb->len; 267 - p->bstats.packets++; 295 + police->tcf_bstats.bytes += skb->len; 296 + police->tcf_bstats.packets++; 268 297 269 298 #ifdef CONFIG_NET_ESTIMATOR 270 - if (p->ewma_rate && p->rate_est.bps >= p->ewma_rate) { 271 - p->qstats.overlimits++; 272 - spin_unlock(&p->lock); 273 - return p->action; 299 + if (police->tcfp_ewma_rate && 300 + police->tcf_rate_est.bps >= police->tcfp_ewma_rate) { 301 + police->tcf_qstats.overlimits++; 302 + spin_unlock(&police->tcf_lock); 303 + return police->tcf_action; 274 304 } 275 305 #endif 276 306 277 - if (skb->len <= p->mtu) { 278 - if (p->R_tab == NULL) { 279 - spin_unlock(&p->lock); 280 - return p->result; 307 + if (skb->len <= police->tcfp_mtu) { 308 + if (police->tcfp_R_tab == NULL) { 309 + spin_unlock(&police->tcf_lock); 310 + return police->tcfp_result; 281 311 } 282 312 
283 313 PSCHED_GET_TIME(now); 284 314 285 - toks = PSCHED_TDIFF_SAFE(now, p->t_c, p->burst); 286 - 287 - if (p->P_tab) { 288 - ptoks = toks + p->ptoks; 289 - if (ptoks > (long)L2T_P(p, p->mtu)) 290 - ptoks = (long)L2T_P(p, p->mtu); 291 - ptoks -= L2T_P(p, skb->len); 315 + toks = PSCHED_TDIFF_SAFE(now, police->tcfp_t_c, 316 + police->tcfp_burst); 317 + if (police->tcfp_P_tab) { 318 + ptoks = toks + police->tcfp_ptoks; 319 + if (ptoks > (long)L2T_P(police, police->tcfp_mtu)) 320 + ptoks = (long)L2T_P(police, police->tcfp_mtu); 321 + ptoks -= L2T_P(police, skb->len); 292 322 } 293 - toks += p->toks; 294 - if (toks > (long)p->burst) 295 - toks = p->burst; 296 - toks -= L2T(p, skb->len); 297 - 323 + toks += police->tcfp_toks; 324 + if (toks > (long)police->tcfp_burst) 325 + toks = police->tcfp_burst; 326 + toks -= L2T(police, skb->len); 298 327 if ((toks|ptoks) >= 0) { 299 - p->t_c = now; 300 - p->toks = toks; 301 - p->ptoks = ptoks; 302 - spin_unlock(&p->lock); 303 - return p->result; 328 + police->tcfp_t_c = now; 329 + police->tcfp_toks = toks; 330 + police->tcfp_ptoks = ptoks; 331 + spin_unlock(&police->tcf_lock); 332 + return police->tcfp_result; 304 333 } 305 334 } 306 335 307 - p->qstats.overlimits++; 308 - spin_unlock(&p->lock); 309 - return p->action; 336 + police->tcf_qstats.overlimits++; 337 + spin_unlock(&police->tcf_lock); 338 + return police->tcf_action; 310 339 } 311 340 312 341 static int 313 342 tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) 314 343 { 315 344 unsigned char *b = skb->tail; 345 + struct tcf_police *police = a->priv; 316 346 struct tc_police opt; 317 - struct tcf_police *p = PRIV(a); 318 347 319 - opt.index = p->index; 320 - opt.action = p->action; 321 - opt.mtu = p->mtu; 322 - opt.burst = p->burst; 323 - opt.refcnt = p->refcnt - ref; 324 - opt.bindcnt = p->bindcnt - bind; 325 - if (p->R_tab) 326 - opt.rate = p->R_tab->rate; 348 + opt.index = police->tcf_index; 349 + opt.action = police->tcf_action; 350 + 
opt.mtu = police->tcfp_mtu; 351 + opt.burst = police->tcfp_burst; 352 + opt.refcnt = police->tcf_refcnt - ref; 353 + opt.bindcnt = police->tcf_bindcnt - bind; 354 + if (police->tcfp_R_tab) 355 + opt.rate = police->tcfp_R_tab->rate; 327 356 else 328 357 memset(&opt.rate, 0, sizeof(opt.rate)); 329 - if (p->P_tab) 330 - opt.peakrate = p->P_tab->rate; 358 + if (police->tcfp_P_tab) 359 + opt.peakrate = police->tcfp_P_tab->rate; 331 360 else 332 361 memset(&opt.peakrate, 0, sizeof(opt.peakrate)); 333 362 RTA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt); 334 - if (p->result) 335 - RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int), &p->result); 363 + if (police->tcfp_result) 364 + RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int), 365 + &police->tcfp_result); 336 366 #ifdef CONFIG_NET_ESTIMATOR 337 - if (p->ewma_rate) 338 - RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &p->ewma_rate); 367 + if (police->tcfp_ewma_rate) 368 + RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate); 339 369 #endif 340 370 return skb->len; 341 371 ··· 351 379 352 380 static struct tc_action_ops act_police_ops = { 353 381 .kind = "police", 382 + .hinfo = &police_hash_info, 354 383 .type = TCA_ID_POLICE, 355 384 .capab = TCA_CAP_NONE, 356 385 .owner = THIS_MODULE, 357 386 .act = tcf_act_police, 358 387 .dump = tcf_act_police_dump, 359 388 .cleanup = tcf_act_police_cleanup, 360 - .lookup = tcf_act_police_hash_search, 389 + .lookup = tcf_hash_search, 361 390 .init = tcf_act_police_locate, 362 391 .walk = tcf_act_police_walker 363 392 }; ··· 380 407 381 408 #else /* CONFIG_NET_CLS_ACT */ 382 409 383 - struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr *est) 410 + static struct tcf_common *tcf_police_lookup(u32 index) 384 411 { 385 - unsigned h; 386 - struct tcf_police *p; 412 + struct tcf_hashinfo *hinfo = &police_hash_info; 413 + struct tcf_common *p; 414 + 415 + read_lock(hinfo->lock); 416 + for (p = hinfo->htab[tcf_hash(index, hinfo->hmask)]; p; 417 + p = p->tcfc_next) { 418 + if (p->tcfc_index 
== index) 419 + break; 420 + } 421 + read_unlock(hinfo->lock); 422 + 423 + return p; 424 + } 425 + 426 + static u32 tcf_police_new_index(void) 427 + { 428 + u32 *idx_gen = &police_idx_gen; 429 + u32 val = *idx_gen; 430 + 431 + do { 432 + if (++val == 0) 433 + val = 1; 434 + } while (tcf_police_lookup(val)); 435 + 436 + return (*idx_gen = val); 437 + } 438 + 439 + struct tcf_police *tcf_police_locate(struct rtattr *rta, struct rtattr *est) 440 + { 441 + unsigned int h; 442 + struct tcf_police *police; 387 443 struct rtattr *tb[TCA_POLICE_MAX]; 388 444 struct tc_police *parm; 389 445 ··· 425 423 426 424 parm = RTA_DATA(tb[TCA_POLICE_TBF-1]); 427 425 428 - if (parm->index && (p = tcf_police_lookup(parm->index)) != NULL) { 429 - p->refcnt++; 430 - return p; 431 - } 426 + if (parm->index) { 427 + struct tcf_common *pc; 432 428 433 - p = kzalloc(sizeof(*p), GFP_KERNEL); 434 - if (p == NULL) 429 + pc = tcf_police_lookup(parm->index); 430 + if (pc) { 431 + police = to_police(pc); 432 + police->tcf_refcnt++; 433 + return police; 434 + } 435 + } 436 + police = kzalloc(sizeof(*police), GFP_KERNEL); 437 + if (unlikely(!police)) 435 438 return NULL; 436 439 437 - p->refcnt = 1; 438 - spin_lock_init(&p->lock); 439 - p->stats_lock = &p->lock; 440 + police->tcf_refcnt = 1; 441 + spin_lock_init(&police->tcf_lock); 442 + police->tcf_stats_lock = &police->tcf_lock; 440 443 if (parm->rate.rate) { 441 - p->R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1]); 442 - if (p->R_tab == NULL) 444 + police->tcfp_R_tab = 445 + qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1]); 446 + if (police->tcfp_R_tab == NULL) 443 447 goto failure; 444 448 if (parm->peakrate.rate) { 445 - p->P_tab = qdisc_get_rtab(&parm->peakrate, 446 - tb[TCA_POLICE_PEAKRATE-1]); 447 - if (p->P_tab == NULL) 449 + police->tcfp_P_tab = 450 + qdisc_get_rtab(&parm->peakrate, 451 + tb[TCA_POLICE_PEAKRATE-1]); 452 + if (police->tcfp_P_tab == NULL) 448 453 goto failure; 449 454 } 450 455 } 451 456 if 
(tb[TCA_POLICE_RESULT-1]) { 452 457 if (RTA_PAYLOAD(tb[TCA_POLICE_RESULT-1]) != sizeof(u32)) 453 458 goto failure; 454 - p->result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]); 459 + police->tcfp_result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]); 455 460 } 456 461 #ifdef CONFIG_NET_ESTIMATOR 457 462 if (tb[TCA_POLICE_AVRATE-1]) { 458 463 if (RTA_PAYLOAD(tb[TCA_POLICE_AVRATE-1]) != sizeof(u32)) 459 464 goto failure; 460 - p->ewma_rate = *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]); 465 + police->tcfp_ewma_rate = 466 + *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]); 461 467 } 462 468 #endif 463 - p->toks = p->burst = parm->burst; 464 - p->mtu = parm->mtu; 465 - if (p->mtu == 0) { 466 - p->mtu = ~0; 467 - if (p->R_tab) 468 - p->mtu = 255<<p->R_tab->rate.cell_log; 469 + police->tcfp_toks = police->tcfp_burst = parm->burst; 470 + police->tcfp_mtu = parm->mtu; 471 + if (police->tcfp_mtu == 0) { 472 + police->tcfp_mtu = ~0; 473 + if (police->tcfp_R_tab) 474 + police->tcfp_mtu = 255<<police->tcfp_R_tab->rate.cell_log; 469 475 } 470 - if (p->P_tab) 471 - p->ptoks = L2T_P(p, p->mtu); 472 - PSCHED_GET_TIME(p->t_c); 473 - p->index = parm->index ? : tcf_police_new_index(); 474 - p->action = parm->action; 476 + if (police->tcfp_P_tab) 477 + police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu); 478 + PSCHED_GET_TIME(police->tcfp_t_c); 479 + police->tcf_index = parm->index ? 
parm->index : 480 + tcf_police_new_index(); 481 + police->tcf_action = parm->action; 475 482 #ifdef CONFIG_NET_ESTIMATOR 476 483 if (est) 477 - gen_new_estimator(&p->bstats, &p->rate_est, p->stats_lock, est); 484 + gen_new_estimator(&police->tcf_bstats, &police->tcf_rate_est, 485 + police->tcf_stats_lock, est); 478 486 #endif 479 - h = tcf_police_hash(p->index); 487 + h = tcf_hash(police->tcf_index, POL_TAB_MASK); 480 488 write_lock_bh(&police_lock); 481 - p->next = tcf_police_ht[h]; 482 - tcf_police_ht[h] = p; 489 + police->tcf_next = tcf_police_ht[h]; 490 + tcf_police_ht[h] = &police->common; 483 491 write_unlock_bh(&police_lock); 484 - return p; 492 + return police; 485 493 486 494 failure: 487 - if (p->R_tab) 488 - qdisc_put_rtab(p->R_tab); 489 - kfree(p); 495 + if (police->tcfp_R_tab) 496 + qdisc_put_rtab(police->tcfp_R_tab); 497 + kfree(police); 490 498 return NULL; 491 499 } 492 500 493 - int tcf_police(struct sk_buff *skb, struct tcf_police *p) 501 + int tcf_police(struct sk_buff *skb, struct tcf_police *police) 494 502 { 495 503 psched_time_t now; 496 504 long toks; 497 505 long ptoks = 0; 498 506 499 - spin_lock(&p->lock); 507 + spin_lock(&police->tcf_lock); 500 508 501 - p->bstats.bytes += skb->len; 502 - p->bstats.packets++; 509 + police->tcf_bstats.bytes += skb->len; 510 + police->tcf_bstats.packets++; 503 511 504 512 #ifdef CONFIG_NET_ESTIMATOR 505 - if (p->ewma_rate && p->rate_est.bps >= p->ewma_rate) { 506 - p->qstats.overlimits++; 507 - spin_unlock(&p->lock); 508 - return p->action; 513 + if (police->tcfp_ewma_rate && 514 + police->tcf_rate_est.bps >= police->tcfp_ewma_rate) { 515 + police->tcf_qstats.overlimits++; 516 + spin_unlock(&police->tcf_lock); 517 + return police->tcf_action; 509 518 } 510 519 #endif 511 - 512 - if (skb->len <= p->mtu) { 513 - if (p->R_tab == NULL) { 514 - spin_unlock(&p->lock); 515 - return p->result; 520 + if (skb->len <= police->tcfp_mtu) { 521 + if (police->tcfp_R_tab == NULL) { 522 + spin_unlock(&police->tcf_lock); 
523 + return police->tcfp_result; 516 524 } 517 525 518 526 PSCHED_GET_TIME(now); 519 - 520 - toks = PSCHED_TDIFF_SAFE(now, p->t_c, p->burst); 521 - 522 - if (p->P_tab) { 523 - ptoks = toks + p->ptoks; 524 - if (ptoks > (long)L2T_P(p, p->mtu)) 525 - ptoks = (long)L2T_P(p, p->mtu); 526 - ptoks -= L2T_P(p, skb->len); 527 + toks = PSCHED_TDIFF_SAFE(now, police->tcfp_t_c, 528 + police->tcfp_burst); 529 + if (police->tcfp_P_tab) { 530 + ptoks = toks + police->tcfp_ptoks; 531 + if (ptoks > (long)L2T_P(police, police->tcfp_mtu)) 532 + ptoks = (long)L2T_P(police, police->tcfp_mtu); 533 + ptoks -= L2T_P(police, skb->len); 527 534 } 528 - toks += p->toks; 529 - if (toks > (long)p->burst) 530 - toks = p->burst; 531 - toks -= L2T(p, skb->len); 532 - 535 + toks += police->tcfp_toks; 536 + if (toks > (long)police->tcfp_burst) 537 + toks = police->tcfp_burst; 538 + toks -= L2T(police, skb->len); 533 539 if ((toks|ptoks) >= 0) { 534 - p->t_c = now; 535 - p->toks = toks; 536 - p->ptoks = ptoks; 537 - spin_unlock(&p->lock); 538 - return p->result; 540 + police->tcfp_t_c = now; 541 + police->tcfp_toks = toks; 542 + police->tcfp_ptoks = ptoks; 543 + spin_unlock(&police->tcf_lock); 544 + return police->tcfp_result; 539 545 } 540 546 } 541 547 542 - p->qstats.overlimits++; 543 - spin_unlock(&p->lock); 544 - return p->action; 548 + police->tcf_qstats.overlimits++; 549 + spin_unlock(&police->tcf_lock); 550 + return police->tcf_action; 545 551 } 546 552 EXPORT_SYMBOL(tcf_police); 547 553 548 - int tcf_police_dump(struct sk_buff *skb, struct tcf_police *p) 554 + int tcf_police_dump(struct sk_buff *skb, struct tcf_police *police) 549 555 { 550 - unsigned char *b = skb->tail; 556 + unsigned char *b = skb->tail; 551 557 struct tc_police opt; 552 558 553 - opt.index = p->index; 554 - opt.action = p->action; 555 - opt.mtu = p->mtu; 556 - opt.burst = p->burst; 557 - if (p->R_tab) 558 - opt.rate = p->R_tab->rate; 559 + opt.index = police->tcf_index; 560 + opt.action = police->tcf_action; 561 + 
opt.mtu = police->tcfp_mtu; 562 + opt.burst = police->tcfp_burst; 563 + if (police->tcfp_R_tab) 564 + opt.rate = police->tcfp_R_tab->rate; 559 565 else 560 566 memset(&opt.rate, 0, sizeof(opt.rate)); 561 - if (p->P_tab) 562 - opt.peakrate = p->P_tab->rate; 567 + if (police->tcfp_P_tab) 568 + opt.peakrate = police->tcfp_P_tab->rate; 563 569 else 564 570 memset(&opt.peakrate, 0, sizeof(opt.peakrate)); 565 571 RTA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt); 566 - if (p->result) 567 - RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int), &p->result); 572 + if (police->tcfp_result) 573 + RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int), 574 + &police->tcfp_result); 568 575 #ifdef CONFIG_NET_ESTIMATOR 569 - if (p->ewma_rate) 570 - RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &p->ewma_rate); 576 + if (police->tcfp_ewma_rate) 577 + RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate); 571 578 #endif 572 579 return skb->len; 573 580 ··· 585 574 return -1; 586 575 } 587 576 588 - int tcf_police_dump_stats(struct sk_buff *skb, struct tcf_police *p) 577 + int tcf_police_dump_stats(struct sk_buff *skb, struct tcf_police *police) 589 578 { 590 579 struct gnet_dump d; 591 580 592 581 if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, 593 - TCA_XSTATS, p->stats_lock, &d) < 0) 582 + TCA_XSTATS, police->tcf_stats_lock, 583 + &d) < 0) 594 584 goto errout; 595 585 596 - if (gnet_stats_copy_basic(&d, &p->bstats) < 0 || 586 + if (gnet_stats_copy_basic(&d, &police->tcf_bstats) < 0 || 597 587 #ifdef CONFIG_NET_ESTIMATOR 598 - gnet_stats_copy_rate_est(&d, &p->rate_est) < 0 || 588 + gnet_stats_copy_rate_est(&d, &police->tcf_rate_est) < 0 || 599 589 #endif 600 - gnet_stats_copy_queue(&d, &p->qstats) < 0) 590 + gnet_stats_copy_queue(&d, &police->tcf_qstats) < 0) 601 591 goto errout; 602 592 603 593 if (gnet_stats_finish_copy(&d) < 0)
+152 -31
net/sched/act_simple.c
··· 20 20 21 21 #define TCA_ACT_SIMP 22 22 22 23 - /* XXX: Hide all these common elements under some macro 24 - * probably 25 - */ 26 23 #include <linux/tc_act/tc_defact.h> 27 24 #include <net/tc_act/tc_defact.h> 28 25 29 - /* use generic hash table with 8 buckets */ 30 - #define MY_TAB_SIZE 8 31 - #define MY_TAB_MASK (MY_TAB_SIZE - 1) 32 - static u32 idx_gen; 33 - static struct tcf_defact *tcf_simp_ht[MY_TAB_SIZE]; 26 + #define SIMP_TAB_MASK 7 27 + static struct tcf_common *tcf_simp_ht[SIMP_TAB_MASK + 1]; 28 + static u32 simp_idx_gen; 34 29 static DEFINE_RWLOCK(simp_lock); 35 30 36 - /* override the defaults */ 37 - #define tcf_st tcf_defact 38 - #define tc_st tc_defact 39 - #define tcf_t_lock simp_lock 40 - #define tcf_ht tcf_simp_ht 41 - 42 - #define CONFIG_NET_ACT_INIT 1 43 - #include <net/pkt_act.h> 44 - #include <net/act_generic.h> 31 + struct tcf_hashinfo simp_hash_info = { 32 + .htab = tcf_simp_ht, 33 + .hmask = SIMP_TAB_MASK, 34 + .lock = &simp_lock, 35 + }; 45 36 46 37 static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res) 47 38 { 48 - struct tcf_defact *p = PRIV(a, defact); 39 + struct tcf_defact *d = a->priv; 49 40 50 - spin_lock(&p->lock); 51 - p->tm.lastuse = jiffies; 52 - p->bstats.bytes += skb->len; 53 - p->bstats.packets++; 41 + spin_lock(&d->tcf_lock); 42 + d->tcf_tm.lastuse = jiffies; 43 + d->tcf_bstats.bytes += skb->len; 44 + d->tcf_bstats.packets++; 54 45 55 46 /* print policy string followed by _ then packet count 56 47 * Example if this was the 3rd packet and the string was "hello" 57 48 * then it would look like "hello_3" (without quotes) 58 49 **/ 59 - printk("simple: %s_%d\n", (char *)p->defdata, p->bstats.packets); 60 - spin_unlock(&p->lock); 61 - return p->action; 50 + printk("simple: %s_%d\n", 51 + (char *)d->tcfd_defdata, d->tcf_bstats.packets); 52 + spin_unlock(&d->tcf_lock); 53 + return d->tcf_action; 54 + } 55 + 56 + static int tcf_simp_release(struct tcf_defact *d, int bind) 57 + { 58 + int ret = 0; 
59 + if (d) { 60 + if (bind) 61 + d->tcf_bindcnt--; 62 + d->tcf_refcnt--; 63 + if (d->tcf_bindcnt <= 0 && d->tcf_refcnt <= 0) { 64 + kfree(d->tcfd_defdata); 65 + tcf_hash_destroy(&d->common, &simp_hash_info); 66 + ret = 1; 67 + } 68 + } 69 + return ret; 70 + } 71 + 72 + static int alloc_defdata(struct tcf_defact *d, u32 datalen, void *defdata) 73 + { 74 + d->tcfd_defdata = kmalloc(datalen, GFP_KERNEL); 75 + if (unlikely(!d->tcfd_defdata)) 76 + return -ENOMEM; 77 + d->tcfd_datalen = datalen; 78 + memcpy(d->tcfd_defdata, defdata, datalen); 79 + return 0; 80 + } 81 + 82 + static int realloc_defdata(struct tcf_defact *d, u32 datalen, void *defdata) 83 + { 84 + kfree(d->tcfd_defdata); 85 + return alloc_defdata(d, datalen, defdata); 86 + } 87 + 88 + static int tcf_simp_init(struct rtattr *rta, struct rtattr *est, 89 + struct tc_action *a, int ovr, int bind) 90 + { 91 + struct rtattr *tb[TCA_DEF_MAX]; 92 + struct tc_defact *parm; 93 + struct tcf_defact *d; 94 + struct tcf_common *pc; 95 + void *defdata; 96 + u32 datalen = 0; 97 + int ret = 0; 98 + 99 + if (rta == NULL || rtattr_parse_nested(tb, TCA_DEF_MAX, rta) < 0) 100 + return -EINVAL; 101 + 102 + if (tb[TCA_DEF_PARMS - 1] == NULL || 103 + RTA_PAYLOAD(tb[TCA_DEF_PARMS - 1]) < sizeof(*parm)) 104 + return -EINVAL; 105 + 106 + parm = RTA_DATA(tb[TCA_DEF_PARMS - 1]); 107 + defdata = RTA_DATA(tb[TCA_DEF_DATA - 1]); 108 + if (defdata == NULL) 109 + return -EINVAL; 110 + 111 + datalen = RTA_PAYLOAD(tb[TCA_DEF_DATA - 1]); 112 + if (datalen <= 0) 113 + return -EINVAL; 114 + 115 + pc = tcf_hash_check(parm->index, a, bind, &simp_hash_info); 116 + if (!pc) { 117 + pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind, 118 + &simp_idx_gen, &simp_hash_info); 119 + if (unlikely(!pc)) 120 + return -ENOMEM; 121 + 122 + d = to_defact(pc); 123 + ret = alloc_defdata(d, datalen, defdata); 124 + if (ret < 0) { 125 + kfree(pc); 126 + return ret; 127 + } 128 + ret = ACT_P_CREATED; 129 + } else { 130 + d = to_defact(pc); 131 + if (!ovr) { 
132 + tcf_simp_release(d, bind); 133 + return -EEXIST; 134 + } 135 + realloc_defdata(d, datalen, defdata); 136 + } 137 + 138 + spin_lock_bh(&d->tcf_lock); 139 + d->tcf_action = parm->action; 140 + spin_unlock_bh(&d->tcf_lock); 141 + 142 + if (ret == ACT_P_CREATED) 143 + tcf_hash_insert(pc, &simp_hash_info); 144 + return ret; 145 + } 146 + 147 + static inline int tcf_simp_cleanup(struct tc_action *a, int bind) 148 + { 149 + struct tcf_defact *d = a->priv; 150 + 151 + if (d) 152 + return tcf_simp_release(d, bind); 153 + return 0; 154 + } 155 + 156 + static inline int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a, 157 + int bind, int ref) 158 + { 159 + unsigned char *b = skb->tail; 160 + struct tcf_defact *d = a->priv; 161 + struct tc_defact opt; 162 + struct tcf_t t; 163 + 164 + opt.index = d->tcf_index; 165 + opt.refcnt = d->tcf_refcnt - ref; 166 + opt.bindcnt = d->tcf_bindcnt - bind; 167 + opt.action = d->tcf_action; 168 + RTA_PUT(skb, TCA_DEF_PARMS, sizeof(opt), &opt); 169 + RTA_PUT(skb, TCA_DEF_DATA, d->tcfd_datalen, d->tcfd_defdata); 170 + t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install); 171 + t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse); 172 + t.expires = jiffies_to_clock_t(d->tcf_tm.expires); 173 + RTA_PUT(skb, TCA_DEF_TM, sizeof(t), &t); 174 + return skb->len; 175 + 176 + rtattr_failure: 177 + skb_trim(skb, b - skb->data); 178 + return -1; 62 179 } 63 180 64 181 static struct tc_action_ops act_simp_ops = { 65 - .kind = "simple", 66 - .type = TCA_ACT_SIMP, 67 - .capab = TCA_CAP_NONE, 68 - .owner = THIS_MODULE, 69 - .act = tcf_simp, 70 - tca_use_default_ops 182 + .kind = "simple", 183 + .hinfo = &simp_hash_info, 184 + .type = TCA_ACT_SIMP, 185 + .capab = TCA_CAP_NONE, 186 + .owner = THIS_MODULE, 187 + .act = tcf_simp, 188 + .dump = tcf_simp_dump, 189 + .cleanup = tcf_simp_cleanup, 190 + .init = tcf_simp_init, 191 + .walk = tcf_generic_walker, 71 192 }; 72 193 73 194 MODULE_AUTHOR("Jamal Hadi Salim(2005)");