Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

netfilter: Create and use nf_hook_state.

Instead of passing a large number of arguments down into the nf_hook()
entry points, create a structure which carries this state down through
the hook processing layers.

This makes it so that if we want to change the types or signatures of
any of these pieces of state, there are fewer places that need to be
changed.

Signed-off-by: David S. Miller <davem@davemloft.net>

+59 -50
+23 -5
include/linux/netfilter.h
··· 44 44 struct sk_buff; 45 45 46 46 struct nf_hook_ops; 47 + 48 + struct nf_hook_state { 49 + unsigned int hook; 50 + int thresh; 51 + u_int8_t pf; 52 + struct net_device *in; 53 + struct net_device *out; 54 + int (*okfn)(struct sk_buff *); 55 + }; 56 + 47 57 typedef unsigned int nf_hookfn(const struct nf_hook_ops *ops, 48 58 struct sk_buff *skb, 49 59 const struct net_device *in, ··· 128 118 } 129 119 #endif 130 120 131 - int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb, 132 - struct net_device *indev, struct net_device *outdev, 133 - int (*okfn)(struct sk_buff *), int thresh); 121 + int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state); 134 122 135 123 /** 136 124 * nf_hook_thresh - call a netfilter hook ··· 143 135 struct net_device *outdev, 144 136 int (*okfn)(struct sk_buff *), int thresh) 145 137 { 146 - if (nf_hooks_active(pf, hook)) 147 - return nf_hook_slow(pf, hook, skb, indev, outdev, okfn, thresh); 138 + if (nf_hooks_active(pf, hook)) { 139 + struct nf_hook_state state = { 140 + .hook = hook, 141 + .thresh = thresh, 142 + .pf = pf, 143 + .in = indev, 144 + .out = outdev, 145 + .okfn = okfn 146 + }; 147 + 148 + return nf_hook_slow(skb, &state); 149 + } 148 150 return 1; 149 151 } 150 152
+13 -19
net/netfilter/core.c
··· 120 120 121 121 unsigned int nf_iterate(struct list_head *head, 122 122 struct sk_buff *skb, 123 - unsigned int hook, 124 - const struct net_device *indev, 125 - const struct net_device *outdev, 126 - struct nf_hook_ops **elemp, 127 - int (*okfn)(struct sk_buff *), 128 - int hook_thresh) 123 + struct nf_hook_state *state, 124 + struct nf_hook_ops **elemp) 129 125 { 130 126 unsigned int verdict; 131 127 ··· 130 134 * function because of risk of continuing from deleted element. 131 135 */ 132 136 list_for_each_entry_continue_rcu((*elemp), head, list) { 133 - if (hook_thresh > (*elemp)->priority) 137 + if (state->thresh > (*elemp)->priority) 134 138 continue; 135 139 136 140 /* Optimization: we don't need to hold module 137 141 reference here, since function can't sleep. --RR */ 138 142 repeat: 139 - verdict = (*elemp)->hook(*elemp, skb, indev, outdev, okfn); 143 + verdict = (*elemp)->hook(*elemp, skb, state->in, state->out, 144 + state->okfn); 140 145 if (verdict != NF_ACCEPT) { 141 146 #ifdef CONFIG_NETFILTER_DEBUG 142 147 if (unlikely((verdict & NF_VERDICT_MASK) 143 148 > NF_MAX_VERDICT)) { 144 149 NFDEBUG("Evil return from %p(%u).\n", 145 - (*elemp)->hook, hook); 150 + (*elemp)->hook, state->hook); 146 151 continue; 147 152 } 148 153 #endif ··· 158 161 159 162 /* Returns 1 if okfn() needs to be executed by the caller, 160 163 * -EPERM for NF_DROP, 0 otherwise. 
*/ 161 - int nf_hook_slow(u_int8_t pf, unsigned int hook, struct sk_buff *skb, 162 - struct net_device *indev, 163 - struct net_device *outdev, 164 - int (*okfn)(struct sk_buff *), 165 - int hook_thresh) 164 + int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state) 166 165 { 167 166 struct nf_hook_ops *elem; 168 167 unsigned int verdict; ··· 167 174 /* We may already have this, but read-locks nest anyway */ 168 175 rcu_read_lock(); 169 176 170 - elem = list_entry_rcu(&nf_hooks[pf][hook], struct nf_hook_ops, list); 177 + elem = list_entry_rcu(&nf_hooks[state->pf][state->hook], 178 + struct nf_hook_ops, list); 171 179 next_hook: 172 - verdict = nf_iterate(&nf_hooks[pf][hook], skb, hook, indev, 173 - outdev, &elem, okfn, hook_thresh); 180 + verdict = nf_iterate(&nf_hooks[state->pf][state->hook], skb, state, 181 + &elem); 174 182 if (verdict == NF_ACCEPT || verdict == NF_STOP) { 175 183 ret = 1; 176 184 } else if ((verdict & NF_VERDICT_MASK) == NF_DROP) { ··· 180 186 if (ret == 0) 181 187 ret = -EPERM; 182 188 } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) { 183 - int err = nf_queue(skb, elem, pf, hook, indev, outdev, okfn, 184 - verdict >> NF_VERDICT_QBITS); 189 + int err = nf_queue(skb, elem, state, 190 + verdict >> NF_VERDICT_QBITS); 185 191 if (err < 0) { 186 192 if (err == -ECANCELED) 187 193 goto next_hook;
+3 -8
net/netfilter/nf_internals.h
··· 14 14 15 15 /* core.c */ 16 16 unsigned int nf_iterate(struct list_head *head, struct sk_buff *skb, 17 - unsigned int hook, const struct net_device *indev, 18 - const struct net_device *outdev, 19 - struct nf_hook_ops **elemp, 20 - int (*okfn)(struct sk_buff *), int hook_thresh); 17 + struct nf_hook_state *state, struct nf_hook_ops **elemp); 21 18 22 19 /* nf_queue.c */ 23 - int nf_queue(struct sk_buff *skb, struct nf_hook_ops *elem, u_int8_t pf, 24 - unsigned int hook, struct net_device *indev, 25 - struct net_device *outdev, int (*okfn)(struct sk_buff *), 26 - unsigned int queuenum); 20 + int nf_queue(struct sk_buff *skb, struct nf_hook_ops *elem, 21 + struct nf_hook_state *state, unsigned int queuenum); 27 22 int __init netfilter_queue_init(void); 28 23 29 24 /* nf_log.c */
+20 -18
net/netfilter/nf_queue.c
··· 100 100 * through nf_reinject(). 101 101 */ 102 102 int nf_queue(struct sk_buff *skb, 103 - struct nf_hook_ops *elem, 104 - u_int8_t pf, unsigned int hook, 105 - struct net_device *indev, 106 - struct net_device *outdev, 107 - int (*okfn)(struct sk_buff *), 108 - unsigned int queuenum) 103 + struct nf_hook_ops *elem, 104 + struct nf_hook_state *state, 105 + unsigned int queuenum) 109 106 { 110 107 int status = -ENOENT; 111 108 struct nf_queue_entry *entry = NULL; ··· 118 121 goto err_unlock; 119 122 } 120 123 121 - afinfo = nf_get_afinfo(pf); 124 + afinfo = nf_get_afinfo(state->pf); 122 125 if (!afinfo) 123 126 goto err_unlock; 124 127 ··· 131 134 *entry = (struct nf_queue_entry) { 132 135 .skb = skb, 133 136 .elem = elem, 134 - .pf = pf, 135 - .hook = hook, 136 - .indev = indev, 137 - .outdev = outdev, 138 - .okfn = okfn, 137 + .pf = state->pf, 138 + .hook = state->hook, 139 + .indev = state->in, 140 + .outdev = state->out, 141 + .okfn = state->okfn, 139 142 .size = sizeof(*entry) + afinfo->route_key_size, 140 143 }; 141 144 ··· 168 171 struct sk_buff *skb = entry->skb; 169 172 struct nf_hook_ops *elem = entry->elem; 170 173 const struct nf_afinfo *afinfo; 174 + struct nf_hook_state state; 171 175 int err; 172 176 173 177 rcu_read_lock(); ··· 187 189 verdict = NF_DROP; 188 190 } 189 191 192 + state.hook = entry->hook; 193 + state.thresh = INT_MIN; 194 + state.pf = entry->pf; 195 + state.in = entry->indev; 196 + state.out = entry->outdev; 197 + state.okfn = entry->okfn; 198 + 190 199 if (verdict == NF_ACCEPT) { 191 200 next_hook: 192 201 verdict = nf_iterate(&nf_hooks[entry->pf][entry->hook], 193 - skb, entry->hook, 194 - entry->indev, entry->outdev, &elem, 195 - entry->okfn, INT_MIN); 202 + skb, &state, &elem); 196 203 } 197 204 198 205 switch (verdict & NF_VERDICT_MASK) { ··· 208 205 local_bh_enable(); 209 206 break; 210 207 case NF_QUEUE: 211 - err = nf_queue(skb, elem, entry->pf, entry->hook, 212 - entry->indev, entry->outdev, entry->okfn, 213 - verdict >> 
NF_VERDICT_QBITS); 208 + err = nf_queue(skb, elem, &state, 209 + verdict >> NF_VERDICT_QBITS); 214 210 if (err < 0) { 215 211 if (err == -ECANCELED) 216 212 goto next_hook;