net/netfilter/nf_queue.c at v4.1-rc8
/*
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/netfilter_bridge.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>
#include <net/netfilter/nf_queue.h>
#include <net/dst.h>

#include "nf_internals.h"

/*
 * Hook for nfnetlink_queue to register its queue handler.
 * We do this so that most of the NFQUEUE code can be modular.
 *
 * Once the queue is registered it must reinject all packets it
 * receives, no matter what.
 */
static const struct nf_queue_handler __rcu *queue_handler __read_mostly;

/* return EBUSY when somebody else is registered, return EEXIST if the
 * same handler is registered, return 0 in case of success. */
void nf_register_queue_handler(const struct nf_queue_handler *qh)
{
	/* should never happen, we only have one queueing backend in kernel */
	WARN_ON(rcu_access_pointer(queue_handler));
	rcu_assign_pointer(queue_handler, qh);
}
EXPORT_SYMBOL(nf_register_queue_handler);

/* The caller must flush their queue before this */
void nf_unregister_queue_handler(void)
{
	RCU_INIT_POINTER(queue_handler, NULL);
	synchronize_rcu();
}
EXPORT_SYMBOL(nf_unregister_queue_handler);

void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
	struct nf_hook_state *state = &entry->state;

	/* Release those devices we held, or Alexey will kill me. */
	if (state->in)
		dev_put(state->in);
	if (state->out)
		dev_put(state->out);
	if (state->sk)
		sock_put(state->sk);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (entry->skb->nf_bridge) {
		struct net_device *physdev;

		physdev = nf_bridge_get_physindev(entry->skb);
		if (physdev)
			dev_put(physdev);
		physdev = nf_bridge_get_physoutdev(entry->skb);
		if (physdev)
			dev_put(physdev);
	}
#endif
	/* Drop reference to owner of hook which queued us. */
	module_put(entry->elem->owner);
}
EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);

/* Bump dev refs so they don't vanish while packet is out */
bool nf_queue_entry_get_refs(struct nf_queue_entry *entry)
{
	struct nf_hook_state *state = &entry->state;

	if (!try_module_get(entry->elem->owner))
		return false;

	if (state->in)
		dev_hold(state->in);
	if (state->out)
		dev_hold(state->out);
	if (state->sk)
		sock_hold(state->sk);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (entry->skb->nf_bridge) {
		struct net_device *physdev;

		physdev = nf_bridge_get_physindev(entry->skb);
		if (physdev)
			dev_hold(physdev);
		physdev = nf_bridge_get_physoutdev(entry->skb);
		if (physdev)
			dev_hold(physdev);
	}
#endif

	return true;
}
EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs);

/*
 * Any packet that leaves via this function must come back
 * through nf_reinject().
 */
int nf_queue(struct sk_buff *skb,
	     struct nf_hook_ops *elem,
	     struct nf_hook_state *state,
	     unsigned int queuenum)
{
	int status = -ENOENT;
	struct nf_queue_entry *entry = NULL;
	const struct nf_afinfo *afinfo;
	const struct nf_queue_handler *qh;

	/* QUEUE == DROP if no one is waiting, to be safe. */
	rcu_read_lock();

	qh = rcu_dereference(queue_handler);
	if (!qh) {
		status = -ESRCH;
		goto err_unlock;
	}

	afinfo = nf_get_afinfo(state->pf);
	if (!afinfo)
		goto err_unlock;

	entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
	if (!entry) {
		status = -ENOMEM;
		goto err_unlock;
	}

	*entry = (struct nf_queue_entry) {
		.skb	= skb,
		.elem	= elem,
		.state	= *state,
		.size	= sizeof(*entry) + afinfo->route_key_size,
	};

	if (!nf_queue_entry_get_refs(entry)) {
		status = -ECANCELED;
		goto err_unlock;
	}
	skb_dst_force(skb);
	afinfo->saveroute(skb, entry);
	status = qh->outfn(entry, queuenum);

	rcu_read_unlock();

	if (status < 0) {
		nf_queue_entry_release_refs(entry);
		goto err;
	}

	return 0;

err_unlock:
	rcu_read_unlock();
err:
	kfree(entry);
	return status;
}

void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
	struct sk_buff *skb = entry->skb;
	struct nf_hook_ops *elem = entry->elem;
	const struct nf_afinfo *afinfo;
	int err;

	rcu_read_lock();

	nf_queue_entry_release_refs(entry);

	/* Continue traversal iff userspace said ok... */
	if (verdict == NF_REPEAT) {
		elem = list_entry(elem->list.prev, struct nf_hook_ops, list);
		verdict = NF_ACCEPT;
	}

	if (verdict == NF_ACCEPT) {
		afinfo = nf_get_afinfo(entry->state.pf);
		if (!afinfo || afinfo->reroute(skb, entry) < 0)
			verdict = NF_DROP;
	}

	entry->state.thresh = INT_MIN;

	if (verdict == NF_ACCEPT) {
	next_hook:
		verdict = nf_iterate(&nf_hooks[entry->state.pf][entry->state.hook],
				     skb, &entry->state, &elem);
	}

	switch (verdict & NF_VERDICT_MASK) {
	case NF_ACCEPT:
	case NF_STOP:
		local_bh_disable();
		entry->state.okfn(entry->state.sk, skb);
		local_bh_enable();
		break;
	case NF_QUEUE:
		err = nf_queue(skb, elem, &entry->state,
			       verdict >> NF_VERDICT_QBITS);
		if (err < 0) {
			if (err == -ECANCELED)
				goto next_hook;
			if (err == -ESRCH &&
			    (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
				goto next_hook;
			kfree_skb(skb);
		}
		break;
	case NF_STOLEN:
		break;
	default:
		kfree_skb(skb);
	}
	rcu_read_unlock();
	kfree(entry);
}
EXPORT_SYMBOL(nf_reinject);
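
/*
 * A minimal sketch, not part of this file, of how a queueing backend would
 * use the API above, assuming the v4.1 interfaces shown here.  The only real
 * in-tree backend is nfnetlink_queue; the echo_* names below are invented
 * purely for illustration.  It honours the contract spelled out in the
 * comments above: every nf_queue_entry handed to ->outfn() is given back to
 * the stack through nf_reinject(), here immediately and with an NF_ACCEPT
 * verdict (a real backend would hand the packet to userspace first and
 * reinject only once a verdict arrives).
 */
#include <linux/module.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_queue.h>

static int echo_outfn(struct nf_queue_entry *entry, unsigned int queuenum)
{
	/* Reinject on the spot with an ACCEPT verdict. */
	nf_reinject(entry, NF_ACCEPT);
	return 0;
}

static const struct nf_queue_handler echo_qh = {
	.outfn	= echo_outfn,
};

static int __init echo_qh_init(void)
{
	/* Installs the single global handler; WARN_ON fires if one exists. */
	nf_register_queue_handler(&echo_qh);
	return 0;
}

static void __exit echo_qh_exit(void)
{
	/* Per the comment above, flush any pending entries before this. */
	nf_unregister_queue_handler();
}

module_init(echo_qh_init);
module_exit(echo_qh_exit);
MODULE_LICENSE("GPL");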
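
/*
 * The verdicts consumed by nf_reinject() normally come from userspace via
 * nfnetlink_queue, e.g. behind an iptables rule such as
 * "-j NFQUEUE --queue-num 0".  The sketch below, assumed to be built against
 * libnetfilter_queue, accepts every packet on queue 0; each nfq_set_verdict()
 * call ends up as an nf_reinject(entry, NF_ACCEPT) in the kernel.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/netfilter.h>			/* NF_ACCEPT */
#include <libnetfilter_queue/libnetfilter_queue.h>

/* Accept every packet; issuing a verdict reinjects it in the kernel. */
static int cb(struct nfq_q_handle *qh, struct nfgenmsg *nfmsg,
	      struct nfq_data *nfa, void *data)
{
	struct nfqnl_msg_packet_hdr *ph = nfq_get_msg_packet_hdr(nfa);
	uint32_t id = ph ? ntohl(ph->packet_id) : 0;

	return nfq_set_verdict(qh, id, NF_ACCEPT, 0, NULL);
}

int main(void)
{
	struct nfq_handle *h = nfq_open();
	struct nfq_q_handle *qh;
	char buf[4096] __attribute__((aligned));
	int fd, rv;

	if (!h)
		exit(1);
	qh = nfq_create_queue(h, 0, &cb, NULL);	/* queue number 0 */
	if (!qh)
		exit(1);
	nfq_set_mode(qh, NFQNL_COPY_PACKET, 0xffff);

	fd = nfq_fd(h);
	while ((rv = recv(fd, buf, sizeof(buf), 0)) >= 0)
		nfq_handle_packet(h, buf, rv);

	nfq_destroy_queue(qh);
	nfq_close(h);
	return 0;
}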