Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

netfilter: don't use mutex_lock_interruptible()

Eric Dumazet reports that getsockopt() or setsockopt() sometimes
returns -EINTR instead of -ENOPROTOOPT, causing headaches to
application developers.

This patch replaces all the mutex_lock_interruptible() by mutex_lock()
in the netfilter tree, as there is no reason we should sleep for a
long time there.

Reported-by: Eric Dumazet <edumazet@google.com>
Suggested-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Acked-by: Julian Anastasov <ja@ssi.bg>

+22 -73
+2 -8
net/bridge/netfilter/ebtables.c
··· 327 327 char name[EBT_FUNCTION_MAXNAMELEN]; 328 328 } *e; 329 329 330 - *error = mutex_lock_interruptible(mutex); 331 - if (*error != 0) 332 - return NULL; 333 - 330 + mutex_lock(mutex); 334 331 list_for_each_entry(e, head, list) { 335 332 if (strcmp(e->name, name) == 0) 336 333 return e; ··· 1200 1203 1201 1204 table->private = newinfo; 1202 1205 rwlock_init(&table->lock); 1203 - ret = mutex_lock_interruptible(&ebt_mutex); 1204 - if (ret != 0) 1205 - goto free_chainstack; 1206 - 1206 + mutex_lock(&ebt_mutex); 1207 1207 list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) { 1208 1208 if (strcmp(t->name, table->name) == 0) { 1209 1209 ret = -EEXIST;
+2 -9
net/netfilter/core.c
··· 35 35 36 36 int nf_register_afinfo(const struct nf_afinfo *afinfo) 37 37 { 38 - int err; 39 - 40 - err = mutex_lock_interruptible(&afinfo_mutex); 41 - if (err < 0) 42 - return err; 38 + mutex_lock(&afinfo_mutex); 43 39 RCU_INIT_POINTER(nf_afinfo[afinfo->family], afinfo); 44 40 mutex_unlock(&afinfo_mutex); 45 41 return 0; ··· 64 68 int nf_register_hook(struct nf_hook_ops *reg) 65 69 { 66 70 struct nf_hook_ops *elem; 67 - int err; 68 71 69 - err = mutex_lock_interruptible(&nf_hook_mutex); 70 - if (err < 0) 71 - return err; 72 + mutex_lock(&nf_hook_mutex); 72 73 list_for_each_entry(elem, &nf_hooks[reg->pf][reg->hooknum], list) { 73 74 if (reg->priority < elem->priority) 74 75 break;
+4 -15
net/netfilter/ipvs/ip_vs_ctl.c
··· 2271 2271 cmd == IP_VS_SO_SET_STOPDAEMON) { 2272 2272 struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg; 2273 2273 2274 - if (mutex_lock_interruptible(&ipvs->sync_mutex)) { 2275 - ret = -ERESTARTSYS; 2276 - goto out_dec; 2277 - } 2274 + mutex_lock(&ipvs->sync_mutex); 2278 2275 if (cmd == IP_VS_SO_SET_STARTDAEMON) 2279 2276 ret = start_sync_thread(net, dm->state, dm->mcast_ifn, 2280 2277 dm->syncid); ··· 2281 2284 goto out_dec; 2282 2285 } 2283 2286 2284 - if (mutex_lock_interruptible(&__ip_vs_mutex)) { 2285 - ret = -ERESTARTSYS; 2286 - goto out_dec; 2287 - } 2288 - 2287 + mutex_lock(&__ip_vs_mutex); 2289 2288 if (cmd == IP_VS_SO_SET_FLUSH) { 2290 2289 /* Flush the virtual service */ 2291 2290 ret = ip_vs_flush(net, false); ··· 2566 2573 struct ip_vs_daemon_user d[2]; 2567 2574 2568 2575 memset(&d, 0, sizeof(d)); 2569 - if (mutex_lock_interruptible(&ipvs->sync_mutex)) 2570 - return -ERESTARTSYS; 2571 - 2576 + mutex_lock(&ipvs->sync_mutex); 2572 2577 if (ipvs->sync_state & IP_VS_STATE_MASTER) { 2573 2578 d[0].state = IP_VS_STATE_MASTER; 2574 2579 strlcpy(d[0].mcast_ifn, ipvs->master_mcast_ifn, ··· 2585 2594 return ret; 2586 2595 } 2587 2596 2588 - if (mutex_lock_interruptible(&__ip_vs_mutex)) 2589 - return -ERESTARTSYS; 2590 - 2597 + mutex_lock(&__ip_vs_mutex); 2591 2598 switch (cmd) { 2592 2599 case IP_VS_SO_GET_VERSION: 2593 2600 {
+2 -6
net/netfilter/nf_sockopt.c
··· 26 26 struct nf_sockopt_ops *ops; 27 27 int ret = 0; 28 28 29 - if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0) 30 - return -EINTR; 31 - 29 + mutex_lock(&nf_sockopt_mutex); 32 30 list_for_each_entry(ops, &nf_sockopts, list) { 33 31 if (ops->pf == reg->pf 34 32 && (overlap(ops->set_optmin, ops->set_optmax, ··· 63 65 { 64 66 struct nf_sockopt_ops *ops; 65 67 66 - if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0) 67 - return ERR_PTR(-EINTR); 68 - 68 + mutex_lock(&nf_sockopt_mutex); 69 69 list_for_each_entry(ops, &nf_sockopts, list) { 70 70 if (ops->pf == pf) { 71 71 if (!try_module_get(ops->owner))
+12 -35
net/netfilter/x_tables.c
··· 71 71 static const unsigned int xt_jumpstack_multiplier = 2; 72 72 73 73 /* Registration hooks for targets. */ 74 - int 75 - xt_register_target(struct xt_target *target) 74 + int xt_register_target(struct xt_target *target) 76 75 { 77 76 u_int8_t af = target->family; 78 - int ret; 79 77 80 - ret = mutex_lock_interruptible(&xt[af].mutex); 81 - if (ret != 0) 82 - return ret; 78 + mutex_lock(&xt[af].mutex); 83 79 list_add(&target->list, &xt[af].target); 84 80 mutex_unlock(&xt[af].mutex); 85 - return ret; 81 + return 0; 86 82 } 87 83 EXPORT_SYMBOL(xt_register_target); 88 84 ··· 121 125 } 122 126 EXPORT_SYMBOL(xt_unregister_targets); 123 127 124 - int 125 - xt_register_match(struct xt_match *match) 128 + int xt_register_match(struct xt_match *match) 126 129 { 127 130 u_int8_t af = match->family; 128 - int ret; 129 131 130 - ret = mutex_lock_interruptible(&xt[af].mutex); 131 - if (ret != 0) 132 - return ret; 133 - 132 + mutex_lock(&xt[af].mutex); 134 133 list_add(&match->list, &xt[af].match); 135 134 mutex_unlock(&xt[af].mutex); 136 - 137 - return ret; 135 + return 0; 138 136 } 139 137 EXPORT_SYMBOL(xt_register_match); 140 138 ··· 184 194 struct xt_match *m; 185 195 int err = -ENOENT; 186 196 187 - if (mutex_lock_interruptible(&xt[af].mutex) != 0) 188 - return ERR_PTR(-EINTR); 189 - 197 + mutex_lock(&xt[af].mutex); 190 198 list_for_each_entry(m, &xt[af].match, list) { 191 199 if (strcmp(m->name, name) == 0) { 192 200 if (m->revision == revision) { ··· 227 239 struct xt_target *t; 228 240 int err = -ENOENT; 229 241 230 - if (mutex_lock_interruptible(&xt[af].mutex) != 0) 231 - return ERR_PTR(-EINTR); 232 - 242 + mutex_lock(&xt[af].mutex); 233 243 list_for_each_entry(t, &xt[af].target, list) { 234 244 if (strcmp(t->name, name) == 0) { 235 245 if (t->revision == revision) { ··· 309 323 { 310 324 int have_rev, best = -1; 311 325 312 - if (mutex_lock_interruptible(&xt[af].mutex) != 0) { 313 - *err = -EINTR; 314 - return 1; 315 - } 326 + mutex_lock(&xt[af].mutex); 316 327 if (target == 1) 317 328 have_rev = target_revfn(af, name, revision, &best); 318 329 else ··· 715 732 { 716 733 struct xt_table *t; 717 734 718 - if (mutex_lock_interruptible(&xt[af].mutex) != 0) 719 - return ERR_PTR(-EINTR); 720 - 735 + mutex_lock(&xt[af].mutex); 721 736 list_for_each_entry(t, &net->xt.tables[af], list) 722 737 if (strcmp(t->name, name) == 0 && try_module_get(t->me)) 723 738 return t; ··· 864 883 goto out; 865 884 } 866 885 867 - ret = mutex_lock_interruptible(&xt[table->af].mutex); 868 - if (ret != 0) 869 - goto out_free; 870 - 886 + mutex_lock(&xt[table->af].mutex); 871 887 /* Don't autoload: we'd eat our tail... */ 872 888 list_for_each_entry(t, &net->xt.tables[table->af], list) { 873 889 if (strcmp(t->name, table->name) == 0) { ··· 889 911 mutex_unlock(&xt[table->af].mutex); 890 912 return table; 891 913 892 - unlock: 914 + unlock: 893 915 mutex_unlock(&xt[table->af].mutex); 894 - out_free: 895 916 kfree(table); 896 917 out: 897 918 return ERR_PTR(ret);