pkt_sched: gen_estimator: add a new lock

gen_kill_estimator() / gen_new_estimator() are not always called with
RTNL held.

net/netfilter/xt_RATEEST.c is one user of these APIs that does not hold
RTNL, so random corruption can occur between "tc" and "iptables".

Add a new fine-grained lock instead of trying to use RTNL in netfilter.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
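
The race being fixed is the usual one of two writers mutating a shared
structure without a common outer lock: "tc" updates the estimator tree
under RTNL, while xt_RATEEST updates it from netfilter without RTNL.
Below is a small user-space analogue of the pattern the patch adopts; it
is not kernel code and none of the names come from the patch, only the
idea of one dedicated lock serializing both configuration paths.

/* Two independent "configuration paths" (think tc vs. iptables) insert
 * into and erase from one shared list.  Only the dedicated tree_lock,
 * the analogue of est_tree_lock, keeps the structure consistent. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { int key; struct node *next; };

static struct node *shared_head;
static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

static void insert_node(int key)
{
	struct node *n = malloc(sizeof(*n));

	n->key = key;
	pthread_mutex_lock(&tree_lock);		/* cf. spin_lock(&est_tree_lock) */
	n->next = shared_head;
	shared_head = n;
	pthread_mutex_unlock(&tree_lock);
}

static void erase_node(int key)
{
	struct node **pp, *n;

	pthread_mutex_lock(&tree_lock);
	for (pp = &shared_head; (n = *pp) != NULL; pp = &n->next) {
		if (n->key == key) {
			*pp = n->next;
			free(n);
			break;
		}
	}
	pthread_mutex_unlock(&tree_lock);
}

static void *writer(void *arg)
{
	int base = *(int *)arg, i;

	for (i = 0; i < 100000; i++) {
		insert_node(base + i);
		erase_node(base + i);
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;
	int tc_path = 0, ipt_path = 1000000;

	pthread_create(&a, NULL, writer, &tc_path);	/* "tc" side */
	pthread_create(&b, NULL, writer, &ipt_path);	/* "iptables" side */
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("both paths finished without corrupting the list\n");
	return 0;
}

Dropping tree_lock from insert_node()/erase_node() lets the list corrupt
itself under load, which is the user-space counterpart of the random
corruption the changelog describes.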


 net/core/gen_estimator.c | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
···
 /* Protects against soft lockup during large deletion */
 static struct rb_root est_root = RB_ROOT;
+static DEFINE_SPINLOCK(est_tree_lock);
 
 static void est_timer(unsigned long arg)
 {
···
  *
  * Returns 0 on success or a negative error code.
  *
- * NOTE: Called under rtnl_mutex
  */
 int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 		      struct gnet_stats_rate_est *rate_est,
···
 	est->last_packets = bstats->packets;
 	est->avpps = rate_est->pps<<10;
 
+	spin_lock(&est_tree_lock);
 	if (!elist[idx].timer.function) {
 		INIT_LIST_HEAD(&elist[idx].list);
 		setup_timer(&elist[idx].timer, est_timer, idx);
···
 	list_add_rcu(&est->list, &elist[idx].list);
 	gen_add_node(est);
+	spin_unlock(&est_tree_lock);
 
 	return 0;
 }
···
  *
  * Removes the rate estimator specified by &bstats and &rate_est.
  *
- * NOTE: Called under rtnl_mutex
  */
 void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
 			struct gnet_stats_rate_est *rate_est)
 {
 	struct gen_estimator *e;
 
+	spin_lock(&est_tree_lock);
 	while ((e = gen_find_node(bstats, rate_est))) {
 		rb_erase(&e->node, &est_root);
 
···
 		list_del_rcu(&e->list);
 		call_rcu(&e->e_rcu, __gen_kill_estimator);
 	}
+	spin_unlock(&est_tree_lock);
 }
 EXPORT_SYMBOL(gen_kill_estimator);
 
···
 bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
 			  const struct gnet_stats_rate_est *rate_est)
 {
+	bool res;
+
 	ASSERT_RTNL();
 
-	return gen_find_node(bstats, rate_est) != NULL;
+	spin_lock(&est_tree_lock);
+	res = gen_find_node(bstats, rate_est) != NULL;
+	spin_unlock(&est_tree_lock);
+
+	return res;
 }
 EXPORT_SYMBOL(gen_estimator_active);
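
With est_tree_lock in place, callers no longer need RTNL to register or
unregister an estimator. The fragment below is a rough sketch (not taken
from the patch) of the pattern a non-RTNL user such as xt_RATEEST
follows; the my_rateest structure and helper names are hypothetical,
while gen_new_estimator()/gen_kill_estimator() and the
TCA_STATS_RATE_EST attribute are the real interfaces serialized above.

#include <linux/spinlock.h>
#include <linux/gen_stats.h>	/* struct gnet_estimator, TCA_STATS_RATE_EST */
#include <net/gen_stats.h>	/* gen_new_estimator(), gen_kill_estimator() */
#include <net/netlink.h>	/* nla_attr_size() */

/* Hypothetical per-rule state; field names do not come from xt_RATEEST. */
struct my_rateest {
	struct gnet_stats_basic_packed	bstats;
	struct gnet_stats_rate_est	rstats;
	spinlock_t			stats_lock;
};

static int my_rateest_attach(struct my_rateest *e, s8 interval, u8 ewma_log)
{
	/* Build the TCA_STATS_RATE_EST TLV that gen_new_estimator() expects. */
	struct {
		struct nlattr		opt;
		struct gnet_estimator	est;
	} cfg;

	cfg.opt.nla_len	 = nla_attr_size(sizeof(cfg.est));
	cfg.opt.nla_type = TCA_STATS_RATE_EST;
	cfg.est.interval = interval;
	cfg.est.ewma_log = ewma_log;

	spin_lock_init(&e->stats_lock);
	/* Runs without RTNL; est_tree_lock now serializes the tree update. */
	return gen_new_estimator(&e->bstats, &e->rstats, &e->stats_lock,
				 &cfg.opt);
}

static void my_rateest_detach(struct my_rateest *e)
{
	/* Also runs without RTNL, e.g. from the target's destroy hook. */
	gen_kill_estimator(&e->bstats, &e->rstats);
}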