[TOKENRING]: Use interrupt-safe locking with rif_lock.

Change operations on rif_lock from spin_{un}lock_bh to
spin_{un}lock_irq{save,restore} equivalents. Some of the
rif_lock critical sections are called from interrupt context via
tr_type_trans->tr_add_rif_info. The TR NIC drivers call tr_type_trans
from their packet receive handlers.

Signed-off-by: Jay Vosburgh <fubar@us.ibm.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by Jay Vosburgh and committed by David S. Miller 001dd250 6be382ea

+12 -10
net/802/tr.c
··· 251 251 unsigned int hash; 252 252 struct rif_cache *entry; 253 253 unsigned char *olddata; 254 + unsigned long flags; 254 255 static const unsigned char mcast_func_addr[] 255 256 = {0xC0,0x00,0x00,0x04,0x00,0x00}; 256 257 257 - spin_lock_bh(&rif_lock); 258 + spin_lock_irqsave(&rif_lock, flags); 258 259 259 260 /* 260 261 * Broadcasts are single route as stated in RFC 1042 ··· 324 323 else 325 324 slack = 18 - ((ntohs(trh->rcf) & TR_RCF_LEN_MASK)>>8); 326 325 olddata = skb->data; 327 - spin_unlock_bh(&rif_lock); 326 + spin_unlock_irqrestore(&rif_lock, flags); 328 327 329 328 skb_pull(skb, slack); 330 329 memmove(skb->data, olddata, sizeof(struct trh_hdr) - slack); ··· 338 337 static void tr_add_rif_info(struct trh_hdr *trh, struct net_device *dev) 339 338 { 340 339 unsigned int hash, rii_p = 0; 340 + unsigned long flags; 341 341 struct rif_cache *entry; 342 342 343 343 344 - spin_lock_bh(&rif_lock); 344 + spin_lock_irqsave(&rif_lock, flags); 345 345 346 346 /* 347 347 * Firstly see if the entry exists ··· 380 378 if(!entry) 381 379 { 382 380 printk(KERN_DEBUG "tr.c: Couldn't malloc rif cache entry !\n"); 383 - spin_unlock_bh(&rif_lock); 381 + spin_unlock_irqrestore(&rif_lock, flags); 384 382 return; 385 383 } 386 384 ··· 422 420 } 423 421 entry->last_used=jiffies; 424 422 } 425 - spin_unlock_bh(&rif_lock); 423 + spin_unlock_irqrestore(&rif_lock, flags); 426 424 } 427 425 428 426 /* ··· 432 430 static void rif_check_expire(unsigned long dummy) 433 431 { 434 432 int i; 435 - unsigned long next_interval = jiffies + sysctl_tr_rif_timeout/2; 433 + unsigned long flags, next_interval = jiffies + sysctl_tr_rif_timeout/2; 436 434 437 - spin_lock_bh(&rif_lock); 435 + spin_lock_irqsave(&rif_lock, flags); 438 436 439 437 for(i =0; i < RIF_TABLE_SIZE; i++) { 440 438 struct rif_cache *entry, **pentry; ··· 456 454 } 457 455 } 458 456 459 - spin_unlock_bh(&rif_lock); 457 + spin_unlock_irqrestore(&rif_lock, flags); 460 458 461 459 mod_timer(&rif_timer, next_interval); 462 460 
··· 487 485 488 486 static void *rif_seq_start(struct seq_file *seq, loff_t *pos) 489 487 { 490 - spin_lock_bh(&rif_lock); 488 + spin_lock_irq(&rif_lock); 491 489 492 490 return *pos ? rif_get_idx(*pos - 1) : SEQ_START_TOKEN; 493 491 } ··· 518 516 519 517 static void rif_seq_stop(struct seq_file *seq, void *v) 520 518 { 521 - spin_unlock_bh(&rif_lock); 519 + spin_unlock_irq(&rif_lock); 522 520 } 523 521 524 522 static int rif_seq_show(struct seq_file *seq, void *v)