Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (26 commits)
clockevents: Convert to raw_spinlock
clockevents: Make tick_device_lock static
debugobjects: Convert to raw_spinlocks
perf_event: Convert to raw_spinlock
hrtimers: Convert to raw_spinlocks
genirq: Convert irq_desc.lock to raw_spinlock
smp: Convert smplocks to raw_spinlocks
rtmutex: Convert rtmutex.lock to raw_spinlock
sched: Convert pi_lock to raw_spinlock
sched: Convert cpupri lock to raw_spinlock
sched: Convert rt_runtime_lock to raw_spinlock
sched: Convert rq->lock to raw_spinlock
plist: Make plist debugging raw_spinlock aware
bkl: Fixup core_lock fallout
locking: Cleanup the name space completely
locking: Further name space cleanups
alpha: Fix fallout from locking changes
locking: Implement new raw_spinlock
locking: Convert raw_rwlock functions to arch_rwlock
locking: Convert raw_rwlock to arch_rwlock
...
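
In short, the series splits the spinlock namespace into three layers: spinlock_t, which the -rt tree can later turn into a sleeping lock; raw_spinlock_t with the raw_spin_*() API for locks that must always truly spin (t2_hae_lock and irq_desc.lock in the diffs below); and the per-architecture arch_spinlock_t/arch_spin_*() primitives that take over from the old __raw_*/raw_spinlock_t names. Below is a minimal sketch of the resulting call pattern; the lock and function names in it are invented for illustration, and only the API names are taken from the diffs that follow.

/*
 * Illustrative sketch only, not part of this merge. hw_hae_lock and
 * hw_read_sparse() are hypothetical; the locking calls mirror the
 * post-conversion API shown in the diffs below.
 */
#include <linux/io.h>
#include <linux/spinlock.h>

/*
 * Locks that must keep spinning even where spinlock_t may become
 * sleepable (preempt-rt) are declared raw and use raw_spin_*().
 */
static DEFINE_RAW_SPINLOCK(hw_hae_lock);		/* was: DEFINE_SPINLOCK() */

static u32 hw_read_sparse(const volatile void __iomem *addr)
{
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&hw_hae_lock, flags);	/* was: spin_lock_irqsave() */
	val = readl(addr);
	raw_spin_unlock_irqrestore(&hw_hae_lock, flags);
	return val;
}

/*
 * At the architecture layer, the old __raw_spin_*()/raw_spinlock_t
 * primitives are renamed to arch_spin_*()/arch_spinlock_t, which is
 * what frees the raw_ prefix for the core API used above.
 */
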

+2485 -2177
+17 -17
arch/alpha/include/asm/core_t2.h
··· 435 435 set_hae(msb); \ 436 436 } 437 437 438 - extern spinlock_t t2_hae_lock; 438 + extern raw_spinlock_t t2_hae_lock; 439 439 440 440 /* 441 441 * NOTE: take T2_DENSE_MEM off in each readX/writeX routine, since ··· 448 448 unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; 449 449 unsigned long result, msb; 450 450 unsigned long flags; 451 - spin_lock_irqsave(&t2_hae_lock, flags); 451 + raw_spin_lock_irqsave(&t2_hae_lock, flags); 452 452 453 453 t2_set_hae; 454 454 455 455 result = *(vip) ((addr << 5) + T2_SPARSE_MEM + 0x00); 456 - spin_unlock_irqrestore(&t2_hae_lock, flags); 456 + raw_spin_unlock_irqrestore(&t2_hae_lock, flags); 457 457 return __kernel_extbl(result, addr & 3); 458 458 } 459 459 ··· 462 462 unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; 463 463 unsigned long result, msb; 464 464 unsigned long flags; 465 - spin_lock_irqsave(&t2_hae_lock, flags); 465 + raw_spin_lock_irqsave(&t2_hae_lock, flags); 466 466 467 467 t2_set_hae; 468 468 469 469 result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08); 470 - spin_unlock_irqrestore(&t2_hae_lock, flags); 470 + raw_spin_unlock_irqrestore(&t2_hae_lock, flags); 471 471 return __kernel_extwl(result, addr & 3); 472 472 } 473 473 ··· 480 480 unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; 481 481 unsigned long result, msb; 482 482 unsigned long flags; 483 - spin_lock_irqsave(&t2_hae_lock, flags); 483 + raw_spin_lock_irqsave(&t2_hae_lock, flags); 484 484 485 485 t2_set_hae; 486 486 487 487 result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18); 488 - spin_unlock_irqrestore(&t2_hae_lock, flags); 488 + raw_spin_unlock_irqrestore(&t2_hae_lock, flags); 489 489 return result & 0xffffffffUL; 490 490 } 491 491 ··· 494 494 unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; 495 495 unsigned long r0, r1, work, msb; 496 496 unsigned long flags; 497 - spin_lock_irqsave(&t2_hae_lock, flags); 497 + raw_spin_lock_irqsave(&t2_hae_lock, flags); 498 498 499 499 t2_set_hae; 500 500 501 501 work = (addr << 5) + T2_SPARSE_MEM + 0x18; 502 502 r0 = *(vuip)(work); 503 503 r1 = *(vuip)(work + (4 << 5)); 504 - spin_unlock_irqrestore(&t2_hae_lock, flags); 504 + raw_spin_unlock_irqrestore(&t2_hae_lock, flags); 505 505 return r1 << 32 | r0; 506 506 } 507 507 ··· 510 510 unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; 511 511 unsigned long msb, w; 512 512 unsigned long flags; 513 - spin_lock_irqsave(&t2_hae_lock, flags); 513 + raw_spin_lock_irqsave(&t2_hae_lock, flags); 514 514 515 515 t2_set_hae; 516 516 517 517 w = __kernel_insbl(b, addr & 3); 518 518 *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x00) = w; 519 - spin_unlock_irqrestore(&t2_hae_lock, flags); 519 + raw_spin_unlock_irqrestore(&t2_hae_lock, flags); 520 520 } 521 521 522 522 __EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr) ··· 524 524 unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; 525 525 unsigned long msb, w; 526 526 unsigned long flags; 527 - spin_lock_irqsave(&t2_hae_lock, flags); 527 + raw_spin_lock_irqsave(&t2_hae_lock, flags); 528 528 529 529 t2_set_hae; 530 530 531 531 w = __kernel_inswl(b, addr & 3); 532 532 *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08) = w; 533 - spin_unlock_irqrestore(&t2_hae_lock, flags); 533 + raw_spin_unlock_irqrestore(&t2_hae_lock, flags); 534 534 } 535 535 536 536 /* ··· 542 542 unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; 543 543 unsigned long msb; 544 544 unsigned long flags; 545 - spin_lock_irqsave(&t2_hae_lock, flags); 545 + raw_spin_lock_irqsave(&t2_hae_lock, flags); 546 546 547 547 
t2_set_hae; 548 548 549 549 *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18) = b; 550 - spin_unlock_irqrestore(&t2_hae_lock, flags); 550 + raw_spin_unlock_irqrestore(&t2_hae_lock, flags); 551 551 } 552 552 553 553 __EXTERN_INLINE void t2_writeq(u64 b, volatile void __iomem *xaddr) ··· 555 555 unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; 556 556 unsigned long msb, work; 557 557 unsigned long flags; 558 - spin_lock_irqsave(&t2_hae_lock, flags); 558 + raw_spin_lock_irqsave(&t2_hae_lock, flags); 559 559 560 560 t2_set_hae; 561 561 562 562 work = (addr << 5) + T2_SPARSE_MEM + 0x18; 563 563 *(vuip)work = b; 564 564 *(vuip)(work + (4 << 5)) = b >> 32; 565 - spin_unlock_irqrestore(&t2_hae_lock, flags); 565 + raw_spin_unlock_irqrestore(&t2_hae_lock, flags); 566 566 } 567 567 568 568 __EXTERN_INLINE void __iomem *t2_ioportmap(unsigned long addr)
+19 -19
arch/alpha/include/asm/spinlock.h
··· 12 12 * We make no fairness assumptions. They have a cost. 13 13 */ 14 14 15 - #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 16 - #define __raw_spin_is_locked(x) ((x)->lock != 0) 17 - #define __raw_spin_unlock_wait(x) \ 15 + #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) 16 + #define arch_spin_is_locked(x) ((x)->lock != 0) 17 + #define arch_spin_unlock_wait(x) \ 18 18 do { cpu_relax(); } while ((x)->lock) 19 19 20 - static inline void __raw_spin_unlock(raw_spinlock_t * lock) 20 + static inline void arch_spin_unlock(arch_spinlock_t * lock) 21 21 { 22 22 mb(); 23 23 lock->lock = 0; 24 24 } 25 25 26 - static inline void __raw_spin_lock(raw_spinlock_t * lock) 26 + static inline void arch_spin_lock(arch_spinlock_t * lock) 27 27 { 28 28 long tmp; 29 29 ··· 43 43 : "m"(lock->lock) : "memory"); 44 44 } 45 45 46 - static inline int __raw_spin_trylock(raw_spinlock_t *lock) 46 + static inline int arch_spin_trylock(arch_spinlock_t *lock) 47 47 { 48 48 return !test_and_set_bit(0, &lock->lock); 49 49 } 50 50 51 51 /***********************************************************/ 52 52 53 - static inline int __raw_read_can_lock(raw_rwlock_t *lock) 53 + static inline int arch_read_can_lock(arch_rwlock_t *lock) 54 54 { 55 55 return (lock->lock & 1) == 0; 56 56 } 57 57 58 - static inline int __raw_write_can_lock(raw_rwlock_t *lock) 58 + static inline int arch_write_can_lock(arch_rwlock_t *lock) 59 59 { 60 60 return lock->lock == 0; 61 61 } 62 62 63 - static inline void __raw_read_lock(raw_rwlock_t *lock) 63 + static inline void arch_read_lock(arch_rwlock_t *lock) 64 64 { 65 65 long regx; 66 66 ··· 80 80 : "m" (*lock) : "memory"); 81 81 } 82 82 83 - static inline void __raw_write_lock(raw_rwlock_t *lock) 83 + static inline void arch_write_lock(arch_rwlock_t *lock) 84 84 { 85 85 long regx; 86 86 ··· 100 100 : "m" (*lock) : "memory"); 101 101 } 102 102 103 - static inline int __raw_read_trylock(raw_rwlock_t * lock) 103 + static inline int arch_read_trylock(arch_rwlock_t * lock) 104 104 { 105 105 long regx; 106 106 int success; ··· 122 122 return success; 123 123 } 124 124 125 - static inline int __raw_write_trylock(raw_rwlock_t * lock) 125 + static inline int arch_write_trylock(arch_rwlock_t * lock) 126 126 { 127 127 long regx; 128 128 int success; ··· 144 144 return success; 145 145 } 146 146 147 - static inline void __raw_read_unlock(raw_rwlock_t * lock) 147 + static inline void arch_read_unlock(arch_rwlock_t * lock) 148 148 { 149 149 long regx; 150 150 __asm__ __volatile__( ··· 160 160 : "m" (*lock) : "memory"); 161 161 } 162 162 163 - static inline void __raw_write_unlock(raw_rwlock_t * lock) 163 + static inline void arch_write_unlock(arch_rwlock_t * lock) 164 164 { 165 165 mb(); 166 166 lock->lock = 0; 167 167 } 168 168 169 - #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) 170 - #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) 169 + #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) 170 + #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) 171 171 172 - #define _raw_spin_relax(lock) cpu_relax() 173 - #define _raw_read_relax(lock) cpu_relax() 174 - #define _raw_write_relax(lock) cpu_relax() 172 + #define arch_spin_relax(lock) cpu_relax() 173 + #define arch_read_relax(lock) cpu_relax() 174 + #define arch_write_relax(lock) cpu_relax() 175 175 176 176 #endif /* _ALPHA_SPINLOCK_H */
+4 -4
arch/alpha/include/asm/spinlock_types.h
··· 7 7 8 8 typedef struct { 9 9 volatile unsigned int lock; 10 - } raw_spinlock_t; 10 + } arch_spinlock_t; 11 11 12 - #define __RAW_SPIN_LOCK_UNLOCKED { 0 } 12 + #define __ARCH_SPIN_LOCK_UNLOCKED { 0 } 13 13 14 14 typedef struct { 15 15 volatile unsigned int lock; 16 - } raw_rwlock_t; 16 + } arch_rwlock_t; 17 17 18 - #define __RAW_RW_LOCK_UNLOCKED { 0 } 18 + #define __ARCH_RW_LOCK_UNLOCKED { 0 } 19 19 20 20 #endif
+1 -1
arch/alpha/kernel/core_t2.c
··· 74 74 # define DBG(args) 75 75 #endif 76 76 77 - DEFINE_SPINLOCK(t2_hae_lock); 77 + DEFINE_RAW_SPINLOCK(t2_hae_lock); 78 78 79 79 static volatile unsigned int t2_mcheck_any_expected; 80 80 static volatile unsigned int t2_mcheck_last_taken;
+2 -2
arch/alpha/kernel/irq.c
··· 81 81 #endif 82 82 83 83 if (irq < ACTUAL_NR_IRQS) { 84 - spin_lock_irqsave(&irq_desc[irq].lock, flags); 84 + raw_spin_lock_irqsave(&irq_desc[irq].lock, flags); 85 85 action = irq_desc[irq].action; 86 86 if (!action) 87 87 goto unlock; ··· 105 105 106 106 seq_putc(p, '\n'); 107 107 unlock: 108 - spin_unlock_irqrestore(&irq_desc[irq].lock, flags); 108 + raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags); 109 109 } else if (irq == ACTUAL_NR_IRQS) { 110 110 #ifdef CONFIG_SMP 111 111 seq_puts(p, "IPI: ");
+2 -2
arch/arm/include/asm/mach/irq.h
··· 26 26 */ 27 27 #define do_bad_IRQ(irq,desc) \ 28 28 do { \ 29 - spin_lock(&desc->lock); \ 29 + raw_spin_lock(&desc->lock); \ 30 30 handle_bad_irq(irq, desc); \ 31 - spin_unlock(&desc->lock); \ 31 + raw_spin_unlock(&desc->lock); \ 32 32 } while(0) 33 33 34 34 #endif
+20 -20
arch/arm/include/asm/spinlock.h
··· 17 17 * Locked value: 1 18 18 */ 19 19 20 - #define __raw_spin_is_locked(x) ((x)->lock != 0) 21 - #define __raw_spin_unlock_wait(lock) \ 22 - do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) 20 + #define arch_spin_is_locked(x) ((x)->lock != 0) 21 + #define arch_spin_unlock_wait(lock) \ 22 + do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) 23 23 24 - #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 24 + #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) 25 25 26 - static inline void __raw_spin_lock(raw_spinlock_t *lock) 26 + static inline void arch_spin_lock(arch_spinlock_t *lock) 27 27 { 28 28 unsigned long tmp; 29 29 ··· 43 43 smp_mb(); 44 44 } 45 45 46 - static inline int __raw_spin_trylock(raw_spinlock_t *lock) 46 + static inline int arch_spin_trylock(arch_spinlock_t *lock) 47 47 { 48 48 unsigned long tmp; 49 49 ··· 63 63 } 64 64 } 65 65 66 - static inline void __raw_spin_unlock(raw_spinlock_t *lock) 66 + static inline void arch_spin_unlock(arch_spinlock_t *lock) 67 67 { 68 68 smp_mb(); 69 69 ··· 86 86 * just write zero since the lock is exclusively held. 87 87 */ 88 88 89 - static inline void __raw_write_lock(raw_rwlock_t *rw) 89 + static inline void arch_write_lock(arch_rwlock_t *rw) 90 90 { 91 91 unsigned long tmp; 92 92 ··· 106 106 smp_mb(); 107 107 } 108 108 109 - static inline int __raw_write_trylock(raw_rwlock_t *rw) 109 + static inline int arch_write_trylock(arch_rwlock_t *rw) 110 110 { 111 111 unsigned long tmp; 112 112 ··· 126 126 } 127 127 } 128 128 129 - static inline void __raw_write_unlock(raw_rwlock_t *rw) 129 + static inline void arch_write_unlock(arch_rwlock_t *rw) 130 130 { 131 131 smp_mb(); 132 132 ··· 142 142 } 143 143 144 144 /* write_can_lock - would write_trylock() succeed? */ 145 - #define __raw_write_can_lock(x) ((x)->lock == 0) 145 + #define arch_write_can_lock(x) ((x)->lock == 0) 146 146 147 147 /* 148 148 * Read locks are a bit more hairy: ··· 156 156 * currently active. However, we know we won't have any write 157 157 * locks. 158 158 */ 159 - static inline void __raw_read_lock(raw_rwlock_t *rw) 159 + static inline void arch_read_lock(arch_rwlock_t *rw) 160 160 { 161 161 unsigned long tmp, tmp2; 162 162 ··· 176 176 smp_mb(); 177 177 } 178 178 179 - static inline void __raw_read_unlock(raw_rwlock_t *rw) 179 + static inline void arch_read_unlock(arch_rwlock_t *rw) 180 180 { 181 181 unsigned long tmp, tmp2; 182 182 ··· 198 198 : "cc"); 199 199 } 200 200 201 - static inline int __raw_read_trylock(raw_rwlock_t *rw) 201 + static inline int arch_read_trylock(arch_rwlock_t *rw) 202 202 { 203 203 unsigned long tmp, tmp2 = 1; 204 204 ··· 215 215 } 216 216 217 217 /* read_can_lock - would read_trylock() succeed? */ 218 - #define __raw_read_can_lock(x) ((x)->lock < 0x80000000) 218 + #define arch_read_can_lock(x) ((x)->lock < 0x80000000) 219 219 220 - #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) 221 - #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) 220 + #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) 221 + #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) 222 222 223 - #define _raw_spin_relax(lock) cpu_relax() 224 - #define _raw_read_relax(lock) cpu_relax() 225 - #define _raw_write_relax(lock) cpu_relax() 223 + #define arch_spin_relax(lock) cpu_relax() 224 + #define arch_read_relax(lock) cpu_relax() 225 + #define arch_write_relax(lock) cpu_relax() 226 226 227 227 #endif /* __ASM_SPINLOCK_H */
+4 -4
arch/arm/include/asm/spinlock_types.h
··· 7 7 8 8 typedef struct { 9 9 volatile unsigned int lock; 10 - } raw_spinlock_t; 10 + } arch_spinlock_t; 11 11 12 - #define __RAW_SPIN_LOCK_UNLOCKED { 0 } 12 + #define __ARCH_SPIN_LOCK_UNLOCKED { 0 } 13 13 14 14 typedef struct { 15 15 volatile unsigned int lock; 16 - } raw_rwlock_t; 16 + } arch_rwlock_t; 17 17 18 - #define __RAW_RW_LOCK_UNLOCKED { 0 } 18 + #define __ARCH_RW_LOCK_UNLOCKED { 0 } 19 19 20 20 #endif
+6 -6
arch/arm/kernel/irq.c
··· 69 69 } 70 70 71 71 if (i < NR_IRQS) { 72 - spin_lock_irqsave(&irq_desc[i].lock, flags); 72 + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); 73 73 action = irq_desc[i].action; 74 74 if (!action) 75 75 goto unlock; ··· 84 84 85 85 seq_putc(p, '\n'); 86 86 unlock: 87 - spin_unlock_irqrestore(&irq_desc[i].lock, flags); 87 + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); 88 88 } else if (i == NR_IRQS) { 89 89 #ifdef CONFIG_FIQ 90 90 show_fiq_list(p, v); ··· 139 139 } 140 140 141 141 desc = irq_desc + irq; 142 - spin_lock_irqsave(&desc->lock, flags); 142 + raw_spin_lock_irqsave(&desc->lock, flags); 143 143 desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; 144 144 if (iflags & IRQF_VALID) 145 145 desc->status &= ~IRQ_NOREQUEST; ··· 147 147 desc->status &= ~IRQ_NOPROBE; 148 148 if (!(iflags & IRQF_NOAUTOEN)) 149 149 desc->status &= ~IRQ_NOAUTOEN; 150 - spin_unlock_irqrestore(&desc->lock, flags); 150 + raw_spin_unlock_irqrestore(&desc->lock, flags); 151 151 } 152 152 153 153 void __init init_IRQ(void) ··· 166 166 { 167 167 pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->node, cpu); 168 168 169 - spin_lock_irq(&desc->lock); 169 + raw_spin_lock_irq(&desc->lock); 170 170 desc->chip->set_affinity(irq, cpumask_of(cpu)); 171 - spin_unlock_irq(&desc->lock); 171 + raw_spin_unlock_irq(&desc->lock); 172 172 } 173 173 174 174 /*
+4 -4
arch/arm/mach-ns9xxx/irq.c
··· 66 66 struct irqaction *action; 67 67 irqreturn_t action_ret; 68 68 69 - spin_lock(&desc->lock); 69 + raw_spin_lock(&desc->lock); 70 70 71 71 BUG_ON(desc->status & IRQ_INPROGRESS); 72 72 ··· 78 78 goto out_mask; 79 79 80 80 desc->status |= IRQ_INPROGRESS; 81 - spin_unlock(&desc->lock); 81 + raw_spin_unlock(&desc->lock); 82 82 83 83 action_ret = handle_IRQ_event(irq, action); 84 84 ··· 87 87 * Maybe this function should go to kernel/irq/chip.c? */ 88 88 note_interrupt(irq, desc, action_ret); 89 89 90 - spin_lock(&desc->lock); 90 + raw_spin_lock(&desc->lock); 91 91 desc->status &= ~IRQ_INPROGRESS; 92 92 93 93 if (desc->status & IRQ_DISABLED) ··· 97 97 /* ack unconditionally to unmask lower prio irqs */ 98 98 desc->chip->ack(irq); 99 99 100 - spin_unlock(&desc->lock); 100 + raw_spin_unlock(&desc->lock); 101 101 } 102 102 #define handle_irq handle_prio_irq 103 103 #endif
+2 -2
arch/avr32/kernel/irq.c
··· 42 42 } 43 43 44 44 if (i < NR_IRQS) { 45 - spin_lock_irqsave(&irq_desc[i].lock, flags); 45 + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); 46 46 action = irq_desc[i].action; 47 47 if (!action) 48 48 goto unlock; ··· 57 57 58 58 seq_putc(p, '\n'); 59 59 unlock: 60 - spin_unlock_irqrestore(&irq_desc[i].lock, flags); 60 + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); 61 61 } 62 62 63 63 return 0;
+30 -30
arch/blackfin/include/asm/spinlock.h
··· 17 17 asmlinkage void __raw_spin_lock_asm(volatile int *ptr); 18 18 asmlinkage int __raw_spin_trylock_asm(volatile int *ptr); 19 19 asmlinkage void __raw_spin_unlock_asm(volatile int *ptr); 20 - asmlinkage void __raw_read_lock_asm(volatile int *ptr); 21 - asmlinkage int __raw_read_trylock_asm(volatile int *ptr); 22 - asmlinkage void __raw_read_unlock_asm(volatile int *ptr); 23 - asmlinkage void __raw_write_lock_asm(volatile int *ptr); 24 - asmlinkage int __raw_write_trylock_asm(volatile int *ptr); 25 - asmlinkage void __raw_write_unlock_asm(volatile int *ptr); 20 + asmlinkage void arch_read_lock_asm(volatile int *ptr); 21 + asmlinkage int arch_read_trylock_asm(volatile int *ptr); 22 + asmlinkage void arch_read_unlock_asm(volatile int *ptr); 23 + asmlinkage void arch_write_lock_asm(volatile int *ptr); 24 + asmlinkage int arch_write_trylock_asm(volatile int *ptr); 25 + asmlinkage void arch_write_unlock_asm(volatile int *ptr); 26 26 27 - static inline int __raw_spin_is_locked(raw_spinlock_t *lock) 27 + static inline int arch_spin_is_locked(arch_spinlock_t *lock) 28 28 { 29 29 return __raw_spin_is_locked_asm(&lock->lock); 30 30 } 31 31 32 - static inline void __raw_spin_lock(raw_spinlock_t *lock) 32 + static inline void arch_spin_lock(arch_spinlock_t *lock) 33 33 { 34 34 __raw_spin_lock_asm(&lock->lock); 35 35 } 36 36 37 - #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 37 + #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) 38 38 39 - static inline int __raw_spin_trylock(raw_spinlock_t *lock) 39 + static inline int arch_spin_trylock(arch_spinlock_t *lock) 40 40 { 41 41 return __raw_spin_trylock_asm(&lock->lock); 42 42 } 43 43 44 - static inline void __raw_spin_unlock(raw_spinlock_t *lock) 44 + static inline void arch_spin_unlock(arch_spinlock_t *lock) 45 45 { 46 46 __raw_spin_unlock_asm(&lock->lock); 47 47 } 48 48 49 - static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) 49 + static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) 50 50 { 51 - while (__raw_spin_is_locked(lock)) 51 + while (arch_spin_is_locked(lock)) 52 52 cpu_relax(); 53 53 } 54 54 55 - static inline int __raw_read_can_lock(raw_rwlock_t *rw) 55 + static inline int arch_read_can_lock(arch_rwlock_t *rw) 56 56 { 57 57 return __raw_uncached_fetch_asm(&rw->lock) > 0; 58 58 } 59 59 60 - static inline int __raw_write_can_lock(raw_rwlock_t *rw) 60 + static inline int arch_write_can_lock(arch_rwlock_t *rw) 61 61 { 62 62 return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS; 63 63 } 64 64 65 - static inline void __raw_read_lock(raw_rwlock_t *rw) 65 + static inline void arch_read_lock(arch_rwlock_t *rw) 66 66 { 67 - __raw_read_lock_asm(&rw->lock); 67 + arch_read_lock_asm(&rw->lock); 68 68 } 69 69 70 - static inline int __raw_read_trylock(raw_rwlock_t *rw) 70 + static inline int arch_read_trylock(arch_rwlock_t *rw) 71 71 { 72 - return __raw_read_trylock_asm(&rw->lock); 72 + return arch_read_trylock_asm(&rw->lock); 73 73 } 74 74 75 - static inline void __raw_read_unlock(raw_rwlock_t *rw) 75 + static inline void arch_read_unlock(arch_rwlock_t *rw) 76 76 { 77 - __raw_read_unlock_asm(&rw->lock); 77 + arch_read_unlock_asm(&rw->lock); 78 78 } 79 79 80 - static inline void __raw_write_lock(raw_rwlock_t *rw) 80 + static inline void arch_write_lock(arch_rwlock_t *rw) 81 81 { 82 - __raw_write_lock_asm(&rw->lock); 82 + arch_write_lock_asm(&rw->lock); 83 83 } 84 84 85 - static inline int __raw_write_trylock(raw_rwlock_t *rw) 85 + static inline int arch_write_trylock(arch_rwlock_t *rw) 86 86 { 
87 - return __raw_write_trylock_asm(&rw->lock); 87 + return arch_write_trylock_asm(&rw->lock); 88 88 } 89 89 90 - static inline void __raw_write_unlock(raw_rwlock_t *rw) 90 + static inline void arch_write_unlock(arch_rwlock_t *rw) 91 91 { 92 - __raw_write_unlock_asm(&rw->lock); 92 + arch_write_unlock_asm(&rw->lock); 93 93 } 94 94 95 - #define _raw_spin_relax(lock) cpu_relax() 96 - #define _raw_read_relax(lock) cpu_relax() 97 - #define _raw_write_relax(lock) cpu_relax() 95 + #define arch_spin_relax(lock) cpu_relax() 96 + #define arch_read_relax(lock) cpu_relax() 97 + #define arch_write_relax(lock) cpu_relax() 98 98 99 99 #endif 100 100
+4 -4
arch/blackfin/include/asm/spinlock_types.h
··· 15 15 16 16 typedef struct { 17 17 volatile unsigned int lock; 18 - } raw_spinlock_t; 18 + } arch_spinlock_t; 19 19 20 - #define __RAW_SPIN_LOCK_UNLOCKED { 0 } 20 + #define __ARCH_SPIN_LOCK_UNLOCKED { 0 } 21 21 22 22 typedef struct { 23 23 volatile unsigned int lock; 24 - } raw_rwlock_t; 24 + } arch_rwlock_t; 25 25 26 - #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } 26 + #define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } 27 27 28 28 #endif
+3 -3
arch/blackfin/kernel/irqchip.c
··· 23 23 24 24 static struct irq_desc bad_irq_desc = { 25 25 .handle_irq = handle_bad_irq, 26 - .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock), 26 + .lock = __RAW_SPIN_LOCK_UNLOCKED(bad_irq_desc.lock), 27 27 }; 28 28 29 29 #ifdef CONFIG_CPUMASK_OFFSTACK ··· 39 39 unsigned long flags; 40 40 41 41 if (i < NR_IRQS) { 42 - spin_lock_irqsave(&irq_desc[i].lock, flags); 42 + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); 43 43 action = irq_desc[i].action; 44 44 if (!action) 45 45 goto skip; ··· 53 53 54 54 seq_putc(p, '\n'); 55 55 skip: 56 - spin_unlock_irqrestore(&irq_desc[i].lock, flags); 56 + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); 57 57 } else if (i == NR_IRQS) { 58 58 seq_printf(p, "NMI: "); 59 59 for_each_online_cpu(j)
+2 -2
arch/blackfin/kernel/traps.c
··· 1140 1140 if (fp->ipend & ~0x3F) { 1141 1141 for (i = 0; i < (NR_IRQS - 1); i++) { 1142 1142 if (!in_atomic) 1143 - spin_lock_irqsave(&irq_desc[i].lock, flags); 1143 + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); 1144 1144 1145 1145 action = irq_desc[i].action; 1146 1146 if (!action) ··· 1155 1155 verbose_printk("\n"); 1156 1156 unlock: 1157 1157 if (!in_atomic) 1158 - spin_unlock_irqrestore(&irq_desc[i].lock, flags); 1158 + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); 1159 1159 } 1160 1160 } 1161 1161
+31 -31
arch/cris/include/arch-v32/arch/spinlock.h
··· 9 9 extern void cris_spin_lock(void *l); 10 10 extern int cris_spin_trylock(void *l); 11 11 12 - static inline int __raw_spin_is_locked(raw_spinlock_t *x) 12 + static inline int arch_spin_is_locked(arch_spinlock_t *x) 13 13 { 14 14 return *(volatile signed char *)(&(x)->slock) <= 0; 15 15 } 16 16 17 - static inline void __raw_spin_unlock(raw_spinlock_t *lock) 17 + static inline void arch_spin_unlock(arch_spinlock_t *lock) 18 18 { 19 19 __asm__ volatile ("move.d %1,%0" \ 20 20 : "=m" (lock->slock) \ ··· 22 22 : "memory"); 23 23 } 24 24 25 - static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) 25 + static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) 26 26 { 27 - while (__raw_spin_is_locked(lock)) 27 + while (arch_spin_is_locked(lock)) 28 28 cpu_relax(); 29 29 } 30 30 31 - static inline int __raw_spin_trylock(raw_spinlock_t *lock) 31 + static inline int arch_spin_trylock(arch_spinlock_t *lock) 32 32 { 33 33 return cris_spin_trylock((void *)&lock->slock); 34 34 } 35 35 36 - static inline void __raw_spin_lock(raw_spinlock_t *lock) 36 + static inline void arch_spin_lock(arch_spinlock_t *lock) 37 37 { 38 38 cris_spin_lock((void *)&lock->slock); 39 39 } 40 40 41 41 static inline void 42 - __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) 42 + arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) 43 43 { 44 - __raw_spin_lock(lock); 44 + arch_spin_lock(lock); 45 45 } 46 46 47 47 /* ··· 56 56 * 57 57 */ 58 58 59 - static inline int __raw_read_can_lock(raw_rwlock_t *x) 59 + static inline int arch_read_can_lock(arch_rwlock_t *x) 60 60 { 61 61 return (int)(x)->lock > 0; 62 62 } 63 63 64 - static inline int __raw_write_can_lock(raw_rwlock_t *x) 64 + static inline int arch_write_can_lock(arch_rwlock_t *x) 65 65 { 66 66 return (x)->lock == RW_LOCK_BIAS; 67 67 } 68 68 69 - static inline void __raw_read_lock(raw_rwlock_t *rw) 69 + static inline void arch_read_lock(arch_rwlock_t *rw) 70 70 { 71 - __raw_spin_lock(&rw->slock); 71 + arch_spin_lock(&rw->slock); 72 72 while (rw->lock == 0); 73 73 rw->lock--; 74 - __raw_spin_unlock(&rw->slock); 74 + arch_spin_unlock(&rw->slock); 75 75 } 76 76 77 - static inline void __raw_write_lock(raw_rwlock_t *rw) 77 + static inline void arch_write_lock(arch_rwlock_t *rw) 78 78 { 79 - __raw_spin_lock(&rw->slock); 79 + arch_spin_lock(&rw->slock); 80 80 while (rw->lock != RW_LOCK_BIAS); 81 81 rw->lock = 0; 82 - __raw_spin_unlock(&rw->slock); 82 + arch_spin_unlock(&rw->slock); 83 83 } 84 84 85 - static inline void __raw_read_unlock(raw_rwlock_t *rw) 85 + static inline void arch_read_unlock(arch_rwlock_t *rw) 86 86 { 87 - __raw_spin_lock(&rw->slock); 87 + arch_spin_lock(&rw->slock); 88 88 rw->lock++; 89 - __raw_spin_unlock(&rw->slock); 89 + arch_spin_unlock(&rw->slock); 90 90 } 91 91 92 - static inline void __raw_write_unlock(raw_rwlock_t *rw) 92 + static inline void arch_write_unlock(arch_rwlock_t *rw) 93 93 { 94 - __raw_spin_lock(&rw->slock); 94 + arch_spin_lock(&rw->slock); 95 95 while (rw->lock != RW_LOCK_BIAS); 96 96 rw->lock = RW_LOCK_BIAS; 97 - __raw_spin_unlock(&rw->slock); 97 + arch_spin_unlock(&rw->slock); 98 98 } 99 99 100 - static inline int __raw_read_trylock(raw_rwlock_t *rw) 100 + static inline int arch_read_trylock(arch_rwlock_t *rw) 101 101 { 102 102 int ret = 0; 103 - __raw_spin_lock(&rw->slock); 103 + arch_spin_lock(&rw->slock); 104 104 if (rw->lock != 0) { 105 105 rw->lock--; 106 106 ret = 1; 107 107 } 108 - __raw_spin_unlock(&rw->slock); 108 + arch_spin_unlock(&rw->slock); 109 109 return ret; 110 110 } 111 111 
112 - static inline int __raw_write_trylock(raw_rwlock_t *rw) 112 + static inline int arch_write_trylock(arch_rwlock_t *rw) 113 113 { 114 114 int ret = 0; 115 - __raw_spin_lock(&rw->slock); 115 + arch_spin_lock(&rw->slock); 116 116 if (rw->lock == RW_LOCK_BIAS) { 117 117 rw->lock = 0; 118 118 ret = 1; 119 119 } 120 - __raw_spin_unlock(&rw->slock); 120 + arch_spin_unlock(&rw->slock); 121 121 return 1; 122 122 } 123 123 124 124 #define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock) 125 125 #define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock) 126 126 127 - #define _raw_spin_relax(lock) cpu_relax() 128 - #define _raw_read_relax(lock) cpu_relax() 129 - #define _raw_write_relax(lock) cpu_relax() 127 + #define arch_spin_relax(lock) cpu_relax() 128 + #define arch_read_relax(lock) cpu_relax() 129 + #define arch_write_relax(lock) cpu_relax() 130 130 131 131 #endif /* __ASM_ARCH_SPINLOCK_H */
+2 -2
arch/cris/kernel/irq.c
··· 52 52 } 53 53 54 54 if (i < NR_IRQS) { 55 - spin_lock_irqsave(&irq_desc[i].lock, flags); 55 + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); 56 56 action = irq_desc[i].action; 57 57 if (!action) 58 58 goto skip; ··· 71 71 72 72 seq_putc(p, '\n'); 73 73 skip: 74 - spin_unlock_irqrestore(&irq_desc[i].lock, flags); 74 + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); 75 75 } 76 76 return 0; 77 77 }
+2 -2
arch/frv/kernel/irq.c
··· 69 69 } 70 70 71 71 if (i < NR_IRQS) { 72 - spin_lock_irqsave(&irq_desc[i].lock, flags); 72 + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); 73 73 action = irq_desc[i].action; 74 74 if (action) { 75 75 seq_printf(p, "%3d: ", i); ··· 85 85 seq_putc(p, '\n'); 86 86 } 87 87 88 - spin_unlock_irqrestore(&irq_desc[i].lock, flags); 88 + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); 89 89 } else if (i == NR_IRQS) { 90 90 seq_printf(p, "Err: %10u\n", atomic_read(&irq_err_count)); 91 91 }
+2 -2
arch/h8300/kernel/irq.c
··· 186 186 seq_puts(p, " CPU0"); 187 187 188 188 if (i < NR_IRQS) { 189 - spin_lock_irqsave(&irq_desc[i].lock, flags); 189 + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); 190 190 action = irq_desc[i].action; 191 191 if (!action) 192 192 goto unlock; ··· 200 200 seq_printf(p, ", %s", action->name); 201 201 seq_putc(p, '\n'); 202 202 unlock: 203 - spin_unlock_irqrestore(&irq_desc[i].lock, flags); 203 + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); 204 204 } 205 205 return 0; 206 206 }
+1 -1
arch/ia64/include/asm/bitops.h
··· 127 127 * @addr: Address to start counting from 128 128 * 129 129 * Similarly to clear_bit_unlock, the implementation uses a store 130 - * with release semantics. See also __raw_spin_unlock(). 130 + * with release semantics. See also arch_spin_unlock(). 131 131 */ 132 132 static __inline__ void 133 133 __clear_bit_unlock(int nr, void *addr)
+38 -38
arch/ia64/include/asm/spinlock.h
··· 17 17 #include <asm/intrinsics.h> 18 18 #include <asm/system.h> 19 19 20 - #define __raw_spin_lock_init(x) ((x)->lock = 0) 20 + #define arch_spin_lock_init(x) ((x)->lock = 0) 21 21 22 22 /* 23 23 * Ticket locks are conceptually two parts, one indicating the current head of ··· 38 38 #define TICKET_BITS 15 39 39 #define TICKET_MASK ((1 << TICKET_BITS) - 1) 40 40 41 - static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) 41 + static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock) 42 42 { 43 43 int *p = (int *)&lock->lock, ticket, serve; 44 44 ··· 58 58 } 59 59 } 60 60 61 - static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) 61 + static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock) 62 62 { 63 63 int tmp = ACCESS_ONCE(lock->lock); 64 64 ··· 67 67 return 0; 68 68 } 69 69 70 - static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) 70 + static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) 71 71 { 72 72 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp; 73 73 ··· 75 75 ACCESS_ONCE(*p) = (tmp + 2) & ~1; 76 76 } 77 77 78 - static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock) 78 + static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock) 79 79 { 80 80 int *p = (int *)&lock->lock, ticket; 81 81 ··· 89 89 } 90 90 } 91 91 92 - static inline int __ticket_spin_is_locked(raw_spinlock_t *lock) 92 + static inline int __ticket_spin_is_locked(arch_spinlock_t *lock) 93 93 { 94 94 long tmp = ACCESS_ONCE(lock->lock); 95 95 96 96 return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK); 97 97 } 98 98 99 - static inline int __ticket_spin_is_contended(raw_spinlock_t *lock) 99 + static inline int __ticket_spin_is_contended(arch_spinlock_t *lock) 100 100 { 101 101 long tmp = ACCESS_ONCE(lock->lock); 102 102 103 103 return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1; 104 104 } 105 105 106 - static inline int __raw_spin_is_locked(raw_spinlock_t *lock) 106 + static inline int arch_spin_is_locked(arch_spinlock_t *lock) 107 107 { 108 108 return __ticket_spin_is_locked(lock); 109 109 } 110 110 111 - static inline int __raw_spin_is_contended(raw_spinlock_t *lock) 111 + static inline int arch_spin_is_contended(arch_spinlock_t *lock) 112 112 { 113 113 return __ticket_spin_is_contended(lock); 114 114 } 115 - #define __raw_spin_is_contended __raw_spin_is_contended 115 + #define arch_spin_is_contended arch_spin_is_contended 116 116 117 - static __always_inline void __raw_spin_lock(raw_spinlock_t *lock) 117 + static __always_inline void arch_spin_lock(arch_spinlock_t *lock) 118 118 { 119 119 __ticket_spin_lock(lock); 120 120 } 121 121 122 - static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock) 122 + static __always_inline int arch_spin_trylock(arch_spinlock_t *lock) 123 123 { 124 124 return __ticket_spin_trylock(lock); 125 125 } 126 126 127 - static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock) 127 + static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) 128 128 { 129 129 __ticket_spin_unlock(lock); 130 130 } 131 131 132 - static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock, 132 + static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock, 133 133 unsigned long flags) 134 134 { 135 - __raw_spin_lock(lock); 135 + arch_spin_lock(lock); 136 136 } 137 137 138 - static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) 138 + static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) 139 139 { 
140 140 __ticket_spin_unlock_wait(lock); 141 141 } 142 142 143 - #define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0) 144 - #define __raw_write_can_lock(rw) (*(volatile int *)(rw) == 0) 143 + #define arch_read_can_lock(rw) (*(volatile int *)(rw) >= 0) 144 + #define arch_write_can_lock(rw) (*(volatile int *)(rw) == 0) 145 145 146 146 #ifdef ASM_SUPPORTED 147 147 148 148 static __always_inline void 149 - __raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags) 149 + arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags) 150 150 { 151 151 __asm__ __volatile__ ( 152 152 "tbit.nz p6, p0 = %1,%2\n" ··· 169 169 : "p6", "p7", "r2", "memory"); 170 170 } 171 171 172 - #define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0) 172 + #define arch_read_lock(lock) arch_read_lock_flags(lock, 0) 173 173 174 174 #else /* !ASM_SUPPORTED */ 175 175 176 - #define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw) 176 + #define arch_read_lock_flags(rw, flags) arch_read_lock(rw) 177 177 178 - #define __raw_read_lock(rw) \ 178 + #define arch_read_lock(rw) \ 179 179 do { \ 180 - raw_rwlock_t *__read_lock_ptr = (rw); \ 180 + arch_rwlock_t *__read_lock_ptr = (rw); \ 181 181 \ 182 182 while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \ 183 183 ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ ··· 188 188 189 189 #endif /* !ASM_SUPPORTED */ 190 190 191 - #define __raw_read_unlock(rw) \ 191 + #define arch_read_unlock(rw) \ 192 192 do { \ 193 - raw_rwlock_t *__read_lock_ptr = (rw); \ 193 + arch_rwlock_t *__read_lock_ptr = (rw); \ 194 194 ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ 195 195 } while (0) 196 196 197 197 #ifdef ASM_SUPPORTED 198 198 199 199 static __always_inline void 200 - __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags) 200 + arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags) 201 201 { 202 202 __asm__ __volatile__ ( 203 203 "tbit.nz p6, p0 = %1, %2\n" ··· 221 221 : "ar.ccv", "p6", "p7", "r2", "r29", "memory"); 222 222 } 223 223 224 - #define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0) 224 + #define arch_write_lock(rw) arch_write_lock_flags(rw, 0) 225 225 226 - #define __raw_write_trylock(rw) \ 226 + #define arch_write_trylock(rw) \ 227 227 ({ \ 228 228 register long result; \ 229 229 \ ··· 235 235 (result == 0); \ 236 236 }) 237 237 238 - static inline void __raw_write_unlock(raw_rwlock_t *x) 238 + static inline void arch_write_unlock(arch_rwlock_t *x) 239 239 { 240 240 u8 *y = (u8 *)x; 241 241 barrier(); ··· 244 244 245 245 #else /* !ASM_SUPPORTED */ 246 246 247 - #define __raw_write_lock_flags(l, flags) __raw_write_lock(l) 247 + #define arch_write_lock_flags(l, flags) arch_write_lock(l) 248 248 249 - #define __raw_write_lock(l) \ 249 + #define arch_write_lock(l) \ 250 250 ({ \ 251 251 __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \ 252 252 __u32 *ia64_write_lock_ptr = (__u32 *) (l); \ ··· 257 257 } while (ia64_val); \ 258 258 }) 259 259 260 - #define __raw_write_trylock(rw) \ 260 + #define arch_write_trylock(rw) \ 261 261 ({ \ 262 262 __u64 ia64_val; \ 263 263 __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \ ··· 265 265 (ia64_val == 0); \ 266 266 }) 267 267 268 - static inline void __raw_write_unlock(raw_rwlock_t *x) 268 + static inline void arch_write_unlock(arch_rwlock_t *x) 269 269 { 270 270 barrier(); 271 271 x->write_lock = 0; ··· 273 273 274 274 #endif /* !ASM_SUPPORTED */ 275 275 276 - static inline int __raw_read_trylock(raw_rwlock_t *x) 276 + static inline int arch_read_trylock(arch_rwlock_t 
*x) 277 277 { 278 278 union { 279 - raw_rwlock_t lock; 279 + arch_rwlock_t lock; 280 280 __u32 word; 281 281 } old, new; 282 282 old.lock = new.lock = *x; ··· 285 285 return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word; 286 286 } 287 287 288 - #define _raw_spin_relax(lock) cpu_relax() 289 - #define _raw_read_relax(lock) cpu_relax() 290 - #define _raw_write_relax(lock) cpu_relax() 288 + #define arch_spin_relax(lock) cpu_relax() 289 + #define arch_read_relax(lock) cpu_relax() 290 + #define arch_write_relax(lock) cpu_relax() 291 291 292 292 #endif /* _ASM_IA64_SPINLOCK_H */
+4 -4
arch/ia64/include/asm/spinlock_types.h
··· 7 7 8 8 typedef struct { 9 9 volatile unsigned int lock; 10 - } raw_spinlock_t; 10 + } arch_spinlock_t; 11 11 12 - #define __RAW_SPIN_LOCK_UNLOCKED { 0 } 12 + #define __ARCH_SPIN_LOCK_UNLOCKED { 0 } 13 13 14 14 typedef struct { 15 15 volatile unsigned int read_counter : 31; 16 16 volatile unsigned int write_lock : 1; 17 - } raw_rwlock_t; 17 + } arch_rwlock_t; 18 18 19 - #define __RAW_RW_LOCK_UNLOCKED { 0, 0 } 19 + #define __ARCH_RW_LOCK_UNLOCKED { 0, 0 } 20 20 21 21 #endif
+3 -3
arch/ia64/kernel/iosapic.c
··· 793 793 goto unlock_iosapic_lock; 794 794 } 795 795 796 - spin_lock(&irq_desc[irq].lock); 796 + raw_spin_lock(&irq_desc[irq].lock); 797 797 dest = get_target_cpu(gsi, irq); 798 798 dmode = choose_dmode(); 799 799 err = register_intr(gsi, irq, dmode, polarity, trigger); 800 800 if (err < 0) { 801 - spin_unlock(&irq_desc[irq].lock); 801 + raw_spin_unlock(&irq_desc[irq].lock); 802 802 irq = err; 803 803 goto unlock_iosapic_lock; 804 804 } ··· 817 817 (polarity == IOSAPIC_POL_HIGH ? "high" : "low"), 818 818 cpu_logical_id(dest), dest, irq_to_vector(irq)); 819 819 820 - spin_unlock(&irq_desc[irq].lock); 820 + raw_spin_unlock(&irq_desc[irq].lock); 821 821 unlock_iosapic_lock: 822 822 spin_unlock_irqrestore(&iosapic_lock, flags); 823 823 return irq;
+2 -2
arch/ia64/kernel/irq.c
··· 71 71 } 72 72 73 73 if (i < NR_IRQS) { 74 - spin_lock_irqsave(&irq_desc[i].lock, flags); 74 + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); 75 75 action = irq_desc[i].action; 76 76 if (!action) 77 77 goto skip; ··· 91 91 92 92 seq_putc(p, '\n'); 93 93 skip: 94 - spin_unlock_irqrestore(&irq_desc[i].lock, flags); 94 + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); 95 95 } else if (i == NR_IRQS) 96 96 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); 97 97 return 0;
+2 -2
arch/ia64/kernel/irq_ia64.c
··· 345 345 346 346 desc = irq_desc + irq; 347 347 cfg = irq_cfg + irq; 348 - spin_lock(&desc->lock); 348 + raw_spin_lock(&desc->lock); 349 349 if (!cfg->move_cleanup_count) 350 350 goto unlock; 351 351 ··· 358 358 spin_unlock_irqrestore(&vector_lock, flags); 359 359 cfg->move_cleanup_count--; 360 360 unlock: 361 - spin_unlock(&desc->lock); 361 + raw_spin_unlock(&desc->lock); 362 362 } 363 363 return IRQ_HANDLED; 364 364 }
+24 -24
arch/m32r/include/asm/spinlock.h
··· 24 24 * We make no fairness assumptions. They have a cost. 25 25 */ 26 26 27 - #define __raw_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0) 28 - #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 29 - #define __raw_spin_unlock_wait(x) \ 30 - do { cpu_relax(); } while (__raw_spin_is_locked(x)) 27 + #define arch_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0) 28 + #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) 29 + #define arch_spin_unlock_wait(x) \ 30 + do { cpu_relax(); } while (arch_spin_is_locked(x)) 31 31 32 32 /** 33 - * __raw_spin_trylock - Try spin lock and return a result 33 + * arch_spin_trylock - Try spin lock and return a result 34 34 * @lock: Pointer to the lock variable 35 35 * 36 - * __raw_spin_trylock() tries to get the lock and returns a result. 36 + * arch_spin_trylock() tries to get the lock and returns a result. 37 37 * On the m32r, the result value is 1 (= Success) or 0 (= Failure). 38 38 */ 39 - static inline int __raw_spin_trylock(raw_spinlock_t *lock) 39 + static inline int arch_spin_trylock(arch_spinlock_t *lock) 40 40 { 41 41 int oldval; 42 42 unsigned long tmp1, tmp2; ··· 50 50 * } 51 51 */ 52 52 __asm__ __volatile__ ( 53 - "# __raw_spin_trylock \n\t" 53 + "# arch_spin_trylock \n\t" 54 54 "ldi %1, #0; \n\t" 55 55 "mvfc %2, psw; \n\t" 56 56 "clrpsw #0x40 -> nop; \n\t" ··· 69 69 return (oldval > 0); 70 70 } 71 71 72 - static inline void __raw_spin_lock(raw_spinlock_t *lock) 72 + static inline void arch_spin_lock(arch_spinlock_t *lock) 73 73 { 74 74 unsigned long tmp0, tmp1; 75 75 ··· 84 84 * } 85 85 */ 86 86 __asm__ __volatile__ ( 87 - "# __raw_spin_lock \n\t" 87 + "# arch_spin_lock \n\t" 88 88 ".fillinsn \n" 89 89 "1: \n\t" 90 90 "mvfc %1, psw; \n\t" ··· 111 111 ); 112 112 } 113 113 114 - static inline void __raw_spin_unlock(raw_spinlock_t *lock) 114 + static inline void arch_spin_unlock(arch_spinlock_t *lock) 115 115 { 116 116 mb(); 117 117 lock->slock = 1; ··· 140 140 * read_can_lock - would read_trylock() succeed? 141 141 * @lock: the rwlock in question. 142 142 */ 143 - #define __raw_read_can_lock(x) ((int)(x)->lock > 0) 143 + #define arch_read_can_lock(x) ((int)(x)->lock > 0) 144 144 145 145 /** 146 146 * write_can_lock - would write_trylock() succeed? 147 147 * @lock: the rwlock in question. 
148 148 */ 149 - #define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) 149 + #define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) 150 150 151 - static inline void __raw_read_lock(raw_rwlock_t *rw) 151 + static inline void arch_read_lock(arch_rwlock_t *rw) 152 152 { 153 153 unsigned long tmp0, tmp1; 154 154 ··· 199 199 ); 200 200 } 201 201 202 - static inline void __raw_write_lock(raw_rwlock_t *rw) 202 + static inline void arch_write_lock(arch_rwlock_t *rw) 203 203 { 204 204 unsigned long tmp0, tmp1, tmp2; 205 205 ··· 252 252 ); 253 253 } 254 254 255 - static inline void __raw_read_unlock(raw_rwlock_t *rw) 255 + static inline void arch_read_unlock(arch_rwlock_t *rw) 256 256 { 257 257 unsigned long tmp0, tmp1; 258 258 ··· 274 274 ); 275 275 } 276 276 277 - static inline void __raw_write_unlock(raw_rwlock_t *rw) 277 + static inline void arch_write_unlock(arch_rwlock_t *rw) 278 278 { 279 279 unsigned long tmp0, tmp1, tmp2; 280 280 ··· 298 298 ); 299 299 } 300 300 301 - static inline int __raw_read_trylock(raw_rwlock_t *lock) 301 + static inline int arch_read_trylock(arch_rwlock_t *lock) 302 302 { 303 303 atomic_t *count = (atomic_t*)lock; 304 304 if (atomic_dec_return(count) >= 0) ··· 307 307 return 0; 308 308 } 309 309 310 - static inline int __raw_write_trylock(raw_rwlock_t *lock) 310 + static inline int arch_write_trylock(arch_rwlock_t *lock) 311 311 { 312 312 atomic_t *count = (atomic_t *)lock; 313 313 if (atomic_sub_and_test(RW_LOCK_BIAS, count)) ··· 316 316 return 0; 317 317 } 318 318 319 - #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) 320 - #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) 319 + #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) 320 + #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) 321 321 322 - #define _raw_spin_relax(lock) cpu_relax() 323 - #define _raw_read_relax(lock) cpu_relax() 324 - #define _raw_write_relax(lock) cpu_relax() 322 + #define arch_spin_relax(lock) cpu_relax() 323 + #define arch_read_relax(lock) cpu_relax() 324 + #define arch_write_relax(lock) cpu_relax() 325 325 326 326 #endif /* _ASM_M32R_SPINLOCK_H */
+4 -4
arch/m32r/include/asm/spinlock_types.h
··· 7 7 8 8 typedef struct { 9 9 volatile int slock; 10 - } raw_spinlock_t; 10 + } arch_spinlock_t; 11 11 12 - #define __RAW_SPIN_LOCK_UNLOCKED { 1 } 12 + #define __ARCH_SPIN_LOCK_UNLOCKED { 1 } 13 13 14 14 typedef struct { 15 15 volatile int lock; 16 - } raw_rwlock_t; 16 + } arch_rwlock_t; 17 17 18 18 #define RW_LOCK_BIAS 0x01000000 19 19 #define RW_LOCK_BIAS_STR "0x01000000" 20 20 21 - #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } 21 + #define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } 22 22 23 23 #endif /* _ASM_M32R_SPINLOCK_TYPES_H */
+2 -2
arch/m32r/kernel/irq.c
··· 40 40 } 41 41 42 42 if (i < NR_IRQS) { 43 - spin_lock_irqsave(&irq_desc[i].lock, flags); 43 + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); 44 44 action = irq_desc[i].action; 45 45 if (!action) 46 46 goto skip; ··· 59 59 60 60 seq_putc(p, '\n'); 61 61 skip: 62 - spin_unlock_irqrestore(&irq_desc[i].lock, flags); 62 + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); 63 63 } 64 64 return 0; 65 65 }
+2 -2
arch/microblaze/kernel/irq.c
··· 68 68 } 69 69 70 70 if (i < nr_irq) { 71 - spin_lock_irqsave(&irq_desc[i].lock, flags); 71 + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); 72 72 action = irq_desc[i].action; 73 73 if (!action) 74 74 goto skip; ··· 89 89 90 90 seq_putc(p, '\n'); 91 91 skip: 92 - spin_unlock_irqrestore(&irq_desc[i].lock, flags); 92 + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); 93 93 } 94 94 return 0; 95 95 }
+39 -39
arch/mips/include/asm/spinlock.h
··· 34 34 * becomes equal to the the initial value of the tail. 35 35 */ 36 36 37 - static inline int __raw_spin_is_locked(raw_spinlock_t *lock) 37 + static inline int arch_spin_is_locked(arch_spinlock_t *lock) 38 38 { 39 39 unsigned int counters = ACCESS_ONCE(lock->lock); 40 40 41 41 return ((counters >> 14) ^ counters) & 0x1fff; 42 42 } 43 43 44 - #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 45 - #define __raw_spin_unlock_wait(x) \ 46 - while (__raw_spin_is_locked(x)) { cpu_relax(); } 44 + #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) 45 + #define arch_spin_unlock_wait(x) \ 46 + while (arch_spin_is_locked(x)) { cpu_relax(); } 47 47 48 - static inline int __raw_spin_is_contended(raw_spinlock_t *lock) 48 + static inline int arch_spin_is_contended(arch_spinlock_t *lock) 49 49 { 50 50 unsigned int counters = ACCESS_ONCE(lock->lock); 51 51 52 52 return (((counters >> 14) - counters) & 0x1fff) > 1; 53 53 } 54 - #define __raw_spin_is_contended __raw_spin_is_contended 54 + #define arch_spin_is_contended arch_spin_is_contended 55 55 56 - static inline void __raw_spin_lock(raw_spinlock_t *lock) 56 + static inline void arch_spin_lock(arch_spinlock_t *lock) 57 57 { 58 58 int my_ticket; 59 59 int tmp; 60 60 61 61 if (R10000_LLSC_WAR) { 62 62 __asm__ __volatile__ ( 63 - " .set push # __raw_spin_lock \n" 63 + " .set push # arch_spin_lock \n" 64 64 " .set noreorder \n" 65 65 " \n" 66 66 "1: ll %[ticket], %[ticket_ptr] \n" ··· 94 94 [my_ticket] "=&r" (my_ticket)); 95 95 } else { 96 96 __asm__ __volatile__ ( 97 - " .set push # __raw_spin_lock \n" 97 + " .set push # arch_spin_lock \n" 98 98 " .set noreorder \n" 99 99 " \n" 100 100 " ll %[ticket], %[ticket_ptr] \n" ··· 134 134 smp_llsc_mb(); 135 135 } 136 136 137 - static inline void __raw_spin_unlock(raw_spinlock_t *lock) 137 + static inline void arch_spin_unlock(arch_spinlock_t *lock) 138 138 { 139 139 int tmp; 140 140 ··· 142 142 143 143 if (R10000_LLSC_WAR) { 144 144 __asm__ __volatile__ ( 145 - " # __raw_spin_unlock \n" 145 + " # arch_spin_unlock \n" 146 146 "1: ll %[ticket], %[ticket_ptr] \n" 147 147 " addiu %[ticket], %[ticket], 1 \n" 148 148 " ori %[ticket], %[ticket], 0x2000 \n" ··· 153 153 [ticket] "=&r" (tmp)); 154 154 } else { 155 155 __asm__ __volatile__ ( 156 - " .set push # __raw_spin_unlock \n" 156 + " .set push # arch_spin_unlock \n" 157 157 " .set noreorder \n" 158 158 " \n" 159 159 " ll %[ticket], %[ticket_ptr] \n" ··· 174 174 } 175 175 } 176 176 177 - static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock) 177 + static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) 178 178 { 179 179 int tmp, tmp2, tmp3; 180 180 181 181 if (R10000_LLSC_WAR) { 182 182 __asm__ __volatile__ ( 183 - " .set push # __raw_spin_trylock \n" 183 + " .set push # arch_spin_trylock \n" 184 184 " .set noreorder \n" 185 185 " \n" 186 186 "1: ll %[ticket], %[ticket_ptr] \n" ··· 204 204 [now_serving] "=&r" (tmp3)); 205 205 } else { 206 206 __asm__ __volatile__ ( 207 - " .set push # __raw_spin_trylock \n" 207 + " .set push # arch_spin_trylock \n" 208 208 " .set noreorder \n" 209 209 " \n" 210 210 " ll %[ticket], %[ticket_ptr] \n" ··· 248 248 * read_can_lock - would read_trylock() succeed? 249 249 * @lock: the rwlock in question. 250 250 */ 251 - #define __raw_read_can_lock(rw) ((rw)->lock >= 0) 251 + #define arch_read_can_lock(rw) ((rw)->lock >= 0) 252 252 253 253 /* 254 254 * write_can_lock - would write_trylock() succeed? 255 255 * @lock: the rwlock in question. 
256 256 */ 257 - #define __raw_write_can_lock(rw) (!(rw)->lock) 257 + #define arch_write_can_lock(rw) (!(rw)->lock) 258 258 259 - static inline void __raw_read_lock(raw_rwlock_t *rw) 259 + static inline void arch_read_lock(arch_rwlock_t *rw) 260 260 { 261 261 unsigned int tmp; 262 262 263 263 if (R10000_LLSC_WAR) { 264 264 __asm__ __volatile__( 265 - " .set noreorder # __raw_read_lock \n" 265 + " .set noreorder # arch_read_lock \n" 266 266 "1: ll %1, %2 \n" 267 267 " bltz %1, 1b \n" 268 268 " addu %1, 1 \n" ··· 275 275 : "memory"); 276 276 } else { 277 277 __asm__ __volatile__( 278 - " .set noreorder # __raw_read_lock \n" 278 + " .set noreorder # arch_read_lock \n" 279 279 "1: ll %1, %2 \n" 280 280 " bltz %1, 2f \n" 281 281 " addu %1, 1 \n" ··· 301 301 /* Note the use of sub, not subu which will make the kernel die with an 302 302 overflow exception if we ever try to unlock an rwlock that is already 303 303 unlocked or is being held by a writer. */ 304 - static inline void __raw_read_unlock(raw_rwlock_t *rw) 304 + static inline void arch_read_unlock(arch_rwlock_t *rw) 305 305 { 306 306 unsigned int tmp; 307 307 ··· 309 309 310 310 if (R10000_LLSC_WAR) { 311 311 __asm__ __volatile__( 312 - "1: ll %1, %2 # __raw_read_unlock \n" 312 + "1: ll %1, %2 # arch_read_unlock \n" 313 313 " sub %1, 1 \n" 314 314 " sc %1, %0 \n" 315 315 " beqzl %1, 1b \n" ··· 318 318 : "memory"); 319 319 } else { 320 320 __asm__ __volatile__( 321 - " .set noreorder # __raw_read_unlock \n" 321 + " .set noreorder # arch_read_unlock \n" 322 322 "1: ll %1, %2 \n" 323 323 " sub %1, 1 \n" 324 324 " sc %1, %0 \n" ··· 335 335 } 336 336 } 337 337 338 - static inline void __raw_write_lock(raw_rwlock_t *rw) 338 + static inline void arch_write_lock(arch_rwlock_t *rw) 339 339 { 340 340 unsigned int tmp; 341 341 342 342 if (R10000_LLSC_WAR) { 343 343 __asm__ __volatile__( 344 - " .set noreorder # __raw_write_lock \n" 344 + " .set noreorder # arch_write_lock \n" 345 345 "1: ll %1, %2 \n" 346 346 " bnez %1, 1b \n" 347 347 " lui %1, 0x8000 \n" ··· 354 354 : "memory"); 355 355 } else { 356 356 __asm__ __volatile__( 357 - " .set noreorder # __raw_write_lock \n" 357 + " .set noreorder # arch_write_lock \n" 358 358 "1: ll %1, %2 \n" 359 359 " bnez %1, 2f \n" 360 360 " lui %1, 0x8000 \n" ··· 377 377 smp_llsc_mb(); 378 378 } 379 379 380 - static inline void __raw_write_unlock(raw_rwlock_t *rw) 380 + static inline void arch_write_unlock(arch_rwlock_t *rw) 381 381 { 382 382 smp_mb(); 383 383 384 384 __asm__ __volatile__( 385 - " # __raw_write_unlock \n" 385 + " # arch_write_unlock \n" 386 386 " sw $0, %0 \n" 387 387 : "=m" (rw->lock) 388 388 : "m" (rw->lock) 389 389 : "memory"); 390 390 } 391 391 392 - static inline int __raw_read_trylock(raw_rwlock_t *rw) 392 + static inline int arch_read_trylock(arch_rwlock_t *rw) 393 393 { 394 394 unsigned int tmp; 395 395 int ret; 396 396 397 397 if (R10000_LLSC_WAR) { 398 398 __asm__ __volatile__( 399 - " .set noreorder # __raw_read_trylock \n" 399 + " .set noreorder # arch_read_trylock \n" 400 400 " li %2, 0 \n" 401 401 "1: ll %1, %3 \n" 402 402 " bltz %1, 2f \n" ··· 413 413 : "memory"); 414 414 } else { 415 415 __asm__ __volatile__( 416 - " .set noreorder # __raw_read_trylock \n" 416 + " .set noreorder # arch_read_trylock \n" 417 417 " li %2, 0 \n" 418 418 "1: ll %1, %3 \n" 419 419 " bltz %1, 2f \n" ··· 433 433 return ret; 434 434 } 435 435 436 - static inline int __raw_write_trylock(raw_rwlock_t *rw) 436 + static inline int arch_write_trylock(arch_rwlock_t *rw) 437 437 { 438 438 unsigned int tmp; 439 
439 int ret; 440 440 441 441 if (R10000_LLSC_WAR) { 442 442 __asm__ __volatile__( 443 - " .set noreorder # __raw_write_trylock \n" 443 + " .set noreorder # arch_write_trylock \n" 444 444 " li %2, 0 \n" 445 445 "1: ll %1, %3 \n" 446 446 " bnez %1, 2f \n" ··· 457 457 : "memory"); 458 458 } else { 459 459 __asm__ __volatile__( 460 - " .set noreorder # __raw_write_trylock \n" 460 + " .set noreorder # arch_write_trylock \n" 461 461 " li %2, 0 \n" 462 462 "1: ll %1, %3 \n" 463 463 " bnez %1, 2f \n" ··· 480 480 return ret; 481 481 } 482 482 483 - #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) 484 - #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) 483 + #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) 484 + #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) 485 485 486 - #define _raw_spin_relax(lock) cpu_relax() 487 - #define _raw_read_relax(lock) cpu_relax() 488 - #define _raw_write_relax(lock) cpu_relax() 486 + #define arch_spin_relax(lock) cpu_relax() 487 + #define arch_read_relax(lock) cpu_relax() 488 + #define arch_write_relax(lock) cpu_relax() 489 489 490 490 #endif /* _ASM_SPINLOCK_H */
+4 -4
arch/mips/include/asm/spinlock_types.h
··· 12 12 * bits 15..28: ticket 13 13 */ 14 14 unsigned int lock; 15 - } raw_spinlock_t; 15 + } arch_spinlock_t; 16 16 17 - #define __RAW_SPIN_LOCK_UNLOCKED { 0 } 17 + #define __ARCH_SPIN_LOCK_UNLOCKED { 0 } 18 18 19 19 typedef struct { 20 20 volatile unsigned int lock; 21 - } raw_rwlock_t; 21 + } arch_rwlock_t; 22 22 23 - #define __RAW_RW_LOCK_UNLOCKED { 0 } 23 + #define __ARCH_RW_LOCK_UNLOCKED { 0 } 24 24 25 25 #endif
+2 -2
arch/mips/kernel/irq.c
··· 99 99 } 100 100 101 101 if (i < NR_IRQS) { 102 - spin_lock_irqsave(&irq_desc[i].lock, flags); 102 + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); 103 103 action = irq_desc[i].action; 104 104 if (!action) 105 105 goto skip; ··· 118 118 119 119 seq_putc(p, '\n'); 120 120 skip: 121 - spin_unlock_irqrestore(&irq_desc[i].lock, flags); 121 + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); 122 122 } else if (i == NR_IRQS) { 123 123 seq_putc(p, '\n'); 124 124 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
+46 -46
arch/mips/vr41xx/common/icu.c
··· 159 159 160 160 if (current_cpu_type() == CPU_VR4111 || 161 161 current_cpu_type() == CPU_VR4121) { 162 - spin_lock_irqsave(&desc->lock, flags); 162 + raw_spin_lock_irqsave(&desc->lock, flags); 163 163 icu1_set(MPIUINTREG, mask); 164 - spin_unlock_irqrestore(&desc->lock, flags); 164 + raw_spin_unlock_irqrestore(&desc->lock, flags); 165 165 } 166 166 } 167 167 ··· 174 174 175 175 if (current_cpu_type() == CPU_VR4111 || 176 176 current_cpu_type() == CPU_VR4121) { 177 - spin_lock_irqsave(&desc->lock, flags); 177 + raw_spin_lock_irqsave(&desc->lock, flags); 178 178 icu1_clear(MPIUINTREG, mask); 179 - spin_unlock_irqrestore(&desc->lock, flags); 179 + raw_spin_unlock_irqrestore(&desc->lock, flags); 180 180 } 181 181 } 182 182 ··· 189 189 190 190 if (current_cpu_type() == CPU_VR4111 || 191 191 current_cpu_type() == CPU_VR4121) { 192 - spin_lock_irqsave(&desc->lock, flags); 192 + raw_spin_lock_irqsave(&desc->lock, flags); 193 193 icu1_set(MAIUINTREG, mask); 194 - spin_unlock_irqrestore(&desc->lock, flags); 194 + raw_spin_unlock_irqrestore(&desc->lock, flags); 195 195 } 196 196 } 197 197 ··· 204 204 205 205 if (current_cpu_type() == CPU_VR4111 || 206 206 current_cpu_type() == CPU_VR4121) { 207 - spin_lock_irqsave(&desc->lock, flags); 207 + raw_spin_lock_irqsave(&desc->lock, flags); 208 208 icu1_clear(MAIUINTREG, mask); 209 - spin_unlock_irqrestore(&desc->lock, flags); 209 + raw_spin_unlock_irqrestore(&desc->lock, flags); 210 210 } 211 211 } 212 212 ··· 219 219 220 220 if (current_cpu_type() == CPU_VR4111 || 221 221 current_cpu_type() == CPU_VR4121) { 222 - spin_lock_irqsave(&desc->lock, flags); 222 + raw_spin_lock_irqsave(&desc->lock, flags); 223 223 icu1_set(MKIUINTREG, mask); 224 - spin_unlock_irqrestore(&desc->lock, flags); 224 + raw_spin_unlock_irqrestore(&desc->lock, flags); 225 225 } 226 226 } 227 227 ··· 234 234 235 235 if (current_cpu_type() == CPU_VR4111 || 236 236 current_cpu_type() == CPU_VR4121) { 237 - spin_lock_irqsave(&desc->lock, flags); 237 + raw_spin_lock_irqsave(&desc->lock, flags); 238 238 icu1_clear(MKIUINTREG, mask); 239 - spin_unlock_irqrestore(&desc->lock, flags); 239 + raw_spin_unlock_irqrestore(&desc->lock, flags); 240 240 } 241 241 } 242 242 ··· 247 247 struct irq_desc *desc = irq_desc + ETHERNET_IRQ; 248 248 unsigned long flags; 249 249 250 - spin_lock_irqsave(&desc->lock, flags); 250 + raw_spin_lock_irqsave(&desc->lock, flags); 251 251 icu1_set(MMACINTREG, mask); 252 - spin_unlock_irqrestore(&desc->lock, flags); 252 + raw_spin_unlock_irqrestore(&desc->lock, flags); 253 253 } 254 254 255 255 EXPORT_SYMBOL(vr41xx_enable_macint); ··· 259 259 struct irq_desc *desc = irq_desc + ETHERNET_IRQ; 260 260 unsigned long flags; 261 261 262 - spin_lock_irqsave(&desc->lock, flags); 262 + raw_spin_lock_irqsave(&desc->lock, flags); 263 263 icu1_clear(MMACINTREG, mask); 264 - spin_unlock_irqrestore(&desc->lock, flags); 264 + raw_spin_unlock_irqrestore(&desc->lock, flags); 265 265 } 266 266 267 267 EXPORT_SYMBOL(vr41xx_disable_macint); ··· 271 271 struct irq_desc *desc = irq_desc + DSIU_IRQ; 272 272 unsigned long flags; 273 273 274 - spin_lock_irqsave(&desc->lock, flags); 274 + raw_spin_lock_irqsave(&desc->lock, flags); 275 275 icu1_set(MDSIUINTREG, mask); 276 - spin_unlock_irqrestore(&desc->lock, flags); 276 + raw_spin_unlock_irqrestore(&desc->lock, flags); 277 277 } 278 278 279 279 EXPORT_SYMBOL(vr41xx_enable_dsiuint); ··· 283 283 struct irq_desc *desc = irq_desc + DSIU_IRQ; 284 284 unsigned long flags; 285 285 286 - spin_lock_irqsave(&desc->lock, flags); 286 + 
raw_spin_lock_irqsave(&desc->lock, flags); 287 287 icu1_clear(MDSIUINTREG, mask); 288 - spin_unlock_irqrestore(&desc->lock, flags); 288 + raw_spin_unlock_irqrestore(&desc->lock, flags); 289 289 } 290 290 291 291 EXPORT_SYMBOL(vr41xx_disable_dsiuint); ··· 295 295 struct irq_desc *desc = irq_desc + FIR_IRQ; 296 296 unsigned long flags; 297 297 298 - spin_lock_irqsave(&desc->lock, flags); 298 + raw_spin_lock_irqsave(&desc->lock, flags); 299 299 icu2_set(MFIRINTREG, mask); 300 - spin_unlock_irqrestore(&desc->lock, flags); 300 + raw_spin_unlock_irqrestore(&desc->lock, flags); 301 301 } 302 302 303 303 EXPORT_SYMBOL(vr41xx_enable_firint); ··· 307 307 struct irq_desc *desc = irq_desc + FIR_IRQ; 308 308 unsigned long flags; 309 309 310 - spin_lock_irqsave(&desc->lock, flags); 310 + raw_spin_lock_irqsave(&desc->lock, flags); 311 311 icu2_clear(MFIRINTREG, mask); 312 - spin_unlock_irqrestore(&desc->lock, flags); 312 + raw_spin_unlock_irqrestore(&desc->lock, flags); 313 313 } 314 314 315 315 EXPORT_SYMBOL(vr41xx_disable_firint); ··· 322 322 if (current_cpu_type() == CPU_VR4122 || 323 323 current_cpu_type() == CPU_VR4131 || 324 324 current_cpu_type() == CPU_VR4133) { 325 - spin_lock_irqsave(&desc->lock, flags); 325 + raw_spin_lock_irqsave(&desc->lock, flags); 326 326 icu2_write(MPCIINTREG, PCIINT0); 327 - spin_unlock_irqrestore(&desc->lock, flags); 327 + raw_spin_unlock_irqrestore(&desc->lock, flags); 328 328 } 329 329 } 330 330 ··· 338 338 if (current_cpu_type() == CPU_VR4122 || 339 339 current_cpu_type() == CPU_VR4131 || 340 340 current_cpu_type() == CPU_VR4133) { 341 - spin_lock_irqsave(&desc->lock, flags); 341 + raw_spin_lock_irqsave(&desc->lock, flags); 342 342 icu2_write(MPCIINTREG, 0); 343 - spin_unlock_irqrestore(&desc->lock, flags); 343 + raw_spin_unlock_irqrestore(&desc->lock, flags); 344 344 } 345 345 } 346 346 ··· 354 354 if (current_cpu_type() == CPU_VR4122 || 355 355 current_cpu_type() == CPU_VR4131 || 356 356 current_cpu_type() == CPU_VR4133) { 357 - spin_lock_irqsave(&desc->lock, flags); 357 + raw_spin_lock_irqsave(&desc->lock, flags); 358 358 icu2_write(MSCUINTREG, SCUINT0); 359 - spin_unlock_irqrestore(&desc->lock, flags); 359 + raw_spin_unlock_irqrestore(&desc->lock, flags); 360 360 } 361 361 } 362 362 ··· 370 370 if (current_cpu_type() == CPU_VR4122 || 371 371 current_cpu_type() == CPU_VR4131 || 372 372 current_cpu_type() == CPU_VR4133) { 373 - spin_lock_irqsave(&desc->lock, flags); 373 + raw_spin_lock_irqsave(&desc->lock, flags); 374 374 icu2_write(MSCUINTREG, 0); 375 - spin_unlock_irqrestore(&desc->lock, flags); 375 + raw_spin_unlock_irqrestore(&desc->lock, flags); 376 376 } 377 377 } 378 378 ··· 386 386 if (current_cpu_type() == CPU_VR4122 || 387 387 current_cpu_type() == CPU_VR4131 || 388 388 current_cpu_type() == CPU_VR4133) { 389 - spin_lock_irqsave(&desc->lock, flags); 389 + raw_spin_lock_irqsave(&desc->lock, flags); 390 390 icu2_set(MCSIINTREG, mask); 391 - spin_unlock_irqrestore(&desc->lock, flags); 391 + raw_spin_unlock_irqrestore(&desc->lock, flags); 392 392 } 393 393 } 394 394 ··· 402 402 if (current_cpu_type() == CPU_VR4122 || 403 403 current_cpu_type() == CPU_VR4131 || 404 404 current_cpu_type() == CPU_VR4133) { 405 - spin_lock_irqsave(&desc->lock, flags); 405 + raw_spin_lock_irqsave(&desc->lock, flags); 406 406 icu2_clear(MCSIINTREG, mask); 407 - spin_unlock_irqrestore(&desc->lock, flags); 407 + raw_spin_unlock_irqrestore(&desc->lock, flags); 408 408 } 409 409 } 410 410 ··· 418 418 if (current_cpu_type() == CPU_VR4122 || 419 419 current_cpu_type() == CPU_VR4131 || 
420 420 current_cpu_type() == CPU_VR4133) { 421 - spin_lock_irqsave(&desc->lock, flags); 421 + raw_spin_lock_irqsave(&desc->lock, flags); 422 422 icu2_write(MBCUINTREG, BCUINTR); 423 - spin_unlock_irqrestore(&desc->lock, flags); 423 + raw_spin_unlock_irqrestore(&desc->lock, flags); 424 424 } 425 425 } 426 426 ··· 434 434 if (current_cpu_type() == CPU_VR4122 || 435 435 current_cpu_type() == CPU_VR4131 || 436 436 current_cpu_type() == CPU_VR4133) { 437 - spin_lock_irqsave(&desc->lock, flags); 437 + raw_spin_lock_irqsave(&desc->lock, flags); 438 438 icu2_write(MBCUINTREG, 0); 439 - spin_unlock_irqrestore(&desc->lock, flags); 439 + raw_spin_unlock_irqrestore(&desc->lock, flags); 440 440 } 441 441 } 442 442 ··· 486 486 487 487 pin = SYSINT1_IRQ_TO_PIN(irq); 488 488 489 - spin_lock_irq(&desc->lock); 489 + raw_spin_lock_irq(&desc->lock); 490 490 491 491 intassign0 = icu1_read(INTASSIGN0); 492 492 intassign1 = icu1_read(INTASSIGN1); ··· 525 525 intassign1 |= (uint16_t)assign << 9; 526 526 break; 527 527 default: 528 - spin_unlock_irq(&desc->lock); 528 + raw_spin_unlock_irq(&desc->lock); 529 529 return -EINVAL; 530 530 } 531 531 ··· 533 533 icu1_write(INTASSIGN0, intassign0); 534 534 icu1_write(INTASSIGN1, intassign1); 535 535 536 - spin_unlock_irq(&desc->lock); 536 + raw_spin_unlock_irq(&desc->lock); 537 537 538 538 return 0; 539 539 } ··· 546 546 547 547 pin = SYSINT2_IRQ_TO_PIN(irq); 548 548 549 - spin_lock_irq(&desc->lock); 549 + raw_spin_lock_irq(&desc->lock); 550 550 551 551 intassign2 = icu1_read(INTASSIGN2); 552 552 intassign3 = icu1_read(INTASSIGN3); ··· 593 593 intassign3 |= (uint16_t)assign << 12; 594 594 break; 595 595 default: 596 - spin_unlock_irq(&desc->lock); 596 + raw_spin_unlock_irq(&desc->lock); 597 597 return -EINVAL; 598 598 } 599 599 ··· 601 601 icu1_write(INTASSIGN2, intassign2); 602 602 icu1_write(INTASSIGN3, intassign3); 603 603 604 - spin_unlock_irq(&desc->lock); 604 + raw_spin_unlock_irq(&desc->lock); 605 605 606 606 return 0; 607 607 }
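Every hunk in icu.c above is the same mechanical change: genirq's irq_desc.lock is now a raw_spinlock_t, so platform code that pokes its interrupt-controller registers under that lock has to use the raw_spin_* entry points. A minimal sketch of the converted caller pattern follows; the function name and the register write are placeholders, not actual vr41xx symbols.

static void example_mask_source(unsigned int irq, uint16_t mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	/* desc->lock is a raw_spinlock_t after this series */
	raw_spin_lock_irqsave(&desc->lock, flags);
	/* ... write the controller's mask register here ... */
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}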
+2 -2
arch/mn10300/kernel/irq.c
··· 215 215 216 216 /* display information rows, one per active CPU */ 217 217 case 1 ... NR_IRQS - 1: 218 - spin_lock_irqsave(&irq_desc[i].lock, flags); 218 + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); 219 219 220 220 action = irq_desc[i].action; 221 221 if (action) { ··· 235 235 seq_putc(p, '\n'); 236 236 } 237 237 238 - spin_unlock_irqrestore(&irq_desc[i].lock, flags); 238 + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); 239 239 break; 240 240 241 241 /* polish off with NMI and error counters */
+5 -5
arch/parisc/include/asm/atomic.h
··· 27 27 # define ATOMIC_HASH_SIZE 4 28 28 # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) 29 29 30 - extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; 30 + extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; 31 31 32 32 /* Can't use raw_spin_lock_irq because of #include problems, so 33 33 * this is the substitute */ 34 34 #define _atomic_spin_lock_irqsave(l,f) do { \ 35 - raw_spinlock_t *s = ATOMIC_HASH(l); \ 35 + arch_spinlock_t *s = ATOMIC_HASH(l); \ 36 36 local_irq_save(f); \ 37 - __raw_spin_lock(s); \ 37 + arch_spin_lock(s); \ 38 38 } while(0) 39 39 40 40 #define _atomic_spin_unlock_irqrestore(l,f) do { \ 41 - raw_spinlock_t *s = ATOMIC_HASH(l); \ 42 - __raw_spin_unlock(s); \ 41 + arch_spinlock_t *s = ATOMIC_HASH(l); \ 42 + arch_spin_unlock(s); \ 43 43 local_irq_restore(f); \ 44 44 } while(0) 45 45
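SMP parisc has no wide hardware atomics, so atomic_t operations are serialised by a small hash of arch_spinlock_t locks: _atomic_spin_lock_irqsave() picks one of the ATOMIC_HASH_SIZE locks by hashing the operand's address. A sketch of how an atomic operation sits on top of these macros, modelled on the parisc pattern rather than copied from the tree:

static inline void atomic_add_sketch(int i, atomic_t *v)
{
	unsigned long flags;

	/* hash v's address to one of the per-cacheline locks, disable
	 * interrupts, then take that arch_spinlock_t */
	_atomic_spin_lock_irqsave(v, flags);
	v->counter += i;
	_atomic_spin_unlock_irqrestore(v, flags);
}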
+32 -32
arch/parisc/include/asm/spinlock.h
··· 5 5 #include <asm/processor.h> 6 6 #include <asm/spinlock_types.h> 7 7 8 - static inline int __raw_spin_is_locked(raw_spinlock_t *x) 8 + static inline int arch_spin_is_locked(arch_spinlock_t *x) 9 9 { 10 10 volatile unsigned int *a = __ldcw_align(x); 11 11 return *a == 0; 12 12 } 13 13 14 - #define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0) 15 - #define __raw_spin_unlock_wait(x) \ 16 - do { cpu_relax(); } while (__raw_spin_is_locked(x)) 14 + #define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0) 15 + #define arch_spin_unlock_wait(x) \ 16 + do { cpu_relax(); } while (arch_spin_is_locked(x)) 17 17 18 - static inline void __raw_spin_lock_flags(raw_spinlock_t *x, 18 + static inline void arch_spin_lock_flags(arch_spinlock_t *x, 19 19 unsigned long flags) 20 20 { 21 21 volatile unsigned int *a; ··· 33 33 mb(); 34 34 } 35 35 36 - static inline void __raw_spin_unlock(raw_spinlock_t *x) 36 + static inline void arch_spin_unlock(arch_spinlock_t *x) 37 37 { 38 38 volatile unsigned int *a; 39 39 mb(); ··· 42 42 mb(); 43 43 } 44 44 45 - static inline int __raw_spin_trylock(raw_spinlock_t *x) 45 + static inline int arch_spin_trylock(arch_spinlock_t *x) 46 46 { 47 47 volatile unsigned int *a; 48 48 int ret; ··· 69 69 70 70 /* Note that we have to ensure interrupts are disabled in case we're 71 71 * interrupted by some other code that wants to grab the same read lock */ 72 - static __inline__ void __raw_read_lock(raw_rwlock_t *rw) 72 + static __inline__ void arch_read_lock(arch_rwlock_t *rw) 73 73 { 74 74 unsigned long flags; 75 75 local_irq_save(flags); 76 - __raw_spin_lock_flags(&rw->lock, flags); 76 + arch_spin_lock_flags(&rw->lock, flags); 77 77 rw->counter++; 78 - __raw_spin_unlock(&rw->lock); 78 + arch_spin_unlock(&rw->lock); 79 79 local_irq_restore(flags); 80 80 } 81 81 82 82 /* Note that we have to ensure interrupts are disabled in case we're 83 83 * interrupted by some other code that wants to grab the same read lock */ 84 - static __inline__ void __raw_read_unlock(raw_rwlock_t *rw) 84 + static __inline__ void arch_read_unlock(arch_rwlock_t *rw) 85 85 { 86 86 unsigned long flags; 87 87 local_irq_save(flags); 88 - __raw_spin_lock_flags(&rw->lock, flags); 88 + arch_spin_lock_flags(&rw->lock, flags); 89 89 rw->counter--; 90 - __raw_spin_unlock(&rw->lock); 90 + arch_spin_unlock(&rw->lock); 91 91 local_irq_restore(flags); 92 92 } 93 93 94 94 /* Note that we have to ensure interrupts are disabled in case we're 95 95 * interrupted by some other code that wants to grab the same read lock */ 96 - static __inline__ int __raw_read_trylock(raw_rwlock_t *rw) 96 + static __inline__ int arch_read_trylock(arch_rwlock_t *rw) 97 97 { 98 98 unsigned long flags; 99 99 retry: 100 100 local_irq_save(flags); 101 - if (__raw_spin_trylock(&rw->lock)) { 101 + if (arch_spin_trylock(&rw->lock)) { 102 102 rw->counter++; 103 - __raw_spin_unlock(&rw->lock); 103 + arch_spin_unlock(&rw->lock); 104 104 local_irq_restore(flags); 105 105 return 1; 106 106 } ··· 111 111 return 0; 112 112 113 113 /* Wait until we have a realistic chance at the lock */ 114 - while (__raw_spin_is_locked(&rw->lock) && rw->counter >= 0) 114 + while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0) 115 115 cpu_relax(); 116 116 117 117 goto retry; ··· 119 119 120 120 /* Note that we have to ensure interrupts are disabled in case we're 121 121 * interrupted by some other code that wants to read_trylock() this lock */ 122 - static __inline__ void __raw_write_lock(raw_rwlock_t *rw) 122 + static __inline__ void arch_write_lock(arch_rwlock_t 
*rw) 123 123 { 124 124 unsigned long flags; 125 125 retry: 126 126 local_irq_save(flags); 127 - __raw_spin_lock_flags(&rw->lock, flags); 127 + arch_spin_lock_flags(&rw->lock, flags); 128 128 129 129 if (rw->counter != 0) { 130 - __raw_spin_unlock(&rw->lock); 130 + arch_spin_unlock(&rw->lock); 131 131 local_irq_restore(flags); 132 132 133 133 while (rw->counter != 0) ··· 141 141 local_irq_restore(flags); 142 142 } 143 143 144 - static __inline__ void __raw_write_unlock(raw_rwlock_t *rw) 144 + static __inline__ void arch_write_unlock(arch_rwlock_t *rw) 145 145 { 146 146 rw->counter = 0; 147 - __raw_spin_unlock(&rw->lock); 147 + arch_spin_unlock(&rw->lock); 148 148 } 149 149 150 150 /* Note that we have to ensure interrupts are disabled in case we're 151 151 * interrupted by some other code that wants to read_trylock() this lock */ 152 - static __inline__ int __raw_write_trylock(raw_rwlock_t *rw) 152 + static __inline__ int arch_write_trylock(arch_rwlock_t *rw) 153 153 { 154 154 unsigned long flags; 155 155 int result = 0; 156 156 157 157 local_irq_save(flags); 158 - if (__raw_spin_trylock(&rw->lock)) { 158 + if (arch_spin_trylock(&rw->lock)) { 159 159 if (rw->counter == 0) { 160 160 rw->counter = -1; 161 161 result = 1; 162 162 } else { 163 163 /* Read-locked. Oh well. */ 164 - __raw_spin_unlock(&rw->lock); 164 + arch_spin_unlock(&rw->lock); 165 165 } 166 166 } 167 167 local_irq_restore(flags); ··· 173 173 * read_can_lock - would read_trylock() succeed? 174 174 * @lock: the rwlock in question. 175 175 */ 176 - static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw) 176 + static __inline__ int arch_read_can_lock(arch_rwlock_t *rw) 177 177 { 178 178 return rw->counter >= 0; 179 179 } ··· 182 182 * write_can_lock - would write_trylock() succeed? 183 183 * @lock: the rwlock in question. 184 184 */ 185 - static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw) 185 + static __inline__ int arch_write_can_lock(arch_rwlock_t *rw) 186 186 { 187 187 return !rw->counter; 188 188 } 189 189 190 - #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) 191 - #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) 190 + #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) 191 + #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) 192 192 193 - #define _raw_spin_relax(lock) cpu_relax() 194 - #define _raw_read_relax(lock) cpu_relax() 195 - #define _raw_write_relax(lock) cpu_relax() 193 + #define arch_spin_relax(lock) cpu_relax() 194 + #define arch_read_relax(lock) cpu_relax() 195 + #define arch_write_relax(lock) cpu_relax() 196 196 197 197 #endif /* __ASM_SPINLOCK_H */
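The parisc rwlock converted above is a counter guarded by the embedded arch_spinlock_t: a reader takes the spinlock only long enough to bump rw->counter, while a writer holds it and parks the counter at -1 until the readers drain. An illustrative sketch of the read-lock side (essentially the shape of arch_read_lock() in the hunk):

/* rw->counter > 0: that many readers hold the lock
 * rw->counter == 0: free
 * rw->counter == -1: a writer holds the lock
 */
static inline void sketch_read_lock(arch_rwlock_t *rw)
{
	unsigned long flags;

	local_irq_save(flags);			/* an irq taking the same lock must not deadlock us */
	arch_spin_lock_flags(&rw->lock, flags);
	rw->counter++;				/* register as a reader */
	arch_spin_unlock(&rw->lock);		/* writers now see counter != 0 and back off */
	local_irq_restore(flags);
}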
+6 -6
arch/parisc/include/asm/spinlock_types.h
··· 4 4 typedef struct { 5 5 #ifdef CONFIG_PA20 6 6 volatile unsigned int slock; 7 - # define __RAW_SPIN_LOCK_UNLOCKED { 1 } 7 + # define __ARCH_SPIN_LOCK_UNLOCKED { 1 } 8 8 #else 9 9 volatile unsigned int lock[4]; 10 - # define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } } 10 + # define __ARCH_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } } 11 11 #endif 12 - } raw_spinlock_t; 12 + } arch_spinlock_t; 13 13 14 14 typedef struct { 15 - raw_spinlock_t lock; 15 + arch_spinlock_t lock; 16 16 volatile int counter; 17 - } raw_rwlock_t; 17 + } arch_rwlock_t; 18 18 19 - #define __RAW_RW_LOCK_UNLOCKED { __RAW_SPIN_LOCK_UNLOCKED, 0 } 19 + #define __ARCH_RW_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED, 0 } 20 20 21 21 #endif
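Code that wants a bare arch-level lock — no lockdep, no debug state, usable from the lowest layers — now declares it with the __ARCH_* initializer. A minimal usage sketch; example_lock and the critical section are hypothetical:

static arch_spinlock_t example_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static void example_critical_section(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* arch_spin_lock() does not disable interrupts itself */
	arch_spin_lock(&example_lock);
	/* ... lowest-level critical section ... */
	arch_spin_unlock(&example_lock);
	local_irq_restore(flags);
}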
+2 -2
arch/parisc/kernel/irq.c
··· 180 180 if (i < NR_IRQS) { 181 181 struct irqaction *action; 182 182 183 - spin_lock_irqsave(&irq_desc[i].lock, flags); 183 + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); 184 184 action = irq_desc[i].action; 185 185 if (!action) 186 186 goto skip; ··· 224 224 225 225 seq_putc(p, '\n'); 226 226 skip: 227 - spin_unlock_irqrestore(&irq_desc[i].lock, flags); 227 + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); 228 228 } 229 229 230 230 return 0;
+2 -2
arch/parisc/lib/bitops.c
··· 12 12 #include <asm/atomic.h> 13 13 14 14 #ifdef CONFIG_SMP 15 - raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = { 16 - [0 ... (ATOMIC_HASH_SIZE-1)] = __RAW_SPIN_LOCK_UNLOCKED 15 + arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = { 16 + [0 ... (ATOMIC_HASH_SIZE-1)] = __ARCH_SPIN_LOCK_UNLOCKED 17 17 }; 18 18 #endif 19 19
+1 -1
arch/powerpc/include/asm/rtas.h
··· 58 58 unsigned long entry; /* physical address pointer */ 59 59 unsigned long base; /* physical address pointer */ 60 60 unsigned long size; 61 - raw_spinlock_t lock; 61 + arch_spinlock_t lock; 62 62 struct rtas_args args; 63 63 struct device_node *dev; /* virtual address pointer */ 64 64 };
+34 -34
arch/powerpc/include/asm/spinlock.h
··· 28 28 #include <asm/asm-compat.h> 29 29 #include <asm/synch.h> 30 30 31 - #define __raw_spin_is_locked(x) ((x)->slock != 0) 31 + #define arch_spin_is_locked(x) ((x)->slock != 0) 32 32 33 33 #ifdef CONFIG_PPC64 34 34 /* use 0x800000yy when locked, where yy == CPU number */ ··· 54 54 * This returns the old value in the lock, so we succeeded 55 55 * in getting the lock if the return value is 0. 56 56 */ 57 - static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock) 57 + static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock) 58 58 { 59 59 unsigned long tmp, token; 60 60 ··· 73 73 return tmp; 74 74 } 75 75 76 - static inline int __raw_spin_trylock(raw_spinlock_t *lock) 76 + static inline int arch_spin_trylock(arch_spinlock_t *lock) 77 77 { 78 78 CLEAR_IO_SYNC; 79 - return arch_spin_trylock(lock) == 0; 79 + return __arch_spin_trylock(lock) == 0; 80 80 } 81 81 82 82 /* ··· 96 96 #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES) 97 97 /* We only yield to the hypervisor if we are in shared processor mode */ 98 98 #define SHARED_PROCESSOR (get_lppaca()->shared_proc) 99 - extern void __spin_yield(raw_spinlock_t *lock); 100 - extern void __rw_yield(raw_rwlock_t *lock); 99 + extern void __spin_yield(arch_spinlock_t *lock); 100 + extern void __rw_yield(arch_rwlock_t *lock); 101 101 #else /* SPLPAR || ISERIES */ 102 102 #define __spin_yield(x) barrier() 103 103 #define __rw_yield(x) barrier() 104 104 #define SHARED_PROCESSOR 0 105 105 #endif 106 106 107 - static inline void __raw_spin_lock(raw_spinlock_t *lock) 107 + static inline void arch_spin_lock(arch_spinlock_t *lock) 108 108 { 109 109 CLEAR_IO_SYNC; 110 110 while (1) { 111 - if (likely(arch_spin_trylock(lock) == 0)) 111 + if (likely(__arch_spin_trylock(lock) == 0)) 112 112 break; 113 113 do { 114 114 HMT_low(); ··· 120 120 } 121 121 122 122 static inline 123 - void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) 123 + void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) 124 124 { 125 125 unsigned long flags_dis; 126 126 127 127 CLEAR_IO_SYNC; 128 128 while (1) { 129 - if (likely(arch_spin_trylock(lock) == 0)) 129 + if (likely(__arch_spin_trylock(lock) == 0)) 130 130 break; 131 131 local_save_flags(flags_dis); 132 132 local_irq_restore(flags); ··· 140 140 } 141 141 } 142 142 143 - static inline void __raw_spin_unlock(raw_spinlock_t *lock) 143 + static inline void arch_spin_unlock(arch_spinlock_t *lock) 144 144 { 145 145 SYNC_IO; 146 - __asm__ __volatile__("# __raw_spin_unlock\n\t" 146 + __asm__ __volatile__("# arch_spin_unlock\n\t" 147 147 LWSYNC_ON_SMP: : :"memory"); 148 148 lock->slock = 0; 149 149 } 150 150 151 151 #ifdef CONFIG_PPC64 152 - extern void __raw_spin_unlock_wait(raw_spinlock_t *lock); 152 + extern void arch_spin_unlock_wait(arch_spinlock_t *lock); 153 153 #else 154 - #define __raw_spin_unlock_wait(lock) \ 155 - do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) 154 + #define arch_spin_unlock_wait(lock) \ 155 + do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) 156 156 #endif 157 157 158 158 /* ··· 166 166 * read-locks. 
167 167 */ 168 168 169 - #define __raw_read_can_lock(rw) ((rw)->lock >= 0) 170 - #define __raw_write_can_lock(rw) (!(rw)->lock) 169 + #define arch_read_can_lock(rw) ((rw)->lock >= 0) 170 + #define arch_write_can_lock(rw) (!(rw)->lock) 171 171 172 172 #ifdef CONFIG_PPC64 173 173 #define __DO_SIGN_EXTEND "extsw %0,%0\n" ··· 181 181 * This returns the old value in the lock + 1, 182 182 * so we got a read lock if the return value is > 0. 183 183 */ 184 - static inline long arch_read_trylock(raw_rwlock_t *rw) 184 + static inline long __arch_read_trylock(arch_rwlock_t *rw) 185 185 { 186 186 long tmp; 187 187 ··· 205 205 * This returns the old value in the lock, 206 206 * so we got the write lock if the return value is 0. 207 207 */ 208 - static inline long arch_write_trylock(raw_rwlock_t *rw) 208 + static inline long __arch_write_trylock(arch_rwlock_t *rw) 209 209 { 210 210 long tmp, token; 211 211 ··· 225 225 return tmp; 226 226 } 227 227 228 - static inline void __raw_read_lock(raw_rwlock_t *rw) 228 + static inline void arch_read_lock(arch_rwlock_t *rw) 229 229 { 230 230 while (1) { 231 - if (likely(arch_read_trylock(rw) > 0)) 231 + if (likely(__arch_read_trylock(rw) > 0)) 232 232 break; 233 233 do { 234 234 HMT_low(); ··· 239 239 } 240 240 } 241 241 242 - static inline void __raw_write_lock(raw_rwlock_t *rw) 242 + static inline void arch_write_lock(arch_rwlock_t *rw) 243 243 { 244 244 while (1) { 245 - if (likely(arch_write_trylock(rw) == 0)) 245 + if (likely(__arch_write_trylock(rw) == 0)) 246 246 break; 247 247 do { 248 248 HMT_low(); ··· 253 253 } 254 254 } 255 255 256 - static inline int __raw_read_trylock(raw_rwlock_t *rw) 256 + static inline int arch_read_trylock(arch_rwlock_t *rw) 257 257 { 258 - return arch_read_trylock(rw) > 0; 258 + return __arch_read_trylock(rw) > 0; 259 259 } 260 260 261 - static inline int __raw_write_trylock(raw_rwlock_t *rw) 261 + static inline int arch_write_trylock(arch_rwlock_t *rw) 262 262 { 263 - return arch_write_trylock(rw) == 0; 263 + return __arch_write_trylock(rw) == 0; 264 264 } 265 265 266 - static inline void __raw_read_unlock(raw_rwlock_t *rw) 266 + static inline void arch_read_unlock(arch_rwlock_t *rw) 267 267 { 268 268 long tmp; 269 269 ··· 280 280 : "cr0", "xer", "memory"); 281 281 } 282 282 283 - static inline void __raw_write_unlock(raw_rwlock_t *rw) 283 + static inline void arch_write_unlock(arch_rwlock_t *rw) 284 284 { 285 285 __asm__ __volatile__("# write_unlock\n\t" 286 286 LWSYNC_ON_SMP: : :"memory"); 287 287 rw->lock = 0; 288 288 } 289 289 290 - #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) 291 - #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) 290 + #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) 291 + #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) 292 292 293 - #define _raw_spin_relax(lock) __spin_yield(lock) 294 - #define _raw_read_relax(lock) __rw_yield(lock) 295 - #define _raw_write_relax(lock) __rw_yield(lock) 293 + #define arch_spin_relax(lock) __spin_yield(lock) 294 + #define arch_read_relax(lock) __rw_yield(lock) 295 + #define arch_write_relax(lock) __rw_yield(lock) 296 296 297 297 #endif /* __KERNEL__ */ 298 298 #endif /* __ASM_SPINLOCK_H */
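On powerpc the double-underscore helpers (__arch_spin_trylock, __arch_read_trylock, __arch_write_trylock) return the raw old lock value from the lwarx/stwcx. sequence, and the public arch_* functions and lock loops are layered on top, yielding to the hypervisor on shared-processor LPARs. A condensed sketch of that layering, using the names from the hunk above with CLEAR_IO_SYNC and other details dropped:

static inline void sketch_spin_lock(arch_spinlock_t *lock)
{
	while (1) {
		/* 0 means the reservation sequence took the lock */
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();			/* drop SMT thread priority while spinning */
			if (SHARED_PROCESSOR)
				__spin_yield(lock);	/* donate our timeslice to the lock holder */
		} while (arch_spin_is_locked(lock));
		HMT_medium();
	}
}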
+4 -4
arch/powerpc/include/asm/spinlock_types.h
··· 7 7 8 8 typedef struct { 9 9 volatile unsigned int slock; 10 - } raw_spinlock_t; 10 + } arch_spinlock_t; 11 11 12 - #define __RAW_SPIN_LOCK_UNLOCKED { 0 } 12 + #define __ARCH_SPIN_LOCK_UNLOCKED { 0 } 13 13 14 14 typedef struct { 15 15 volatile signed int lock; 16 - } raw_rwlock_t; 16 + } arch_rwlock_t; 17 17 18 - #define __RAW_RW_LOCK_UNLOCKED { 0 } 18 + #define __ARCH_RW_LOCK_UNLOCKED { 0 } 19 19 20 20 #endif
+4 -4
arch/powerpc/kernel/irq.c
··· 210 210 if (!desc) 211 211 return 0; 212 212 213 - spin_lock_irqsave(&desc->lock, flags); 213 + raw_spin_lock_irqsave(&desc->lock, flags); 214 214 215 215 action = desc->action; 216 216 if (!action || !action->handler) ··· 237 237 seq_putc(p, '\n'); 238 238 239 239 skip: 240 - spin_unlock_irqrestore(&desc->lock, flags); 240 + raw_spin_unlock_irqrestore(&desc->lock, flags); 241 241 242 242 return 0; 243 243 } ··· 1112 1112 if (!desc) 1113 1113 continue; 1114 1114 1115 - spin_lock_irqsave(&desc->lock, flags); 1115 + raw_spin_lock_irqsave(&desc->lock, flags); 1116 1116 1117 1117 if (desc->action && desc->action->handler) { 1118 1118 seq_printf(m, "%5d ", i); ··· 1131 1131 seq_printf(m, "%s\n", p); 1132 1132 } 1133 1133 1134 - spin_unlock_irqrestore(&desc->lock, flags); 1134 + raw_spin_unlock_irqrestore(&desc->lock, flags); 1135 1135 } 1136 1136 1137 1137 return 0;
+8 -8
arch/powerpc/kernel/rtas.c
··· 42 42 #include <asm/mmu.h> 43 43 44 44 struct rtas_t rtas = { 45 - .lock = __RAW_SPIN_LOCK_UNLOCKED 45 + .lock = __ARCH_SPIN_LOCK_UNLOCKED 46 46 }; 47 47 EXPORT_SYMBOL(rtas); 48 48 ··· 80 80 81 81 local_irq_save(flags); 82 82 preempt_disable(); 83 - __raw_spin_lock_flags(&rtas.lock, flags); 83 + arch_spin_lock_flags(&rtas.lock, flags); 84 84 return flags; 85 85 } 86 86 87 87 static void unlock_rtas(unsigned long flags) 88 88 { 89 - __raw_spin_unlock(&rtas.lock); 89 + arch_spin_unlock(&rtas.lock); 90 90 local_irq_restore(flags); 91 91 preempt_enable(); 92 92 } ··· 978 978 return 1; 979 979 } 980 980 981 - static raw_spinlock_t timebase_lock; 981 + static arch_spinlock_t timebase_lock; 982 982 static u64 timebase = 0; 983 983 984 984 void __cpuinit rtas_give_timebase(void) ··· 987 987 988 988 local_irq_save(flags); 989 989 hard_irq_disable(); 990 - __raw_spin_lock(&timebase_lock); 990 + arch_spin_lock(&timebase_lock); 991 991 rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL); 992 992 timebase = get_tb(); 993 - __raw_spin_unlock(&timebase_lock); 993 + arch_spin_unlock(&timebase_lock); 994 994 995 995 while (timebase) 996 996 barrier(); ··· 1002 1002 { 1003 1003 while (!timebase) 1004 1004 barrier(); 1005 - __raw_spin_lock(&timebase_lock); 1005 + arch_spin_lock(&timebase_lock); 1006 1006 set_tb(timebase >> 32, timebase & 0xffffffff); 1007 1007 timebase = 0; 1008 - __raw_spin_unlock(&timebase_lock); 1008 + arch_spin_unlock(&timebase_lock); 1009 1009 }
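rtas.c is one of the few places that deliberately stays on the bare arch_spin_* layer: lock_rtas() and the timebase hand-off run with interrupts hard-disabled, partly on CPUs that are not fully online, where even the bookkeeping of a raw_spinlock_t is unwanted. Pulling the two sides of the timebase handshake from the hunk together for readability, the protocol reduces to:

/* boot CPU: freeze the timebase and publish its value */
arch_spin_lock(&timebase_lock);
rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
timebase = get_tb();
arch_spin_unlock(&timebase_lock);
while (timebase)			/* wait until the secondary has consumed it */
	barrier();

/* secondary CPU: copy the value into its own timebase */
while (!timebase)
	barrier();
arch_spin_lock(&timebase_lock);
set_tb(timebase >> 32, timebase & 0xffffffff);
timebase = 0;				/* signal the boot CPU that we are done */
arch_spin_unlock(&timebase_lock);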
+4 -4
arch/powerpc/lib/locks.c
··· 25 25 #include <asm/smp.h> 26 26 #include <asm/firmware.h> 27 27 28 - void __spin_yield(raw_spinlock_t *lock) 28 + void __spin_yield(arch_spinlock_t *lock) 29 29 { 30 30 unsigned int lock_value, holder_cpu, yield_count; 31 31 ··· 55 55 * This turns out to be the same for read and write locks, since 56 56 * we only know the holder if it is write-locked. 57 57 */ 58 - void __rw_yield(raw_rwlock_t *rw) 58 + void __rw_yield(arch_rwlock_t *rw) 59 59 { 60 60 int lock_value; 61 61 unsigned int holder_cpu, yield_count; ··· 82 82 } 83 83 #endif 84 84 85 - void __raw_spin_unlock_wait(raw_spinlock_t *lock) 85 + void arch_spin_unlock_wait(arch_spinlock_t *lock) 86 86 { 87 87 while (lock->slock) { 88 88 HMT_low(); ··· 92 92 HMT_medium(); 93 93 } 94 94 95 - EXPORT_SYMBOL(__raw_spin_unlock_wait); 95 + EXPORT_SYMBOL(arch_spin_unlock_wait);
+4 -4
arch/powerpc/platforms/52xx/media5200.c
··· 86 86 u32 status, enable; 87 87 88 88 /* Mask off the cascaded IRQ */ 89 - spin_lock(&desc->lock); 89 + raw_spin_lock(&desc->lock); 90 90 desc->chip->mask(virq); 91 - spin_unlock(&desc->lock); 91 + raw_spin_unlock(&desc->lock); 92 92 93 93 /* Ask the FPGA for IRQ status. If 'val' is 0, then no irqs 94 94 * are pending. 'ffs()' is 1 based */ ··· 104 104 } 105 105 106 106 /* Processing done; can reenable the cascade now */ 107 - spin_lock(&desc->lock); 107 + raw_spin_lock(&desc->lock); 108 108 desc->chip->ack(virq); 109 109 if (!(desc->status & IRQ_DISABLED)) 110 110 desc->chip->unmask(virq); 111 - spin_unlock(&desc->lock); 111 + raw_spin_unlock(&desc->lock); 112 112 } 113 113 114 114 static int media5200_irq_map(struct irq_host *h, unsigned int virq,
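media5200_irq_cascade() is the standard cascade pattern under the new lock type: mask the parent line under desc->lock, dispatch the child interrupts with the lock dropped, then ack and conditionally unmask. In outline, condensed from the hunk with the FPGA status read and error handling omitted:

static void sketch_cascade(unsigned int virq, struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	desc->chip->mask(virq);			/* silence the parent interrupt */
	raw_spin_unlock(&desc->lock);

	/* ... read child status, generic_handle_irq() each pending source ... */

	raw_spin_lock(&desc->lock);
	desc->chip->ack(virq);
	if (!(desc->status & IRQ_DISABLED))
		desc->chip->unmask(virq);	/* re-enable only if still enabled */
	raw_spin_unlock(&desc->lock);
}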
+4 -4
arch/powerpc/platforms/cell/interrupt.c
··· 237 237 238 238 static void handle_iic_irq(unsigned int irq, struct irq_desc *desc) 239 239 { 240 - spin_lock(&desc->lock); 240 + raw_spin_lock(&desc->lock); 241 241 242 242 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); 243 243 ··· 265 265 goto out_eoi; 266 266 267 267 desc->status &= ~IRQ_PENDING; 268 - spin_unlock(&desc->lock); 268 + raw_spin_unlock(&desc->lock); 269 269 action_ret = handle_IRQ_event(irq, action); 270 270 if (!noirqdebug) 271 271 note_interrupt(irq, desc, action_ret); 272 - spin_lock(&desc->lock); 272 + raw_spin_lock(&desc->lock); 273 273 274 274 } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING); 275 275 276 276 desc->status &= ~IRQ_INPROGRESS; 277 277 out_eoi: 278 278 desc->chip->eoi(irq); 279 - spin_unlock(&desc->lock); 279 + raw_spin_unlock(&desc->lock); 280 280 } 281 281 282 282 static int iic_host_map(struct irq_host *h, unsigned int virq,
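handle_iic_irq() follows the genirq flow-handler convention: desc->lock protects desc->status, but it is dropped across handle_IRQ_event() so the action handlers run unlocked, then re-taken to re-check IRQ_PENDING. The skeleton with the renamed calls, simplified from the hunk above:

raw_spin_lock(&desc->lock);
desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
do {
	struct irqaction *action = desc->action;

	desc->status &= ~IRQ_PENDING;
	raw_spin_unlock(&desc->lock);
	handle_IRQ_event(irq, action);		/* handlers run without desc->lock */
	raw_spin_lock(&desc->lock);
} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
desc->status &= ~IRQ_INPROGRESS;
desc->chip->eoi(irq);
raw_spin_unlock(&desc->lock);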
+2 -2
arch/powerpc/platforms/iseries/irq.c
··· 217 217 struct irq_desc *desc = irq_to_desc(irq); 218 218 219 219 if (desc && desc->chip && desc->chip->startup) { 220 - spin_lock_irqsave(&desc->lock, flags); 220 + raw_spin_lock_irqsave(&desc->lock, flags); 221 221 desc->chip->startup(irq); 222 - spin_unlock_irqrestore(&desc->lock, flags); 222 + raw_spin_unlock_irqrestore(&desc->lock, flags); 223 223 } 224 224 } 225 225 }
+5 -5
arch/powerpc/platforms/pasemi/setup.c
··· 71 71 } 72 72 73 73 #ifdef CONFIG_SMP 74 - static raw_spinlock_t timebase_lock; 74 + static arch_spinlock_t timebase_lock; 75 75 static unsigned long timebase; 76 76 77 77 static void __devinit pas_give_timebase(void) ··· 80 80 81 81 local_irq_save(flags); 82 82 hard_irq_disable(); 83 - __raw_spin_lock(&timebase_lock); 83 + arch_spin_lock(&timebase_lock); 84 84 mtspr(SPRN_TBCTL, TBCTL_FREEZE); 85 85 isync(); 86 86 timebase = get_tb(); 87 - __raw_spin_unlock(&timebase_lock); 87 + arch_spin_unlock(&timebase_lock); 88 88 89 89 while (timebase) 90 90 barrier(); ··· 97 97 while (!timebase) 98 98 smp_rmb(); 99 99 100 - __raw_spin_lock(&timebase_lock); 100 + arch_spin_lock(&timebase_lock); 101 101 set_tb(timebase >> 32, timebase & 0xffffffff); 102 102 timebase = 0; 103 - __raw_spin_unlock(&timebase_lock); 103 + arch_spin_unlock(&timebase_lock); 104 104 } 105 105 106 106 struct smp_ops_t pas_smp_ops = {
+2 -2
arch/powerpc/platforms/pseries/xics.c
··· 906 906 || desc->chip->set_affinity == NULL) 907 907 continue; 908 908 909 - spin_lock_irqsave(&desc->lock, flags); 909 + raw_spin_lock_irqsave(&desc->lock, flags); 910 910 911 911 status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq); 912 912 if (status) { ··· 930 930 cpumask_setall(irq_to_desc(virq)->affinity); 931 931 desc->chip->set_affinity(virq, cpu_all_mask); 932 932 unlock: 933 - spin_unlock_irqrestore(&desc->lock, flags); 933 + raw_spin_unlock_irqrestore(&desc->lock, flags); 934 934 } 935 935 } 936 936 #endif
+2 -2
arch/powerpc/sysdev/fsl_msi.c
··· 173 173 u32 intr_index; 174 174 u32 have_shift = 0; 175 175 176 - spin_lock(&desc->lock); 176 + raw_spin_lock(&desc->lock); 177 177 if ((msi_data->feature & FSL_PIC_IP_MASK) == FSL_PIC_IP_IPIC) { 178 178 if (desc->chip->mask_ack) 179 179 desc->chip->mask_ack(irq); ··· 225 225 break; 226 226 } 227 227 unlock: 228 - spin_unlock(&desc->lock); 228 + raw_spin_unlock(&desc->lock); 229 229 } 230 230 231 231 static int __devinit fsl_of_msi_probe(struct of_device *dev,
+4 -4
arch/powerpc/sysdev/uic.c
··· 225 225 int src; 226 226 int subvirq; 227 227 228 - spin_lock(&desc->lock); 228 + raw_spin_lock(&desc->lock); 229 229 if (desc->status & IRQ_LEVEL) 230 230 desc->chip->mask(virq); 231 231 else 232 232 desc->chip->mask_ack(virq); 233 - spin_unlock(&desc->lock); 233 + raw_spin_unlock(&desc->lock); 234 234 235 235 msr = mfdcr(uic->dcrbase + UIC_MSR); 236 236 if (!msr) /* spurious interrupt */ ··· 242 242 generic_handle_irq(subvirq); 243 243 244 244 uic_irq_ret: 245 - spin_lock(&desc->lock); 245 + raw_spin_lock(&desc->lock); 246 246 if (desc->status & IRQ_LEVEL) 247 247 desc->chip->ack(virq); 248 248 if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask) 249 249 desc->chip->unmask(virq); 250 - spin_unlock(&desc->lock); 250 + raw_spin_unlock(&desc->lock); 251 251 } 252 252 253 253 static struct uic * __init uic_init_one(struct device_node *node)
+33 -33
arch/s390/include/asm/spinlock.h
··· 52 52 * (the type definitions are in asm/spinlock_types.h) 53 53 */ 54 54 55 - #define __raw_spin_is_locked(x) ((x)->owner_cpu != 0) 56 - #define __raw_spin_unlock_wait(lock) \ 57 - do { while (__raw_spin_is_locked(lock)) \ 58 - _raw_spin_relax(lock); } while (0) 55 + #define arch_spin_is_locked(x) ((x)->owner_cpu != 0) 56 + #define arch_spin_unlock_wait(lock) \ 57 + do { while (arch_spin_is_locked(lock)) \ 58 + arch_spin_relax(lock); } while (0) 59 59 60 - extern void _raw_spin_lock_wait(raw_spinlock_t *); 61 - extern void _raw_spin_lock_wait_flags(raw_spinlock_t *, unsigned long flags); 62 - extern int _raw_spin_trylock_retry(raw_spinlock_t *); 63 - extern void _raw_spin_relax(raw_spinlock_t *lock); 60 + extern void arch_spin_lock_wait(arch_spinlock_t *); 61 + extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags); 62 + extern int arch_spin_trylock_retry(arch_spinlock_t *); 63 + extern void arch_spin_relax(arch_spinlock_t *lock); 64 64 65 - static inline void __raw_spin_lock(raw_spinlock_t *lp) 65 + static inline void arch_spin_lock(arch_spinlock_t *lp) 66 66 { 67 67 int old; 68 68 69 69 old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); 70 70 if (likely(old == 0)) 71 71 return; 72 - _raw_spin_lock_wait(lp); 72 + arch_spin_lock_wait(lp); 73 73 } 74 74 75 - static inline void __raw_spin_lock_flags(raw_spinlock_t *lp, 75 + static inline void arch_spin_lock_flags(arch_spinlock_t *lp, 76 76 unsigned long flags) 77 77 { 78 78 int old; ··· 80 80 old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); 81 81 if (likely(old == 0)) 82 82 return; 83 - _raw_spin_lock_wait_flags(lp, flags); 83 + arch_spin_lock_wait_flags(lp, flags); 84 84 } 85 85 86 - static inline int __raw_spin_trylock(raw_spinlock_t *lp) 86 + static inline int arch_spin_trylock(arch_spinlock_t *lp) 87 87 { 88 88 int old; 89 89 90 90 old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); 91 91 if (likely(old == 0)) 92 92 return 1; 93 - return _raw_spin_trylock_retry(lp); 93 + return arch_spin_trylock_retry(lp); 94 94 } 95 95 96 - static inline void __raw_spin_unlock(raw_spinlock_t *lp) 96 + static inline void arch_spin_unlock(arch_spinlock_t *lp) 97 97 { 98 98 _raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0); 99 99 } ··· 113 113 * read_can_lock - would read_trylock() succeed? 114 114 * @lock: the rwlock in question. 115 115 */ 116 - #define __raw_read_can_lock(x) ((int)(x)->lock >= 0) 116 + #define arch_read_can_lock(x) ((int)(x)->lock >= 0) 117 117 118 118 /** 119 119 * write_can_lock - would write_trylock() succeed? 120 120 * @lock: the rwlock in question. 
121 121 */ 122 - #define __raw_write_can_lock(x) ((x)->lock == 0) 122 + #define arch_write_can_lock(x) ((x)->lock == 0) 123 123 124 - extern void _raw_read_lock_wait(raw_rwlock_t *lp); 125 - extern void _raw_read_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags); 126 - extern int _raw_read_trylock_retry(raw_rwlock_t *lp); 127 - extern void _raw_write_lock_wait(raw_rwlock_t *lp); 128 - extern void _raw_write_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags); 129 - extern int _raw_write_trylock_retry(raw_rwlock_t *lp); 124 + extern void _raw_read_lock_wait(arch_rwlock_t *lp); 125 + extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags); 126 + extern int _raw_read_trylock_retry(arch_rwlock_t *lp); 127 + extern void _raw_write_lock_wait(arch_rwlock_t *lp); 128 + extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags); 129 + extern int _raw_write_trylock_retry(arch_rwlock_t *lp); 130 130 131 - static inline void __raw_read_lock(raw_rwlock_t *rw) 131 + static inline void arch_read_lock(arch_rwlock_t *rw) 132 132 { 133 133 unsigned int old; 134 134 old = rw->lock & 0x7fffffffU; ··· 136 136 _raw_read_lock_wait(rw); 137 137 } 138 138 139 - static inline void __raw_read_lock_flags(raw_rwlock_t *rw, unsigned long flags) 139 + static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags) 140 140 { 141 141 unsigned int old; 142 142 old = rw->lock & 0x7fffffffU; ··· 144 144 _raw_read_lock_wait_flags(rw, flags); 145 145 } 146 146 147 - static inline void __raw_read_unlock(raw_rwlock_t *rw) 147 + static inline void arch_read_unlock(arch_rwlock_t *rw) 148 148 { 149 149 unsigned int old, cmp; 150 150 ··· 155 155 } while (cmp != old); 156 156 } 157 157 158 - static inline void __raw_write_lock(raw_rwlock_t *rw) 158 + static inline void arch_write_lock(arch_rwlock_t *rw) 159 159 { 160 160 if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) 161 161 _raw_write_lock_wait(rw); 162 162 } 163 163 164 - static inline void __raw_write_lock_flags(raw_rwlock_t *rw, unsigned long flags) 164 + static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags) 165 165 { 166 166 if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) 167 167 _raw_write_lock_wait_flags(rw, flags); 168 168 } 169 169 170 - static inline void __raw_write_unlock(raw_rwlock_t *rw) 170 + static inline void arch_write_unlock(arch_rwlock_t *rw) 171 171 { 172 172 _raw_compare_and_swap(&rw->lock, 0x80000000, 0); 173 173 } 174 174 175 - static inline int __raw_read_trylock(raw_rwlock_t *rw) 175 + static inline int arch_read_trylock(arch_rwlock_t *rw) 176 176 { 177 177 unsigned int old; 178 178 old = rw->lock & 0x7fffffffU; ··· 181 181 return _raw_read_trylock_retry(rw); 182 182 } 183 183 184 - static inline int __raw_write_trylock(raw_rwlock_t *rw) 184 + static inline int arch_write_trylock(arch_rwlock_t *rw) 185 185 { 186 186 if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)) 187 187 return 1; 188 188 return _raw_write_trylock_retry(rw); 189 189 } 190 190 191 - #define _raw_read_relax(lock) cpu_relax() 192 - #define _raw_write_relax(lock) cpu_relax() 191 + #define arch_read_relax(lock) cpu_relax() 192 + #define arch_write_relax(lock) cpu_relax() 193 193 194 194 #endif /* __ASM_SPINLOCK_H */
+4 -4
arch/s390/include/asm/spinlock_types.h
··· 7 7 8 8 typedef struct { 9 9 volatile unsigned int owner_cpu; 10 - } __attribute__ ((aligned (4))) raw_spinlock_t; 10 + } __attribute__ ((aligned (4))) arch_spinlock_t; 11 11 12 - #define __RAW_SPIN_LOCK_UNLOCKED { 0 } 12 + #define __ARCH_SPIN_LOCK_UNLOCKED { 0 } 13 13 14 14 typedef struct { 15 15 volatile unsigned int lock; 16 - } raw_rwlock_t; 16 + } arch_rwlock_t; 17 17 18 - #define __RAW_RW_LOCK_UNLOCKED { 0 } 18 + #define __ARCH_RW_LOCK_UNLOCKED { 0 } 19 19 20 20 #endif
+23 -23
arch/s390/lib/spinlock.c
··· 39 39 _raw_yield(); 40 40 } 41 41 42 - void _raw_spin_lock_wait(raw_spinlock_t *lp) 42 + void arch_spin_lock_wait(arch_spinlock_t *lp) 43 43 { 44 44 int count = spin_retry; 45 45 unsigned int cpu = ~smp_processor_id(); ··· 51 51 _raw_yield_cpu(~owner); 52 52 count = spin_retry; 53 53 } 54 - if (__raw_spin_is_locked(lp)) 54 + if (arch_spin_is_locked(lp)) 55 55 continue; 56 56 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) 57 57 return; 58 58 } 59 59 } 60 - EXPORT_SYMBOL(_raw_spin_lock_wait); 60 + EXPORT_SYMBOL(arch_spin_lock_wait); 61 61 62 - void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags) 62 + void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags) 63 63 { 64 64 int count = spin_retry; 65 65 unsigned int cpu = ~smp_processor_id(); ··· 72 72 _raw_yield_cpu(~owner); 73 73 count = spin_retry; 74 74 } 75 - if (__raw_spin_is_locked(lp)) 75 + if (arch_spin_is_locked(lp)) 76 76 continue; 77 77 local_irq_disable(); 78 78 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) ··· 80 80 local_irq_restore(flags); 81 81 } 82 82 } 83 - EXPORT_SYMBOL(_raw_spin_lock_wait_flags); 83 + EXPORT_SYMBOL(arch_spin_lock_wait_flags); 84 84 85 - int _raw_spin_trylock_retry(raw_spinlock_t *lp) 85 + int arch_spin_trylock_retry(arch_spinlock_t *lp) 86 86 { 87 87 unsigned int cpu = ~smp_processor_id(); 88 88 int count; 89 89 90 90 for (count = spin_retry; count > 0; count--) { 91 - if (__raw_spin_is_locked(lp)) 91 + if (arch_spin_is_locked(lp)) 92 92 continue; 93 93 if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) 94 94 return 1; 95 95 } 96 96 return 0; 97 97 } 98 - EXPORT_SYMBOL(_raw_spin_trylock_retry); 98 + EXPORT_SYMBOL(arch_spin_trylock_retry); 99 99 100 - void _raw_spin_relax(raw_spinlock_t *lock) 100 + void arch_spin_relax(arch_spinlock_t *lock) 101 101 { 102 102 unsigned int cpu = lock->owner_cpu; 103 103 if (cpu != 0) 104 104 _raw_yield_cpu(~cpu); 105 105 } 106 - EXPORT_SYMBOL(_raw_spin_relax); 106 + EXPORT_SYMBOL(arch_spin_relax); 107 107 108 - void _raw_read_lock_wait(raw_rwlock_t *rw) 108 + void _raw_read_lock_wait(arch_rwlock_t *rw) 109 109 { 110 110 unsigned int old; 111 111 int count = spin_retry; ··· 115 115 _raw_yield(); 116 116 count = spin_retry; 117 117 } 118 - if (!__raw_read_can_lock(rw)) 118 + if (!arch_read_can_lock(rw)) 119 119 continue; 120 120 old = rw->lock & 0x7fffffffU; 121 121 if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) ··· 124 124 } 125 125 EXPORT_SYMBOL(_raw_read_lock_wait); 126 126 127 - void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) 127 + void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) 128 128 { 129 129 unsigned int old; 130 130 int count = spin_retry; ··· 135 135 _raw_yield(); 136 136 count = spin_retry; 137 137 } 138 - if (!__raw_read_can_lock(rw)) 138 + if (!arch_read_can_lock(rw)) 139 139 continue; 140 140 old = rw->lock & 0x7fffffffU; 141 141 local_irq_disable(); ··· 145 145 } 146 146 EXPORT_SYMBOL(_raw_read_lock_wait_flags); 147 147 148 - int _raw_read_trylock_retry(raw_rwlock_t *rw) 148 + int _raw_read_trylock_retry(arch_rwlock_t *rw) 149 149 { 150 150 unsigned int old; 151 151 int count = spin_retry; 152 152 153 153 while (count-- > 0) { 154 - if (!__raw_read_can_lock(rw)) 154 + if (!arch_read_can_lock(rw)) 155 155 continue; 156 156 old = rw->lock & 0x7fffffffU; 157 157 if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) ··· 161 161 } 162 162 EXPORT_SYMBOL(_raw_read_trylock_retry); 163 163 164 - void _raw_write_lock_wait(raw_rwlock_t *rw) 164 + 
void _raw_write_lock_wait(arch_rwlock_t *rw) 165 165 { 166 166 int count = spin_retry; 167 167 ··· 170 170 _raw_yield(); 171 171 count = spin_retry; 172 172 } 173 - if (!__raw_write_can_lock(rw)) 173 + if (!arch_write_can_lock(rw)) 174 174 continue; 175 175 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) 176 176 return; ··· 178 178 } 179 179 EXPORT_SYMBOL(_raw_write_lock_wait); 180 180 181 - void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) 181 + void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) 182 182 { 183 183 int count = spin_retry; 184 184 ··· 188 188 _raw_yield(); 189 189 count = spin_retry; 190 190 } 191 - if (!__raw_write_can_lock(rw)) 191 + if (!arch_write_can_lock(rw)) 192 192 continue; 193 193 local_irq_disable(); 194 194 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) ··· 197 197 } 198 198 EXPORT_SYMBOL(_raw_write_lock_wait_flags); 199 199 200 - int _raw_write_trylock_retry(raw_rwlock_t *rw) 200 + int _raw_write_trylock_retry(arch_rwlock_t *rw) 201 201 { 202 202 int count = spin_retry; 203 203 204 204 while (count-- > 0) { 205 - if (!__raw_write_can_lock(rw)) 205 + if (!arch_write_can_lock(rw)) 206 206 continue; 207 207 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) 208 208 return 1;
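The s390 lock word stores the owner as ~cpu, which is what lets the slow paths renamed here do a directed yield: after spin_retry failed attempts the spinner hands its timeslice to the current holder instead of burning cycles. The shape of arch_spin_lock_wait() is roughly the following sketch:

void sketch_spin_lock_wait(arch_spinlock_t *lp)
{
	int count = spin_retry;
	unsigned int cpu = ~smp_processor_id();		/* value stored as owner */

	while (1) {
		if (count-- <= 0) {
			unsigned int owner = lp->owner_cpu;
			if (owner != 0)
				_raw_yield_cpu(~owner);	/* directed yield to the lock holder */
			count = spin_retry;
		}
		if (arch_spin_is_locked(lp))
			continue;
		/* compare-and-swap 0 -> ~cpu claims the lock */
		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
			return;
	}
}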
+29 -29
arch/sh/include/asm/spinlock.h
··· 23 23 * Your basic SMP spinlocks, allowing only a single CPU anywhere 24 24 */ 25 25 26 - #define __raw_spin_is_locked(x) ((x)->lock <= 0) 27 - #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 28 - #define __raw_spin_unlock_wait(x) \ 29 - do { while (__raw_spin_is_locked(x)) cpu_relax(); } while (0) 26 + #define arch_spin_is_locked(x) ((x)->lock <= 0) 27 + #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) 28 + #define arch_spin_unlock_wait(x) \ 29 + do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0) 30 30 31 31 /* 32 32 * Simple spin lock operations. There are two variants, one clears IRQ's ··· 34 34 * 35 35 * We make no fairness assumptions. They have a cost. 36 36 */ 37 - static inline void __raw_spin_lock(raw_spinlock_t *lock) 37 + static inline void arch_spin_lock(arch_spinlock_t *lock) 38 38 { 39 39 unsigned long tmp; 40 40 unsigned long oldval; 41 41 42 42 __asm__ __volatile__ ( 43 43 "1: \n\t" 44 - "movli.l @%2, %0 ! __raw_spin_lock \n\t" 44 + "movli.l @%2, %0 ! arch_spin_lock \n\t" 45 45 "mov %0, %1 \n\t" 46 46 "mov #0, %0 \n\t" 47 47 "movco.l %0, @%2 \n\t" ··· 54 54 ); 55 55 } 56 56 57 - static inline void __raw_spin_unlock(raw_spinlock_t *lock) 57 + static inline void arch_spin_unlock(arch_spinlock_t *lock) 58 58 { 59 59 unsigned long tmp; 60 60 61 61 __asm__ __volatile__ ( 62 - "mov #1, %0 ! __raw_spin_unlock \n\t" 62 + "mov #1, %0 ! arch_spin_unlock \n\t" 63 63 "mov.l %0, @%1 \n\t" 64 64 : "=&z" (tmp) 65 65 : "r" (&lock->lock) ··· 67 67 ); 68 68 } 69 69 70 - static inline int __raw_spin_trylock(raw_spinlock_t *lock) 70 + static inline int arch_spin_trylock(arch_spinlock_t *lock) 71 71 { 72 72 unsigned long tmp, oldval; 73 73 74 74 __asm__ __volatile__ ( 75 75 "1: \n\t" 76 - "movli.l @%2, %0 ! __raw_spin_trylock \n\t" 76 + "movli.l @%2, %0 ! arch_spin_trylock \n\t" 77 77 "mov %0, %1 \n\t" 78 78 "mov #0, %0 \n\t" 79 79 "movco.l %0, @%2 \n\t" ··· 100 100 * read_can_lock - would read_trylock() succeed? 101 101 * @lock: the rwlock in question. 102 102 */ 103 - #define __raw_read_can_lock(x) ((x)->lock > 0) 103 + #define arch_read_can_lock(x) ((x)->lock > 0) 104 104 105 105 /** 106 106 * write_can_lock - would write_trylock() succeed? 107 107 * @lock: the rwlock in question. 108 108 */ 109 - #define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) 109 + #define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) 110 110 111 - static inline void __raw_read_lock(raw_rwlock_t *rw) 111 + static inline void arch_read_lock(arch_rwlock_t *rw) 112 112 { 113 113 unsigned long tmp; 114 114 115 115 __asm__ __volatile__ ( 116 116 "1: \n\t" 117 - "movli.l @%1, %0 ! __raw_read_lock \n\t" 117 + "movli.l @%1, %0 ! arch_read_lock \n\t" 118 118 "cmp/pl %0 \n\t" 119 119 "bf 1b \n\t" 120 120 "add #-1, %0 \n\t" ··· 126 126 ); 127 127 } 128 128 129 - static inline void __raw_read_unlock(raw_rwlock_t *rw) 129 + static inline void arch_read_unlock(arch_rwlock_t *rw) 130 130 { 131 131 unsigned long tmp; 132 132 133 133 __asm__ __volatile__ ( 134 134 "1: \n\t" 135 - "movli.l @%1, %0 ! __raw_read_unlock \n\t" 135 + "movli.l @%1, %0 ! arch_read_unlock \n\t" 136 136 "add #1, %0 \n\t" 137 137 "movco.l %0, @%1 \n\t" 138 138 "bf 1b \n\t" ··· 142 142 ); 143 143 } 144 144 145 - static inline void __raw_write_lock(raw_rwlock_t *rw) 145 + static inline void arch_write_lock(arch_rwlock_t *rw) 146 146 { 147 147 unsigned long tmp; 148 148 149 149 __asm__ __volatile__ ( 150 150 "1: \n\t" 151 - "movli.l @%1, %0 ! __raw_write_lock \n\t" 151 + "movli.l @%1, %0 ! 
arch_write_lock \n\t" 152 152 "cmp/hs %2, %0 \n\t" 153 153 "bf 1b \n\t" 154 154 "sub %2, %0 \n\t" ··· 160 160 ); 161 161 } 162 162 163 - static inline void __raw_write_unlock(raw_rwlock_t *rw) 163 + static inline void arch_write_unlock(arch_rwlock_t *rw) 164 164 { 165 165 __asm__ __volatile__ ( 166 - "mov.l %1, @%0 ! __raw_write_unlock \n\t" 166 + "mov.l %1, @%0 ! arch_write_unlock \n\t" 167 167 : 168 168 : "r" (&rw->lock), "r" (RW_LOCK_BIAS) 169 169 : "t", "memory" 170 170 ); 171 171 } 172 172 173 - static inline int __raw_read_trylock(raw_rwlock_t *rw) 173 + static inline int arch_read_trylock(arch_rwlock_t *rw) 174 174 { 175 175 unsigned long tmp, oldval; 176 176 177 177 __asm__ __volatile__ ( 178 178 "1: \n\t" 179 - "movli.l @%2, %0 ! __raw_read_trylock \n\t" 179 + "movli.l @%2, %0 ! arch_read_trylock \n\t" 180 180 "mov %0, %1 \n\t" 181 181 "cmp/pl %0 \n\t" 182 182 "bf 2f \n\t" ··· 193 193 return (oldval > 0); 194 194 } 195 195 196 - static inline int __raw_write_trylock(raw_rwlock_t *rw) 196 + static inline int arch_write_trylock(arch_rwlock_t *rw) 197 197 { 198 198 unsigned long tmp, oldval; 199 199 200 200 __asm__ __volatile__ ( 201 201 "1: \n\t" 202 - "movli.l @%2, %0 ! __raw_write_trylock \n\t" 202 + "movli.l @%2, %0 ! arch_write_trylock \n\t" 203 203 "mov %0, %1 \n\t" 204 204 "cmp/hs %3, %0 \n\t" 205 205 "bf 2f \n\t" ··· 216 216 return (oldval > (RW_LOCK_BIAS - 1)); 217 217 } 218 218 219 - #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) 220 - #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) 219 + #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) 220 + #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) 221 221 222 - #define _raw_spin_relax(lock) cpu_relax() 223 - #define _raw_read_relax(lock) cpu_relax() 224 - #define _raw_write_relax(lock) cpu_relax() 222 + #define arch_spin_relax(lock) cpu_relax() 223 + #define arch_read_relax(lock) cpu_relax() 224 + #define arch_write_relax(lock) cpu_relax() 225 225 226 226 #endif /* __ASM_SH_SPINLOCK_H */
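The SH lock word holds 1 when free; movli.l/movco.l form a load-linked/store-conditional pair, so arch_spin_lock() keeps storing 0 until the value it displaced was non-zero, i.e. until it is the CPU that moved the lock from 1 to 0. In portable terms the semantics are equivalent to this small user-space sketch with C11 atomics (illustration only, not kernel code):

#include <stdatomic.h>

typedef struct { atomic_uint lock; } sketch_spinlock_t;	/* 1 = unlocked, 0 = locked */

static void sketch_spin_lock(sketch_spinlock_t *l)
{
	/* retry while the previous value was already 0 (someone else holds it) */
	while (atomic_exchange_explicit(&l->lock, 0, memory_order_acquire) == 0)
		;
}

static void sketch_spin_unlock(sketch_spinlock_t *l)
{
	atomic_store_explicit(&l->lock, 1, memory_order_release);
}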
+4 -4
arch/sh/include/asm/spinlock_types.h
··· 7 7 8 8 typedef struct { 9 9 volatile unsigned int lock; 10 - } raw_spinlock_t; 10 + } arch_spinlock_t; 11 11 12 - #define __RAW_SPIN_LOCK_UNLOCKED { 1 } 12 + #define __ARCH_SPIN_LOCK_UNLOCKED { 1 } 13 13 14 14 typedef struct { 15 15 volatile unsigned int lock; 16 - } raw_rwlock_t; 16 + } arch_rwlock_t; 17 17 18 18 #define RW_LOCK_BIAS 0x01000000 19 - #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } 19 + #define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } 20 20 21 21 #endif
+2 -2
arch/sh/kernel/irq.c
··· 76 76 if (!desc) 77 77 return 0; 78 78 79 - spin_lock_irqsave(&desc->lock, flags); 79 + raw_spin_lock_irqsave(&desc->lock, flags); 80 80 for_each_online_cpu(j) 81 81 any_count |= kstat_irqs_cpu(i, j); 82 82 action = desc->action; ··· 97 97 98 98 seq_putc(p, '\n'); 99 99 out: 100 - spin_unlock_irqrestore(&desc->lock, flags); 100 + raw_spin_unlock_irqrestore(&desc->lock, flags); 101 101 return 0; 102 102 } 103 103 #endif
+31 -31
arch/sparc/include/asm/spinlock_32.h
··· 10 10 11 11 #include <asm/psr.h> 12 12 13 - #define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0) 13 + #define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0) 14 14 15 - #define __raw_spin_unlock_wait(lock) \ 16 - do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) 15 + #define arch_spin_unlock_wait(lock) \ 16 + do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) 17 17 18 - static inline void __raw_spin_lock(raw_spinlock_t *lock) 18 + static inline void arch_spin_lock(arch_spinlock_t *lock) 19 19 { 20 20 __asm__ __volatile__( 21 21 "\n1:\n\t" ··· 35 35 : "g2", "memory", "cc"); 36 36 } 37 37 38 - static inline int __raw_spin_trylock(raw_spinlock_t *lock) 38 + static inline int arch_spin_trylock(arch_spinlock_t *lock) 39 39 { 40 40 unsigned int result; 41 41 __asm__ __volatile__("ldstub [%1], %0" ··· 45 45 return (result == 0); 46 46 } 47 47 48 - static inline void __raw_spin_unlock(raw_spinlock_t *lock) 48 + static inline void arch_spin_unlock(arch_spinlock_t *lock) 49 49 { 50 50 __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory"); 51 51 } ··· 65 65 * Sort of like atomic_t's on Sparc, but even more clever. 66 66 * 67 67 * ------------------------------------ 68 - * | 24-bit counter | wlock | raw_rwlock_t 68 + * | 24-bit counter | wlock | arch_rwlock_t 69 69 * ------------------------------------ 70 70 * 31 8 7 0 71 71 * ··· 76 76 * 77 77 * Unfortunately this scheme limits us to ~16,000,000 cpus. 78 78 */ 79 - static inline void arch_read_lock(raw_rwlock_t *rw) 79 + static inline void __arch_read_lock(arch_rwlock_t *rw) 80 80 { 81 - register raw_rwlock_t *lp asm("g1"); 81 + register arch_rwlock_t *lp asm("g1"); 82 82 lp = rw; 83 83 __asm__ __volatile__( 84 84 "mov %%o7, %%g4\n\t" ··· 89 89 : "g2", "g4", "memory", "cc"); 90 90 } 91 91 92 - #define __raw_read_lock(lock) \ 92 + #define arch_read_lock(lock) \ 93 93 do { unsigned long flags; \ 94 94 local_irq_save(flags); \ 95 - arch_read_lock(lock); \ 95 + __arch_read_lock(lock); \ 96 96 local_irq_restore(flags); \ 97 97 } while(0) 98 98 99 - static inline void arch_read_unlock(raw_rwlock_t *rw) 99 + static inline void __arch_read_unlock(arch_rwlock_t *rw) 100 100 { 101 - register raw_rwlock_t *lp asm("g1"); 101 + register arch_rwlock_t *lp asm("g1"); 102 102 lp = rw; 103 103 __asm__ __volatile__( 104 104 "mov %%o7, %%g4\n\t" ··· 109 109 : "g2", "g4", "memory", "cc"); 110 110 } 111 111 112 - #define __raw_read_unlock(lock) \ 112 + #define arch_read_unlock(lock) \ 113 113 do { unsigned long flags; \ 114 114 local_irq_save(flags); \ 115 - arch_read_unlock(lock); \ 115 + __arch_read_unlock(lock); \ 116 116 local_irq_restore(flags); \ 117 117 } while(0) 118 118 119 - static inline void __raw_write_lock(raw_rwlock_t *rw) 119 + static inline void arch_write_lock(arch_rwlock_t *rw) 120 120 { 121 - register raw_rwlock_t *lp asm("g1"); 121 + register arch_rwlock_t *lp asm("g1"); 122 122 lp = rw; 123 123 __asm__ __volatile__( 124 124 "mov %%o7, %%g4\n\t" ··· 130 130 *(volatile __u32 *)&lp->lock = ~0U; 131 131 } 132 132 133 - static inline int __raw_write_trylock(raw_rwlock_t *rw) 133 + static inline int arch_write_trylock(arch_rwlock_t *rw) 134 134 { 135 135 unsigned int val; 136 136 ··· 150 150 return (val == 0); 151 151 } 152 152 153 - static inline int arch_read_trylock(raw_rwlock_t *rw) 153 + static inline int __arch_read_trylock(arch_rwlock_t *rw) 154 154 { 155 - register raw_rwlock_t *lp asm("g1"); 155 + register arch_rwlock_t *lp asm("g1"); 156 156 register int 
res asm("o0"); 157 157 lp = rw; 158 158 __asm__ __volatile__( ··· 165 165 return res; 166 166 } 167 167 168 - #define __raw_read_trylock(lock) \ 168 + #define arch_read_trylock(lock) \ 169 169 ({ unsigned long flags; \ 170 170 int res; \ 171 171 local_irq_save(flags); \ 172 - res = arch_read_trylock(lock); \ 172 + res = __arch_read_trylock(lock); \ 173 173 local_irq_restore(flags); \ 174 174 res; \ 175 175 }) 176 176 177 - #define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0) 177 + #define arch_write_unlock(rw) do { (rw)->lock = 0; } while(0) 178 178 179 - #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) 180 - #define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw) 181 - #define __raw_write_lock_flags(rw, flags) __raw_write_lock(rw) 179 + #define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) 180 + #define arch_read_lock_flags(rw, flags) arch_read_lock(rw) 181 + #define arch_write_lock_flags(rw, flags) arch_write_lock(rw) 182 182 183 - #define _raw_spin_relax(lock) cpu_relax() 184 - #define _raw_read_relax(lock) cpu_relax() 185 - #define _raw_write_relax(lock) cpu_relax() 183 + #define arch_spin_relax(lock) cpu_relax() 184 + #define arch_read_relax(lock) cpu_relax() 185 + #define arch_write_relax(lock) cpu_relax() 186 186 187 - #define __raw_read_can_lock(rw) (!((rw)->lock & 0xff)) 188 - #define __raw_write_can_lock(rw) (!(rw)->lock) 187 + #define arch_read_can_lock(rw) (!((rw)->lock & 0xff)) 188 + #define arch_write_can_lock(rw) (!(rw)->lock) 189 189 190 190 #endif /* !(__ASSEMBLY__) */ 191 191
+25 -25
arch/sparc/include/asm/spinlock_64.h
··· 21 21 * the spinner sections must be pre-V9 branches. 22 22 */ 23 23 24 - #define __raw_spin_is_locked(lp) ((lp)->lock != 0) 24 + #define arch_spin_is_locked(lp) ((lp)->lock != 0) 25 25 26 - #define __raw_spin_unlock_wait(lp) \ 26 + #define arch_spin_unlock_wait(lp) \ 27 27 do { rmb(); \ 28 28 } while((lp)->lock) 29 29 30 - static inline void __raw_spin_lock(raw_spinlock_t *lock) 30 + static inline void arch_spin_lock(arch_spinlock_t *lock) 31 31 { 32 32 unsigned long tmp; 33 33 ··· 46 46 : "memory"); 47 47 } 48 48 49 - static inline int __raw_spin_trylock(raw_spinlock_t *lock) 49 + static inline int arch_spin_trylock(arch_spinlock_t *lock) 50 50 { 51 51 unsigned long result; 52 52 ··· 59 59 return (result == 0UL); 60 60 } 61 61 62 - static inline void __raw_spin_unlock(raw_spinlock_t *lock) 62 + static inline void arch_spin_unlock(arch_spinlock_t *lock) 63 63 { 64 64 __asm__ __volatile__( 65 65 " stb %%g0, [%0]" ··· 68 68 : "memory"); 69 69 } 70 70 71 - static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) 71 + static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) 72 72 { 73 73 unsigned long tmp1, tmp2; 74 74 ··· 92 92 93 93 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */ 94 94 95 - static void inline arch_read_lock(raw_rwlock_t *lock) 95 + static void inline arch_read_lock(arch_rwlock_t *lock) 96 96 { 97 97 unsigned long tmp1, tmp2; 98 98 ··· 115 115 : "memory"); 116 116 } 117 117 118 - static int inline arch_read_trylock(raw_rwlock_t *lock) 118 + static int inline arch_read_trylock(arch_rwlock_t *lock) 119 119 { 120 120 int tmp1, tmp2; 121 121 ··· 136 136 return tmp1; 137 137 } 138 138 139 - static void inline arch_read_unlock(raw_rwlock_t *lock) 139 + static void inline arch_read_unlock(arch_rwlock_t *lock) 140 140 { 141 141 unsigned long tmp1, tmp2; 142 142 ··· 152 152 : "memory"); 153 153 } 154 154 155 - static void inline arch_write_lock(raw_rwlock_t *lock) 155 + static void inline arch_write_lock(arch_rwlock_t *lock) 156 156 { 157 157 unsigned long mask, tmp1, tmp2; 158 158 ··· 177 177 : "memory"); 178 178 } 179 179 180 - static void inline arch_write_unlock(raw_rwlock_t *lock) 180 + static void inline arch_write_unlock(arch_rwlock_t *lock) 181 181 { 182 182 __asm__ __volatile__( 183 183 " stw %%g0, [%0]" ··· 186 186 : "memory"); 187 187 } 188 188 189 - static int inline arch_write_trylock(raw_rwlock_t *lock) 189 + static int inline arch_write_trylock(arch_rwlock_t *lock) 190 190 { 191 191 unsigned long mask, tmp1, tmp2, result; 192 192 ··· 210 210 return result; 211 211 } 212 212 213 - #define __raw_read_lock(p) arch_read_lock(p) 214 - #define __raw_read_lock_flags(p, f) arch_read_lock(p) 215 - #define __raw_read_trylock(p) arch_read_trylock(p) 216 - #define __raw_read_unlock(p) arch_read_unlock(p) 217 - #define __raw_write_lock(p) arch_write_lock(p) 218 - #define __raw_write_lock_flags(p, f) arch_write_lock(p) 219 - #define __raw_write_unlock(p) arch_write_unlock(p) 220 - #define __raw_write_trylock(p) arch_write_trylock(p) 213 + #define arch_read_lock(p) arch_read_lock(p) 214 + #define arch_read_lock_flags(p, f) arch_read_lock(p) 215 + #define arch_read_trylock(p) arch_read_trylock(p) 216 + #define arch_read_unlock(p) arch_read_unlock(p) 217 + #define arch_write_lock(p) arch_write_lock(p) 218 + #define arch_write_lock_flags(p, f) arch_write_lock(p) 219 + #define arch_write_unlock(p) arch_write_unlock(p) 220 + #define arch_write_trylock(p) arch_write_trylock(p) 221 221 222 - #define 
__raw_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) 223 - #define __raw_write_can_lock(rw) (!(rw)->lock) 222 + #define arch_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) 223 + #define arch_write_can_lock(rw) (!(rw)->lock) 224 224 225 - #define _raw_spin_relax(lock) cpu_relax() 226 - #define _raw_read_relax(lock) cpu_relax() 227 - #define _raw_write_relax(lock) cpu_relax() 225 + #define arch_spin_relax(lock) cpu_relax() 226 + #define arch_read_relax(lock) cpu_relax() 227 + #define arch_write_relax(lock) cpu_relax() 228 228 229 229 #endif /* !(__ASSEMBLY__) */ 230 230
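The sparc64 hunk above renames the arch-level primitives from __raw_spin_*()/raw_spinlock_t to arch_spin_*()/arch_spinlock_t without changing their implementation. A minimal sketch of how core code calls the renamed entry points; the lock and function names here are illustrative, not part of the patch:

#include <linux/spinlock.h>

static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static void demo_critical_section(void)
{
        arch_spin_lock(&demo_lock);     /* was __raw_spin_lock() */
        /* ... work protected by the arch-level lock ... */
        arch_spin_unlock(&demo_lock);   /* was __raw_spin_unlock() */
}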
+4 -4
arch/sparc/include/asm/spinlock_types.h
··· 7 7 8 8 typedef struct { 9 9 volatile unsigned char lock; 10 - } raw_spinlock_t; 10 + } arch_spinlock_t; 11 11 12 - #define __RAW_SPIN_LOCK_UNLOCKED { 0 } 12 + #define __ARCH_SPIN_LOCK_UNLOCKED { 0 } 13 13 14 14 typedef struct { 15 15 volatile unsigned int lock; 16 - } raw_rwlock_t; 16 + } arch_rwlock_t; 17 17 18 - #define __RAW_RW_LOCK_UNLOCKED { 0 } 18 + #define __ARCH_RW_LOCK_UNLOCKED { 0 } 19 19 20 20 #endif
+4 -4
arch/sparc/kernel/irq_64.c
··· 176 176 } 177 177 178 178 if (i < NR_IRQS) { 179 - spin_lock_irqsave(&irq_desc[i].lock, flags); 179 + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); 180 180 action = irq_desc[i].action; 181 181 if (!action) 182 182 goto skip; ··· 195 195 196 196 seq_putc(p, '\n'); 197 197 skip: 198 - spin_unlock_irqrestore(&irq_desc[i].lock, flags); 198 + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); 199 199 } else if (i == NR_IRQS) { 200 200 seq_printf(p, "NMI: "); 201 201 for_each_online_cpu(j) ··· 785 785 for (irq = 0; irq < NR_IRQS; irq++) { 786 786 unsigned long flags; 787 787 788 - spin_lock_irqsave(&irq_desc[irq].lock, flags); 788 + raw_spin_lock_irqsave(&irq_desc[irq].lock, flags); 789 789 if (irq_desc[irq].action && 790 790 !(irq_desc[irq].status & IRQ_PER_CPU)) { 791 791 if (irq_desc[irq].chip->set_affinity) 792 792 irq_desc[irq].chip->set_affinity(irq, 793 793 irq_desc[irq].affinity); 794 794 } 795 - spin_unlock_irqrestore(&irq_desc[irq].lock, flags); 795 + raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags); 796 796 } 797 797 798 798 tick_ops->disable_irq();
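The genirq conversion makes irq_desc.lock a raw_spinlock_t, so every arch caller in this merge switches to the raw_spin_*() API. A condensed sketch of the pattern repeated across the IRQ hunks (the helper name is illustrative):

#include <linux/irq.h>
#include <linux/spinlock.h>

static void demo_show_one_irq(unsigned int i)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
        /* inspect irq_desc[i].action and per-CPU counts under the lock */
        raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
}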
+2 -2
arch/um/kernel/irq.c
··· 34 34 } 35 35 36 36 if (i < NR_IRQS) { 37 - spin_lock_irqsave(&irq_desc[i].lock, flags); 37 + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); 38 38 action = irq_desc[i].action; 39 39 if (!action) 40 40 goto skip; ··· 53 53 54 54 seq_putc(p, '\n'); 55 55 skip: 56 - spin_unlock_irqrestore(&irq_desc[i].lock, flags); 56 + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); 57 57 } else if (i == NR_IRQS) 58 58 seq_putc(p, '\n'); 59 59
+7 -7
arch/x86/include/asm/paravirt.h
··· 731 731 732 732 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS) 733 733 734 - static inline int __raw_spin_is_locked(struct raw_spinlock *lock) 734 + static inline int arch_spin_is_locked(struct arch_spinlock *lock) 735 735 { 736 736 return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock); 737 737 } 738 738 739 - static inline int __raw_spin_is_contended(struct raw_spinlock *lock) 739 + static inline int arch_spin_is_contended(struct arch_spinlock *lock) 740 740 { 741 741 return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock); 742 742 } 743 - #define __raw_spin_is_contended __raw_spin_is_contended 743 + #define arch_spin_is_contended arch_spin_is_contended 744 744 745 - static __always_inline void __raw_spin_lock(struct raw_spinlock *lock) 745 + static __always_inline void arch_spin_lock(struct arch_spinlock *lock) 746 746 { 747 747 PVOP_VCALL1(pv_lock_ops.spin_lock, lock); 748 748 } 749 749 750 - static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock, 750 + static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock, 751 751 unsigned long flags) 752 752 { 753 753 PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags); 754 754 } 755 755 756 - static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock) 756 + static __always_inline int arch_spin_trylock(struct arch_spinlock *lock) 757 757 { 758 758 return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock); 759 759 } 760 760 761 - static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock) 761 + static __always_inline void arch_spin_unlock(struct arch_spinlock *lock) 762 762 { 763 763 PVOP_VCALL1(pv_lock_ops.spin_unlock, lock); 764 764 }
+7 -7
arch/x86/include/asm/paravirt_types.h
··· 318 318 phys_addr_t phys, pgprot_t flags); 319 319 }; 320 320 321 - struct raw_spinlock; 321 + struct arch_spinlock; 322 322 struct pv_lock_ops { 323 - int (*spin_is_locked)(struct raw_spinlock *lock); 324 - int (*spin_is_contended)(struct raw_spinlock *lock); 325 - void (*spin_lock)(struct raw_spinlock *lock); 326 - void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags); 327 - int (*spin_trylock)(struct raw_spinlock *lock); 328 - void (*spin_unlock)(struct raw_spinlock *lock); 323 + int (*spin_is_locked)(struct arch_spinlock *lock); 324 + int (*spin_is_contended)(struct arch_spinlock *lock); 325 + void (*spin_lock)(struct arch_spinlock *lock); 326 + void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags); 327 + int (*spin_trylock)(struct arch_spinlock *lock); 328 + void (*spin_unlock)(struct arch_spinlock *lock); 329 329 }; 330 330 331 331 /* This contains all the paravirt structures: we get a convenient
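pv_lock_ops now traffics in struct arch_spinlock rather than struct raw_spinlock. A sketch of how a backend would populate the table under the new signatures; the demo functions are hypothetical and only the prototypes matter (assuming the usual extern declaration of pv_lock_ops from the paravirt headers):

#include <asm/paravirt.h>

static int demo_spin_is_locked(struct arch_spinlock *lock)
{
        return 0;               /* placeholder backend */
}

static void demo_spin_lock(struct arch_spinlock *lock)
{
        /* acquire the lock in a hypervisor-friendly way */
}

static void demo_init_lock_ops(void)
{
        pv_lock_ops.spin_is_locked = demo_spin_is_locked;
        pv_lock_ops.spin_lock = demo_spin_lock;
}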
+31 -31
arch/x86/include/asm/spinlock.h
··· 58 58 #if (NR_CPUS < 256) 59 59 #define TICKET_SHIFT 8 60 60 61 - static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) 61 + static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock) 62 62 { 63 63 short inc = 0x0100; 64 64 ··· 77 77 : "memory", "cc"); 78 78 } 79 79 80 - static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) 80 + static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock) 81 81 { 82 82 int tmp, new; 83 83 ··· 96 96 return tmp; 97 97 } 98 98 99 - static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) 99 + static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) 100 100 { 101 101 asm volatile(UNLOCK_LOCK_PREFIX "incb %0" 102 102 : "+m" (lock->slock) ··· 106 106 #else 107 107 #define TICKET_SHIFT 16 108 108 109 - static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) 109 + static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock) 110 110 { 111 111 int inc = 0x00010000; 112 112 int tmp; ··· 127 127 : "memory", "cc"); 128 128 } 129 129 130 - static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) 130 + static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock) 131 131 { 132 132 int tmp; 133 133 int new; ··· 149 149 return tmp; 150 150 } 151 151 152 - static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) 152 + static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) 153 153 { 154 154 asm volatile(UNLOCK_LOCK_PREFIX "incw %0" 155 155 : "+m" (lock->slock) ··· 158 158 } 159 159 #endif 160 160 161 - static inline int __ticket_spin_is_locked(raw_spinlock_t *lock) 161 + static inline int __ticket_spin_is_locked(arch_spinlock_t *lock) 162 162 { 163 163 int tmp = ACCESS_ONCE(lock->slock); 164 164 165 165 return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1)); 166 166 } 167 167 168 - static inline int __ticket_spin_is_contended(raw_spinlock_t *lock) 168 + static inline int __ticket_spin_is_contended(arch_spinlock_t *lock) 169 169 { 170 170 int tmp = ACCESS_ONCE(lock->slock); 171 171 ··· 174 174 175 175 #ifndef CONFIG_PARAVIRT_SPINLOCKS 176 176 177 - static inline int __raw_spin_is_locked(raw_spinlock_t *lock) 177 + static inline int arch_spin_is_locked(arch_spinlock_t *lock) 178 178 { 179 179 return __ticket_spin_is_locked(lock); 180 180 } 181 181 182 - static inline int __raw_spin_is_contended(raw_spinlock_t *lock) 182 + static inline int arch_spin_is_contended(arch_spinlock_t *lock) 183 183 { 184 184 return __ticket_spin_is_contended(lock); 185 185 } 186 - #define __raw_spin_is_contended __raw_spin_is_contended 186 + #define arch_spin_is_contended arch_spin_is_contended 187 187 188 - static __always_inline void __raw_spin_lock(raw_spinlock_t *lock) 188 + static __always_inline void arch_spin_lock(arch_spinlock_t *lock) 189 189 { 190 190 __ticket_spin_lock(lock); 191 191 } 192 192 193 - static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock) 193 + static __always_inline int arch_spin_trylock(arch_spinlock_t *lock) 194 194 { 195 195 return __ticket_spin_trylock(lock); 196 196 } 197 197 198 - static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock) 198 + static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) 199 199 { 200 200 __ticket_spin_unlock(lock); 201 201 } 202 202 203 - static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock, 203 + static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock, 204 204 unsigned long flags) 205 205 { 206 - 
__raw_spin_lock(lock); 206 + arch_spin_lock(lock); 207 207 } 208 208 209 209 #endif /* CONFIG_PARAVIRT_SPINLOCKS */ 210 210 211 - static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) 211 + static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) 212 212 { 213 - while (__raw_spin_is_locked(lock)) 213 + while (arch_spin_is_locked(lock)) 214 214 cpu_relax(); 215 215 } 216 216 ··· 232 232 * read_can_lock - would read_trylock() succeed? 233 233 * @lock: the rwlock in question. 234 234 */ 235 - static inline int __raw_read_can_lock(raw_rwlock_t *lock) 235 + static inline int arch_read_can_lock(arch_rwlock_t *lock) 236 236 { 237 237 return (int)(lock)->lock > 0; 238 238 } ··· 241 241 * write_can_lock - would write_trylock() succeed? 242 242 * @lock: the rwlock in question. 243 243 */ 244 - static inline int __raw_write_can_lock(raw_rwlock_t *lock) 244 + static inline int arch_write_can_lock(arch_rwlock_t *lock) 245 245 { 246 246 return (lock)->lock == RW_LOCK_BIAS; 247 247 } 248 248 249 - static inline void __raw_read_lock(raw_rwlock_t *rw) 249 + static inline void arch_read_lock(arch_rwlock_t *rw) 250 250 { 251 251 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" 252 252 "jns 1f\n" ··· 255 255 ::LOCK_PTR_REG (rw) : "memory"); 256 256 } 257 257 258 - static inline void __raw_write_lock(raw_rwlock_t *rw) 258 + static inline void arch_write_lock(arch_rwlock_t *rw) 259 259 { 260 260 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t" 261 261 "jz 1f\n" ··· 264 264 ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory"); 265 265 } 266 266 267 - static inline int __raw_read_trylock(raw_rwlock_t *lock) 267 + static inline int arch_read_trylock(arch_rwlock_t *lock) 268 268 { 269 269 atomic_t *count = (atomic_t *)lock; 270 270 ··· 274 274 return 0; 275 275 } 276 276 277 - static inline int __raw_write_trylock(raw_rwlock_t *lock) 277 + static inline int arch_write_trylock(arch_rwlock_t *lock) 278 278 { 279 279 atomic_t *count = (atomic_t *)lock; 280 280 ··· 284 284 return 0; 285 285 } 286 286 287 - static inline void __raw_read_unlock(raw_rwlock_t *rw) 287 + static inline void arch_read_unlock(arch_rwlock_t *rw) 288 288 { 289 289 asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory"); 290 290 } 291 291 292 - static inline void __raw_write_unlock(raw_rwlock_t *rw) 292 + static inline void arch_write_unlock(arch_rwlock_t *rw) 293 293 { 294 294 asm volatile(LOCK_PREFIX "addl %1, %0" 295 295 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory"); 296 296 } 297 297 298 - #define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) 299 - #define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) 298 + #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) 299 + #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) 300 300 301 - #define _raw_spin_relax(lock) cpu_relax() 302 - #define _raw_read_relax(lock) cpu_relax() 303 - #define _raw_write_relax(lock) cpu_relax() 301 + #define arch_spin_relax(lock) cpu_relax() 302 + #define arch_read_relax(lock) cpu_relax() 303 + #define arch_write_relax(lock) cpu_relax() 304 304 305 305 /* The {read|write|spin}_lock() on x86 are full memory barriers. */ 306 306 static inline void smp_mb__after_lock(void) { }
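The x86 ticket-lock predicates are only renamed; the underlying arithmetic is unchanged: the lock is held while the "next ticket" half of slock differs from the "current owner" half. A standalone illustration of that check for the NR_CPUS < 256 layout (TICKET_SHIFT == 8), written as plain C rather than kernel code:

#include <stdio.h>

#define TICKET_SHIFT 8  /* byte-sized tickets, NR_CPUS < 256 */

/* Same test as __ticket_spin_is_locked(): locked while owner != next. */
static int ticket_is_locked(unsigned int slock)
{
        return !!(((slock >> TICKET_SHIFT) ^ slock) & ((1 << TICKET_SHIFT) - 1));
}

int main(void)
{
        printf("%d\n", ticket_is_locked(0x0000));       /* 0: never taken */
        printf("%d\n", ticket_is_locked(0x0100));       /* 1: one holder */
        printf("%d\n", ticket_is_locked(0x0101));       /* 0: taken once, released */
        return 0;
}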
+5 -5
arch/x86/include/asm/spinlock_types.h
··· 5 5 # error "please don't include this file directly" 6 6 #endif 7 7 8 - typedef struct raw_spinlock { 8 + typedef struct arch_spinlock { 9 9 unsigned int slock; 10 - } raw_spinlock_t; 10 + } arch_spinlock_t; 11 11 12 - #define __RAW_SPIN_LOCK_UNLOCKED { 0 } 12 + #define __ARCH_SPIN_LOCK_UNLOCKED { 0 } 13 13 14 14 typedef struct { 15 15 unsigned int lock; 16 - } raw_rwlock_t; 16 + } arch_rwlock_t; 17 17 18 - #define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } 18 + #define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } 19 19 20 20 #endif /* _ASM_X86_SPINLOCK_TYPES_H */
+2 -2
arch/x86/kernel/apic/io_apic.c
··· 2431 2431 continue; 2432 2432 2433 2433 cfg = irq_cfg(irq); 2434 - spin_lock(&desc->lock); 2434 + raw_spin_lock(&desc->lock); 2435 2435 2436 2436 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) 2437 2437 goto unlock; ··· 2450 2450 } 2451 2451 __get_cpu_var(vector_irq)[vector] = -1; 2452 2452 unlock: 2453 - spin_unlock(&desc->lock); 2453 + raw_spin_unlock(&desc->lock); 2454 2454 } 2455 2455 2456 2456 irq_exit();
+4 -4
arch/x86/kernel/dumpstack.c
··· 188 188 } 189 189 EXPORT_SYMBOL(dump_stack); 190 190 191 - static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED; 191 + static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED; 192 192 static int die_owner = -1; 193 193 static unsigned int die_nest_count; 194 194 ··· 207 207 /* racy, but better than risking deadlock. */ 208 208 raw_local_irq_save(flags); 209 209 cpu = smp_processor_id(); 210 - if (!__raw_spin_trylock(&die_lock)) { 210 + if (!arch_spin_trylock(&die_lock)) { 211 211 if (cpu == die_owner) 212 212 /* nested oops. should stop eventually */; 213 213 else 214 - __raw_spin_lock(&die_lock); 214 + arch_spin_lock(&die_lock); 215 215 } 216 216 die_nest_count++; 217 217 die_owner = cpu; ··· 231 231 die_nest_count--; 232 232 if (!die_nest_count) 233 233 /* Nest count reaches zero, release the lock. */ 234 - __raw_spin_unlock(&die_lock); 234 + arch_spin_unlock(&die_lock); 235 235 raw_local_irq_restore(flags); 236 236 oops_exit(); 237 237
+7 -7
arch/x86/kernel/irq.c
··· 149 149 if (!desc) 150 150 return 0; 151 151 152 - spin_lock_irqsave(&desc->lock, flags); 152 + raw_spin_lock_irqsave(&desc->lock, flags); 153 153 for_each_online_cpu(j) 154 154 any_count |= kstat_irqs_cpu(i, j); 155 155 action = desc->action; ··· 170 170 171 171 seq_putc(p, '\n'); 172 172 out: 173 - spin_unlock_irqrestore(&desc->lock, flags); 173 + raw_spin_unlock_irqrestore(&desc->lock, flags); 174 174 return 0; 175 175 } 176 176 ··· 294 294 continue; 295 295 296 296 /* interrupt's are disabled at this point */ 297 - spin_lock(&desc->lock); 297 + raw_spin_lock(&desc->lock); 298 298 299 299 affinity = desc->affinity; 300 300 if (!irq_has_action(irq) || 301 301 cpumask_equal(affinity, cpu_online_mask)) { 302 - spin_unlock(&desc->lock); 302 + raw_spin_unlock(&desc->lock); 303 303 continue; 304 304 } 305 305 ··· 326 326 if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask) 327 327 desc->chip->unmask(irq); 328 328 329 - spin_unlock(&desc->lock); 329 + raw_spin_unlock(&desc->lock); 330 330 331 331 if (break_affinity && set_affinity) 332 332 printk("Broke affinity for irq %i\n", irq); ··· 356 356 irq = __get_cpu_var(vector_irq)[vector]; 357 357 358 358 desc = irq_to_desc(irq); 359 - spin_lock(&desc->lock); 359 + raw_spin_lock(&desc->lock); 360 360 if (desc->chip->retrigger) 361 361 desc->chip->retrigger(irq); 362 - spin_unlock(&desc->lock); 362 + raw_spin_unlock(&desc->lock); 363 363 } 364 364 } 365 365 }
+2 -2
arch/x86/kernel/paravirt-spinlocks.c
··· 8 8 #include <asm/paravirt.h> 9 9 10 10 static inline void 11 - default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) 11 + default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) 12 12 { 13 - __raw_spin_lock(lock); 13 + arch_spin_lock(lock); 14 14 } 15 15 16 16 struct pv_lock_ops pv_lock_ops = {
+5 -5
arch/x86/kernel/tsc_sync.c
··· 33 33 * we want to have the fastest, inlined, non-debug version 34 34 * of a critical section, to be able to prove TSC time-warps: 35 35 */ 36 - static __cpuinitdata raw_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED; 36 + static __cpuinitdata arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED; 37 37 38 38 static __cpuinitdata cycles_t last_tsc; 39 39 static __cpuinitdata cycles_t max_warp; ··· 62 62 * previous TSC that was measured (possibly on 63 63 * another CPU) and update the previous TSC timestamp. 64 64 */ 65 - __raw_spin_lock(&sync_lock); 65 + arch_spin_lock(&sync_lock); 66 66 prev = last_tsc; 67 67 rdtsc_barrier(); 68 68 now = get_cycles(); 69 69 rdtsc_barrier(); 70 70 last_tsc = now; 71 - __raw_spin_unlock(&sync_lock); 71 + arch_spin_unlock(&sync_lock); 72 72 73 73 /* 74 74 * Be nice every now and then (and also check whether ··· 87 87 * we saw a time-warp of the TSC going backwards: 88 88 */ 89 89 if (unlikely(prev > now)) { 90 - __raw_spin_lock(&sync_lock); 90 + arch_spin_lock(&sync_lock); 91 91 max_warp = max(max_warp, prev - now); 92 92 nr_warps++; 93 - __raw_spin_unlock(&sync_lock); 93 + arch_spin_unlock(&sync_lock); 94 94 } 95 95 } 96 96 WARN(!(now-start),
+8 -8
arch/x86/xen/spinlock.c
··· 120 120 unsigned short spinners; /* count of waiting cpus */ 121 121 }; 122 122 123 - static int xen_spin_is_locked(struct raw_spinlock *lock) 123 + static int xen_spin_is_locked(struct arch_spinlock *lock) 124 124 { 125 125 struct xen_spinlock *xl = (struct xen_spinlock *)lock; 126 126 127 127 return xl->lock != 0; 128 128 } 129 129 130 - static int xen_spin_is_contended(struct raw_spinlock *lock) 130 + static int xen_spin_is_contended(struct arch_spinlock *lock) 131 131 { 132 132 struct xen_spinlock *xl = (struct xen_spinlock *)lock; 133 133 ··· 136 136 return xl->spinners != 0; 137 137 } 138 138 139 - static int xen_spin_trylock(struct raw_spinlock *lock) 139 + static int xen_spin_trylock(struct arch_spinlock *lock) 140 140 { 141 141 struct xen_spinlock *xl = (struct xen_spinlock *)lock; 142 142 u8 old = 1; ··· 181 181 __get_cpu_var(lock_spinners) = prev; 182 182 } 183 183 184 - static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable) 184 + static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enable) 185 185 { 186 186 struct xen_spinlock *xl = (struct xen_spinlock *)lock; 187 187 struct xen_spinlock *prev; ··· 254 254 return ret; 255 255 } 256 256 257 - static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable) 257 + static inline void __xen_spin_lock(struct arch_spinlock *lock, bool irq_enable) 258 258 { 259 259 struct xen_spinlock *xl = (struct xen_spinlock *)lock; 260 260 unsigned timeout; ··· 291 291 spin_time_accum_total(start_spin); 292 292 } 293 293 294 - static void xen_spin_lock(struct raw_spinlock *lock) 294 + static void xen_spin_lock(struct arch_spinlock *lock) 295 295 { 296 296 __xen_spin_lock(lock, false); 297 297 } 298 298 299 - static void xen_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags) 299 + static void xen_spin_lock_flags(struct arch_spinlock *lock, unsigned long flags) 300 300 { 301 301 __xen_spin_lock(lock, !raw_irqs_disabled_flags(flags)); 302 302 } ··· 317 317 } 318 318 } 319 319 320 - static void xen_spin_unlock(struct raw_spinlock *lock) 320 + static void xen_spin_unlock(struct arch_spinlock *lock) 321 321 { 322 322 struct xen_spinlock *xl = (struct xen_spinlock *)lock; 323 323
+2 -2
arch/xtensa/kernel/irq.c
··· 90 90 } 91 91 92 92 if (i < NR_IRQS) { 93 - spin_lock_irqsave(&irq_desc[i].lock, flags); 93 + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); 94 94 action = irq_desc[i].action; 95 95 if (!action) 96 96 goto skip; ··· 109 109 110 110 seq_putc(p, '\n'); 111 111 skip: 112 - spin_unlock_irqrestore(&irq_desc[i].lock, flags); 112 + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); 113 113 } else if (i == NR_IRQS) { 114 114 seq_printf(p, "NMI: "); 115 115 for_each_online_cpu(j)
+5 -5
include/asm-generic/bitops/atomic.h
··· 15 15 # define ATOMIC_HASH_SIZE 4 16 16 # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) 17 17 18 - extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; 18 + extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; 19 19 20 20 /* Can't use raw_spin_lock_irq because of #include problems, so 21 21 * this is the substitute */ 22 22 #define _atomic_spin_lock_irqsave(l,f) do { \ 23 - raw_spinlock_t *s = ATOMIC_HASH(l); \ 23 + arch_spinlock_t *s = ATOMIC_HASH(l); \ 24 24 local_irq_save(f); \ 25 - __raw_spin_lock(s); \ 25 + arch_spin_lock(s); \ 26 26 } while(0) 27 27 28 28 #define _atomic_spin_unlock_irqrestore(l,f) do { \ 29 - raw_spinlock_t *s = ATOMIC_HASH(l); \ 30 - __raw_spin_unlock(s); \ 29 + arch_spinlock_t *s = ATOMIC_HASH(l); \ 30 + arch_spin_unlock(s); \ 31 31 local_irq_restore(f); \ 32 32 } while(0) 33 33
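These hashed-lock helpers exist so the generic atomic bitops can fall back to a small array of arch spinlocks when the architecture has no native atomics. For reference, the rest of this header uses the pair roughly as follows; this is a sketch of the existing set_bit(), not part of the diff:

static inline void set_bit(int nr, volatile unsigned long *addr)
{
        unsigned long mask = BIT_MASK(nr);
        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
        unsigned long flags;

        _atomic_spin_lock_irqsave(p, flags);    /* hashed arch_spinlock + IRQs off */
        *p |= mask;
        _atomic_spin_unlock_irqrestore(p, flags);
}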
+1 -1
include/linux/hrtimer.h
··· 169 169 * @max_hang_time: Maximum time spent in hrtimer_interrupt 170 170 */ 171 171 struct hrtimer_cpu_base { 172 - spinlock_t lock; 172 + raw_spinlock_t lock; 173 173 struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; 174 174 #ifdef CONFIG_HIGH_RES_TIMERS 175 175 ktime_t expires_next;
+1 -1
include/linux/init_task.h
··· 170 170 .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \ 171 171 .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ 172 172 .fs_excl = ATOMIC_INIT(0), \ 173 - .pi_lock = __SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ 173 + .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ 174 174 .timer_slack_ns = 50000, /* 50 usec default slack */ \ 175 175 .pids = { \ 176 176 [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
+1 -1
include/linux/irq.h
··· 192 192 unsigned int irq_count; /* For detecting broken IRQs */ 193 193 unsigned long last_unhandled; /* Aging timer for unhandled count */ 194 194 unsigned int irqs_unhandled; 195 - spinlock_t lock; 195 + raw_spinlock_t lock; 196 196 #ifdef CONFIG_SMP 197 197 cpumask_var_t affinity; 198 198 unsigned int node;
+1 -1
include/linux/perf_event.h
··· 681 681 * Protect the states of the events in the list, 682 682 * nr_active, and the list: 683 683 */ 684 - spinlock_t lock; 684 + raw_spinlock_t lock; 685 685 /* 686 686 * Protect the list of events. Locking either mutex or lock 687 687 * is sufficient to ensure the list doesn't change; to change
+37 -6
include/linux/plist.h
··· 81 81 struct list_head prio_list; 82 82 struct list_head node_list; 83 83 #ifdef CONFIG_DEBUG_PI_LIST 84 - spinlock_t *lock; 84 + raw_spinlock_t *rawlock; 85 + spinlock_t *spinlock; 85 86 #endif 86 87 }; 87 88 ··· 92 91 }; 93 92 94 93 #ifdef CONFIG_DEBUG_PI_LIST 95 - # define PLIST_HEAD_LOCK_INIT(_lock) .lock = _lock 94 + # define PLIST_HEAD_LOCK_INIT(_lock) .spinlock = _lock 95 + # define PLIST_HEAD_LOCK_INIT_RAW(_lock) .rawlock = _lock 96 96 #else 97 97 # define PLIST_HEAD_LOCK_INIT(_lock) 98 + # define PLIST_HEAD_LOCK_INIT_RAW(_lock) 98 99 #endif 99 100 100 101 #define _PLIST_HEAD_INIT(head) \ ··· 110 107 */ 111 108 #define PLIST_HEAD_INIT(head, _lock) \ 112 109 { \ 113 - _PLIST_HEAD_INIT(head), \ 110 + _PLIST_HEAD_INIT(head), \ 114 111 PLIST_HEAD_LOCK_INIT(&(_lock)) \ 112 + } 113 + 114 + /** 115 + * PLIST_HEAD_INIT_RAW - static struct plist_head initializer 116 + * @head: struct plist_head variable name 117 + * @_lock: lock to initialize for this list 118 + */ 119 + #define PLIST_HEAD_INIT_RAW(head, _lock) \ 120 + { \ 121 + _PLIST_HEAD_INIT(head), \ 122 + PLIST_HEAD_LOCK_INIT_RAW(&(_lock)) \ 115 123 } 116 124 117 125 /** ··· 133 119 #define PLIST_NODE_INIT(node, __prio) \ 134 120 { \ 135 121 .prio = (__prio), \ 136 - .plist = { _PLIST_HEAD_INIT((node).plist) }, \ 122 + .plist = { _PLIST_HEAD_INIT((node).plist) }, \ 137 123 } 138 124 139 125 /** 140 126 * plist_head_init - dynamic struct plist_head initializer 141 127 * @head: &struct plist_head pointer 142 - * @lock: list spinlock, remembered for debugging 128 + * @lock: spinlock protecting the list (debugging) 143 129 */ 144 130 static inline void 145 131 plist_head_init(struct plist_head *head, spinlock_t *lock) ··· 147 133 INIT_LIST_HEAD(&head->prio_list); 148 134 INIT_LIST_HEAD(&head->node_list); 149 135 #ifdef CONFIG_DEBUG_PI_LIST 150 - head->lock = lock; 136 + head->spinlock = lock; 137 + head->rawlock = NULL; 138 + #endif 139 + } 140 + 141 + /** 142 + * plist_head_init_raw - dynamic struct plist_head initializer 143 + * @head: &struct plist_head pointer 144 + * @lock: raw_spinlock protecting the list (debugging) 145 + */ 146 + static inline void 147 + plist_head_init_raw(struct plist_head *head, raw_spinlock_t *lock) 148 + { 149 + INIT_LIST_HEAD(&head->prio_list); 150 + INIT_LIST_HEAD(&head->node_list); 151 + #ifdef CONFIG_DEBUG_PI_LIST 152 + head->rawlock = lock; 153 + head->spinlock = NULL; 151 154 #endif 152 155 } 153 156
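With both lock flavours recorded for debugging, a plist embedded in a structure protected by a raw_spinlock_t must be initialized with the new _raw variants. A minimal sketch using a hypothetical container:

#include <linux/plist.h>
#include <linux/spinlock.h>

struct demo_prio_queue {
        raw_spinlock_t          lock;           /* protects waiters */
        struct plist_head       waiters;
};

static void demo_prio_queue_init(struct demo_prio_queue *q)
{
        raw_spin_lock_init(&q->lock);
        plist_head_init_raw(&q->waiters, &q->lock);
}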
+3 -3
include/linux/rtmutex.h
··· 24 24 * @owner: the mutex owner 25 25 */ 26 26 struct rt_mutex { 27 - spinlock_t wait_lock; 27 + raw_spinlock_t wait_lock; 28 28 struct plist_head wait_list; 29 29 struct task_struct *owner; 30 30 #ifdef CONFIG_DEBUG_RT_MUTEXES ··· 63 63 #endif 64 64 65 65 #define __RT_MUTEX_INITIALIZER(mutexname) \ 66 - { .wait_lock = __SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ 67 - , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list, mutexname.wait_lock) \ 66 + { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ 67 + , .wait_list = PLIST_HEAD_INIT_RAW(mutexname.wait_list, mutexname.wait_lock) \ 68 68 , .owner = NULL \ 69 69 __DEBUG_RT_MUTEX_INITIALIZER(mutexname)} 70 70
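Since rt_mutex::wait_lock is now a raw_spinlock_t, the static initializer above chains __RAW_SPIN_LOCK_UNLOCKED() and PLIST_HEAD_INIT_RAW(), so a statically initialized rt_mutex still looks the same to its users (the variable name below is illustrative):

static struct rt_mutex demo_rtm = __RT_MUTEX_INITIALIZER(demo_rtm);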
+125
include/linux/rwlock.h
··· 1 + #ifndef __LINUX_RWLOCK_H 2 + #define __LINUX_RWLOCK_H 3 + 4 + #ifndef __LINUX_SPINLOCK_H 5 + # error "please don't include this file directly" 6 + #endif 7 + 8 + /* 9 + * rwlock related methods 10 + * 11 + * split out from spinlock.h 12 + * 13 + * portions Copyright 2005, Red Hat, Inc., Ingo Molnar 14 + * Released under the General Public License (GPL). 15 + */ 16 + 17 + #ifdef CONFIG_DEBUG_SPINLOCK 18 + extern void __rwlock_init(rwlock_t *lock, const char *name, 19 + struct lock_class_key *key); 20 + # define rwlock_init(lock) \ 21 + do { \ 22 + static struct lock_class_key __key; \ 23 + \ 24 + __rwlock_init((lock), #lock, &__key); \ 25 + } while (0) 26 + #else 27 + # define rwlock_init(lock) \ 28 + do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0) 29 + #endif 30 + 31 + #ifdef CONFIG_DEBUG_SPINLOCK 32 + extern void do_raw_read_lock(rwlock_t *lock); 33 + #define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock) 34 + extern int do_raw_read_trylock(rwlock_t *lock); 35 + extern void do_raw_read_unlock(rwlock_t *lock); 36 + extern void do_raw_write_lock(rwlock_t *lock); 37 + #define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock) 38 + extern int do_raw_write_trylock(rwlock_t *lock); 39 + extern void do_raw_write_unlock(rwlock_t *lock); 40 + #else 41 + # define do_raw_read_lock(rwlock) arch_read_lock(&(rwlock)->raw_lock) 42 + # define do_raw_read_lock_flags(lock, flags) \ 43 + arch_read_lock_flags(&(lock)->raw_lock, *(flags)) 44 + # define do_raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock) 45 + # define do_raw_read_unlock(rwlock) arch_read_unlock(&(rwlock)->raw_lock) 46 + # define do_raw_write_lock(rwlock) arch_write_lock(&(rwlock)->raw_lock) 47 + # define do_raw_write_lock_flags(lock, flags) \ 48 + arch_write_lock_flags(&(lock)->raw_lock, *(flags)) 49 + # define do_raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock) 50 + # define do_raw_write_unlock(rwlock) arch_write_unlock(&(rwlock)->raw_lock) 51 + #endif 52 + 53 + #define read_can_lock(rwlock) arch_read_can_lock(&(rwlock)->raw_lock) 54 + #define write_can_lock(rwlock) arch_write_can_lock(&(rwlock)->raw_lock) 55 + 56 + /* 57 + * Define the various rw_lock methods. Note we define these 58 + * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various 59 + * methods are defined as nops in the case they are not required. 
60 + */ 61 + #define read_trylock(lock) __cond_lock(lock, _raw_read_trylock(lock)) 62 + #define write_trylock(lock) __cond_lock(lock, _raw_write_trylock(lock)) 63 + 64 + #define write_lock(lock) _raw_write_lock(lock) 65 + #define read_lock(lock) _raw_read_lock(lock) 66 + 67 + #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) 68 + 69 + #define read_lock_irqsave(lock, flags) \ 70 + do { \ 71 + typecheck(unsigned long, flags); \ 72 + flags = _raw_read_lock_irqsave(lock); \ 73 + } while (0) 74 + #define write_lock_irqsave(lock, flags) \ 75 + do { \ 76 + typecheck(unsigned long, flags); \ 77 + flags = _raw_write_lock_irqsave(lock); \ 78 + } while (0) 79 + 80 + #else 81 + 82 + #define read_lock_irqsave(lock, flags) \ 83 + do { \ 84 + typecheck(unsigned long, flags); \ 85 + _raw_read_lock_irqsave(lock, flags); \ 86 + } while (0) 87 + #define write_lock_irqsave(lock, flags) \ 88 + do { \ 89 + typecheck(unsigned long, flags); \ 90 + _raw_write_lock_irqsave(lock, flags); \ 91 + } while (0) 92 + 93 + #endif 94 + 95 + #define read_lock_irq(lock) _raw_read_lock_irq(lock) 96 + #define read_lock_bh(lock) _raw_read_lock_bh(lock) 97 + #define write_lock_irq(lock) _raw_write_lock_irq(lock) 98 + #define write_lock_bh(lock) _raw_write_lock_bh(lock) 99 + #define read_unlock(lock) _raw_read_unlock(lock) 100 + #define write_unlock(lock) _raw_write_unlock(lock) 101 + #define read_unlock_irq(lock) _raw_read_unlock_irq(lock) 102 + #define write_unlock_irq(lock) _raw_write_unlock_irq(lock) 103 + 104 + #define read_unlock_irqrestore(lock, flags) \ 105 + do { \ 106 + typecheck(unsigned long, flags); \ 107 + _raw_read_unlock_irqrestore(lock, flags); \ 108 + } while (0) 109 + #define read_unlock_bh(lock) _raw_read_unlock_bh(lock) 110 + 111 + #define write_unlock_irqrestore(lock, flags) \ 112 + do { \ 113 + typecheck(unsigned long, flags); \ 114 + _raw_write_unlock_irqrestore(lock, flags); \ 115 + } while (0) 116 + #define write_unlock_bh(lock) _raw_write_unlock_bh(lock) 117 + 118 + #define write_trylock_irqsave(lock, flags) \ 119 + ({ \ 120 + local_irq_save(flags); \ 121 + write_trylock(lock) ? \ 122 + 1 : ({ local_irq_restore(flags); 0; }); \ 123 + }) 124 + 125 + #endif /* __LINUX_RWLOCK_H */
+282
include/linux/rwlock_api_smp.h
··· 1 + #ifndef __LINUX_RWLOCK_API_SMP_H 2 + #define __LINUX_RWLOCK_API_SMP_H 3 + 4 + #ifndef __LINUX_SPINLOCK_API_SMP_H 5 + # error "please don't include this file directly" 6 + #endif 7 + 8 + /* 9 + * include/linux/rwlock_api_smp.h 10 + * 11 + * spinlock API declarations on SMP (and debug) 12 + * (implemented in kernel/spinlock.c) 13 + * 14 + * portions Copyright 2005, Red Hat, Inc., Ingo Molnar 15 + * Released under the General Public License (GPL). 16 + */ 17 + 18 + void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock); 19 + void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock); 20 + void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock); 21 + void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock); 22 + void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock); 23 + void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock); 24 + unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock) 25 + __acquires(lock); 26 + unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock) 27 + __acquires(lock); 28 + int __lockfunc _raw_read_trylock(rwlock_t *lock); 29 + int __lockfunc _raw_write_trylock(rwlock_t *lock); 30 + void __lockfunc _raw_read_unlock(rwlock_t *lock) __releases(lock); 31 + void __lockfunc _raw_write_unlock(rwlock_t *lock) __releases(lock); 32 + void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) __releases(lock); 33 + void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) __releases(lock); 34 + void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) __releases(lock); 35 + void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) __releases(lock); 36 + void __lockfunc 37 + _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) 38 + __releases(lock); 39 + void __lockfunc 40 + _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) 41 + __releases(lock); 42 + 43 + #ifdef CONFIG_INLINE_READ_LOCK 44 + #define _raw_read_lock(lock) __raw_read_lock(lock) 45 + #endif 46 + 47 + #ifdef CONFIG_INLINE_WRITE_LOCK 48 + #define _raw_write_lock(lock) __raw_write_lock(lock) 49 + #endif 50 + 51 + #ifdef CONFIG_INLINE_READ_LOCK_BH 52 + #define _raw_read_lock_bh(lock) __raw_read_lock_bh(lock) 53 + #endif 54 + 55 + #ifdef CONFIG_INLINE_WRITE_LOCK_BH 56 + #define _raw_write_lock_bh(lock) __raw_write_lock_bh(lock) 57 + #endif 58 + 59 + #ifdef CONFIG_INLINE_READ_LOCK_IRQ 60 + #define _raw_read_lock_irq(lock) __raw_read_lock_irq(lock) 61 + #endif 62 + 63 + #ifdef CONFIG_INLINE_WRITE_LOCK_IRQ 64 + #define _raw_write_lock_irq(lock) __raw_write_lock_irq(lock) 65 + #endif 66 + 67 + #ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE 68 + #define _raw_read_lock_irqsave(lock) __raw_read_lock_irqsave(lock) 69 + #endif 70 + 71 + #ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE 72 + #define _raw_write_lock_irqsave(lock) __raw_write_lock_irqsave(lock) 73 + #endif 74 + 75 + #ifdef CONFIG_INLINE_READ_TRYLOCK 76 + #define _raw_read_trylock(lock) __raw_read_trylock(lock) 77 + #endif 78 + 79 + #ifdef CONFIG_INLINE_WRITE_TRYLOCK 80 + #define _raw_write_trylock(lock) __raw_write_trylock(lock) 81 + #endif 82 + 83 + #ifdef CONFIG_INLINE_READ_UNLOCK 84 + #define _raw_read_unlock(lock) __raw_read_unlock(lock) 85 + #endif 86 + 87 + #ifdef CONFIG_INLINE_WRITE_UNLOCK 88 + #define _raw_write_unlock(lock) __raw_write_unlock(lock) 89 + #endif 90 + 91 + #ifdef CONFIG_INLINE_READ_UNLOCK_BH 92 + #define _raw_read_unlock_bh(lock) __raw_read_unlock_bh(lock) 93 + #endif 94 + 95 + #ifdef CONFIG_INLINE_WRITE_UNLOCK_BH 96 + #define _raw_write_unlock_bh(lock) 
__raw_write_unlock_bh(lock) 97 + #endif 98 + 99 + #ifdef CONFIG_INLINE_READ_UNLOCK_IRQ 100 + #define _raw_read_unlock_irq(lock) __raw_read_unlock_irq(lock) 101 + #endif 102 + 103 + #ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ 104 + #define _raw_write_unlock_irq(lock) __raw_write_unlock_irq(lock) 105 + #endif 106 + 107 + #ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE 108 + #define _raw_read_unlock_irqrestore(lock, flags) \ 109 + __raw_read_unlock_irqrestore(lock, flags) 110 + #endif 111 + 112 + #ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE 113 + #define _raw_write_unlock_irqrestore(lock, flags) \ 114 + __raw_write_unlock_irqrestore(lock, flags) 115 + #endif 116 + 117 + static inline int __raw_read_trylock(rwlock_t *lock) 118 + { 119 + preempt_disable(); 120 + if (do_raw_read_trylock(lock)) { 121 + rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_); 122 + return 1; 123 + } 124 + preempt_enable(); 125 + return 0; 126 + } 127 + 128 + static inline int __raw_write_trylock(rwlock_t *lock) 129 + { 130 + preempt_disable(); 131 + if (do_raw_write_trylock(lock)) { 132 + rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_); 133 + return 1; 134 + } 135 + preempt_enable(); 136 + return 0; 137 + } 138 + 139 + /* 140 + * If lockdep is enabled then we use the non-preemption spin-ops 141 + * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are 142 + * not re-enabled during lock-acquire (which the preempt-spin-ops do): 143 + */ 144 + #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) 145 + 146 + static inline void __raw_read_lock(rwlock_t *lock) 147 + { 148 + preempt_disable(); 149 + rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); 150 + LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock); 151 + } 152 + 153 + static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock) 154 + { 155 + unsigned long flags; 156 + 157 + local_irq_save(flags); 158 + preempt_disable(); 159 + rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); 160 + LOCK_CONTENDED_FLAGS(lock, do_raw_read_trylock, do_raw_read_lock, 161 + do_raw_read_lock_flags, &flags); 162 + return flags; 163 + } 164 + 165 + static inline void __raw_read_lock_irq(rwlock_t *lock) 166 + { 167 + local_irq_disable(); 168 + preempt_disable(); 169 + rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); 170 + LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock); 171 + } 172 + 173 + static inline void __raw_read_lock_bh(rwlock_t *lock) 174 + { 175 + local_bh_disable(); 176 + preempt_disable(); 177 + rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); 178 + LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock); 179 + } 180 + 181 + static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock) 182 + { 183 + unsigned long flags; 184 + 185 + local_irq_save(flags); 186 + preempt_disable(); 187 + rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); 188 + LOCK_CONTENDED_FLAGS(lock, do_raw_write_trylock, do_raw_write_lock, 189 + do_raw_write_lock_flags, &flags); 190 + return flags; 191 + } 192 + 193 + static inline void __raw_write_lock_irq(rwlock_t *lock) 194 + { 195 + local_irq_disable(); 196 + preempt_disable(); 197 + rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); 198 + LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); 199 + } 200 + 201 + static inline void __raw_write_lock_bh(rwlock_t *lock) 202 + { 203 + local_bh_disable(); 204 + preempt_disable(); 205 + rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); 206 + LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); 207 + } 208 + 209 + static inline 
void __raw_write_lock(rwlock_t *lock) 210 + { 211 + preempt_disable(); 212 + rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); 213 + LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); 214 + } 215 + 216 + #endif /* CONFIG_PREEMPT */ 217 + 218 + static inline void __raw_write_unlock(rwlock_t *lock) 219 + { 220 + rwlock_release(&lock->dep_map, 1, _RET_IP_); 221 + do_raw_write_unlock(lock); 222 + preempt_enable(); 223 + } 224 + 225 + static inline void __raw_read_unlock(rwlock_t *lock) 226 + { 227 + rwlock_release(&lock->dep_map, 1, _RET_IP_); 228 + do_raw_read_unlock(lock); 229 + preempt_enable(); 230 + } 231 + 232 + static inline void 233 + __raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) 234 + { 235 + rwlock_release(&lock->dep_map, 1, _RET_IP_); 236 + do_raw_read_unlock(lock); 237 + local_irq_restore(flags); 238 + preempt_enable(); 239 + } 240 + 241 + static inline void __raw_read_unlock_irq(rwlock_t *lock) 242 + { 243 + rwlock_release(&lock->dep_map, 1, _RET_IP_); 244 + do_raw_read_unlock(lock); 245 + local_irq_enable(); 246 + preempt_enable(); 247 + } 248 + 249 + static inline void __raw_read_unlock_bh(rwlock_t *lock) 250 + { 251 + rwlock_release(&lock->dep_map, 1, _RET_IP_); 252 + do_raw_read_unlock(lock); 253 + preempt_enable_no_resched(); 254 + local_bh_enable_ip((unsigned long)__builtin_return_address(0)); 255 + } 256 + 257 + static inline void __raw_write_unlock_irqrestore(rwlock_t *lock, 258 + unsigned long flags) 259 + { 260 + rwlock_release(&lock->dep_map, 1, _RET_IP_); 261 + do_raw_write_unlock(lock); 262 + local_irq_restore(flags); 263 + preempt_enable(); 264 + } 265 + 266 + static inline void __raw_write_unlock_irq(rwlock_t *lock) 267 + { 268 + rwlock_release(&lock->dep_map, 1, _RET_IP_); 269 + do_raw_write_unlock(lock); 270 + local_irq_enable(); 271 + preempt_enable(); 272 + } 273 + 274 + static inline void __raw_write_unlock_bh(rwlock_t *lock) 275 + { 276 + rwlock_release(&lock->dep_map, 1, _RET_IP_); 277 + do_raw_write_unlock(lock); 278 + preempt_enable_no_resched(); 279 + local_bh_enable_ip((unsigned long)__builtin_return_address(0)); 280 + } 281 + 282 + #endif /* __LINUX_RWLOCK_API_SMP_H */
+56
include/linux/rwlock_types.h
··· 1 + #ifndef __LINUX_RWLOCK_TYPES_H 2 + #define __LINUX_RWLOCK_TYPES_H 3 + 4 + /* 5 + * include/linux/rwlock_types.h - generic rwlock type definitions 6 + * and initializers 7 + * 8 + * portions Copyright 2005, Red Hat, Inc., Ingo Molnar 9 + * Released under the General Public License (GPL). 10 + */ 11 + typedef struct { 12 + arch_rwlock_t raw_lock; 13 + #ifdef CONFIG_GENERIC_LOCKBREAK 14 + unsigned int break_lock; 15 + #endif 16 + #ifdef CONFIG_DEBUG_SPINLOCK 17 + unsigned int magic, owner_cpu; 18 + void *owner; 19 + #endif 20 + #ifdef CONFIG_DEBUG_LOCK_ALLOC 21 + struct lockdep_map dep_map; 22 + #endif 23 + } rwlock_t; 24 + 25 + #define RWLOCK_MAGIC 0xdeaf1eed 26 + 27 + #ifdef CONFIG_DEBUG_LOCK_ALLOC 28 + # define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } 29 + #else 30 + # define RW_DEP_MAP_INIT(lockname) 31 + #endif 32 + 33 + #ifdef CONFIG_DEBUG_SPINLOCK 34 + #define __RW_LOCK_UNLOCKED(lockname) \ 35 + (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \ 36 + .magic = RWLOCK_MAGIC, \ 37 + .owner = SPINLOCK_OWNER_INIT, \ 38 + .owner_cpu = -1, \ 39 + RW_DEP_MAP_INIT(lockname) } 40 + #else 41 + #define __RW_LOCK_UNLOCKED(lockname) \ 42 + (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \ 43 + RW_DEP_MAP_INIT(lockname) } 44 + #endif 45 + 46 + /* 47 + * RW_LOCK_UNLOCKED defeat lockdep state tracking and is hence 48 + * deprecated. 49 + * 50 + * Please use DEFINE_RWLOCK() or __RW_LOCK_UNLOCKED() as appropriate. 51 + */ 52 + #define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init) 53 + 54 + #define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) 55 + 56 + #endif /* __LINUX_RWLOCK_TYPES_H */
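The rwlock type and its initializers now live in rwlock_types.h while the locking methods live in rwlock.h; both are pulled in through linux/spinlock.h, so existing users compile unchanged. A minimal sketch built only from the macros defined in these two new headers:

#include <linux/spinlock.h>     /* includes rwlock.h and rwlock_types.h */

static DEFINE_RWLOCK(demo_rwlock);
static int demo_value;

static int demo_read(void)
{
        int v;

        read_lock(&demo_rwlock);
        v = demo_value;
        read_unlock(&demo_rwlock);
        return v;
}

static void demo_write(int v)
{
        unsigned long flags;

        write_lock_irqsave(&demo_rwlock, flags);
        demo_value = v;
        write_unlock_irqrestore(&demo_rwlock, flags);
}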
+1 -1
include/linux/sched.h
··· 1409 1409 #endif 1410 1410 1411 1411 /* Protection of the PI data structures: */ 1412 - spinlock_t pi_lock; 1412 + raw_spinlock_t pi_lock; 1413 1413 1414 1414 #ifdef CONFIG_RT_MUTEXES 1415 1415 /* PI waiters blocked on a rt_mutex held by this task */
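task_struct::pi_lock becoming a raw_spinlock_t means the scheduler and rtmutex paths must take it with the raw API. A condensed sketch of that caller pattern (the helper name is illustrative):

#include <linux/sched.h>
#include <linux/spinlock.h>

static void demo_inspect_pi_waiters(struct task_struct *p)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&p->pi_lock, flags);
        /* walk the PI waiters plist, recompute the effective priority */
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
}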
+213 -164
include/linux/spinlock.h
··· 8 8 * 9 9 * on SMP builds: 10 10 * 11 - * asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the 11 + * asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the 12 12 * initializers 13 13 * 14 14 * linux/spinlock_types.h: 15 15 * defines the generic type and initializers 16 16 * 17 - * asm/spinlock.h: contains the __raw_spin_*()/etc. lowlevel 17 + * asm/spinlock.h: contains the arch_spin_*()/etc. lowlevel 18 18 * implementations, mostly inline assembly code 19 19 * 20 20 * (also included on UP-debug builds:) ··· 34 34 * defines the generic type and initializers 35 35 * 36 36 * linux/spinlock_up.h: 37 - * contains the __raw_spin_*()/etc. version of UP 37 + * contains the arch_spin_*()/etc. version of UP 38 38 * builds. (which are NOPs on non-debug, non-preempt 39 39 * builds) 40 40 * ··· 75 75 #define __lockfunc __attribute__((section(".spinlock.text"))) 76 76 77 77 /* 78 - * Pull the raw_spinlock_t and raw_rwlock_t definitions: 78 + * Pull the arch_spinlock_t and arch_rwlock_t definitions: 79 79 */ 80 80 #include <linux/spinlock_types.h> 81 81 82 82 /* 83 - * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them): 83 + * Pull the arch_spin*() functions/declarations (UP-nondebug doesnt need them): 84 84 */ 85 85 #ifdef CONFIG_SMP 86 86 # include <asm/spinlock.h> ··· 89 89 #endif 90 90 91 91 #ifdef CONFIG_DEBUG_SPINLOCK 92 - extern void __spin_lock_init(spinlock_t *lock, const char *name, 93 - struct lock_class_key *key); 94 - # define spin_lock_init(lock) \ 92 + extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, 93 + struct lock_class_key *key); 94 + # define raw_spin_lock_init(lock) \ 95 95 do { \ 96 96 static struct lock_class_key __key; \ 97 97 \ 98 - __spin_lock_init((lock), #lock, &__key); \ 98 + __raw_spin_lock_init((lock), #lock, &__key); \ 99 99 } while (0) 100 100 101 101 #else 102 - # define spin_lock_init(lock) \ 103 - do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0) 102 + # define raw_spin_lock_init(lock) \ 103 + do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0) 104 104 #endif 105 105 106 - #ifdef CONFIG_DEBUG_SPINLOCK 107 - extern void __rwlock_init(rwlock_t *lock, const char *name, 108 - struct lock_class_key *key); 109 - # define rwlock_init(lock) \ 110 - do { \ 111 - static struct lock_class_key __key; \ 112 - \ 113 - __rwlock_init((lock), #lock, &__key); \ 114 - } while (0) 115 - #else 116 - # define rwlock_init(lock) \ 117 - do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0) 118 - #endif 119 - 120 - #define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock) 106 + #define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock) 121 107 122 108 #ifdef CONFIG_GENERIC_LOCKBREAK 123 - #define spin_is_contended(lock) ((lock)->break_lock) 109 + #define raw_spin_is_contended(lock) ((lock)->break_lock) 124 110 #else 125 111 126 - #ifdef __raw_spin_is_contended 127 - #define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock) 112 + #ifdef arch_spin_is_contended 113 + #define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock) 128 114 #else 129 - #define spin_is_contended(lock) (((void)(lock), 0)) 130 - #endif /*__raw_spin_is_contended*/ 115 + #define raw_spin_is_contended(lock) (((void)(lock), 0)) 116 + #endif /*arch_spin_is_contended*/ 131 117 #endif 132 118 133 119 /* The lock does not imply full memory barrier. 
*/ ··· 122 136 #endif 123 137 124 138 /** 125 - * spin_unlock_wait - wait until the spinlock gets unlocked 139 + * raw_spin_unlock_wait - wait until the spinlock gets unlocked 126 140 * @lock: the spinlock in question. 127 141 */ 128 - #define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock) 142 + #define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock) 129 143 130 144 #ifdef CONFIG_DEBUG_SPINLOCK 131 - extern void _raw_spin_lock(spinlock_t *lock); 132 - #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) 133 - extern int _raw_spin_trylock(spinlock_t *lock); 134 - extern void _raw_spin_unlock(spinlock_t *lock); 135 - extern void _raw_read_lock(rwlock_t *lock); 136 - #define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock) 137 - extern int _raw_read_trylock(rwlock_t *lock); 138 - extern void _raw_read_unlock(rwlock_t *lock); 139 - extern void _raw_write_lock(rwlock_t *lock); 140 - #define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock) 141 - extern int _raw_write_trylock(rwlock_t *lock); 142 - extern void _raw_write_unlock(rwlock_t *lock); 145 + extern void do_raw_spin_lock(raw_spinlock_t *lock); 146 + #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock) 147 + extern int do_raw_spin_trylock(raw_spinlock_t *lock); 148 + extern void do_raw_spin_unlock(raw_spinlock_t *lock); 143 149 #else 144 - # define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock) 145 - # define _raw_spin_lock_flags(lock, flags) \ 146 - __raw_spin_lock_flags(&(lock)->raw_lock, *(flags)) 147 - # define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock) 148 - # define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock) 149 - # define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock) 150 - # define _raw_read_lock_flags(lock, flags) \ 151 - __raw_read_lock_flags(&(lock)->raw_lock, *(flags)) 152 - # define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock) 153 - # define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock) 154 - # define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock) 155 - # define _raw_write_lock_flags(lock, flags) \ 156 - __raw_write_lock_flags(&(lock)->raw_lock, *(flags)) 157 - # define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock) 158 - # define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock) 159 - #endif 150 + static inline void do_raw_spin_lock(raw_spinlock_t *lock) 151 + { 152 + arch_spin_lock(&lock->raw_lock); 153 + } 160 154 161 - #define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock) 162 - #define write_can_lock(rwlock) __raw_write_can_lock(&(rwlock)->raw_lock) 155 + static inline void 156 + do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) 157 + { 158 + arch_spin_lock_flags(&lock->raw_lock, *flags); 159 + } 160 + 161 + static inline int do_raw_spin_trylock(raw_spinlock_t *lock) 162 + { 163 + return arch_spin_trylock(&(lock)->raw_lock); 164 + } 165 + 166 + static inline void do_raw_spin_unlock(raw_spinlock_t *lock) 167 + { 168 + arch_spin_unlock(&lock->raw_lock); 169 + } 170 + #endif 163 171 164 172 /* 165 - * Define the various spin_lock and rw_lock methods. Note we define these 166 - * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various 167 - * methods are defined as nops in the case they are not required. 173 + * Define the various spin_lock methods. Note we define these 174 + * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. 
The 175 + * various methods are defined as nops in the case they are not 176 + * required. 168 177 */ 169 - #define spin_trylock(lock) __cond_lock(lock, _spin_trylock(lock)) 170 - #define read_trylock(lock) __cond_lock(lock, _read_trylock(lock)) 171 - #define write_trylock(lock) __cond_lock(lock, _write_trylock(lock)) 178 + #define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock)) 172 179 173 - #define spin_lock(lock) _spin_lock(lock) 180 + #define raw_spin_lock(lock) _raw_spin_lock(lock) 174 181 175 182 #ifdef CONFIG_DEBUG_LOCK_ALLOC 176 - # define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass) 177 - # define spin_lock_nest_lock(lock, nest_lock) \ 183 + # define raw_spin_lock_nested(lock, subclass) \ 184 + _raw_spin_lock_nested(lock, subclass) 185 + 186 + # define raw_spin_lock_nest_lock(lock, nest_lock) \ 178 187 do { \ 179 188 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\ 180 - _spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ 189 + _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ 181 190 } while (0) 182 191 #else 183 - # define spin_lock_nested(lock, subclass) _spin_lock(lock) 184 - # define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock) 192 + # define raw_spin_lock_nested(lock, subclass) _raw_spin_lock(lock) 193 + # define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock) 185 194 #endif 186 - 187 - #define write_lock(lock) _write_lock(lock) 188 - #define read_lock(lock) _read_lock(lock) 189 195 190 196 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) 191 197 192 - #define spin_lock_irqsave(lock, flags) \ 198 + #define raw_spin_lock_irqsave(lock, flags) \ 193 199 do { \ 194 200 typecheck(unsigned long, flags); \ 195 - flags = _spin_lock_irqsave(lock); \ 196 - } while (0) 197 - #define read_lock_irqsave(lock, flags) \ 198 - do { \ 199 - typecheck(unsigned long, flags); \ 200 - flags = _read_lock_irqsave(lock); \ 201 - } while (0) 202 - #define write_lock_irqsave(lock, flags) \ 203 - do { \ 204 - typecheck(unsigned long, flags); \ 205 - flags = _write_lock_irqsave(lock); \ 201 + flags = _raw_spin_lock_irqsave(lock); \ 206 202 } while (0) 207 203 208 204 #ifdef CONFIG_DEBUG_LOCK_ALLOC 209 - #define spin_lock_irqsave_nested(lock, flags, subclass) \ 205 + #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ 210 206 do { \ 211 207 typecheck(unsigned long, flags); \ 212 - flags = _spin_lock_irqsave_nested(lock, subclass); \ 208 + flags = _raw_spin_lock_irqsave_nested(lock, subclass); \ 213 209 } while (0) 214 210 #else 215 - #define spin_lock_irqsave_nested(lock, flags, subclass) \ 211 + #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ 216 212 do { \ 217 213 typecheck(unsigned long, flags); \ 218 - flags = _spin_lock_irqsave(lock); \ 214 + flags = _raw_spin_lock_irqsave(lock); \ 219 215 } while (0) 220 216 #endif 221 217 222 218 #else 223 219 224 - #define spin_lock_irqsave(lock, flags) \ 220 + #define raw_spin_lock_irqsave(lock, flags) \ 225 221 do { \ 226 222 typecheck(unsigned long, flags); \ 227 - _spin_lock_irqsave(lock, flags); \ 223 + _raw_spin_lock_irqsave(lock, flags); \ 228 224 } while (0) 229 - #define read_lock_irqsave(lock, flags) \ 230 - do { \ 231 - typecheck(unsigned long, flags); \ 232 - _read_lock_irqsave(lock, flags); \ 233 - } while (0) 234 - #define write_lock_irqsave(lock, flags) \ 235 - do { \ 236 - typecheck(unsigned long, flags); \ 237 - _write_lock_irqsave(lock, flags); \ 238 - } while (0) 239 - #define spin_lock_irqsave_nested(lock, flags, subclass) \ 240 - 
spin_lock_irqsave(lock, flags) 225 + 226 + #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ 227 + raw_spin_lock_irqsave(lock, flags) 241 228 242 229 #endif 243 230 244 - #define spin_lock_irq(lock) _spin_lock_irq(lock) 245 - #define spin_lock_bh(lock) _spin_lock_bh(lock) 246 - #define read_lock_irq(lock) _read_lock_irq(lock) 247 - #define read_lock_bh(lock) _read_lock_bh(lock) 248 - #define write_lock_irq(lock) _write_lock_irq(lock) 249 - #define write_lock_bh(lock) _write_lock_bh(lock) 250 - #define spin_unlock(lock) _spin_unlock(lock) 251 - #define read_unlock(lock) _read_unlock(lock) 252 - #define write_unlock(lock) _write_unlock(lock) 253 - #define spin_unlock_irq(lock) _spin_unlock_irq(lock) 254 - #define read_unlock_irq(lock) _read_unlock_irq(lock) 255 - #define write_unlock_irq(lock) _write_unlock_irq(lock) 231 + #define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock) 232 + #define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock) 233 + #define raw_spin_unlock(lock) _raw_spin_unlock(lock) 234 + #define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock) 256 235 257 - #define spin_unlock_irqrestore(lock, flags) \ 258 - do { \ 259 - typecheck(unsigned long, flags); \ 260 - _spin_unlock_irqrestore(lock, flags); \ 236 + #define raw_spin_unlock_irqrestore(lock, flags) \ 237 + do { \ 238 + typecheck(unsigned long, flags); \ 239 + _raw_spin_unlock_irqrestore(lock, flags); \ 261 240 } while (0) 262 - #define spin_unlock_bh(lock) _spin_unlock_bh(lock) 241 + #define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock) 263 242 264 - #define read_unlock_irqrestore(lock, flags) \ 265 - do { \ 266 - typecheck(unsigned long, flags); \ 267 - _read_unlock_irqrestore(lock, flags); \ 268 - } while (0) 269 - #define read_unlock_bh(lock) _read_unlock_bh(lock) 243 + #define raw_spin_trylock_bh(lock) \ 244 + __cond_lock(lock, _raw_spin_trylock_bh(lock)) 270 245 271 - #define write_unlock_irqrestore(lock, flags) \ 272 - do { \ 273 - typecheck(unsigned long, flags); \ 274 - _write_unlock_irqrestore(lock, flags); \ 275 - } while (0) 276 - #define write_unlock_bh(lock) _write_unlock_bh(lock) 277 - 278 - #define spin_trylock_bh(lock) __cond_lock(lock, _spin_trylock_bh(lock)) 279 - 280 - #define spin_trylock_irq(lock) \ 246 + #define raw_spin_trylock_irq(lock) \ 281 247 ({ \ 282 248 local_irq_disable(); \ 283 - spin_trylock(lock) ? \ 249 + raw_spin_trylock(lock) ? \ 284 250 1 : ({ local_irq_enable(); 0; }); \ 285 251 }) 286 252 287 - #define spin_trylock_irqsave(lock, flags) \ 253 + #define raw_spin_trylock_irqsave(lock, flags) \ 288 254 ({ \ 289 255 local_irq_save(flags); \ 290 - spin_trylock(lock) ? \ 256 + raw_spin_trylock(lock) ? \ 291 257 1 : ({ local_irq_restore(flags); 0; }); \ 292 258 }) 293 259 294 - #define write_trylock_irqsave(lock, flags) \ 295 - ({ \ 296 - local_irq_save(flags); \ 297 - write_trylock(lock) ? \ 298 - 1 : ({ local_irq_restore(flags); 0; }); \ 260 + /** 261 + * raw_spin_can_lock - would raw_spin_trylock() succeed? 262 + * @lock: the spinlock in question. 
263 + */ 264 + #define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock)) 265 + 266 + /* Include rwlock functions */ 267 + #include <linux/rwlock.h> 268 + 269 + /* 270 + * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: 271 + */ 272 + #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) 273 + # include <linux/spinlock_api_smp.h> 274 + #else 275 + # include <linux/spinlock_api_up.h> 276 + #endif 277 + 278 + /* 279 + * Map the spin_lock functions to the raw variants for PREEMPT_RT=n 280 + */ 281 + 282 + static inline raw_spinlock_t *spinlock_check(spinlock_t *lock) 283 + { 284 + return &lock->rlock; 285 + } 286 + 287 + #define spin_lock_init(_lock) \ 288 + do { \ 289 + spinlock_check(_lock); \ 290 + raw_spin_lock_init(&(_lock)->rlock); \ 291 + } while (0) 292 + 293 + static inline void spin_lock(spinlock_t *lock) 294 + { 295 + raw_spin_lock(&lock->rlock); 296 + } 297 + 298 + static inline void spin_lock_bh(spinlock_t *lock) 299 + { 300 + raw_spin_lock_bh(&lock->rlock); 301 + } 302 + 303 + static inline int spin_trylock(spinlock_t *lock) 304 + { 305 + return raw_spin_trylock(&lock->rlock); 306 + } 307 + 308 + #define spin_lock_nested(lock, subclass) \ 309 + do { \ 310 + raw_spin_lock_nested(spinlock_check(lock), subclass); \ 311 + } while (0) 312 + 313 + #define spin_lock_nest_lock(lock, nest_lock) \ 314 + do { \ 315 + raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \ 316 + } while (0) 317 + 318 + static inline void spin_lock_irq(spinlock_t *lock) 319 + { 320 + raw_spin_lock_irq(&lock->rlock); 321 + } 322 + 323 + #define spin_lock_irqsave(lock, flags) \ 324 + do { \ 325 + raw_spin_lock_irqsave(spinlock_check(lock), flags); \ 326 + } while (0) 327 + 328 + #define spin_lock_irqsave_nested(lock, flags, subclass) \ 329 + do { \ 330 + raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \ 331 + } while (0) 332 + 333 + static inline void spin_unlock(spinlock_t *lock) 334 + { 335 + raw_spin_unlock(&lock->rlock); 336 + } 337 + 338 + static inline void spin_unlock_bh(spinlock_t *lock) 339 + { 340 + raw_spin_unlock_bh(&lock->rlock); 341 + } 342 + 343 + static inline void spin_unlock_irq(spinlock_t *lock) 344 + { 345 + raw_spin_unlock_irq(&lock->rlock); 346 + } 347 + 348 + static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) 349 + { 350 + raw_spin_unlock_irqrestore(&lock->rlock, flags); 351 + } 352 + 353 + static inline int spin_trylock_bh(spinlock_t *lock) 354 + { 355 + return raw_spin_trylock_bh(&lock->rlock); 356 + } 357 + 358 + static inline int spin_trylock_irq(spinlock_t *lock) 359 + { 360 + return raw_spin_trylock_irq(&lock->rlock); 361 + } 362 + 363 + #define spin_trylock_irqsave(lock, flags) \ 364 + ({ \ 365 + raw_spin_trylock_irqsave(spinlock_check(lock), flags); \ 299 366 }) 367 + 368 + static inline void spin_unlock_wait(spinlock_t *lock) 369 + { 370 + raw_spin_unlock_wait(&lock->rlock); 371 + } 372 + 373 + static inline int spin_is_locked(spinlock_t *lock) 374 + { 375 + return raw_spin_is_locked(&lock->rlock); 376 + } 377 + 378 + static inline int spin_is_contended(spinlock_t *lock) 379 + { 380 + return raw_spin_is_contended(&lock->rlock); 381 + } 382 + 383 + static inline int spin_can_lock(spinlock_t *lock) 384 + { 385 + return raw_spin_can_lock(&lock->rlock); 386 + } 387 + 388 + static inline void assert_spin_locked(spinlock_t *lock) 389 + { 390 + assert_raw_spin_locked(&lock->rlock); 391 + } 300 392 301 393 /* 302 394 * Pull the atomic_t declaration: ··· 392 328 extern int _atomic_dec_and_lock(atomic_t *atomic, 
spinlock_t *lock); 393 329 #define atomic_dec_and_lock(atomic, lock) \ 394 330 __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) 395 - 396 - /** 397 - * spin_can_lock - would spin_trylock() succeed? 398 - * @lock: the spinlock in question. 399 - */ 400 - #define spin_can_lock(lock) (!spin_is_locked(lock)) 401 - 402 - /* 403 - * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: 404 - */ 405 - #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) 406 - # include <linux/spinlock_api_smp.h> 407 - #else 408 - # include <linux/spinlock_api_up.h> 409 - #endif 410 331 411 332 #endif /* __LINUX_SPINLOCK_H */
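The spinlock.h changes above make spinlock_t a thin wrapper around raw_spinlock_t (the rlock member), with every spin_*() operation forwarding to the matching raw_spin_*() call. A minimal usage sketch follows; the structure and function names are invented for illustration and are not part of this commit:

#include <linux/spinlock.h>

/* Hypothetical example structure. */
struct demo_dev {
	spinlock_t	queue_lock;	/* ordinary lock; may become a sleeping lock on PREEMPT_RT */
	raw_spinlock_t	hw_lock;	/* must always stay a true spinning lock */
};

static void demo_init(struct demo_dev *dev)
{
	spin_lock_init(&dev->queue_lock);	/* type-checks, then raw_spin_lock_init(&...->rlock) */
	raw_spin_lock_init(&dev->hw_lock);
}

static void demo_touch_hw(struct demo_dev *dev)
{
	unsigned long flags;

	spin_lock(&dev->queue_lock);		/* forwards to raw_spin_lock(&dev->queue_lock.rlock) */
	raw_spin_lock_irqsave(&dev->hw_lock, flags);
	/* ... program the hardware ... */
	raw_spin_unlock_irqrestore(&dev->hw_lock, flags);
	spin_unlock(&dev->queue_lock);
}

With PREEMPT_RT=n the two classes behave the same; the split only becomes visible once spinlock_t is substituted by a sleeping lock.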
+58 -300
include/linux/spinlock_api_smp.h
··· 17 17 18 18 int in_lock_functions(unsigned long addr); 19 19 20 - #define assert_spin_locked(x) BUG_ON(!spin_is_locked(x)) 20 + #define assert_raw_spin_locked(x) BUG_ON(!raw_spin_is_locked(x)) 21 21 22 - void __lockfunc _spin_lock(spinlock_t *lock) __acquires(lock); 23 - void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) 24 - __acquires(lock); 25 - void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map) 26 - __acquires(lock); 27 - void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock); 28 - void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock); 29 - void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(lock); 30 - void __lockfunc _read_lock_bh(rwlock_t *lock) __acquires(lock); 31 - void __lockfunc _write_lock_bh(rwlock_t *lock) __acquires(lock); 32 - void __lockfunc _spin_lock_irq(spinlock_t *lock) __acquires(lock); 33 - void __lockfunc _read_lock_irq(rwlock_t *lock) __acquires(lock); 34 - void __lockfunc _write_lock_irq(rwlock_t *lock) __acquires(lock); 35 - unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) 36 - __acquires(lock); 37 - unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass) 38 - __acquires(lock); 39 - unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) 40 - __acquires(lock); 41 - unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) 42 - __acquires(lock); 43 - int __lockfunc _spin_trylock(spinlock_t *lock); 44 - int __lockfunc _read_trylock(rwlock_t *lock); 45 - int __lockfunc _write_trylock(rwlock_t *lock); 46 - int __lockfunc _spin_trylock_bh(spinlock_t *lock); 47 - void __lockfunc _spin_unlock(spinlock_t *lock) __releases(lock); 48 - void __lockfunc _read_unlock(rwlock_t *lock) __releases(lock); 49 - void __lockfunc _write_unlock(rwlock_t *lock) __releases(lock); 50 - void __lockfunc _spin_unlock_bh(spinlock_t *lock) __releases(lock); 51 - void __lockfunc _read_unlock_bh(rwlock_t *lock) __releases(lock); 52 - void __lockfunc _write_unlock_bh(rwlock_t *lock) __releases(lock); 53 - void __lockfunc _spin_unlock_irq(spinlock_t *lock) __releases(lock); 54 - void __lockfunc _read_unlock_irq(rwlock_t *lock) __releases(lock); 55 - void __lockfunc _write_unlock_irq(rwlock_t *lock) __releases(lock); 56 - void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) 57 - __releases(lock); 58 - void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) 59 - __releases(lock); 60 - void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) 61 - __releases(lock); 22 + void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock); 23 + void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) 24 + __acquires(lock); 25 + void __lockfunc 26 + _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map) 27 + __acquires(lock); 28 + void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock); 29 + void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock) 30 + __acquires(lock); 31 + 32 + unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock) 33 + __acquires(lock); 34 + unsigned long __lockfunc 35 + _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass) 36 + __acquires(lock); 37 + int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock); 38 + int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock); 39 + void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) __releases(lock); 40 + void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) __releases(lock); 
41 + void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) __releases(lock); 42 + void __lockfunc 43 + _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) 44 + __releases(lock); 62 45 63 46 #ifdef CONFIG_INLINE_SPIN_LOCK 64 - #define _spin_lock(lock) __spin_lock(lock) 65 - #endif 66 - 67 - #ifdef CONFIG_INLINE_READ_LOCK 68 - #define _read_lock(lock) __read_lock(lock) 69 - #endif 70 - 71 - #ifdef CONFIG_INLINE_WRITE_LOCK 72 - #define _write_lock(lock) __write_lock(lock) 47 + #define _raw_spin_lock(lock) __raw_spin_lock(lock) 73 48 #endif 74 49 75 50 #ifdef CONFIG_INLINE_SPIN_LOCK_BH 76 - #define _spin_lock_bh(lock) __spin_lock_bh(lock) 77 - #endif 78 - 79 - #ifdef CONFIG_INLINE_READ_LOCK_BH 80 - #define _read_lock_bh(lock) __read_lock_bh(lock) 81 - #endif 82 - 83 - #ifdef CONFIG_INLINE_WRITE_LOCK_BH 84 - #define _write_lock_bh(lock) __write_lock_bh(lock) 51 + #define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock) 85 52 #endif 86 53 87 54 #ifdef CONFIG_INLINE_SPIN_LOCK_IRQ 88 - #define _spin_lock_irq(lock) __spin_lock_irq(lock) 89 - #endif 90 - 91 - #ifdef CONFIG_INLINE_READ_LOCK_IRQ 92 - #define _read_lock_irq(lock) __read_lock_irq(lock) 93 - #endif 94 - 95 - #ifdef CONFIG_INLINE_WRITE_LOCK_IRQ 96 - #define _write_lock_irq(lock) __write_lock_irq(lock) 55 + #define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock) 97 56 #endif 98 57 99 58 #ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE 100 - #define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock) 101 - #endif 102 - 103 - #ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE 104 - #define _read_lock_irqsave(lock) __read_lock_irqsave(lock) 105 - #endif 106 - 107 - #ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE 108 - #define _write_lock_irqsave(lock) __write_lock_irqsave(lock) 59 + #define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock) 109 60 #endif 110 61 111 62 #ifdef CONFIG_INLINE_SPIN_TRYLOCK 112 - #define _spin_trylock(lock) __spin_trylock(lock) 113 - #endif 114 - 115 - #ifdef CONFIG_INLINE_READ_TRYLOCK 116 - #define _read_trylock(lock) __read_trylock(lock) 117 - #endif 118 - 119 - #ifdef CONFIG_INLINE_WRITE_TRYLOCK 120 - #define _write_trylock(lock) __write_trylock(lock) 63 + #define _raw_spin_trylock(lock) __raw_spin_trylock(lock) 121 64 #endif 122 65 123 66 #ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH 124 - #define _spin_trylock_bh(lock) __spin_trylock_bh(lock) 67 + #define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock) 125 68 #endif 126 69 127 70 #ifdef CONFIG_INLINE_SPIN_UNLOCK 128 - #define _spin_unlock(lock) __spin_unlock(lock) 129 - #endif 130 - 131 - #ifdef CONFIG_INLINE_READ_UNLOCK 132 - #define _read_unlock(lock) __read_unlock(lock) 133 - #endif 134 - 135 - #ifdef CONFIG_INLINE_WRITE_UNLOCK 136 - #define _write_unlock(lock) __write_unlock(lock) 71 + #define _raw_spin_unlock(lock) __raw_spin_unlock(lock) 137 72 #endif 138 73 139 74 #ifdef CONFIG_INLINE_SPIN_UNLOCK_BH 140 - #define _spin_unlock_bh(lock) __spin_unlock_bh(lock) 141 - #endif 142 - 143 - #ifdef CONFIG_INLINE_READ_UNLOCK_BH 144 - #define _read_unlock_bh(lock) __read_unlock_bh(lock) 145 - #endif 146 - 147 - #ifdef CONFIG_INLINE_WRITE_UNLOCK_BH 148 - #define _write_unlock_bh(lock) __write_unlock_bh(lock) 75 + #define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock) 149 76 #endif 150 77 151 78 #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ 152 - #define _spin_unlock_irq(lock) __spin_unlock_irq(lock) 153 - #endif 154 - 155 - #ifdef CONFIG_INLINE_READ_UNLOCK_IRQ 156 - #define _read_unlock_irq(lock) __read_unlock_irq(lock) 157 - #endif 158 - 159 - #ifdef 
CONFIG_INLINE_WRITE_UNLOCK_IRQ 160 - #define _write_unlock_irq(lock) __write_unlock_irq(lock) 79 + #define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock) 161 80 #endif 162 81 163 82 #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE 164 - #define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags) 83 + #define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags) 165 84 #endif 166 85 167 - #ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE 168 - #define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags) 169 - #endif 170 - 171 - #ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE 172 - #define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags) 173 - #endif 174 - 175 - static inline int __spin_trylock(spinlock_t *lock) 86 + static inline int __raw_spin_trylock(raw_spinlock_t *lock) 176 87 { 177 88 preempt_disable(); 178 - if (_raw_spin_trylock(lock)) { 89 + if (do_raw_spin_trylock(lock)) { 179 90 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); 180 - return 1; 181 - } 182 - preempt_enable(); 183 - return 0; 184 - } 185 - 186 - static inline int __read_trylock(rwlock_t *lock) 187 - { 188 - preempt_disable(); 189 - if (_raw_read_trylock(lock)) { 190 - rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_); 191 - return 1; 192 - } 193 - preempt_enable(); 194 - return 0; 195 - } 196 - 197 - static inline int __write_trylock(rwlock_t *lock) 198 - { 199 - preempt_disable(); 200 - if (_raw_write_trylock(lock)) { 201 - rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_); 202 91 return 1; 203 92 } 204 93 preempt_enable(); ··· 101 212 */ 102 213 #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) 103 214 104 - static inline void __read_lock(rwlock_t *lock) 105 - { 106 - preempt_disable(); 107 - rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); 108 - LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); 109 - } 110 - 111 - static inline unsigned long __spin_lock_irqsave(spinlock_t *lock) 215 + static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock) 112 216 { 113 217 unsigned long flags; 114 218 ··· 110 228 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); 111 229 /* 112 230 * On lockdep we dont want the hand-coded irq-enable of 113 - * _raw_spin_lock_flags() code, because lockdep assumes 231 + * do_raw_spin_lock_flags() code, because lockdep assumes 114 232 * that interrupts are not re-enabled during lock-acquire: 115 233 */ 116 234 #ifdef CONFIG_LOCKDEP 117 - LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); 235 + LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); 118 236 #else 119 - _raw_spin_lock_flags(lock, &flags); 237 + do_raw_spin_lock_flags(lock, &flags); 120 238 #endif 121 239 return flags; 122 240 } 123 241 124 - static inline void __spin_lock_irq(spinlock_t *lock) 242 + static inline void __raw_spin_lock_irq(raw_spinlock_t *lock) 125 243 { 126 244 local_irq_disable(); 127 245 preempt_disable(); 128 246 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); 129 - LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); 247 + LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); 130 248 } 131 249 132 - static inline void __spin_lock_bh(spinlock_t *lock) 250 + static inline void __raw_spin_lock_bh(raw_spinlock_t *lock) 133 251 { 134 252 local_bh_disable(); 135 253 preempt_disable(); 136 254 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); 137 - LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); 255 + LOCK_CONTENDED(lock, do_raw_spin_trylock, 
do_raw_spin_lock); 138 256 } 139 257 140 - static inline unsigned long __read_lock_irqsave(rwlock_t *lock) 141 - { 142 - unsigned long flags; 143 - 144 - local_irq_save(flags); 145 - preempt_disable(); 146 - rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); 147 - LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock, 148 - _raw_read_lock_flags, &flags); 149 - return flags; 150 - } 151 - 152 - static inline void __read_lock_irq(rwlock_t *lock) 153 - { 154 - local_irq_disable(); 155 - preempt_disable(); 156 - rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); 157 - LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); 158 - } 159 - 160 - static inline void __read_lock_bh(rwlock_t *lock) 161 - { 162 - local_bh_disable(); 163 - preempt_disable(); 164 - rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); 165 - LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); 166 - } 167 - 168 - static inline unsigned long __write_lock_irqsave(rwlock_t *lock) 169 - { 170 - unsigned long flags; 171 - 172 - local_irq_save(flags); 173 - preempt_disable(); 174 - rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); 175 - LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock, 176 - _raw_write_lock_flags, &flags); 177 - return flags; 178 - } 179 - 180 - static inline void __write_lock_irq(rwlock_t *lock) 181 - { 182 - local_irq_disable(); 183 - preempt_disable(); 184 - rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); 185 - LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); 186 - } 187 - 188 - static inline void __write_lock_bh(rwlock_t *lock) 189 - { 190 - local_bh_disable(); 191 - preempt_disable(); 192 - rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); 193 - LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); 194 - } 195 - 196 - static inline void __spin_lock(spinlock_t *lock) 258 + static inline void __raw_spin_lock(raw_spinlock_t *lock) 197 259 { 198 260 preempt_disable(); 199 261 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); 200 - LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); 201 - } 202 - 203 - static inline void __write_lock(rwlock_t *lock) 204 - { 205 - preempt_disable(); 206 - rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); 207 - LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); 262 + LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); 208 263 } 209 264 210 265 #endif /* CONFIG_PREEMPT */ 211 266 212 - static inline void __spin_unlock(spinlock_t *lock) 267 + static inline void __raw_spin_unlock(raw_spinlock_t *lock) 213 268 { 214 269 spin_release(&lock->dep_map, 1, _RET_IP_); 215 - _raw_spin_unlock(lock); 270 + do_raw_spin_unlock(lock); 216 271 preempt_enable(); 217 272 } 218 273 219 - static inline void __write_unlock(rwlock_t *lock) 220 - { 221 - rwlock_release(&lock->dep_map, 1, _RET_IP_); 222 - _raw_write_unlock(lock); 223 - preempt_enable(); 224 - } 225 - 226 - static inline void __read_unlock(rwlock_t *lock) 227 - { 228 - rwlock_release(&lock->dep_map, 1, _RET_IP_); 229 - _raw_read_unlock(lock); 230 - preempt_enable(); 231 - } 232 - 233 - static inline void __spin_unlock_irqrestore(spinlock_t *lock, 274 + static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock, 234 275 unsigned long flags) 235 276 { 236 277 spin_release(&lock->dep_map, 1, _RET_IP_); 237 - _raw_spin_unlock(lock); 278 + do_raw_spin_unlock(lock); 238 279 local_irq_restore(flags); 239 280 preempt_enable(); 240 281 } 241 282 242 - static inline void __spin_unlock_irq(spinlock_t *lock) 283 + static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock) 243 284 
{ 244 285 spin_release(&lock->dep_map, 1, _RET_IP_); 245 - _raw_spin_unlock(lock); 286 + do_raw_spin_unlock(lock); 246 287 local_irq_enable(); 247 288 preempt_enable(); 248 289 } 249 290 250 - static inline void __spin_unlock_bh(spinlock_t *lock) 291 + static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock) 251 292 { 252 293 spin_release(&lock->dep_map, 1, _RET_IP_); 253 - _raw_spin_unlock(lock); 294 + do_raw_spin_unlock(lock); 254 295 preempt_enable_no_resched(); 255 296 local_bh_enable_ip((unsigned long)__builtin_return_address(0)); 256 297 } 257 298 258 - static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) 259 - { 260 - rwlock_release(&lock->dep_map, 1, _RET_IP_); 261 - _raw_read_unlock(lock); 262 - local_irq_restore(flags); 263 - preempt_enable(); 264 - } 265 - 266 - static inline void __read_unlock_irq(rwlock_t *lock) 267 - { 268 - rwlock_release(&lock->dep_map, 1, _RET_IP_); 269 - _raw_read_unlock(lock); 270 - local_irq_enable(); 271 - preempt_enable(); 272 - } 273 - 274 - static inline void __read_unlock_bh(rwlock_t *lock) 275 - { 276 - rwlock_release(&lock->dep_map, 1, _RET_IP_); 277 - _raw_read_unlock(lock); 278 - preempt_enable_no_resched(); 279 - local_bh_enable_ip((unsigned long)__builtin_return_address(0)); 280 - } 281 - 282 - static inline void __write_unlock_irqrestore(rwlock_t *lock, 283 - unsigned long flags) 284 - { 285 - rwlock_release(&lock->dep_map, 1, _RET_IP_); 286 - _raw_write_unlock(lock); 287 - local_irq_restore(flags); 288 - preempt_enable(); 289 - } 290 - 291 - static inline void __write_unlock_irq(rwlock_t *lock) 292 - { 293 - rwlock_release(&lock->dep_map, 1, _RET_IP_); 294 - _raw_write_unlock(lock); 295 - local_irq_enable(); 296 - preempt_enable(); 297 - } 298 - 299 - static inline void __write_unlock_bh(rwlock_t *lock) 300 - { 301 - rwlock_release(&lock->dep_map, 1, _RET_IP_); 302 - _raw_write_unlock(lock); 303 - preempt_enable_no_resched(); 304 - local_bh_enable_ip((unsigned long)__builtin_return_address(0)); 305 - } 306 - 307 - static inline int __spin_trylock_bh(spinlock_t *lock) 299 + static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock) 308 300 { 309 301 local_bh_disable(); 310 302 preempt_disable(); 311 - if (_raw_spin_trylock(lock)) { 303 + if (do_raw_spin_trylock(lock)) { 312 304 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); 313 305 return 1; 314 306 } ··· 190 434 local_bh_enable_ip((unsigned long)__builtin_return_address(0)); 191 435 return 0; 192 436 } 437 + 438 + #include <linux/rwlock_api_smp.h> 193 439 194 440 #endif /* __LINUX_SPINLOCK_API_SMP_H */
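For orientation, a sketch of the call layering that the SMP API header above implements for the raw lock class. Every name used here appears in the hunks; only the chain is drawn out, and the out-of-line _raw_*() definitions live elsewhere in the series:

/*
 * raw_spin_lock(lock)				spinlock.h macro
 *   -> _raw_spin_lock(lock)			out-of-line entry point, or mapped
 *						directly to __raw_spin_lock() when
 *						CONFIG_INLINE_SPIN_LOCK is set
 *     -> __raw_spin_lock(lock)			static inline above:
 *		preempt_disable();
 *		spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 *		LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 *       -> do_raw_spin_lock(lock)		debug-aware helper (plain arch call
 *						in the non-debug case)
 *         -> arch_spin_lock(&lock->raw_lock)	per-architecture implementation
 */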
+35 -31
include/linux/spinlock_api_up.h
··· 16 16 17 17 #define in_lock_functions(ADDR) 0 18 18 19 - #define assert_spin_locked(lock) do { (void)(lock); } while (0) 19 + #define assert_raw_spin_locked(lock) do { (void)(lock); } while (0) 20 20 21 21 /* 22 22 * In the UP-nondebug case there's no real locking going on, so the ··· 40 40 do { preempt_enable(); __release(lock); (void)(lock); } while (0) 41 41 42 42 #define __UNLOCK_BH(lock) \ 43 - do { preempt_enable_no_resched(); local_bh_enable(); __release(lock); (void)(lock); } while (0) 43 + do { preempt_enable_no_resched(); local_bh_enable(); \ 44 + __release(lock); (void)(lock); } while (0) 44 45 45 46 #define __UNLOCK_IRQ(lock) \ 46 47 do { local_irq_enable(); __UNLOCK(lock); } while (0) ··· 49 48 #define __UNLOCK_IRQRESTORE(lock, flags) \ 50 49 do { local_irq_restore(flags); __UNLOCK(lock); } while (0) 51 50 52 - #define _spin_lock(lock) __LOCK(lock) 53 - #define _spin_lock_nested(lock, subclass) __LOCK(lock) 54 - #define _read_lock(lock) __LOCK(lock) 55 - #define _write_lock(lock) __LOCK(lock) 56 - #define _spin_lock_bh(lock) __LOCK_BH(lock) 57 - #define _read_lock_bh(lock) __LOCK_BH(lock) 58 - #define _write_lock_bh(lock) __LOCK_BH(lock) 59 - #define _spin_lock_irq(lock) __LOCK_IRQ(lock) 60 - #define _read_lock_irq(lock) __LOCK_IRQ(lock) 61 - #define _write_lock_irq(lock) __LOCK_IRQ(lock) 62 - #define _spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) 63 - #define _read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) 64 - #define _write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) 65 - #define _spin_trylock(lock) ({ __LOCK(lock); 1; }) 66 - #define _read_trylock(lock) ({ __LOCK(lock); 1; }) 67 - #define _write_trylock(lock) ({ __LOCK(lock); 1; }) 68 - #define _spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; }) 69 - #define _spin_unlock(lock) __UNLOCK(lock) 70 - #define _read_unlock(lock) __UNLOCK(lock) 71 - #define _write_unlock(lock) __UNLOCK(lock) 72 - #define _spin_unlock_bh(lock) __UNLOCK_BH(lock) 73 - #define _write_unlock_bh(lock) __UNLOCK_BH(lock) 74 - #define _read_unlock_bh(lock) __UNLOCK_BH(lock) 75 - #define _spin_unlock_irq(lock) __UNLOCK_IRQ(lock) 76 - #define _read_unlock_irq(lock) __UNLOCK_IRQ(lock) 77 - #define _write_unlock_irq(lock) __UNLOCK_IRQ(lock) 78 - #define _spin_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) 79 - #define _read_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) 80 - #define _write_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) 51 + #define _raw_spin_lock(lock) __LOCK(lock) 52 + #define _raw_spin_lock_nested(lock, subclass) __LOCK(lock) 53 + #define _raw_read_lock(lock) __LOCK(lock) 54 + #define _raw_write_lock(lock) __LOCK(lock) 55 + #define _raw_spin_lock_bh(lock) __LOCK_BH(lock) 56 + #define _raw_read_lock_bh(lock) __LOCK_BH(lock) 57 + #define _raw_write_lock_bh(lock) __LOCK_BH(lock) 58 + #define _raw_spin_lock_irq(lock) __LOCK_IRQ(lock) 59 + #define _raw_read_lock_irq(lock) __LOCK_IRQ(lock) 60 + #define _raw_write_lock_irq(lock) __LOCK_IRQ(lock) 61 + #define _raw_spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) 62 + #define _raw_read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) 63 + #define _raw_write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) 64 + #define _raw_spin_trylock(lock) ({ __LOCK(lock); 1; }) 65 + #define _raw_read_trylock(lock) ({ __LOCK(lock); 1; }) 66 + #define _raw_write_trylock(lock) ({ __LOCK(lock); 1; }) 67 + #define _raw_spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; }) 68 + #define _raw_spin_unlock(lock) 
__UNLOCK(lock) 69 + #define _raw_read_unlock(lock) __UNLOCK(lock) 70 + #define _raw_write_unlock(lock) __UNLOCK(lock) 71 + #define _raw_spin_unlock_bh(lock) __UNLOCK_BH(lock) 72 + #define _raw_write_unlock_bh(lock) __UNLOCK_BH(lock) 73 + #define _raw_read_unlock_bh(lock) __UNLOCK_BH(lock) 74 + #define _raw_spin_unlock_irq(lock) __UNLOCK_IRQ(lock) 75 + #define _raw_read_unlock_irq(lock) __UNLOCK_IRQ(lock) 76 + #define _raw_write_unlock_irq(lock) __UNLOCK_IRQ(lock) 77 + #define _raw_spin_unlock_irqrestore(lock, flags) \ 78 + __UNLOCK_IRQRESTORE(lock, flags) 79 + #define _raw_read_unlock_irqrestore(lock, flags) \ 80 + __UNLOCK_IRQRESTORE(lock, flags) 81 + #define _raw_write_unlock_irqrestore(lock, flags) \ 82 + __UNLOCK_IRQRESTORE(lock, flags) 81 83 82 84 #endif /* __LINUX_SPINLOCK_API_UP_H */
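On UP builds the _raw_*() entry points above reduce to preemption and interrupt state changes only; the lock argument merely feeds the sparse annotations. Roughly, per the __LOCK*/__UNLOCK* helpers in the non-debug case, _raw_spin_lock_irqsave()/_raw_spin_unlock_irqrestore() expand to:

/* Approximate UP expansion; no shared lock state is actually touched. */
local_irq_save(flags);
preempt_disable();
__acquire(lock);
(void)(lock);

/* ... critical section ... */

local_irq_restore(flags);
preempt_enable();
__release(lock);
(void)(lock);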
+44 -48
include/linux/spinlock_types.h
··· 17 17 18 18 #include <linux/lockdep.h> 19 19 20 - typedef struct { 21 - raw_spinlock_t raw_lock; 20 + typedef struct raw_spinlock { 21 + arch_spinlock_t raw_lock; 22 22 #ifdef CONFIG_GENERIC_LOCKBREAK 23 23 unsigned int break_lock; 24 24 #endif ··· 29 29 #ifdef CONFIG_DEBUG_LOCK_ALLOC 30 30 struct lockdep_map dep_map; 31 31 #endif 32 - } spinlock_t; 32 + } raw_spinlock_t; 33 33 34 34 #define SPINLOCK_MAGIC 0xdead4ead 35 - 36 - typedef struct { 37 - raw_rwlock_t raw_lock; 38 - #ifdef CONFIG_GENERIC_LOCKBREAK 39 - unsigned int break_lock; 40 - #endif 41 - #ifdef CONFIG_DEBUG_SPINLOCK 42 - unsigned int magic, owner_cpu; 43 - void *owner; 44 - #endif 45 - #ifdef CONFIG_DEBUG_LOCK_ALLOC 46 - struct lockdep_map dep_map; 47 - #endif 48 - } rwlock_t; 49 - 50 - #define RWLOCK_MAGIC 0xdeaf1eed 51 35 52 36 #define SPINLOCK_OWNER_INIT ((void *)-1L) 53 37 ··· 41 57 # define SPIN_DEP_MAP_INIT(lockname) 42 58 #endif 43 59 44 - #ifdef CONFIG_DEBUG_LOCK_ALLOC 45 - # define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } 60 + #ifdef CONFIG_DEBUG_SPINLOCK 61 + # define SPIN_DEBUG_INIT(lockname) \ 62 + .magic = SPINLOCK_MAGIC, \ 63 + .owner_cpu = -1, \ 64 + .owner = SPINLOCK_OWNER_INIT, 46 65 #else 47 - # define RW_DEP_MAP_INIT(lockname) 66 + # define SPIN_DEBUG_INIT(lockname) 48 67 #endif 49 68 50 - #ifdef CONFIG_DEBUG_SPINLOCK 51 - # define __SPIN_LOCK_UNLOCKED(lockname) \ 52 - (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ 53 - .magic = SPINLOCK_MAGIC, \ 54 - .owner = SPINLOCK_OWNER_INIT, \ 55 - .owner_cpu = -1, \ 56 - SPIN_DEP_MAP_INIT(lockname) } 57 - #define __RW_LOCK_UNLOCKED(lockname) \ 58 - (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ 59 - .magic = RWLOCK_MAGIC, \ 60 - .owner = SPINLOCK_OWNER_INIT, \ 61 - .owner_cpu = -1, \ 62 - RW_DEP_MAP_INIT(lockname) } 63 - #else 64 - # define __SPIN_LOCK_UNLOCKED(lockname) \ 65 - (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ 66 - SPIN_DEP_MAP_INIT(lockname) } 67 - #define __RW_LOCK_UNLOCKED(lockname) \ 68 - (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ 69 - RW_DEP_MAP_INIT(lockname) } 69 + #define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ 70 + { \ 71 + .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ 72 + SPIN_DEBUG_INIT(lockname) \ 73 + SPIN_DEP_MAP_INIT(lockname) } 74 + 75 + #define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ 76 + (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) 77 + 78 + #define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) 79 + 80 + typedef struct spinlock { 81 + union { 82 + struct raw_spinlock rlock; 83 + 84 + #ifdef CONFIG_DEBUG_LOCK_ALLOC 85 + # define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) 86 + struct { 87 + u8 __padding[LOCK_PADSIZE]; 88 + struct lockdep_map dep_map; 89 + }; 70 90 #endif 91 + }; 92 + } spinlock_t; 93 + 94 + #define __SPIN_LOCK_INITIALIZER(lockname) \ 95 + { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } 96 + 97 + #define __SPIN_LOCK_UNLOCKED(lockname) \ 98 + (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) 71 99 72 100 /* 73 - * SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED defeat lockdep state tracking and 74 - * are hence deprecated. 75 - * Please use DEFINE_SPINLOCK()/DEFINE_RWLOCK() or 76 - * __SPIN_LOCK_UNLOCKED()/__RW_LOCK_UNLOCKED() as appropriate. 101 + * SPIN_LOCK_UNLOCKED defeats lockdep state tracking and is hence 102 + * deprecated. 103 + * Please use DEFINE_SPINLOCK() or __SPIN_LOCK_UNLOCKED() as 104 + * appropriate. 
77 105 */ 78 106 #define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init) 79 - #define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init) 80 107 81 108 #define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) 82 - #define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) 109 + 110 + #include <linux/rwlock_types.h> 83 111 84 112 #endif /* __LINUX_SPINLOCK_TYPES_H */
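With the initializers introduced in spinlock_types.h, static definitions look like the sketch below (lock names invented for illustration). The union inside spinlock_t keeps the wrapper the same size as raw_spinlock_t and, when lockdep is enabled, places dep_map at the same offset in both, so &lock->dep_map works on either type:

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_raw_lock);		/* raw_spinlock_t */
static DEFINE_SPINLOCK(demo_lock);			/* spinlock_t wrapper */

static raw_spinlock_t demo_raw2 = __RAW_SPIN_LOCK_UNLOCKED(demo_raw2);
static spinlock_t demo_lock2 = __SPIN_LOCK_UNLOCKED(demo_lock2);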
+6 -6
include/linux/spinlock_types_up.h
··· 16 16 17 17 typedef struct { 18 18 volatile unsigned int slock; 19 - } raw_spinlock_t; 19 + } arch_spinlock_t; 20 20 21 - #define __RAW_SPIN_LOCK_UNLOCKED { 1 } 21 + #define __ARCH_SPIN_LOCK_UNLOCKED { 1 } 22 22 23 23 #else 24 24 25 - typedef struct { } raw_spinlock_t; 25 + typedef struct { } arch_spinlock_t; 26 26 27 - #define __RAW_SPIN_LOCK_UNLOCKED { } 27 + #define __ARCH_SPIN_LOCK_UNLOCKED { } 28 28 29 29 #endif 30 30 31 31 typedef struct { 32 32 /* no debug version on UP */ 33 - } raw_rwlock_t; 33 + } arch_rwlock_t; 34 34 35 - #define __RAW_RW_LOCK_UNLOCKED { } 35 + #define __ARCH_RW_LOCK_UNLOCKED { } 36 36 37 37 #endif /* __LINUX_SPINLOCK_TYPES_UP_H */
+21 -21
include/linux/spinlock_up.h
··· 18 18 */ 19 19 20 20 #ifdef CONFIG_DEBUG_SPINLOCK 21 - #define __raw_spin_is_locked(x) ((x)->slock == 0) 21 + #define arch_spin_is_locked(x) ((x)->slock == 0) 22 22 23 - static inline void __raw_spin_lock(raw_spinlock_t *lock) 23 + static inline void arch_spin_lock(arch_spinlock_t *lock) 24 24 { 25 25 lock->slock = 0; 26 26 } 27 27 28 28 static inline void 29 - __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) 29 + arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) 30 30 { 31 31 local_irq_save(flags); 32 32 lock->slock = 0; 33 33 } 34 34 35 - static inline int __raw_spin_trylock(raw_spinlock_t *lock) 35 + static inline int arch_spin_trylock(arch_spinlock_t *lock) 36 36 { 37 37 char oldval = lock->slock; 38 38 ··· 41 41 return oldval > 0; 42 42 } 43 43 44 - static inline void __raw_spin_unlock(raw_spinlock_t *lock) 44 + static inline void arch_spin_unlock(arch_spinlock_t *lock) 45 45 { 46 46 lock->slock = 1; 47 47 } ··· 49 49 /* 50 50 * Read-write spinlocks. No debug version. 51 51 */ 52 - #define __raw_read_lock(lock) do { (void)(lock); } while (0) 53 - #define __raw_write_lock(lock) do { (void)(lock); } while (0) 54 - #define __raw_read_trylock(lock) ({ (void)(lock); 1; }) 55 - #define __raw_write_trylock(lock) ({ (void)(lock); 1; }) 56 - #define __raw_read_unlock(lock) do { (void)(lock); } while (0) 57 - #define __raw_write_unlock(lock) do { (void)(lock); } while (0) 52 + #define arch_read_lock(lock) do { (void)(lock); } while (0) 53 + #define arch_write_lock(lock) do { (void)(lock); } while (0) 54 + #define arch_read_trylock(lock) ({ (void)(lock); 1; }) 55 + #define arch_write_trylock(lock) ({ (void)(lock); 1; }) 56 + #define arch_read_unlock(lock) do { (void)(lock); } while (0) 57 + #define arch_write_unlock(lock) do { (void)(lock); } while (0) 58 58 59 59 #else /* DEBUG_SPINLOCK */ 60 - #define __raw_spin_is_locked(lock) ((void)(lock), 0) 60 + #define arch_spin_is_locked(lock) ((void)(lock), 0) 61 61 /* for sched.c and kernel_lock.c: */ 62 - # define __raw_spin_lock(lock) do { (void)(lock); } while (0) 63 - # define __raw_spin_lock_flags(lock, flags) do { (void)(lock); } while (0) 64 - # define __raw_spin_unlock(lock) do { (void)(lock); } while (0) 65 - # define __raw_spin_trylock(lock) ({ (void)(lock); 1; }) 62 + # define arch_spin_lock(lock) do { (void)(lock); } while (0) 63 + # define arch_spin_lock_flags(lock, flags) do { (void)(lock); } while (0) 64 + # define arch_spin_unlock(lock) do { (void)(lock); } while (0) 65 + # define arch_spin_trylock(lock) ({ (void)(lock); 1; }) 66 66 #endif /* DEBUG_SPINLOCK */ 67 67 68 - #define __raw_spin_is_contended(lock) (((void)(lock), 0)) 68 + #define arch_spin_is_contended(lock) (((void)(lock), 0)) 69 69 70 - #define __raw_read_can_lock(lock) (((void)(lock), 1)) 71 - #define __raw_write_can_lock(lock) (((void)(lock), 1)) 70 + #define arch_read_can_lock(lock) (((void)(lock), 1)) 71 + #define arch_write_can_lock(lock) (((void)(lock), 1)) 72 72 73 - #define __raw_spin_unlock_wait(lock) \ 74 - do { cpu_relax(); } while (__raw_spin_is_locked(lock)) 73 + #define arch_spin_unlock_wait(lock) \ 74 + do { cpu_relax(); } while (arch_spin_is_locked(lock)) 75 75 76 76 #endif /* __LINUX_SPINLOCK_UP_H */
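The UP headers above also show the rename at the bottom of the stack: what an architecture provides is now arch_spinlock_t plus arch_spin_*()/arch_read_*()/arch_write_*(). Purely as a hypothetical sketch of the shape an SMP port supplies under these names — this is not taken from any architecture touched by the series:

/* Illustrative test-and-set lock only; real ports use arch-specific atomics. */
typedef struct {
	volatile unsigned int slock;
} arch_spinlock_t;

#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	while (__sync_lock_test_and_set(&lock->slock, 1))
		cpu_relax();			/* spin until the holder releases */
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __sync_lock_test_and_set(&lock->slock, 1) == 0;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__sync_lock_release(&lock->slock);
}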
+1 -1
kernel/exit.c
··· 933 933 * an exiting task cleaning up the robust pi futexes. 934 934 */ 935 935 smp_mb(); 936 - spin_unlock_wait(&tsk->pi_lock); 936 + raw_spin_unlock_wait(&tsk->pi_lock); 937 937 938 938 if (unlikely(in_atomic())) 939 939 printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
+2 -2
kernel/fork.c
··· 939 939 940 940 static void rt_mutex_init_task(struct task_struct *p) 941 941 { 942 - spin_lock_init(&p->pi_lock); 942 + raw_spin_lock_init(&p->pi_lock); 943 943 #ifdef CONFIG_RT_MUTEXES 944 - plist_head_init(&p->pi_waiters, &p->pi_lock); 944 + plist_head_init_raw(&p->pi_waiters, &p->pi_lock); 945 945 p->pi_blocked_on = NULL; 946 946 #endif 947 947 }
+25 -25
kernel/futex.c
··· 403 403 * and has cleaned up the pi_state already 404 404 */ 405 405 if (pi_state->owner) { 406 - spin_lock_irq(&pi_state->owner->pi_lock); 406 + raw_spin_lock_irq(&pi_state->owner->pi_lock); 407 407 list_del_init(&pi_state->list); 408 - spin_unlock_irq(&pi_state->owner->pi_lock); 408 + raw_spin_unlock_irq(&pi_state->owner->pi_lock); 409 409 410 410 rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner); 411 411 } ··· 470 470 * pi_state_list anymore, but we have to be careful 471 471 * versus waiters unqueueing themselves: 472 472 */ 473 - spin_lock_irq(&curr->pi_lock); 473 + raw_spin_lock_irq(&curr->pi_lock); 474 474 while (!list_empty(head)) { 475 475 476 476 next = head->next; 477 477 pi_state = list_entry(next, struct futex_pi_state, list); 478 478 key = pi_state->key; 479 479 hb = hash_futex(&key); 480 - spin_unlock_irq(&curr->pi_lock); 480 + raw_spin_unlock_irq(&curr->pi_lock); 481 481 482 482 spin_lock(&hb->lock); 483 483 484 - spin_lock_irq(&curr->pi_lock); 484 + raw_spin_lock_irq(&curr->pi_lock); 485 485 /* 486 486 * We dropped the pi-lock, so re-check whether this 487 487 * task still owns the PI-state: ··· 495 495 WARN_ON(list_empty(&pi_state->list)); 496 496 list_del_init(&pi_state->list); 497 497 pi_state->owner = NULL; 498 - spin_unlock_irq(&curr->pi_lock); 498 + raw_spin_unlock_irq(&curr->pi_lock); 499 499 500 500 rt_mutex_unlock(&pi_state->pi_mutex); 501 501 502 502 spin_unlock(&hb->lock); 503 503 504 - spin_lock_irq(&curr->pi_lock); 504 + raw_spin_lock_irq(&curr->pi_lock); 505 505 } 506 - spin_unlock_irq(&curr->pi_lock); 506 + raw_spin_unlock_irq(&curr->pi_lock); 507 507 } 508 508 509 509 static int ··· 558 558 * change of the task flags, we do this protected by 559 559 * p->pi_lock: 560 560 */ 561 - spin_lock_irq(&p->pi_lock); 561 + raw_spin_lock_irq(&p->pi_lock); 562 562 if (unlikely(p->flags & PF_EXITING)) { 563 563 /* 564 564 * The task is on the way out. When PF_EXITPIDONE is ··· 567 567 */ 568 568 int ret = (p->flags & PF_EXITPIDONE) ? 
-ESRCH : -EAGAIN; 569 569 570 - spin_unlock_irq(&p->pi_lock); 570 + raw_spin_unlock_irq(&p->pi_lock); 571 571 put_task_struct(p); 572 572 return ret; 573 573 } ··· 586 586 WARN_ON(!list_empty(&pi_state->list)); 587 587 list_add(&pi_state->list, &p->pi_state_list); 588 588 pi_state->owner = p; 589 - spin_unlock_irq(&p->pi_lock); 589 + raw_spin_unlock_irq(&p->pi_lock); 590 590 591 591 put_task_struct(p); 592 592 ··· 760 760 if (!pi_state) 761 761 return -EINVAL; 762 762 763 - spin_lock(&pi_state->pi_mutex.wait_lock); 763 + raw_spin_lock(&pi_state->pi_mutex.wait_lock); 764 764 new_owner = rt_mutex_next_owner(&pi_state->pi_mutex); 765 765 766 766 /* ··· 789 789 else if (curval != uval) 790 790 ret = -EINVAL; 791 791 if (ret) { 792 - spin_unlock(&pi_state->pi_mutex.wait_lock); 792 + raw_spin_unlock(&pi_state->pi_mutex.wait_lock); 793 793 return ret; 794 794 } 795 795 } 796 796 797 - spin_lock_irq(&pi_state->owner->pi_lock); 797 + raw_spin_lock_irq(&pi_state->owner->pi_lock); 798 798 WARN_ON(list_empty(&pi_state->list)); 799 799 list_del_init(&pi_state->list); 800 - spin_unlock_irq(&pi_state->owner->pi_lock); 800 + raw_spin_unlock_irq(&pi_state->owner->pi_lock); 801 801 802 - spin_lock_irq(&new_owner->pi_lock); 802 + raw_spin_lock_irq(&new_owner->pi_lock); 803 803 WARN_ON(!list_empty(&pi_state->list)); 804 804 list_add(&pi_state->list, &new_owner->pi_state_list); 805 805 pi_state->owner = new_owner; 806 - spin_unlock_irq(&new_owner->pi_lock); 806 + raw_spin_unlock_irq(&new_owner->pi_lock); 807 807 808 - spin_unlock(&pi_state->pi_mutex.wait_lock); 808 + raw_spin_unlock(&pi_state->pi_mutex.wait_lock); 809 809 rt_mutex_unlock(&pi_state->pi_mutex); 810 810 811 811 return 0; ··· 1010 1010 plist_add(&q->list, &hb2->chain); 1011 1011 q->lock_ptr = &hb2->lock; 1012 1012 #ifdef CONFIG_DEBUG_PI_LIST 1013 - q->list.plist.lock = &hb2->lock; 1013 + q->list.plist.spinlock = &hb2->lock; 1014 1014 #endif 1015 1015 } 1016 1016 get_futex_key_refs(key2); ··· 1046 1046 1047 1047 q->lock_ptr = &hb->lock; 1048 1048 #ifdef CONFIG_DEBUG_PI_LIST 1049 - q->list.plist.lock = &hb->lock; 1049 + q->list.plist.spinlock = &hb->lock; 1050 1050 #endif 1051 1051 1052 1052 wake_up_state(q->task, TASK_NORMAL); ··· 1394 1394 1395 1395 plist_node_init(&q->list, prio); 1396 1396 #ifdef CONFIG_DEBUG_PI_LIST 1397 - q->list.plist.lock = &hb->lock; 1397 + q->list.plist.spinlock = &hb->lock; 1398 1398 #endif 1399 1399 plist_add(&q->list, &hb->chain); 1400 1400 q->task = current; ··· 1529 1529 * itself. 1530 1530 */ 1531 1531 if (pi_state->owner != NULL) { 1532 - spin_lock_irq(&pi_state->owner->pi_lock); 1532 + raw_spin_lock_irq(&pi_state->owner->pi_lock); 1533 1533 WARN_ON(list_empty(&pi_state->list)); 1534 1534 list_del_init(&pi_state->list); 1535 - spin_unlock_irq(&pi_state->owner->pi_lock); 1535 + raw_spin_unlock_irq(&pi_state->owner->pi_lock); 1536 1536 } 1537 1537 1538 1538 pi_state->owner = newowner; 1539 1539 1540 - spin_lock_irq(&newowner->pi_lock); 1540 + raw_spin_lock_irq(&newowner->pi_lock); 1541 1541 WARN_ON(!list_empty(&pi_state->list)); 1542 1542 list_add(&pi_state->list, &newowner->pi_state_list); 1543 - spin_unlock_irq(&newowner->pi_lock); 1543 + raw_spin_unlock_irq(&newowner->pi_lock); 1544 1544 return 0; 1545 1545 1546 1546 /*
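Although the futex hunks are shown truncated, the locking pattern they establish is consistent: the hash-bucket lock stays a spinlock_t while task->pi_lock and pi_mutex.wait_lock become raw, so the raw locks always nest inside the ordinary one — the order a PREEMPT_RT build with sleeping spinlocks would require. A condensed sketch of that nesting, with placeholder pointers and all error handling dropped:

spin_lock(&hb->lock);					/* spinlock_t, outermost */
raw_spin_lock(&pi_state->pi_mutex.wait_lock);		/* raw */
raw_spin_lock_irq(&pi_state->owner->pi_lock);		/* raw, innermost */
/* ... move pi_state from the old owner to the new one ... */
raw_spin_unlock_irq(&pi_state->owner->pi_lock);
raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
spin_unlock(&hb->lock);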
+25 -25
kernel/hrtimer.c
··· 127 127 for (;;) { 128 128 base = timer->base; 129 129 if (likely(base != NULL)) { 130 - spin_lock_irqsave(&base->cpu_base->lock, *flags); 130 + raw_spin_lock_irqsave(&base->cpu_base->lock, *flags); 131 131 if (likely(base == timer->base)) 132 132 return base; 133 133 /* The timer has migrated to another CPU: */ 134 - spin_unlock_irqrestore(&base->cpu_base->lock, *flags); 134 + raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags); 135 135 } 136 136 cpu_relax(); 137 137 } ··· 208 208 209 209 /* See the comment in lock_timer_base() */ 210 210 timer->base = NULL; 211 - spin_unlock(&base->cpu_base->lock); 212 - spin_lock(&new_base->cpu_base->lock); 211 + raw_spin_unlock(&base->cpu_base->lock); 212 + raw_spin_lock(&new_base->cpu_base->lock); 213 213 214 214 if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) { 215 215 cpu = this_cpu; 216 - spin_unlock(&new_base->cpu_base->lock); 217 - spin_lock(&base->cpu_base->lock); 216 + raw_spin_unlock(&new_base->cpu_base->lock); 217 + raw_spin_lock(&base->cpu_base->lock); 218 218 timer->base = base; 219 219 goto again; 220 220 } ··· 230 230 { 231 231 struct hrtimer_clock_base *base = timer->base; 232 232 233 - spin_lock_irqsave(&base->cpu_base->lock, *flags); 233 + raw_spin_lock_irqsave(&base->cpu_base->lock, *flags); 234 234 235 235 return base; 236 236 } ··· 628 628 base = &__get_cpu_var(hrtimer_bases); 629 629 630 630 /* Adjust CLOCK_REALTIME offset */ 631 - spin_lock(&base->lock); 631 + raw_spin_lock(&base->lock); 632 632 base->clock_base[CLOCK_REALTIME].offset = 633 633 timespec_to_ktime(realtime_offset); 634 634 635 635 hrtimer_force_reprogram(base, 0); 636 - spin_unlock(&base->lock); 636 + raw_spin_unlock(&base->lock); 637 637 } 638 638 639 639 /* ··· 694 694 { 695 695 if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) { 696 696 if (wakeup) { 697 - spin_unlock(&base->cpu_base->lock); 697 + raw_spin_unlock(&base->cpu_base->lock); 698 698 raise_softirq_irqoff(HRTIMER_SOFTIRQ); 699 - spin_lock(&base->cpu_base->lock); 699 + raw_spin_lock(&base->cpu_base->lock); 700 700 } else 701 701 __raise_softirq_irqoff(HRTIMER_SOFTIRQ); 702 702 ··· 790 790 static inline 791 791 void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) 792 792 { 793 - spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags); 793 + raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags); 794 794 } 795 795 796 796 /** ··· 1123 1123 unsigned long flags; 1124 1124 int i; 1125 1125 1126 - spin_lock_irqsave(&cpu_base->lock, flags); 1126 + raw_spin_lock_irqsave(&cpu_base->lock, flags); 1127 1127 1128 1128 if (!hrtimer_hres_active()) { 1129 1129 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) { ··· 1140 1140 } 1141 1141 } 1142 1142 1143 - spin_unlock_irqrestore(&cpu_base->lock, flags); 1143 + raw_spin_unlock_irqrestore(&cpu_base->lock, flags); 1144 1144 1145 1145 if (mindelta.tv64 < 0) 1146 1146 mindelta.tv64 = 0; ··· 1222 1222 * they get migrated to another cpu, therefore its safe to unlock 1223 1223 * the timer base. 
1224 1224 */ 1225 - spin_unlock(&cpu_base->lock); 1225 + raw_spin_unlock(&cpu_base->lock); 1226 1226 trace_hrtimer_expire_entry(timer, now); 1227 1227 restart = fn(timer); 1228 1228 trace_hrtimer_expire_exit(timer); 1229 - spin_lock(&cpu_base->lock); 1229 + raw_spin_lock(&cpu_base->lock); 1230 1230 1231 1231 /* 1232 1232 * Note: We clear the CALLBACK bit after enqueue_hrtimer and ··· 1261 1261 retry: 1262 1262 expires_next.tv64 = KTIME_MAX; 1263 1263 1264 - spin_lock(&cpu_base->lock); 1264 + raw_spin_lock(&cpu_base->lock); 1265 1265 /* 1266 1266 * We set expires_next to KTIME_MAX here with cpu_base->lock 1267 1267 * held to prevent that a timer is enqueued in our queue via ··· 1317 1317 * against it. 1318 1318 */ 1319 1319 cpu_base->expires_next = expires_next; 1320 - spin_unlock(&cpu_base->lock); 1320 + raw_spin_unlock(&cpu_base->lock); 1321 1321 1322 1322 /* Reprogramming necessary ? */ 1323 1323 if (expires_next.tv64 == KTIME_MAX || ··· 1457 1457 gettime = 0; 1458 1458 } 1459 1459 1460 - spin_lock(&cpu_base->lock); 1460 + raw_spin_lock(&cpu_base->lock); 1461 1461 1462 1462 while ((node = base->first)) { 1463 1463 struct hrtimer *timer; ··· 1469 1469 1470 1470 __run_hrtimer(timer, &base->softirq_time); 1471 1471 } 1472 - spin_unlock(&cpu_base->lock); 1472 + raw_spin_unlock(&cpu_base->lock); 1473 1473 } 1474 1474 } 1475 1475 ··· 1625 1625 struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu); 1626 1626 int i; 1627 1627 1628 - spin_lock_init(&cpu_base->lock); 1628 + raw_spin_lock_init(&cpu_base->lock); 1629 1629 1630 1630 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) 1631 1631 cpu_base->clock_base[i].cpu_base = cpu_base; ··· 1683 1683 * The caller is globally serialized and nobody else 1684 1684 * takes two locks at once, deadlock is not possible. 1685 1685 */ 1686 - spin_lock(&new_base->lock); 1687 - spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); 1686 + raw_spin_lock(&new_base->lock); 1687 + raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); 1688 1688 1689 1689 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { 1690 1690 migrate_hrtimer_list(&old_base->clock_base[i], 1691 1691 &new_base->clock_base[i]); 1692 1692 } 1693 1693 1694 - spin_unlock(&old_base->lock); 1695 - spin_unlock(&new_base->lock); 1694 + raw_spin_unlock(&old_base->lock); 1695 + raw_spin_unlock(&new_base->lock); 1696 1696 1697 1697 /* Check, if we got expired work to do */ 1698 1698 __hrtimer_peek_ahead_timers();
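The hrtimer migration path above has to hold two per-CPU bases at once; with the raw conversion the lockdep nesting annotation carries over unchanged. Condensed from the migration hunk, error handling omitted:

raw_spin_lock(&new_base->lock);
raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

/* ... move each clock base's timers from old_base to new_base ... */

raw_spin_unlock(&old_base->lock);
raw_spin_unlock(&new_base->lock);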
+2 -2
kernel/hw_breakpoint.c
··· 96 96 97 97 list = &ctx->event_list; 98 98 99 - spin_lock_irqsave(&ctx->lock, flags); 99 + raw_spin_lock_irqsave(&ctx->lock, flags); 100 100 101 101 /* 102 102 * The current breakpoint counter is not included in the list ··· 107 107 count++; 108 108 } 109 109 110 - spin_unlock_irqrestore(&ctx->lock, flags); 110 + raw_spin_unlock_irqrestore(&ctx->lock, flags); 111 111 112 112 return count; 113 113 }
+10 -10
kernel/irq/autoprobe.c
··· 45 45 * flush such a longstanding irq before considering it as spurious. 46 46 */ 47 47 for_each_irq_desc_reverse(i, desc) { 48 - spin_lock_irq(&desc->lock); 48 + raw_spin_lock_irq(&desc->lock); 49 49 if (!desc->action && !(desc->status & IRQ_NOPROBE)) { 50 50 /* 51 51 * An old-style architecture might still have ··· 61 61 desc->chip->set_type(i, IRQ_TYPE_PROBE); 62 62 desc->chip->startup(i); 63 63 } 64 - spin_unlock_irq(&desc->lock); 64 + raw_spin_unlock_irq(&desc->lock); 65 65 } 66 66 67 67 /* Wait for longstanding interrupts to trigger. */ ··· 73 73 * happened in the previous stage, it may have masked itself) 74 74 */ 75 75 for_each_irq_desc_reverse(i, desc) { 76 - spin_lock_irq(&desc->lock); 76 + raw_spin_lock_irq(&desc->lock); 77 77 if (!desc->action && !(desc->status & IRQ_NOPROBE)) { 78 78 desc->status |= IRQ_AUTODETECT | IRQ_WAITING; 79 79 if (desc->chip->startup(i)) 80 80 desc->status |= IRQ_PENDING; 81 81 } 82 - spin_unlock_irq(&desc->lock); 82 + raw_spin_unlock_irq(&desc->lock); 83 83 } 84 84 85 85 /* ··· 91 91 * Now filter out any obviously spurious interrupts 92 92 */ 93 93 for_each_irq_desc(i, desc) { 94 - spin_lock_irq(&desc->lock); 94 + raw_spin_lock_irq(&desc->lock); 95 95 status = desc->status; 96 96 97 97 if (status & IRQ_AUTODETECT) { ··· 103 103 if (i < 32) 104 104 mask |= 1 << i; 105 105 } 106 - spin_unlock_irq(&desc->lock); 106 + raw_spin_unlock_irq(&desc->lock); 107 107 } 108 108 109 109 return mask; ··· 129 129 int i; 130 130 131 131 for_each_irq_desc(i, desc) { 132 - spin_lock_irq(&desc->lock); 132 + raw_spin_lock_irq(&desc->lock); 133 133 status = desc->status; 134 134 135 135 if (status & IRQ_AUTODETECT) { ··· 139 139 desc->status = status & ~IRQ_AUTODETECT; 140 140 desc->chip->shutdown(i); 141 141 } 142 - spin_unlock_irq(&desc->lock); 142 + raw_spin_unlock_irq(&desc->lock); 143 143 } 144 144 mutex_unlock(&probing_active); 145 145 ··· 171 171 unsigned int status; 172 172 173 173 for_each_irq_desc(i, desc) { 174 - spin_lock_irq(&desc->lock); 174 + raw_spin_lock_irq(&desc->lock); 175 175 status = desc->status; 176 176 177 177 if (status & IRQ_AUTODETECT) { ··· 183 183 desc->status = status & ~IRQ_AUTODETECT; 184 184 desc->chip->shutdown(i); 185 185 } 186 - spin_unlock_irq(&desc->lock); 186 + raw_spin_unlock_irq(&desc->lock); 187 187 } 188 188 mutex_unlock(&probing_active); 189 189
+43 -43
kernel/irq/chip.c
··· 34 34 } 35 35 36 36 /* Ensure we don't have left over values from a previous use of this irq */ 37 - spin_lock_irqsave(&desc->lock, flags); 37 + raw_spin_lock_irqsave(&desc->lock, flags); 38 38 desc->status = IRQ_DISABLED; 39 39 desc->chip = &no_irq_chip; 40 40 desc->handle_irq = handle_bad_irq; ··· 51 51 cpumask_clear(desc->pending_mask); 52 52 #endif 53 53 #endif 54 - spin_unlock_irqrestore(&desc->lock, flags); 54 + raw_spin_unlock_irqrestore(&desc->lock, flags); 55 55 } 56 56 57 57 /** ··· 68 68 return; 69 69 } 70 70 71 - spin_lock_irqsave(&desc->lock, flags); 71 + raw_spin_lock_irqsave(&desc->lock, flags); 72 72 if (desc->action) { 73 - spin_unlock_irqrestore(&desc->lock, flags); 73 + raw_spin_unlock_irqrestore(&desc->lock, flags); 74 74 WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n", 75 75 irq); 76 76 return; ··· 82 82 desc->chip = &no_irq_chip; 83 83 desc->name = NULL; 84 84 clear_kstat_irqs(desc); 85 - spin_unlock_irqrestore(&desc->lock, flags); 85 + raw_spin_unlock_irqrestore(&desc->lock, flags); 86 86 } 87 87 88 88 ··· 104 104 if (!chip) 105 105 chip = &no_irq_chip; 106 106 107 - spin_lock_irqsave(&desc->lock, flags); 107 + raw_spin_lock_irqsave(&desc->lock, flags); 108 108 irq_chip_set_defaults(chip); 109 109 desc->chip = chip; 110 - spin_unlock_irqrestore(&desc->lock, flags); 110 + raw_spin_unlock_irqrestore(&desc->lock, flags); 111 111 112 112 return 0; 113 113 } ··· 133 133 if (type == IRQ_TYPE_NONE) 134 134 return 0; 135 135 136 - spin_lock_irqsave(&desc->lock, flags); 136 + raw_spin_lock_irqsave(&desc->lock, flags); 137 137 ret = __irq_set_trigger(desc, irq, type); 138 - spin_unlock_irqrestore(&desc->lock, flags); 138 + raw_spin_unlock_irqrestore(&desc->lock, flags); 139 139 return ret; 140 140 } 141 141 EXPORT_SYMBOL(set_irq_type); ··· 158 158 return -EINVAL; 159 159 } 160 160 161 - spin_lock_irqsave(&desc->lock, flags); 161 + raw_spin_lock_irqsave(&desc->lock, flags); 162 162 desc->handler_data = data; 163 - spin_unlock_irqrestore(&desc->lock, flags); 163 + raw_spin_unlock_irqrestore(&desc->lock, flags); 164 164 return 0; 165 165 } 166 166 EXPORT_SYMBOL(set_irq_data); ··· 183 183 return -EINVAL; 184 184 } 185 185 186 - spin_lock_irqsave(&desc->lock, flags); 186 + raw_spin_lock_irqsave(&desc->lock, flags); 187 187 desc->msi_desc = entry; 188 188 if (entry) 189 189 entry->irq = irq; 190 - spin_unlock_irqrestore(&desc->lock, flags); 190 + raw_spin_unlock_irqrestore(&desc->lock, flags); 191 191 return 0; 192 192 } 193 193 ··· 214 214 return -EINVAL; 215 215 } 216 216 217 - spin_lock_irqsave(&desc->lock, flags); 217 + raw_spin_lock_irqsave(&desc->lock, flags); 218 218 desc->chip_data = data; 219 - spin_unlock_irqrestore(&desc->lock, flags); 219 + raw_spin_unlock_irqrestore(&desc->lock, flags); 220 220 221 221 return 0; 222 222 } ··· 241 241 if (!desc) 242 242 return; 243 243 244 - spin_lock_irqsave(&desc->lock, flags); 244 + raw_spin_lock_irqsave(&desc->lock, flags); 245 245 if (nest) 246 246 desc->status |= IRQ_NESTED_THREAD; 247 247 else 248 248 desc->status &= ~IRQ_NESTED_THREAD; 249 - spin_unlock_irqrestore(&desc->lock, flags); 249 + raw_spin_unlock_irqrestore(&desc->lock, flags); 250 250 } 251 251 EXPORT_SYMBOL_GPL(set_irq_nested_thread); 252 252 ··· 343 343 344 344 might_sleep(); 345 345 346 - spin_lock_irq(&desc->lock); 346 + raw_spin_lock_irq(&desc->lock); 347 347 348 348 kstat_incr_irqs_this_cpu(irq, desc); 349 349 ··· 352 352 goto out_unlock; 353 353 354 354 desc->status |= IRQ_INPROGRESS; 355 - spin_unlock_irq(&desc->lock); 355 + 
raw_spin_unlock_irq(&desc->lock); 356 356 357 357 action_ret = action->thread_fn(action->irq, action->dev_id); 358 358 if (!noirqdebug) 359 359 note_interrupt(irq, desc, action_ret); 360 360 361 - spin_lock_irq(&desc->lock); 361 + raw_spin_lock_irq(&desc->lock); 362 362 desc->status &= ~IRQ_INPROGRESS; 363 363 364 364 out_unlock: 365 - spin_unlock_irq(&desc->lock); 365 + raw_spin_unlock_irq(&desc->lock); 366 366 } 367 367 EXPORT_SYMBOL_GPL(handle_nested_irq); 368 368 ··· 384 384 struct irqaction *action; 385 385 irqreturn_t action_ret; 386 386 387 - spin_lock(&desc->lock); 387 + raw_spin_lock(&desc->lock); 388 388 389 389 if (unlikely(desc->status & IRQ_INPROGRESS)) 390 390 goto out_unlock; ··· 396 396 goto out_unlock; 397 397 398 398 desc->status |= IRQ_INPROGRESS; 399 - spin_unlock(&desc->lock); 399 + raw_spin_unlock(&desc->lock); 400 400 401 401 action_ret = handle_IRQ_event(irq, action); 402 402 if (!noirqdebug) 403 403 note_interrupt(irq, desc, action_ret); 404 404 405 - spin_lock(&desc->lock); 405 + raw_spin_lock(&desc->lock); 406 406 desc->status &= ~IRQ_INPROGRESS; 407 407 out_unlock: 408 - spin_unlock(&desc->lock); 408 + raw_spin_unlock(&desc->lock); 409 409 } 410 410 411 411 /** ··· 424 424 struct irqaction *action; 425 425 irqreturn_t action_ret; 426 426 427 - spin_lock(&desc->lock); 427 + raw_spin_lock(&desc->lock); 428 428 mask_ack_irq(desc, irq); 429 429 430 430 if (unlikely(desc->status & IRQ_INPROGRESS)) ··· 441 441 goto out_unlock; 442 442 443 443 desc->status |= IRQ_INPROGRESS; 444 - spin_unlock(&desc->lock); 444 + raw_spin_unlock(&desc->lock); 445 445 446 446 action_ret = handle_IRQ_event(irq, action); 447 447 if (!noirqdebug) 448 448 note_interrupt(irq, desc, action_ret); 449 449 450 - spin_lock(&desc->lock); 450 + raw_spin_lock(&desc->lock); 451 451 desc->status &= ~IRQ_INPROGRESS; 452 452 453 453 if (unlikely(desc->status & IRQ_ONESHOT)) ··· 455 455 else if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask) 456 456 desc->chip->unmask(irq); 457 457 out_unlock: 458 - spin_unlock(&desc->lock); 458 + raw_spin_unlock(&desc->lock); 459 459 } 460 460 EXPORT_SYMBOL_GPL(handle_level_irq); 461 461 ··· 475 475 struct irqaction *action; 476 476 irqreturn_t action_ret; 477 477 478 - spin_lock(&desc->lock); 478 + raw_spin_lock(&desc->lock); 479 479 480 480 if (unlikely(desc->status & IRQ_INPROGRESS)) 481 481 goto out; ··· 497 497 498 498 desc->status |= IRQ_INPROGRESS; 499 499 desc->status &= ~IRQ_PENDING; 500 - spin_unlock(&desc->lock); 500 + raw_spin_unlock(&desc->lock); 501 501 502 502 action_ret = handle_IRQ_event(irq, action); 503 503 if (!noirqdebug) 504 504 note_interrupt(irq, desc, action_ret); 505 505 506 - spin_lock(&desc->lock); 506 + raw_spin_lock(&desc->lock); 507 507 desc->status &= ~IRQ_INPROGRESS; 508 508 out: 509 509 desc->chip->eoi(irq); 510 510 511 - spin_unlock(&desc->lock); 511 + raw_spin_unlock(&desc->lock); 512 512 } 513 513 514 514 /** ··· 530 530 void 531 531 handle_edge_irq(unsigned int irq, struct irq_desc *desc) 532 532 { 533 - spin_lock(&desc->lock); 533 + raw_spin_lock(&desc->lock); 534 534 535 535 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); 536 536 ··· 576 576 } 577 577 578 578 desc->status &= ~IRQ_PENDING; 579 - spin_unlock(&desc->lock); 579 + raw_spin_unlock(&desc->lock); 580 580 action_ret = handle_IRQ_event(irq, action); 581 581 if (!noirqdebug) 582 582 note_interrupt(irq, desc, action_ret); 583 - spin_lock(&desc->lock); 583 + raw_spin_lock(&desc->lock); 584 584 585 585 } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING); 
586 586 587 587 desc->status &= ~IRQ_INPROGRESS; 588 588 out_unlock: 589 - spin_unlock(&desc->lock); 589 + raw_spin_unlock(&desc->lock); 590 590 } 591 591 592 592 /** ··· 643 643 } 644 644 645 645 chip_bus_lock(irq, desc); 646 - spin_lock_irqsave(&desc->lock, flags); 646 + raw_spin_lock_irqsave(&desc->lock, flags); 647 647 648 648 /* Uninstall? */ 649 649 if (handle == handle_bad_irq) { ··· 661 661 desc->depth = 0; 662 662 desc->chip->startup(irq); 663 663 } 664 - spin_unlock_irqrestore(&desc->lock, flags); 664 + raw_spin_unlock_irqrestore(&desc->lock, flags); 665 665 chip_bus_sync_unlock(irq, desc); 666 666 } 667 667 EXPORT_SYMBOL_GPL(__set_irq_handler); ··· 692 692 return; 693 693 } 694 694 695 - spin_lock_irqsave(&desc->lock, flags); 695 + raw_spin_lock_irqsave(&desc->lock, flags); 696 696 desc->status |= IRQ_NOPROBE; 697 - spin_unlock_irqrestore(&desc->lock, flags); 697 + raw_spin_unlock_irqrestore(&desc->lock, flags); 698 698 } 699 699 700 700 void __init set_irq_probe(unsigned int irq) ··· 707 707 return; 708 708 } 709 709 710 - spin_lock_irqsave(&desc->lock, flags); 710 + raw_spin_lock_irqsave(&desc->lock, flags); 711 711 desc->status &= ~IRQ_NOPROBE; 712 - spin_unlock_irqrestore(&desc->lock, flags); 712 + raw_spin_unlock_irqrestore(&desc->lock, flags); 713 713 }
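A shape that repeats across the irq flow handlers above: desc->lock (now a raw_spinlock_t) protects only the descriptor bookkeeping and is dropped around the actual action handlers. Condensed from the flow handlers in the chip.c hunks:

raw_spin_lock(&desc->lock);
desc->status |= IRQ_INPROGRESS;
raw_spin_unlock(&desc->lock);

action_ret = handle_IRQ_event(irq, action);	/* handlers run unlocked */
if (!noirqdebug)
	note_interrupt(irq, desc, action_ret);

raw_spin_lock(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
raw_spin_unlock(&desc->lock);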
+11 -11
kernel/irq/handle.c
··· 80 80 .chip = &no_irq_chip, 81 81 .handle_irq = handle_bad_irq, 82 82 .depth = 1, 83 - .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock), 83 + .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock), 84 84 }; 85 85 86 86 void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr) ··· 108 108 { 109 109 memcpy(desc, &irq_desc_init, sizeof(struct irq_desc)); 110 110 111 - spin_lock_init(&desc->lock); 111 + raw_spin_lock_init(&desc->lock); 112 112 desc->irq = irq; 113 113 #ifdef CONFIG_SMP 114 114 desc->node = node; ··· 130 130 /* 131 131 * Protect the sparse_irqs: 132 132 */ 133 - DEFINE_SPINLOCK(sparse_irq_lock); 133 + DEFINE_RAW_SPINLOCK(sparse_irq_lock); 134 134 135 135 struct irq_desc **irq_desc_ptrs __read_mostly; 136 136 ··· 141 141 .chip = &no_irq_chip, 142 142 .handle_irq = handle_bad_irq, 143 143 .depth = 1, 144 - .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock), 144 + .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock), 145 145 } 146 146 }; 147 147 ··· 212 212 if (desc) 213 213 return desc; 214 214 215 - spin_lock_irqsave(&sparse_irq_lock, flags); 215 + raw_spin_lock_irqsave(&sparse_irq_lock, flags); 216 216 217 217 /* We have to check it to avoid races with another CPU */ 218 218 desc = irq_desc_ptrs[irq]; ··· 234 234 irq_desc_ptrs[irq] = desc; 235 235 236 236 out_unlock: 237 - spin_unlock_irqrestore(&sparse_irq_lock, flags); 237 + raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); 238 238 239 239 return desc; 240 240 } ··· 247 247 .chip = &no_irq_chip, 248 248 .handle_irq = handle_bad_irq, 249 249 .depth = 1, 250 - .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock), 250 + .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock), 251 251 } 252 252 }; 253 253 ··· 473 473 return 1; 474 474 } 475 475 476 - spin_lock(&desc->lock); 476 + raw_spin_lock(&desc->lock); 477 477 if (desc->chip->ack) 478 478 desc->chip->ack(irq); 479 479 /* ··· 517 517 for (;;) { 518 518 irqreturn_t action_ret; 519 519 520 - spin_unlock(&desc->lock); 520 + raw_spin_unlock(&desc->lock); 521 521 522 522 action_ret = handle_IRQ_event(irq, action); 523 523 if (!noirqdebug) 524 524 note_interrupt(irq, desc, action_ret); 525 525 526 - spin_lock(&desc->lock); 526 + raw_spin_lock(&desc->lock); 527 527 if (likely(!(desc->status & IRQ_PENDING))) 528 528 break; 529 529 desc->status &= ~IRQ_PENDING; ··· 536 536 * disabled while the handler was running. 537 537 */ 538 538 desc->chip->end(irq); 539 - spin_unlock(&desc->lock); 539 + raw_spin_unlock(&desc->lock); 540 540 541 541 return 1; 542 542 }
+1 -1
kernel/irq/internals.h
··· 18 18 extern struct lock_class_key irq_desc_lock_class; 19 19 extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); 20 20 extern void clear_kstat_irqs(struct irq_desc *desc); 21 - extern spinlock_t sparse_irq_lock; 21 + extern raw_spinlock_t sparse_irq_lock; 22 22 23 23 #ifdef CONFIG_SPARSE_IRQ 24 24 /* irq_desc_ptrs allocated at boot time */
+25 -25
kernel/irq/manage.c
··· 46 46 cpu_relax(); 47 47 48 48 /* Ok, that indicated we're done: double-check carefully. */ 49 - spin_lock_irqsave(&desc->lock, flags); 49 + raw_spin_lock_irqsave(&desc->lock, flags); 50 50 status = desc->status; 51 - spin_unlock_irqrestore(&desc->lock, flags); 51 + raw_spin_unlock_irqrestore(&desc->lock, flags); 52 52 53 53 /* Oops, that failed? */ 54 54 } while (status & IRQ_INPROGRESS); ··· 114 114 if (!desc->chip->set_affinity) 115 115 return -EINVAL; 116 116 117 - spin_lock_irqsave(&desc->lock, flags); 117 + raw_spin_lock_irqsave(&desc->lock, flags); 118 118 119 119 #ifdef CONFIG_GENERIC_PENDING_IRQ 120 120 if (desc->status & IRQ_MOVE_PCNTXT) { ··· 134 134 } 135 135 #endif 136 136 desc->status |= IRQ_AFFINITY_SET; 137 - spin_unlock_irqrestore(&desc->lock, flags); 137 + raw_spin_unlock_irqrestore(&desc->lock, flags); 138 138 return 0; 139 139 } 140 140 ··· 181 181 unsigned long flags; 182 182 int ret; 183 183 184 - spin_lock_irqsave(&desc->lock, flags); 184 + raw_spin_lock_irqsave(&desc->lock, flags); 185 185 ret = setup_affinity(irq, desc); 186 186 if (!ret) 187 187 irq_set_thread_affinity(desc); 188 - spin_unlock_irqrestore(&desc->lock, flags); 188 + raw_spin_unlock_irqrestore(&desc->lock, flags); 189 189 190 190 return ret; 191 191 } ··· 231 231 return; 232 232 233 233 chip_bus_lock(irq, desc); 234 - spin_lock_irqsave(&desc->lock, flags); 234 + raw_spin_lock_irqsave(&desc->lock, flags); 235 235 __disable_irq(desc, irq, false); 236 - spin_unlock_irqrestore(&desc->lock, flags); 236 + raw_spin_unlock_irqrestore(&desc->lock, flags); 237 237 chip_bus_sync_unlock(irq, desc); 238 238 } 239 239 EXPORT_SYMBOL(disable_irq_nosync); ··· 308 308 return; 309 309 310 310 chip_bus_lock(irq, desc); 311 - spin_lock_irqsave(&desc->lock, flags); 311 + raw_spin_lock_irqsave(&desc->lock, flags); 312 312 __enable_irq(desc, irq, false); 313 - spin_unlock_irqrestore(&desc->lock, flags); 313 + raw_spin_unlock_irqrestore(&desc->lock, flags); 314 314 chip_bus_sync_unlock(irq, desc); 315 315 } 316 316 EXPORT_SYMBOL(enable_irq); ··· 347 347 /* wakeup-capable irqs can be shared between drivers that 348 348 * don't need to have the same sleep mode behaviors. 
349 349 */ 350 - spin_lock_irqsave(&desc->lock, flags); 350 + raw_spin_lock_irqsave(&desc->lock, flags); 351 351 if (on) { 352 352 if (desc->wake_depth++ == 0) { 353 353 ret = set_irq_wake_real(irq, on); ··· 368 368 } 369 369 } 370 370 371 - spin_unlock_irqrestore(&desc->lock, flags); 371 + raw_spin_unlock_irqrestore(&desc->lock, flags); 372 372 return ret; 373 373 } 374 374 EXPORT_SYMBOL(set_irq_wake); ··· 484 484 static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc) 485 485 { 486 486 chip_bus_lock(irq, desc); 487 - spin_lock_irq(&desc->lock); 487 + raw_spin_lock_irq(&desc->lock); 488 488 if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) { 489 489 desc->status &= ~IRQ_MASKED; 490 490 desc->chip->unmask(irq); 491 491 } 492 - spin_unlock_irq(&desc->lock); 492 + raw_spin_unlock_irq(&desc->lock); 493 493 chip_bus_sync_unlock(irq, desc); 494 494 } 495 495 ··· 514 514 return; 515 515 } 516 516 517 - spin_lock_irq(&desc->lock); 517 + raw_spin_lock_irq(&desc->lock); 518 518 cpumask_copy(mask, desc->affinity); 519 - spin_unlock_irq(&desc->lock); 519 + raw_spin_unlock_irq(&desc->lock); 520 520 521 521 set_cpus_allowed_ptr(current, mask); 522 522 free_cpumask_var(mask); ··· 545 545 546 546 atomic_inc(&desc->threads_active); 547 547 548 - spin_lock_irq(&desc->lock); 548 + raw_spin_lock_irq(&desc->lock); 549 549 if (unlikely(desc->status & IRQ_DISABLED)) { 550 550 /* 551 551 * CHECKME: We might need a dedicated ··· 555 555 * retriggers the interrupt itself --- tglx 556 556 */ 557 557 desc->status |= IRQ_PENDING; 558 - spin_unlock_irq(&desc->lock); 558 + raw_spin_unlock_irq(&desc->lock); 559 559 } else { 560 - spin_unlock_irq(&desc->lock); 560 + raw_spin_unlock_irq(&desc->lock); 561 561 562 562 action->thread_fn(action->irq, action->dev_id); 563 563 ··· 679 679 /* 680 680 * The following block of code has to be executed atomically 681 681 */ 682 - spin_lock_irqsave(&desc->lock, flags); 682 + raw_spin_lock_irqsave(&desc->lock, flags); 683 683 old_ptr = &desc->action; 684 684 old = *old_ptr; 685 685 if (old) { ··· 775 775 __enable_irq(desc, irq, false); 776 776 } 777 777 778 - spin_unlock_irqrestore(&desc->lock, flags); 778 + raw_spin_unlock_irqrestore(&desc->lock, flags); 779 779 780 780 /* 781 781 * Strictly no need to wake it up, but hung_task complains ··· 802 802 ret = -EBUSY; 803 803 804 804 out_thread: 805 - spin_unlock_irqrestore(&desc->lock, flags); 805 + raw_spin_unlock_irqrestore(&desc->lock, flags); 806 806 if (new->thread) { 807 807 struct task_struct *t = new->thread; 808 808 ··· 844 844 if (!desc) 845 845 return NULL; 846 846 847 - spin_lock_irqsave(&desc->lock, flags); 847 + raw_spin_lock_irqsave(&desc->lock, flags); 848 848 849 849 /* 850 850 * There can be multiple actions per IRQ descriptor, find the right ··· 856 856 857 857 if (!action) { 858 858 WARN(1, "Trying to free already-free IRQ %d\n", irq); 859 - spin_unlock_irqrestore(&desc->lock, flags); 859 + raw_spin_unlock_irqrestore(&desc->lock, flags); 860 860 861 861 return NULL; 862 862 } ··· 884 884 desc->chip->disable(irq); 885 885 } 886 886 887 - spin_unlock_irqrestore(&desc->lock, flags); 887 + raw_spin_unlock_irqrestore(&desc->lock, flags); 888 888 889 889 unregister_handler_proc(irq, action); 890 890
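One detail worth noting in the enable/disable paths above is the nesting order the conversion preserves: the bus lock, which is allowed to sleep, is always taken outside the raw, interrupt-disabling descriptor lock, never inside it. Condensed from disable_irq_nosync() in the hunk, with error handling elided:

	chip_bus_lock(irq, desc);                       /* may sleep on slow buses */
	raw_spin_lock_irqsave(&desc->lock, flags);      /* short, IRQs-off section  */
	__disable_irq(desc, irq, false);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(irq, desc);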
+1 -1
kernel/irq/migration.c
··· 27 27 if (!desc->chip->set_affinity) 28 28 return; 29 29 30 - assert_spin_locked(&desc->lock); 30 + assert_raw_spin_locked(&desc->lock); 31 31 32 32 /* 33 33 * If there was a valid mask to work with, please
+4 -4
kernel/irq/numa_migrate.c
··· 42 42 "for migration.\n", irq); 43 43 return false; 44 44 } 45 - spin_lock_init(&desc->lock); 45 + raw_spin_lock_init(&desc->lock); 46 46 desc->node = node; 47 47 lockdep_set_class(&desc->lock, &irq_desc_lock_class); 48 48 init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids); ··· 67 67 68 68 irq = old_desc->irq; 69 69 70 - spin_lock_irqsave(&sparse_irq_lock, flags); 70 + raw_spin_lock_irqsave(&sparse_irq_lock, flags); 71 71 72 72 /* We have to check it to avoid races with another CPU */ 73 73 desc = irq_desc_ptrs[irq]; ··· 91 91 } 92 92 93 93 irq_desc_ptrs[irq] = desc; 94 - spin_unlock_irqrestore(&sparse_irq_lock, flags); 94 + raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); 95 95 96 96 /* free the old one */ 97 97 free_one_irq_desc(old_desc, desc); ··· 100 100 return desc; 101 101 102 102 out_unlock: 103 - spin_unlock_irqrestore(&sparse_irq_lock, flags); 103 + raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); 104 104 105 105 return desc; 106 106 }
+4 -4
kernel/irq/pm.c
··· 28 28 for_each_irq_desc(irq, desc) { 29 29 unsigned long flags; 30 30 31 - spin_lock_irqsave(&desc->lock, flags); 31 + raw_spin_lock_irqsave(&desc->lock, flags); 32 32 __disable_irq(desc, irq, true); 33 - spin_unlock_irqrestore(&desc->lock, flags); 33 + raw_spin_unlock_irqrestore(&desc->lock, flags); 34 34 } 35 35 36 36 for_each_irq_desc(irq, desc) ··· 56 56 if (!(desc->status & IRQ_SUSPENDED)) 57 57 continue; 58 58 59 - spin_lock_irqsave(&desc->lock, flags); 59 + raw_spin_lock_irqsave(&desc->lock, flags); 60 60 __enable_irq(desc, irq, true); 61 - spin_unlock_irqrestore(&desc->lock, flags); 61 + raw_spin_unlock_irqrestore(&desc->lock, flags); 62 62 } 63 63 } 64 64 EXPORT_SYMBOL_GPL(resume_device_irqs);
+2 -2
kernel/irq/proc.c
··· 179 179 unsigned long flags; 180 180 int ret = 1; 181 181 182 - spin_lock_irqsave(&desc->lock, flags); 182 + raw_spin_lock_irqsave(&desc->lock, flags); 183 183 for (action = desc->action ; action; action = action->next) { 184 184 if ((action != new_action) && action->name && 185 185 !strcmp(new_action->name, action->name)) { ··· 187 187 break; 188 188 } 189 189 } 190 - spin_unlock_irqrestore(&desc->lock, flags); 190 + raw_spin_unlock_irqrestore(&desc->lock, flags); 191 191 return ret; 192 192 } 193 193
+7 -7
kernel/irq/spurious.c
··· 28 28 struct irqaction *action; 29 29 int ok = 0, work = 0; 30 30 31 - spin_lock(&desc->lock); 31 + raw_spin_lock(&desc->lock); 32 32 /* Already running on another processor */ 33 33 if (desc->status & IRQ_INPROGRESS) { 34 34 /* ··· 37 37 */ 38 38 if (desc->action && (desc->action->flags & IRQF_SHARED)) 39 39 desc->status |= IRQ_PENDING; 40 - spin_unlock(&desc->lock); 40 + raw_spin_unlock(&desc->lock); 41 41 return ok; 42 42 } 43 43 /* Honour the normal IRQ locking */ 44 44 desc->status |= IRQ_INPROGRESS; 45 45 action = desc->action; 46 - spin_unlock(&desc->lock); 46 + raw_spin_unlock(&desc->lock); 47 47 48 48 while (action) { 49 49 /* Only shared IRQ handlers are safe to call */ ··· 56 56 } 57 57 local_irq_disable(); 58 58 /* Now clean up the flags */ 59 - spin_lock(&desc->lock); 59 + raw_spin_lock(&desc->lock); 60 60 action = desc->action; 61 61 62 62 /* ··· 68 68 * Perform real IRQ processing for the IRQ we deferred 69 69 */ 70 70 work = 1; 71 - spin_unlock(&desc->lock); 71 + raw_spin_unlock(&desc->lock); 72 72 handle_IRQ_event(irq, action); 73 - spin_lock(&desc->lock); 73 + raw_spin_lock(&desc->lock); 74 74 desc->status &= ~IRQ_PENDING; 75 75 } 76 76 desc->status &= ~IRQ_INPROGRESS; ··· 80 80 */ 81 81 if (work && desc->chip && desc->chip->end) 82 82 desc->chip->end(irq); 83 - spin_unlock(&desc->lock); 83 + raw_spin_unlock(&desc->lock); 84 84 85 85 return ok; 86 86 }
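The spurious-IRQ poller above also shows how callbacks are kept out from under the raw lock: desc->lock is dropped before handle_IRQ_event() runs and re-taken afterwards to clean up the status flags. A condensed sketch of that shape (the real loop above additionally re-checks IRQ_PENDING before clearing IRQ_INPROGRESS):

	raw_spin_lock(&desc->lock);
	desc->status |= IRQ_INPROGRESS;         /* claim the descriptor */
	action = desc->action;
	raw_spin_unlock(&desc->lock);

	handle_IRQ_event(irq, action);          /* runs with the raw lock dropped */

	raw_spin_lock(&desc->lock);
	desc->status &= ~(IRQ_PENDING | IRQ_INPROGRESS);
	raw_spin_unlock(&desc->lock);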
+10 -10
kernel/lockdep.c
··· 73 73 * to use a raw spinlock - we really dont want the spinlock 74 74 * code to recurse back into the lockdep code... 75 75 */ 76 - static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 76 + static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; 77 77 78 78 static int graph_lock(void) 79 79 { 80 - __raw_spin_lock(&lockdep_lock); 80 + arch_spin_lock(&lockdep_lock); 81 81 /* 82 82 * Make sure that if another CPU detected a bug while 83 83 * walking the graph we dont change it (while the other ··· 85 85 * dropped already) 86 86 */ 87 87 if (!debug_locks) { 88 - __raw_spin_unlock(&lockdep_lock); 88 + arch_spin_unlock(&lockdep_lock); 89 89 return 0; 90 90 } 91 91 /* prevent any recursions within lockdep from causing deadlocks */ ··· 95 95 96 96 static inline int graph_unlock(void) 97 97 { 98 - if (debug_locks && !__raw_spin_is_locked(&lockdep_lock)) 98 + if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) 99 99 return DEBUG_LOCKS_WARN_ON(1); 100 100 101 101 current->lockdep_recursion--; 102 - __raw_spin_unlock(&lockdep_lock); 102 + arch_spin_unlock(&lockdep_lock); 103 103 return 0; 104 104 } 105 105 ··· 111 111 { 112 112 int ret = debug_locks_off(); 113 113 114 - __raw_spin_unlock(&lockdep_lock); 114 + arch_spin_unlock(&lockdep_lock); 115 115 116 116 return ret; 117 117 } ··· 1170 1170 this.class = class; 1171 1171 1172 1172 local_irq_save(flags); 1173 - __raw_spin_lock(&lockdep_lock); 1173 + arch_spin_lock(&lockdep_lock); 1174 1174 ret = __lockdep_count_forward_deps(&this); 1175 - __raw_spin_unlock(&lockdep_lock); 1175 + arch_spin_unlock(&lockdep_lock); 1176 1176 local_irq_restore(flags); 1177 1177 1178 1178 return ret; ··· 1197 1197 this.class = class; 1198 1198 1199 1199 local_irq_save(flags); 1200 - __raw_spin_lock(&lockdep_lock); 1200 + arch_spin_lock(&lockdep_lock); 1201 1201 ret = __lockdep_count_backward_deps(&this); 1202 - __raw_spin_unlock(&lockdep_lock); 1202 + arch_spin_unlock(&lockdep_lock); 1203 1203 local_irq_restore(flags); 1204 1204 1205 1205 return ret;
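The lockdep hunk is the other half of the renaming: what used to be called raw_spinlock_t — the bare, arch-level lock with no debugging or lockdep hooks — is now arch_spinlock_t, freeing the raw_ prefix for the new type used everywhere else in this series. Lockdep keeps using the arch-level primitive directly because it must never recurse into itself, and its callers disable interrupts before taking it, as the lockdep_count_*_deps() helpers above do. A small sketch of that lowest layer; my_graph_lock and my_graph_update() are illustrative names:

	#include <linux/spinlock.h>

	/* was: static raw_spinlock_t my_graph_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; */
	static arch_spinlock_t my_graph_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	static void my_graph_update(void)
	{
		/* no lockdep tracking, no debug checks, no preemption handling */
		arch_spin_lock(&my_graph_lock);
		/* ... touch the data the lock protects ... */
		arch_spin_unlock(&my_graph_lock);
	}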
+6 -6
kernel/mutex-debug.h
··· 43 43 \ 44 44 DEBUG_LOCKS_WARN_ON(in_interrupt()); \ 45 45 local_irq_save(flags); \ 46 - __raw_spin_lock(&(lock)->raw_lock); \ 46 + arch_spin_lock(&(lock)->rlock.raw_lock);\ 47 47 DEBUG_LOCKS_WARN_ON(l->magic != l); \ 48 48 } while (0) 49 49 50 - #define spin_unlock_mutex(lock, flags) \ 51 - do { \ 52 - __raw_spin_unlock(&(lock)->raw_lock); \ 53 - local_irq_restore(flags); \ 54 - preempt_check_resched(); \ 50 + #define spin_unlock_mutex(lock, flags) \ 51 + do { \ 52 + arch_spin_unlock(&(lock)->rlock.raw_lock); \ 53 + local_irq_restore(flags); \ 54 + preempt_check_resched(); \ 55 55 } while (0)
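The mutex-debug change shows where the renamed layers now sit relative to each other: the mutex's wait_lock is a spinlock_t, its first member is a struct raw_spinlock called rlock, and that in turn wraps the arch_spinlock_t raw_lock which arch_spin_lock() operates on — hence the new &(lock)->rlock.raw_lock expression. A rough sketch of the layering this implies (debug and lockdep members elided; the arch type's contents are per-architecture):

	typedef struct raw_spinlock {
		arch_spinlock_t raw_lock;       /* what arch_spin_lock() takes */
		/* debug / lockdep fields elided */
	} raw_spinlock_t;

	typedef struct spinlock {
		struct raw_spinlock rlock;      /* hence &(lock)->rlock.raw_lock above */
	} spinlock_t;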
+53 -53
kernel/perf_event.c
··· 203 203 * if so. If we locked the right context, then it 204 204 * can't get swapped on us any more. 205 205 */ 206 - spin_lock_irqsave(&ctx->lock, *flags); 206 + raw_spin_lock_irqsave(&ctx->lock, *flags); 207 207 if (ctx != rcu_dereference(task->perf_event_ctxp)) { 208 - spin_unlock_irqrestore(&ctx->lock, *flags); 208 + raw_spin_unlock_irqrestore(&ctx->lock, *flags); 209 209 goto retry; 210 210 } 211 211 212 212 if (!atomic_inc_not_zero(&ctx->refcount)) { 213 - spin_unlock_irqrestore(&ctx->lock, *flags); 213 + raw_spin_unlock_irqrestore(&ctx->lock, *flags); 214 214 ctx = NULL; 215 215 } 216 216 } ··· 231 231 ctx = perf_lock_task_context(task, &flags); 232 232 if (ctx) { 233 233 ++ctx->pin_count; 234 - spin_unlock_irqrestore(&ctx->lock, flags); 234 + raw_spin_unlock_irqrestore(&ctx->lock, flags); 235 235 } 236 236 return ctx; 237 237 } ··· 240 240 { 241 241 unsigned long flags; 242 242 243 - spin_lock_irqsave(&ctx->lock, flags); 243 + raw_spin_lock_irqsave(&ctx->lock, flags); 244 244 --ctx->pin_count; 245 - spin_unlock_irqrestore(&ctx->lock, flags); 245 + raw_spin_unlock_irqrestore(&ctx->lock, flags); 246 246 put_ctx(ctx); 247 247 } 248 248 ··· 427 427 if (ctx->task && cpuctx->task_ctx != ctx) 428 428 return; 429 429 430 - spin_lock(&ctx->lock); 430 + raw_spin_lock(&ctx->lock); 431 431 /* 432 432 * Protect the list operation against NMI by disabling the 433 433 * events on a global level. ··· 449 449 } 450 450 451 451 perf_enable(); 452 - spin_unlock(&ctx->lock); 452 + raw_spin_unlock(&ctx->lock); 453 453 } 454 454 455 455 ··· 488 488 task_oncpu_function_call(task, __perf_event_remove_from_context, 489 489 event); 490 490 491 - spin_lock_irq(&ctx->lock); 491 + raw_spin_lock_irq(&ctx->lock); 492 492 /* 493 493 * If the context is active we need to retry the smp call. 494 494 */ 495 495 if (ctx->nr_active && !list_empty(&event->group_entry)) { 496 - spin_unlock_irq(&ctx->lock); 496 + raw_spin_unlock_irq(&ctx->lock); 497 497 goto retry; 498 498 } 499 499 ··· 504 504 */ 505 505 if (!list_empty(&event->group_entry)) 506 506 list_del_event(event, ctx); 507 - spin_unlock_irq(&ctx->lock); 507 + raw_spin_unlock_irq(&ctx->lock); 508 508 } 509 509 510 510 /* ··· 535 535 if (ctx->task && cpuctx->task_ctx != ctx) 536 536 return; 537 537 538 - spin_lock(&ctx->lock); 538 + raw_spin_lock(&ctx->lock); 539 539 540 540 /* 541 541 * If the event is on, turn it off. ··· 551 551 event->state = PERF_EVENT_STATE_OFF; 552 552 } 553 553 554 - spin_unlock(&ctx->lock); 554 + raw_spin_unlock(&ctx->lock); 555 555 } 556 556 557 557 /* ··· 584 584 retry: 585 585 task_oncpu_function_call(task, __perf_event_disable, event); 586 586 587 - spin_lock_irq(&ctx->lock); 587 + raw_spin_lock_irq(&ctx->lock); 588 588 /* 589 589 * If the event is still active, we need to retry the cross-call. 
590 590 */ 591 591 if (event->state == PERF_EVENT_STATE_ACTIVE) { 592 - spin_unlock_irq(&ctx->lock); 592 + raw_spin_unlock_irq(&ctx->lock); 593 593 goto retry; 594 594 } 595 595 ··· 602 602 event->state = PERF_EVENT_STATE_OFF; 603 603 } 604 604 605 - spin_unlock_irq(&ctx->lock); 605 + raw_spin_unlock_irq(&ctx->lock); 606 606 } 607 607 608 608 static int ··· 770 770 cpuctx->task_ctx = ctx; 771 771 } 772 772 773 - spin_lock(&ctx->lock); 773 + raw_spin_lock(&ctx->lock); 774 774 ctx->is_active = 1; 775 775 update_context_time(ctx); 776 776 ··· 820 820 unlock: 821 821 perf_enable(); 822 822 823 - spin_unlock(&ctx->lock); 823 + raw_spin_unlock(&ctx->lock); 824 824 } 825 825 826 826 /* ··· 856 856 task_oncpu_function_call(task, __perf_install_in_context, 857 857 event); 858 858 859 - spin_lock_irq(&ctx->lock); 859 + raw_spin_lock_irq(&ctx->lock); 860 860 /* 861 861 * we need to retry the smp call. 862 862 */ 863 863 if (ctx->is_active && list_empty(&event->group_entry)) { 864 - spin_unlock_irq(&ctx->lock); 864 + raw_spin_unlock_irq(&ctx->lock); 865 865 goto retry; 866 866 } 867 867 ··· 872 872 */ 873 873 if (list_empty(&event->group_entry)) 874 874 add_event_to_ctx(event, ctx); 875 - spin_unlock_irq(&ctx->lock); 875 + raw_spin_unlock_irq(&ctx->lock); 876 876 } 877 877 878 878 /* ··· 917 917 cpuctx->task_ctx = ctx; 918 918 } 919 919 920 - spin_lock(&ctx->lock); 920 + raw_spin_lock(&ctx->lock); 921 921 ctx->is_active = 1; 922 922 update_context_time(ctx); 923 923 ··· 959 959 } 960 960 961 961 unlock: 962 - spin_unlock(&ctx->lock); 962 + raw_spin_unlock(&ctx->lock); 963 963 } 964 964 965 965 /* ··· 985 985 return; 986 986 } 987 987 988 - spin_lock_irq(&ctx->lock); 988 + raw_spin_lock_irq(&ctx->lock); 989 989 if (event->state >= PERF_EVENT_STATE_INACTIVE) 990 990 goto out; 991 991 ··· 1000 1000 event->state = PERF_EVENT_STATE_OFF; 1001 1001 1002 1002 retry: 1003 - spin_unlock_irq(&ctx->lock); 1003 + raw_spin_unlock_irq(&ctx->lock); 1004 1004 task_oncpu_function_call(task, __perf_event_enable, event); 1005 1005 1006 - spin_lock_irq(&ctx->lock); 1006 + raw_spin_lock_irq(&ctx->lock); 1007 1007 1008 1008 /* 1009 1009 * If the context is active and the event is still off, ··· 1020 1020 __perf_event_mark_enabled(event, ctx); 1021 1021 1022 1022 out: 1023 - spin_unlock_irq(&ctx->lock); 1023 + raw_spin_unlock_irq(&ctx->lock); 1024 1024 } 1025 1025 1026 1026 static int perf_event_refresh(struct perf_event *event, int refresh) ··· 1042 1042 { 1043 1043 struct perf_event *event; 1044 1044 1045 - spin_lock(&ctx->lock); 1045 + raw_spin_lock(&ctx->lock); 1046 1046 ctx->is_active = 0; 1047 1047 if (likely(!ctx->nr_events)) 1048 1048 goto out; ··· 1055 1055 } 1056 1056 perf_enable(); 1057 1057 out: 1058 - spin_unlock(&ctx->lock); 1058 + raw_spin_unlock(&ctx->lock); 1059 1059 } 1060 1060 1061 1061 /* ··· 1193 1193 * order we take the locks because no other cpu could 1194 1194 * be trying to lock both of these tasks. 
1195 1195 */ 1196 - spin_lock(&ctx->lock); 1197 - spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); 1196 + raw_spin_lock(&ctx->lock); 1197 + raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING); 1198 1198 if (context_equiv(ctx, next_ctx)) { 1199 1199 /* 1200 1200 * XXX do we need a memory barrier of sorts ··· 1208 1208 1209 1209 perf_event_sync_stat(ctx, next_ctx); 1210 1210 } 1211 - spin_unlock(&next_ctx->lock); 1212 - spin_unlock(&ctx->lock); 1211 + raw_spin_unlock(&next_ctx->lock); 1212 + raw_spin_unlock(&ctx->lock); 1213 1213 } 1214 1214 rcu_read_unlock(); 1215 1215 ··· 1251 1251 struct perf_event *event; 1252 1252 int can_add_hw = 1; 1253 1253 1254 - spin_lock(&ctx->lock); 1254 + raw_spin_lock(&ctx->lock); 1255 1255 ctx->is_active = 1; 1256 1256 if (likely(!ctx->nr_events)) 1257 1257 goto out; ··· 1306 1306 } 1307 1307 perf_enable(); 1308 1308 out: 1309 - spin_unlock(&ctx->lock); 1309 + raw_spin_unlock(&ctx->lock); 1310 1310 } 1311 1311 1312 1312 /* ··· 1370 1370 struct hw_perf_event *hwc; 1371 1371 u64 interrupts, freq; 1372 1372 1373 - spin_lock(&ctx->lock); 1373 + raw_spin_lock(&ctx->lock); 1374 1374 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 1375 1375 if (event->state != PERF_EVENT_STATE_ACTIVE) 1376 1376 continue; ··· 1425 1425 perf_enable(); 1426 1426 } 1427 1427 } 1428 - spin_unlock(&ctx->lock); 1428 + raw_spin_unlock(&ctx->lock); 1429 1429 } 1430 1430 1431 1431 /* ··· 1438 1438 if (!ctx->nr_events) 1439 1439 return; 1440 1440 1441 - spin_lock(&ctx->lock); 1441 + raw_spin_lock(&ctx->lock); 1442 1442 /* 1443 1443 * Rotate the first entry last (works just fine for group events too): 1444 1444 */ ··· 1449 1449 } 1450 1450 perf_enable(); 1451 1451 1452 - spin_unlock(&ctx->lock); 1452 + raw_spin_unlock(&ctx->lock); 1453 1453 } 1454 1454 1455 1455 void perf_event_task_tick(struct task_struct *curr, int cpu) ··· 1498 1498 1499 1499 __perf_event_task_sched_out(ctx); 1500 1500 1501 - spin_lock(&ctx->lock); 1501 + raw_spin_lock(&ctx->lock); 1502 1502 1503 1503 list_for_each_entry(event, &ctx->group_list, group_entry) { 1504 1504 if (!event->attr.enable_on_exec) ··· 1516 1516 if (enabled) 1517 1517 unclone_ctx(ctx); 1518 1518 1519 - spin_unlock(&ctx->lock); 1519 + raw_spin_unlock(&ctx->lock); 1520 1520 1521 1521 perf_event_task_sched_in(task, smp_processor_id()); 1522 1522 out: ··· 1542 1542 if (ctx->task && cpuctx->task_ctx != ctx) 1543 1543 return; 1544 1544 1545 - spin_lock(&ctx->lock); 1545 + raw_spin_lock(&ctx->lock); 1546 1546 update_context_time(ctx); 1547 1547 update_event_times(event); 1548 - spin_unlock(&ctx->lock); 1548 + raw_spin_unlock(&ctx->lock); 1549 1549 1550 1550 event->pmu->read(event); 1551 1551 } ··· 1563 1563 struct perf_event_context *ctx = event->ctx; 1564 1564 unsigned long flags; 1565 1565 1566 - spin_lock_irqsave(&ctx->lock, flags); 1566 + raw_spin_lock_irqsave(&ctx->lock, flags); 1567 1567 update_context_time(ctx); 1568 1568 update_event_times(event); 1569 - spin_unlock_irqrestore(&ctx->lock, flags); 1569 + raw_spin_unlock_irqrestore(&ctx->lock, flags); 1570 1570 } 1571 1571 1572 1572 return atomic64_read(&event->count); ··· 1579 1579 __perf_event_init_context(struct perf_event_context *ctx, 1580 1580 struct task_struct *task) 1581 1581 { 1582 - spin_lock_init(&ctx->lock); 1582 + raw_spin_lock_init(&ctx->lock); 1583 1583 mutex_init(&ctx->mutex); 1584 1584 INIT_LIST_HEAD(&ctx->group_list); 1585 1585 INIT_LIST_HEAD(&ctx->event_list); ··· 1649 1649 ctx = perf_lock_task_context(task, &flags); 1650 1650 if (ctx) { 1651 1651 
unclone_ctx(ctx); 1652 - spin_unlock_irqrestore(&ctx->lock, flags); 1652 + raw_spin_unlock_irqrestore(&ctx->lock, flags); 1653 1653 } 1654 1654 1655 1655 if (!ctx) { ··· 1987 1987 if (!value) 1988 1988 return -EINVAL; 1989 1989 1990 - spin_lock_irq(&ctx->lock); 1990 + raw_spin_lock_irq(&ctx->lock); 1991 1991 if (event->attr.freq) { 1992 1992 if (value > sysctl_perf_event_sample_rate) { 1993 1993 ret = -EINVAL; ··· 2000 2000 event->hw.sample_period = value; 2001 2001 } 2002 2002 unlock: 2003 - spin_unlock_irq(&ctx->lock); 2003 + raw_spin_unlock_irq(&ctx->lock); 2004 2004 2005 2005 return ret; 2006 2006 } ··· 4992 4992 * reading child->perf_event_ctxp, we wait until it has 4993 4993 * incremented the context's refcount before we do put_ctx below. 4994 4994 */ 4995 - spin_lock(&child_ctx->lock); 4995 + raw_spin_lock(&child_ctx->lock); 4996 4996 child->perf_event_ctxp = NULL; 4997 4997 /* 4998 4998 * If this context is a clone; unclone it so it can't get ··· 5001 5001 */ 5002 5002 unclone_ctx(child_ctx); 5003 5003 update_context_time(child_ctx); 5004 - spin_unlock_irqrestore(&child_ctx->lock, flags); 5004 + raw_spin_unlock_irqrestore(&child_ctx->lock, flags); 5005 5005 5006 5006 /* 5007 5007 * Report the task dead after unscheduling the events so that we ··· 5292 5292 perf_reserved_percpu = val; 5293 5293 for_each_online_cpu(cpu) { 5294 5294 cpuctx = &per_cpu(perf_cpu_context, cpu); 5295 - spin_lock_irq(&cpuctx->ctx.lock); 5295 + raw_spin_lock_irq(&cpuctx->ctx.lock); 5296 5296 mpt = min(perf_max_events - cpuctx->ctx.nr_events, 5297 5297 perf_max_events - perf_reserved_percpu); 5298 5298 cpuctx->max_pertask = mpt; 5299 - spin_unlock_irq(&cpuctx->ctx.lock); 5299 + raw_spin_unlock_irq(&cpuctx->ctx.lock); 5300 5300 } 5301 5301 spin_unlock(&perf_resource_lock); 5302 5302
+2 -2
kernel/rtmutex-debug.c
··· 37 37 if (rt_trace_on) { \ 38 38 rt_trace_on = 0; \ 39 39 console_verbose(); \ 40 - if (spin_is_locked(&current->pi_lock)) \ 41 - spin_unlock(&current->pi_lock); \ 40 + if (raw_spin_is_locked(&current->pi_lock)) \ 41 + raw_spin_unlock(&current->pi_lock); \ 42 42 } \ 43 43 } while (0) 44 44
+53 -53
kernel/rtmutex.c
··· 138 138 { 139 139 unsigned long flags; 140 140 141 - spin_lock_irqsave(&task->pi_lock, flags); 141 + raw_spin_lock_irqsave(&task->pi_lock, flags); 142 142 __rt_mutex_adjust_prio(task); 143 - spin_unlock_irqrestore(&task->pi_lock, flags); 143 + raw_spin_unlock_irqrestore(&task->pi_lock, flags); 144 144 } 145 145 146 146 /* ··· 195 195 /* 196 196 * Task can not go away as we did a get_task() before ! 197 197 */ 198 - spin_lock_irqsave(&task->pi_lock, flags); 198 + raw_spin_lock_irqsave(&task->pi_lock, flags); 199 199 200 200 waiter = task->pi_blocked_on; 201 201 /* ··· 231 231 goto out_unlock_pi; 232 232 233 233 lock = waiter->lock; 234 - if (!spin_trylock(&lock->wait_lock)) { 235 - spin_unlock_irqrestore(&task->pi_lock, flags); 234 + if (!raw_spin_trylock(&lock->wait_lock)) { 235 + raw_spin_unlock_irqrestore(&task->pi_lock, flags); 236 236 cpu_relax(); 237 237 goto retry; 238 238 } ··· 240 240 /* Deadlock detection */ 241 241 if (lock == orig_lock || rt_mutex_owner(lock) == top_task) { 242 242 debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock); 243 - spin_unlock(&lock->wait_lock); 243 + raw_spin_unlock(&lock->wait_lock); 244 244 ret = deadlock_detect ? -EDEADLK : 0; 245 245 goto out_unlock_pi; 246 246 } ··· 253 253 plist_add(&waiter->list_entry, &lock->wait_list); 254 254 255 255 /* Release the task */ 256 - spin_unlock_irqrestore(&task->pi_lock, flags); 256 + raw_spin_unlock_irqrestore(&task->pi_lock, flags); 257 257 put_task_struct(task); 258 258 259 259 /* Grab the next task */ 260 260 task = rt_mutex_owner(lock); 261 261 get_task_struct(task); 262 - spin_lock_irqsave(&task->pi_lock, flags); 262 + raw_spin_lock_irqsave(&task->pi_lock, flags); 263 263 264 264 if (waiter == rt_mutex_top_waiter(lock)) { 265 265 /* Boost the owner */ ··· 277 277 __rt_mutex_adjust_prio(task); 278 278 } 279 279 280 - spin_unlock_irqrestore(&task->pi_lock, flags); 280 + raw_spin_unlock_irqrestore(&task->pi_lock, flags); 281 281 282 282 top_waiter = rt_mutex_top_waiter(lock); 283 - spin_unlock(&lock->wait_lock); 283 + raw_spin_unlock(&lock->wait_lock); 284 284 285 285 if (!detect_deadlock && waiter != top_waiter) 286 286 goto out_put_task; ··· 288 288 goto again; 289 289 290 290 out_unlock_pi: 291 - spin_unlock_irqrestore(&task->pi_lock, flags); 291 + raw_spin_unlock_irqrestore(&task->pi_lock, flags); 292 292 out_put_task: 293 293 put_task_struct(task); 294 294 ··· 313 313 if (pendowner == task) 314 314 return 1; 315 315 316 - spin_lock_irqsave(&pendowner->pi_lock, flags); 316 + raw_spin_lock_irqsave(&pendowner->pi_lock, flags); 317 317 if (task->prio >= pendowner->prio) { 318 - spin_unlock_irqrestore(&pendowner->pi_lock, flags); 318 + raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags); 319 319 return 0; 320 320 } 321 321 ··· 325 325 * priority. 
326 326 */ 327 327 if (likely(!rt_mutex_has_waiters(lock))) { 328 - spin_unlock_irqrestore(&pendowner->pi_lock, flags); 328 + raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags); 329 329 return 1; 330 330 } 331 331 ··· 333 333 next = rt_mutex_top_waiter(lock); 334 334 plist_del(&next->pi_list_entry, &pendowner->pi_waiters); 335 335 __rt_mutex_adjust_prio(pendowner); 336 - spin_unlock_irqrestore(&pendowner->pi_lock, flags); 336 + raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags); 337 337 338 338 /* 339 339 * We are going to steal the lock and a waiter was ··· 350 350 * might be task: 351 351 */ 352 352 if (likely(next->task != task)) { 353 - spin_lock_irqsave(&task->pi_lock, flags); 353 + raw_spin_lock_irqsave(&task->pi_lock, flags); 354 354 plist_add(&next->pi_list_entry, &task->pi_waiters); 355 355 __rt_mutex_adjust_prio(task); 356 - spin_unlock_irqrestore(&task->pi_lock, flags); 356 + raw_spin_unlock_irqrestore(&task->pi_lock, flags); 357 357 } 358 358 return 1; 359 359 } ··· 420 420 unsigned long flags; 421 421 int chain_walk = 0, res; 422 422 423 - spin_lock_irqsave(&task->pi_lock, flags); 423 + raw_spin_lock_irqsave(&task->pi_lock, flags); 424 424 __rt_mutex_adjust_prio(task); 425 425 waiter->task = task; 426 426 waiter->lock = lock; ··· 434 434 435 435 task->pi_blocked_on = waiter; 436 436 437 - spin_unlock_irqrestore(&task->pi_lock, flags); 437 + raw_spin_unlock_irqrestore(&task->pi_lock, flags); 438 438 439 439 if (waiter == rt_mutex_top_waiter(lock)) { 440 - spin_lock_irqsave(&owner->pi_lock, flags); 440 + raw_spin_lock_irqsave(&owner->pi_lock, flags); 441 441 plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters); 442 442 plist_add(&waiter->pi_list_entry, &owner->pi_waiters); 443 443 444 444 __rt_mutex_adjust_prio(owner); 445 445 if (owner->pi_blocked_on) 446 446 chain_walk = 1; 447 - spin_unlock_irqrestore(&owner->pi_lock, flags); 447 + raw_spin_unlock_irqrestore(&owner->pi_lock, flags); 448 448 } 449 449 else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) 450 450 chain_walk = 1; ··· 459 459 */ 460 460 get_task_struct(owner); 461 461 462 - spin_unlock(&lock->wait_lock); 462 + raw_spin_unlock(&lock->wait_lock); 463 463 464 464 res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter, 465 465 task); 466 466 467 - spin_lock(&lock->wait_lock); 467 + raw_spin_lock(&lock->wait_lock); 468 468 469 469 return res; 470 470 } ··· 483 483 struct task_struct *pendowner; 484 484 unsigned long flags; 485 485 486 - spin_lock_irqsave(&current->pi_lock, flags); 486 + raw_spin_lock_irqsave(&current->pi_lock, flags); 487 487 488 488 waiter = rt_mutex_top_waiter(lock); 489 489 plist_del(&waiter->list_entry, &lock->wait_list); ··· 500 500 501 501 rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING); 502 502 503 - spin_unlock_irqrestore(&current->pi_lock, flags); 503 + raw_spin_unlock_irqrestore(&current->pi_lock, flags); 504 504 505 505 /* 506 506 * Clear the pi_blocked_on variable and enqueue a possible ··· 509 509 * waiter with higher priority than pending-owner->normal_prio 510 510 * is blocked on the unboosted (pending) owner. 
511 511 */ 512 - spin_lock_irqsave(&pendowner->pi_lock, flags); 512 + raw_spin_lock_irqsave(&pendowner->pi_lock, flags); 513 513 514 514 WARN_ON(!pendowner->pi_blocked_on); 515 515 WARN_ON(pendowner->pi_blocked_on != waiter); ··· 523 523 next = rt_mutex_top_waiter(lock); 524 524 plist_add(&next->pi_list_entry, &pendowner->pi_waiters); 525 525 } 526 - spin_unlock_irqrestore(&pendowner->pi_lock, flags); 526 + raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags); 527 527 528 528 wake_up_process(pendowner); 529 529 } ··· 541 541 unsigned long flags; 542 542 int chain_walk = 0; 543 543 544 - spin_lock_irqsave(&current->pi_lock, flags); 544 + raw_spin_lock_irqsave(&current->pi_lock, flags); 545 545 plist_del(&waiter->list_entry, &lock->wait_list); 546 546 waiter->task = NULL; 547 547 current->pi_blocked_on = NULL; 548 - spin_unlock_irqrestore(&current->pi_lock, flags); 548 + raw_spin_unlock_irqrestore(&current->pi_lock, flags); 549 549 550 550 if (first && owner != current) { 551 551 552 - spin_lock_irqsave(&owner->pi_lock, flags); 552 + raw_spin_lock_irqsave(&owner->pi_lock, flags); 553 553 554 554 plist_del(&waiter->pi_list_entry, &owner->pi_waiters); 555 555 ··· 564 564 if (owner->pi_blocked_on) 565 565 chain_walk = 1; 566 566 567 - spin_unlock_irqrestore(&owner->pi_lock, flags); 567 + raw_spin_unlock_irqrestore(&owner->pi_lock, flags); 568 568 } 569 569 570 570 WARN_ON(!plist_node_empty(&waiter->pi_list_entry)); ··· 575 575 /* gets dropped in rt_mutex_adjust_prio_chain()! */ 576 576 get_task_struct(owner); 577 577 578 - spin_unlock(&lock->wait_lock); 578 + raw_spin_unlock(&lock->wait_lock); 579 579 580 580 rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current); 581 581 582 - spin_lock(&lock->wait_lock); 582 + raw_spin_lock(&lock->wait_lock); 583 583 } 584 584 585 585 /* ··· 592 592 struct rt_mutex_waiter *waiter; 593 593 unsigned long flags; 594 594 595 - spin_lock_irqsave(&task->pi_lock, flags); 595 + raw_spin_lock_irqsave(&task->pi_lock, flags); 596 596 597 597 waiter = task->pi_blocked_on; 598 598 if (!waiter || waiter->list_entry.prio == task->prio) { 599 - spin_unlock_irqrestore(&task->pi_lock, flags); 599 + raw_spin_unlock_irqrestore(&task->pi_lock, flags); 600 600 return; 601 601 } 602 602 603 - spin_unlock_irqrestore(&task->pi_lock, flags); 603 + raw_spin_unlock_irqrestore(&task->pi_lock, flags); 604 604 605 605 /* gets dropped in rt_mutex_adjust_prio_chain()! 
*/ 606 606 get_task_struct(task); ··· 672 672 break; 673 673 } 674 674 675 - spin_unlock(&lock->wait_lock); 675 + raw_spin_unlock(&lock->wait_lock); 676 676 677 677 debug_rt_mutex_print_deadlock(waiter); 678 678 679 679 if (waiter->task) 680 680 schedule_rt_mutex(lock); 681 681 682 - spin_lock(&lock->wait_lock); 682 + raw_spin_lock(&lock->wait_lock); 683 683 set_current_state(state); 684 684 } 685 685 ··· 700 700 debug_rt_mutex_init_waiter(&waiter); 701 701 waiter.task = NULL; 702 702 703 - spin_lock(&lock->wait_lock); 703 + raw_spin_lock(&lock->wait_lock); 704 704 705 705 /* Try to acquire the lock again: */ 706 706 if (try_to_take_rt_mutex(lock)) { 707 - spin_unlock(&lock->wait_lock); 707 + raw_spin_unlock(&lock->wait_lock); 708 708 return 0; 709 709 } 710 710 ··· 731 731 */ 732 732 fixup_rt_mutex_waiters(lock); 733 733 734 - spin_unlock(&lock->wait_lock); 734 + raw_spin_unlock(&lock->wait_lock); 735 735 736 736 /* Remove pending timer: */ 737 737 if (unlikely(timeout)) ··· 758 758 { 759 759 int ret = 0; 760 760 761 - spin_lock(&lock->wait_lock); 761 + raw_spin_lock(&lock->wait_lock); 762 762 763 763 if (likely(rt_mutex_owner(lock) != current)) { 764 764 ··· 770 770 fixup_rt_mutex_waiters(lock); 771 771 } 772 772 773 - spin_unlock(&lock->wait_lock); 773 + raw_spin_unlock(&lock->wait_lock); 774 774 775 775 return ret; 776 776 } ··· 781 781 static void __sched 782 782 rt_mutex_slowunlock(struct rt_mutex *lock) 783 783 { 784 - spin_lock(&lock->wait_lock); 784 + raw_spin_lock(&lock->wait_lock); 785 785 786 786 debug_rt_mutex_unlock(lock); 787 787 ··· 789 789 790 790 if (!rt_mutex_has_waiters(lock)) { 791 791 lock->owner = NULL; 792 - spin_unlock(&lock->wait_lock); 792 + raw_spin_unlock(&lock->wait_lock); 793 793 return; 794 794 } 795 795 796 796 wakeup_next_waiter(lock); 797 797 798 - spin_unlock(&lock->wait_lock); 798 + raw_spin_unlock(&lock->wait_lock); 799 799 800 800 /* Undo pi boosting if necessary: */ 801 801 rt_mutex_adjust_prio(current); ··· 970 970 void __rt_mutex_init(struct rt_mutex *lock, const char *name) 971 971 { 972 972 lock->owner = NULL; 973 - spin_lock_init(&lock->wait_lock); 974 - plist_head_init(&lock->wait_list, &lock->wait_lock); 973 + raw_spin_lock_init(&lock->wait_lock); 974 + plist_head_init_raw(&lock->wait_list, &lock->wait_lock); 975 975 976 976 debug_rt_mutex_init(lock, name); 977 977 } ··· 1032 1032 { 1033 1033 int ret; 1034 1034 1035 - spin_lock(&lock->wait_lock); 1035 + raw_spin_lock(&lock->wait_lock); 1036 1036 1037 1037 mark_rt_mutex_waiters(lock); 1038 1038 ··· 1040 1040 /* We got the lock for task. */ 1041 1041 debug_rt_mutex_lock(lock); 1042 1042 rt_mutex_set_owner(lock, task, 0); 1043 - spin_unlock(&lock->wait_lock); 1043 + raw_spin_unlock(&lock->wait_lock); 1044 1044 rt_mutex_deadlock_account_lock(lock, task); 1045 1045 return 1; 1046 1046 } ··· 1056 1056 */ 1057 1057 ret = 0; 1058 1058 } 1059 - spin_unlock(&lock->wait_lock); 1059 + raw_spin_unlock(&lock->wait_lock); 1060 1060 1061 1061 debug_rt_mutex_print_deadlock(waiter); 1062 1062 ··· 1106 1106 { 1107 1107 int ret; 1108 1108 1109 - spin_lock(&lock->wait_lock); 1109 + raw_spin_lock(&lock->wait_lock); 1110 1110 1111 1111 set_current_state(TASK_INTERRUPTIBLE); 1112 1112 ··· 1124 1124 */ 1125 1125 fixup_rt_mutex_waiters(lock); 1126 1126 1127 - spin_unlock(&lock->wait_lock); 1127 + raw_spin_unlock(&lock->wait_lock); 1128 1128 1129 1129 /* 1130 1130 * Readjust priority, when we did not get the lock. We might have been
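Two things change together in the rtmutex conversion: lock->wait_lock and the task's pi_lock become raw spinlocks, and the priority lists attached to them are now initialised with plist_head_init_raw() so the plist code is told the backing lock is a raw one. A minimal sketch of the converted initialisation; struct my_rt_lock and my_rt_lock_init() are illustrative stand-ins for the rt_mutex code above:

	#include <linux/plist.h>
	#include <linux/spinlock.h>

	struct my_rt_lock {
		raw_spinlock_t          wait_lock;      /* was: spinlock_t */
		struct plist_head       wait_list;
	};

	static void my_rt_lock_init(struct my_rt_lock *lock)
	{
		raw_spin_lock_init(&lock->wait_lock);
		/* was: plist_head_init(&lock->wait_list, &lock->wait_lock); */
		plist_head_init_raw(&lock->wait_list, &lock->wait_lock);
	}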
+113 -110
kernel/sched.c
··· 141 141 142 142 struct rt_bandwidth { 143 143 /* nests inside the rq lock: */ 144 - spinlock_t rt_runtime_lock; 144 + raw_spinlock_t rt_runtime_lock; 145 145 ktime_t rt_period; 146 146 u64 rt_runtime; 147 147 struct hrtimer rt_period_timer; ··· 178 178 rt_b->rt_period = ns_to_ktime(period); 179 179 rt_b->rt_runtime = runtime; 180 180 181 - spin_lock_init(&rt_b->rt_runtime_lock); 181 + raw_spin_lock_init(&rt_b->rt_runtime_lock); 182 182 183 183 hrtimer_init(&rt_b->rt_period_timer, 184 184 CLOCK_MONOTONIC, HRTIMER_MODE_REL); ··· 200 200 if (hrtimer_active(&rt_b->rt_period_timer)) 201 201 return; 202 202 203 - spin_lock(&rt_b->rt_runtime_lock); 203 + raw_spin_lock(&rt_b->rt_runtime_lock); 204 204 for (;;) { 205 205 unsigned long delta; 206 206 ktime_t soft, hard; ··· 217 217 __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta, 218 218 HRTIMER_MODE_ABS_PINNED, 0); 219 219 } 220 - spin_unlock(&rt_b->rt_runtime_lock); 220 + raw_spin_unlock(&rt_b->rt_runtime_lock); 221 221 } 222 222 223 223 #ifdef CONFIG_RT_GROUP_SCHED ··· 470 470 u64 rt_time; 471 471 u64 rt_runtime; 472 472 /* Nests inside the rq lock: */ 473 - spinlock_t rt_runtime_lock; 473 + raw_spinlock_t rt_runtime_lock; 474 474 475 475 #ifdef CONFIG_RT_GROUP_SCHED 476 476 unsigned long rt_nr_boosted; ··· 525 525 */ 526 526 struct rq { 527 527 /* runqueue lock: */ 528 - spinlock_t lock; 528 + raw_spinlock_t lock; 529 529 530 530 /* 531 531 * nr_running and cpu_load should be in the same cacheline because ··· 685 685 */ 686 686 int runqueue_is_locked(int cpu) 687 687 { 688 - return spin_is_locked(&cpu_rq(cpu)->lock); 688 + return raw_spin_is_locked(&cpu_rq(cpu)->lock); 689 689 } 690 690 691 691 /* ··· 893 893 */ 894 894 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); 895 895 896 - spin_unlock_irq(&rq->lock); 896 + raw_spin_unlock_irq(&rq->lock); 897 897 } 898 898 899 899 #else /* __ARCH_WANT_UNLOCKED_CTXSW */ ··· 917 917 next->oncpu = 1; 918 918 #endif 919 919 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW 920 - spin_unlock_irq(&rq->lock); 920 + raw_spin_unlock_irq(&rq->lock); 921 921 #else 922 - spin_unlock(&rq->lock); 922 + raw_spin_unlock(&rq->lock); 923 923 #endif 924 924 } 925 925 ··· 949 949 { 950 950 for (;;) { 951 951 struct rq *rq = task_rq(p); 952 - spin_lock(&rq->lock); 952 + raw_spin_lock(&rq->lock); 953 953 if (likely(rq == task_rq(p))) 954 954 return rq; 955 - spin_unlock(&rq->lock); 955 + raw_spin_unlock(&rq->lock); 956 956 } 957 957 } 958 958 ··· 969 969 for (;;) { 970 970 local_irq_save(*flags); 971 971 rq = task_rq(p); 972 - spin_lock(&rq->lock); 972 + raw_spin_lock(&rq->lock); 973 973 if (likely(rq == task_rq(p))) 974 974 return rq; 975 - spin_unlock_irqrestore(&rq->lock, *flags); 975 + raw_spin_unlock_irqrestore(&rq->lock, *flags); 976 976 } 977 977 } 978 978 ··· 981 981 struct rq *rq = task_rq(p); 982 982 983 983 smp_mb(); /* spin-unlock-wait is not a full memory barrier */ 984 - spin_unlock_wait(&rq->lock); 984 + raw_spin_unlock_wait(&rq->lock); 985 985 } 986 986 987 987 static void __task_rq_unlock(struct rq *rq) 988 988 __releases(rq->lock) 989 989 { 990 - spin_unlock(&rq->lock); 990 + raw_spin_unlock(&rq->lock); 991 991 } 992 992 993 993 static inline void task_rq_unlock(struct rq *rq, unsigned long *flags) 994 994 __releases(rq->lock) 995 995 { 996 - spin_unlock_irqrestore(&rq->lock, *flags); 996 + raw_spin_unlock_irqrestore(&rq->lock, *flags); 997 997 } 998 998 999 999 /* ··· 1006 1006 1007 1007 local_irq_disable(); 1008 1008 rq = this_rq(); 1009 - spin_lock(&rq->lock); 1009 + raw_spin_lock(&rq->lock); 1010 
1010 1011 1011 return rq; 1012 1012 } ··· 1053 1053 1054 1054 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); 1055 1055 1056 - spin_lock(&rq->lock); 1056 + raw_spin_lock(&rq->lock); 1057 1057 update_rq_clock(rq); 1058 1058 rq->curr->sched_class->task_tick(rq, rq->curr, 1); 1059 - spin_unlock(&rq->lock); 1059 + raw_spin_unlock(&rq->lock); 1060 1060 1061 1061 return HRTIMER_NORESTART; 1062 1062 } ··· 1069 1069 { 1070 1070 struct rq *rq = arg; 1071 1071 1072 - spin_lock(&rq->lock); 1072 + raw_spin_lock(&rq->lock); 1073 1073 hrtimer_restart(&rq->hrtick_timer); 1074 1074 rq->hrtick_csd_pending = 0; 1075 - spin_unlock(&rq->lock); 1075 + raw_spin_unlock(&rq->lock); 1076 1076 } 1077 1077 1078 1078 /* ··· 1179 1179 { 1180 1180 int cpu; 1181 1181 1182 - assert_spin_locked(&task_rq(p)->lock); 1182 + assert_raw_spin_locked(&task_rq(p)->lock); 1183 1183 1184 1184 if (test_tsk_need_resched(p)) 1185 1185 return; ··· 1201 1201 struct rq *rq = cpu_rq(cpu); 1202 1202 unsigned long flags; 1203 1203 1204 - if (!spin_trylock_irqsave(&rq->lock, flags)) 1204 + if (!raw_spin_trylock_irqsave(&rq->lock, flags)) 1205 1205 return; 1206 1206 resched_task(cpu_curr(cpu)); 1207 - spin_unlock_irqrestore(&rq->lock, flags); 1207 + raw_spin_unlock_irqrestore(&rq->lock, flags); 1208 1208 } 1209 1209 1210 1210 #ifdef CONFIG_NO_HZ ··· 1273 1273 #else /* !CONFIG_SMP */ 1274 1274 static void resched_task(struct task_struct *p) 1275 1275 { 1276 - assert_spin_locked(&task_rq(p)->lock); 1276 + assert_raw_spin_locked(&task_rq(p)->lock); 1277 1277 set_tsk_need_resched(p); 1278 1278 } 1279 1279 ··· 1600 1600 struct rq *rq = cpu_rq(cpu); 1601 1601 unsigned long flags; 1602 1602 1603 - spin_lock_irqsave(&rq->lock, flags); 1603 + raw_spin_lock_irqsave(&rq->lock, flags); 1604 1604 tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight; 1605 1605 tg->cfs_rq[cpu]->shares = boost ? 
0 : shares; 1606 1606 __set_se_shares(tg->se[cpu], shares); 1607 - spin_unlock_irqrestore(&rq->lock, flags); 1607 + raw_spin_unlock_irqrestore(&rq->lock, flags); 1608 1608 } 1609 1609 } 1610 1610 ··· 1706 1706 if (root_task_group_empty()) 1707 1707 return; 1708 1708 1709 - spin_unlock(&rq->lock); 1709 + raw_spin_unlock(&rq->lock); 1710 1710 update_shares(sd); 1711 - spin_lock(&rq->lock); 1711 + raw_spin_lock(&rq->lock); 1712 1712 } 1713 1713 1714 1714 static void update_h_load(long cpu) ··· 1748 1748 __acquires(busiest->lock) 1749 1749 __acquires(this_rq->lock) 1750 1750 { 1751 - spin_unlock(&this_rq->lock); 1751 + raw_spin_unlock(&this_rq->lock); 1752 1752 double_rq_lock(this_rq, busiest); 1753 1753 1754 1754 return 1; ··· 1769 1769 { 1770 1770 int ret = 0; 1771 1771 1772 - if (unlikely(!spin_trylock(&busiest->lock))) { 1772 + if (unlikely(!raw_spin_trylock(&busiest->lock))) { 1773 1773 if (busiest < this_rq) { 1774 - spin_unlock(&this_rq->lock); 1775 - spin_lock(&busiest->lock); 1776 - spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING); 1774 + raw_spin_unlock(&this_rq->lock); 1775 + raw_spin_lock(&busiest->lock); 1776 + raw_spin_lock_nested(&this_rq->lock, 1777 + SINGLE_DEPTH_NESTING); 1777 1778 ret = 1; 1778 1779 } else 1779 - spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING); 1780 + raw_spin_lock_nested(&busiest->lock, 1781 + SINGLE_DEPTH_NESTING); 1780 1782 } 1781 1783 return ret; 1782 1784 } ··· 1792 1790 { 1793 1791 if (unlikely(!irqs_disabled())) { 1794 1792 /* printk() doesn't work good under rq->lock */ 1795 - spin_unlock(&this_rq->lock); 1793 + raw_spin_unlock(&this_rq->lock); 1796 1794 BUG_ON(1); 1797 1795 } 1798 1796 ··· 1802 1800 static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) 1803 1801 __releases(busiest->lock) 1804 1802 { 1805 - spin_unlock(&busiest->lock); 1803 + raw_spin_unlock(&busiest->lock); 1806 1804 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); 1807 1805 } 1808 1806 #endif ··· 2025 2023 return; 2026 2024 } 2027 2025 2028 - spin_lock_irqsave(&rq->lock, flags); 2026 + raw_spin_lock_irqsave(&rq->lock, flags); 2029 2027 update_rq_clock(rq); 2030 2028 set_task_cpu(p, cpu); 2031 2029 p->cpus_allowed = cpumask_of_cpu(cpu); 2032 2030 p->rt.nr_cpus_allowed = 1; 2033 2031 p->flags |= PF_THREAD_BOUND; 2034 - spin_unlock_irqrestore(&rq->lock, flags); 2032 + raw_spin_unlock_irqrestore(&rq->lock, flags); 2035 2033 } 2036 2034 EXPORT_SYMBOL(kthread_bind); 2037 2035 ··· 2783 2781 if (rq->post_schedule) { 2784 2782 unsigned long flags; 2785 2783 2786 - spin_lock_irqsave(&rq->lock, flags); 2784 + raw_spin_lock_irqsave(&rq->lock, flags); 2787 2785 if (rq->curr->sched_class->post_schedule) 2788 2786 rq->curr->sched_class->post_schedule(rq); 2789 - spin_unlock_irqrestore(&rq->lock, flags); 2787 + raw_spin_unlock_irqrestore(&rq->lock, flags); 2790 2788 2791 2789 rq->post_schedule = 0; 2792 2790 } ··· 3068 3066 { 3069 3067 BUG_ON(!irqs_disabled()); 3070 3068 if (rq1 == rq2) { 3071 - spin_lock(&rq1->lock); 3069 + raw_spin_lock(&rq1->lock); 3072 3070 __acquire(rq2->lock); /* Fake it out ;) */ 3073 3071 } else { 3074 3072 if (rq1 < rq2) { 3075 - spin_lock(&rq1->lock); 3076 - spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); 3073 + raw_spin_lock(&rq1->lock); 3074 + raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING); 3077 3075 } else { 3078 - spin_lock(&rq2->lock); 3079 - spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); 3076 + raw_spin_lock(&rq2->lock); 3077 + raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); 3080 3078 } 3081 
3079 } 3082 3080 update_rq_clock(rq1); ··· 3093 3091 __releases(rq1->lock) 3094 3092 __releases(rq2->lock) 3095 3093 { 3096 - spin_unlock(&rq1->lock); 3094 + raw_spin_unlock(&rq1->lock); 3097 3095 if (rq1 != rq2) 3098 - spin_unlock(&rq2->lock); 3096 + raw_spin_unlock(&rq2->lock); 3099 3097 else 3100 3098 __release(rq2->lock); 3101 3099 } ··· 4188 4186 4189 4187 if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) { 4190 4188 4191 - spin_lock_irqsave(&busiest->lock, flags); 4189 + raw_spin_lock_irqsave(&busiest->lock, flags); 4192 4190 4193 4191 /* don't kick the migration_thread, if the curr 4194 4192 * task on busiest cpu can't be moved to this_cpu 4195 4193 */ 4196 4194 if (!cpumask_test_cpu(this_cpu, 4197 4195 &busiest->curr->cpus_allowed)) { 4198 - spin_unlock_irqrestore(&busiest->lock, flags); 4196 + raw_spin_unlock_irqrestore(&busiest->lock, 4197 + flags); 4199 4198 all_pinned = 1; 4200 4199 goto out_one_pinned; 4201 4200 } ··· 4206 4203 busiest->push_cpu = this_cpu; 4207 4204 active_balance = 1; 4208 4205 } 4209 - spin_unlock_irqrestore(&busiest->lock, flags); 4206 + raw_spin_unlock_irqrestore(&busiest->lock, flags); 4210 4207 if (active_balance) 4211 4208 wake_up_process(busiest->migration_thread); 4212 4209 ··· 4388 4385 /* 4389 4386 * Should not call ttwu while holding a rq->lock 4390 4387 */ 4391 - spin_unlock(&this_rq->lock); 4388 + raw_spin_unlock(&this_rq->lock); 4392 4389 if (active_balance) 4393 4390 wake_up_process(busiest->migration_thread); 4394 - spin_lock(&this_rq->lock); 4391 + raw_spin_lock(&this_rq->lock); 4395 4392 4396 4393 } else 4397 4394 sd->nr_balance_failed = 0; ··· 5260 5257 5261 5258 sched_clock_tick(); 5262 5259 5263 - spin_lock(&rq->lock); 5260 + raw_spin_lock(&rq->lock); 5264 5261 update_rq_clock(rq); 5265 5262 update_cpu_load(rq); 5266 5263 curr->sched_class->task_tick(rq, curr, 0); 5267 - spin_unlock(&rq->lock); 5264 + raw_spin_unlock(&rq->lock); 5268 5265 5269 5266 perf_event_task_tick(curr, cpu); 5270 5267 ··· 5458 5455 if (sched_feat(HRTICK)) 5459 5456 hrtick_clear(rq); 5460 5457 5461 - spin_lock_irq(&rq->lock); 5458 + raw_spin_lock_irq(&rq->lock); 5462 5459 update_rq_clock(rq); 5463 5460 clear_tsk_need_resched(prev); 5464 5461 ··· 5494 5491 cpu = smp_processor_id(); 5495 5492 rq = cpu_rq(cpu); 5496 5493 } else 5497 - spin_unlock_irq(&rq->lock); 5494 + raw_spin_unlock_irq(&rq->lock); 5498 5495 5499 5496 post_schedule(rq); 5500 5497 ··· 6323 6320 * make sure no PI-waiters arrive (or leave) while we are 6324 6321 * changing the priority of the task: 6325 6322 */ 6326 - spin_lock_irqsave(&p->pi_lock, flags); 6323 + raw_spin_lock_irqsave(&p->pi_lock, flags); 6327 6324 /* 6328 6325 * To be able to change p->policy safely, the apropriate 6329 6326 * runqueue lock must be held. 
··· 6333 6330 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { 6334 6331 policy = oldpolicy = -1; 6335 6332 __task_rq_unlock(rq); 6336 - spin_unlock_irqrestore(&p->pi_lock, flags); 6333 + raw_spin_unlock_irqrestore(&p->pi_lock, flags); 6337 6334 goto recheck; 6338 6335 } 6339 6336 update_rq_clock(rq); ··· 6357 6354 check_class_changed(rq, p, prev_class, oldprio, running); 6358 6355 } 6359 6356 __task_rq_unlock(rq); 6360 - spin_unlock_irqrestore(&p->pi_lock, flags); 6357 + raw_spin_unlock_irqrestore(&p->pi_lock, flags); 6361 6358 6362 6359 rt_mutex_adjust_pi(p); 6363 6360 ··· 6687 6684 */ 6688 6685 __release(rq->lock); 6689 6686 spin_release(&rq->lock.dep_map, 1, _THIS_IP_); 6690 - _raw_spin_unlock(&rq->lock); 6687 + do_raw_spin_unlock(&rq->lock); 6691 6688 preempt_enable_no_resched(); 6692 6689 6693 6690 schedule(); ··· 6983 6980 struct rq *rq = cpu_rq(cpu); 6984 6981 unsigned long flags; 6985 6982 6986 - spin_lock_irqsave(&rq->lock, flags); 6983 + raw_spin_lock_irqsave(&rq->lock, flags); 6987 6984 6988 6985 __sched_fork(idle); 6989 6986 idle->se.exec_start = sched_clock(); ··· 6995 6992 #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) 6996 6993 idle->oncpu = 1; 6997 6994 #endif 6998 - spin_unlock_irqrestore(&rq->lock, flags); 6995 + raw_spin_unlock_irqrestore(&rq->lock, flags); 6999 6996 7000 6997 /* Set the preempt count _outside_ the spinlocks! */ 7001 6998 #if defined(CONFIG_PREEMPT) ··· 7212 7209 struct migration_req *req; 7213 7210 struct list_head *head; 7214 7211 7215 - spin_lock_irq(&rq->lock); 7212 + raw_spin_lock_irq(&rq->lock); 7216 7213 7217 7214 if (cpu_is_offline(cpu)) { 7218 - spin_unlock_irq(&rq->lock); 7215 + raw_spin_unlock_irq(&rq->lock); 7219 7216 break; 7220 7217 } 7221 7218 ··· 7227 7224 head = &rq->migration_queue; 7228 7225 7229 7226 if (list_empty(head)) { 7230 - spin_unlock_irq(&rq->lock); 7227 + raw_spin_unlock_irq(&rq->lock); 7231 7228 schedule(); 7232 7229 set_current_state(TASK_INTERRUPTIBLE); 7233 7230 continue; ··· 7236 7233 list_del_init(head->next); 7237 7234 7238 7235 if (req->task != NULL) { 7239 - spin_unlock(&rq->lock); 7236 + raw_spin_unlock(&rq->lock); 7240 7237 __migrate_task(req->task, cpu, req->dest_cpu); 7241 7238 } else if (likely(cpu == (badcpu = smp_processor_id()))) { 7242 7239 req->dest_cpu = RCU_MIGRATION_GOT_QS; 7243 - spin_unlock(&rq->lock); 7240 + raw_spin_unlock(&rq->lock); 7244 7241 } else { 7245 7242 req->dest_cpu = RCU_MIGRATION_MUST_SYNC; 7246 - spin_unlock(&rq->lock); 7243 + raw_spin_unlock(&rq->lock); 7247 7244 WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu); 7248 7245 } 7249 7246 local_irq_enable(); ··· 7366 7363 * Strictly not necessary since rest of the CPUs are stopped by now 7367 7364 * and interrupts disabled on the current cpu. 7368 7365 */ 7369 - spin_lock_irqsave(&rq->lock, flags); 7366 + raw_spin_lock_irqsave(&rq->lock, flags); 7370 7367 7371 7368 __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1); 7372 7369 7373 7370 update_rq_clock(rq); 7374 7371 activate_task(rq, p, 0); 7375 7372 7376 - spin_unlock_irqrestore(&rq->lock, flags); 7373 + raw_spin_unlock_irqrestore(&rq->lock, flags); 7377 7374 } 7378 7375 7379 7376 /* ··· 7409 7406 * that's OK. No task can be added to this CPU, so iteration is 7410 7407 * fine. 
7411 7408 */ 7412 - spin_unlock_irq(&rq->lock); 7409 + raw_spin_unlock_irq(&rq->lock); 7413 7410 move_task_off_dead_cpu(dead_cpu, p); 7414 - spin_lock_irq(&rq->lock); 7411 + raw_spin_lock_irq(&rq->lock); 7415 7412 7416 7413 put_task_struct(p); 7417 7414 } ··· 7677 7674 7678 7675 /* Update our root-domain */ 7679 7676 rq = cpu_rq(cpu); 7680 - spin_lock_irqsave(&rq->lock, flags); 7677 + raw_spin_lock_irqsave(&rq->lock, flags); 7681 7678 if (rq->rd) { 7682 7679 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 7683 7680 7684 7681 set_rq_online(rq); 7685 7682 } 7686 - spin_unlock_irqrestore(&rq->lock, flags); 7683 + raw_spin_unlock_irqrestore(&rq->lock, flags); 7687 7684 break; 7688 7685 7689 7686 #ifdef CONFIG_HOTPLUG_CPU ··· 7708 7705 put_task_struct(rq->migration_thread); 7709 7706 rq->migration_thread = NULL; 7710 7707 /* Idle task back to normal (off runqueue, low prio) */ 7711 - spin_lock_irq(&rq->lock); 7708 + raw_spin_lock_irq(&rq->lock); 7712 7709 update_rq_clock(rq); 7713 7710 deactivate_task(rq, rq->idle, 0); 7714 7711 __setscheduler(rq, rq->idle, SCHED_NORMAL, 0); 7715 7712 rq->idle->sched_class = &idle_sched_class; 7716 7713 migrate_dead_tasks(cpu); 7717 - spin_unlock_irq(&rq->lock); 7714 + raw_spin_unlock_irq(&rq->lock); 7718 7715 cpuset_unlock(); 7719 7716 migrate_nr_uninterruptible(rq); 7720 7717 BUG_ON(rq->nr_running != 0); ··· 7724 7721 * they didn't take sched_hotcpu_mutex. Just wake up 7725 7722 * the requestors. 7726 7723 */ 7727 - spin_lock_irq(&rq->lock); 7724 + raw_spin_lock_irq(&rq->lock); 7728 7725 while (!list_empty(&rq->migration_queue)) { 7729 7726 struct migration_req *req; 7730 7727 7731 7728 req = list_entry(rq->migration_queue.next, 7732 7729 struct migration_req, list); 7733 7730 list_del_init(&req->list); 7734 - spin_unlock_irq(&rq->lock); 7731 + raw_spin_unlock_irq(&rq->lock); 7735 7732 complete(&req->done); 7736 - spin_lock_irq(&rq->lock); 7733 + raw_spin_lock_irq(&rq->lock); 7737 7734 } 7738 - spin_unlock_irq(&rq->lock); 7735 + raw_spin_unlock_irq(&rq->lock); 7739 7736 break; 7740 7737 7741 7738 case CPU_DYING: 7742 7739 case CPU_DYING_FROZEN: 7743 7740 /* Update our root-domain */ 7744 7741 rq = cpu_rq(cpu); 7745 - spin_lock_irqsave(&rq->lock, flags); 7742 + raw_spin_lock_irqsave(&rq->lock, flags); 7746 7743 if (rq->rd) { 7747 7744 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 7748 7745 set_rq_offline(rq); 7749 7746 } 7750 - spin_unlock_irqrestore(&rq->lock, flags); 7747 + raw_spin_unlock_irqrestore(&rq->lock, flags); 7751 7748 break; 7752 7749 #endif 7753 7750 } ··· 7977 7974 struct root_domain *old_rd = NULL; 7978 7975 unsigned long flags; 7979 7976 7980 - spin_lock_irqsave(&rq->lock, flags); 7977 + raw_spin_lock_irqsave(&rq->lock, flags); 7981 7978 7982 7979 if (rq->rd) { 7983 7980 old_rd = rq->rd; ··· 8003 8000 if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) 8004 8001 set_rq_online(rq); 8005 8002 8006 - spin_unlock_irqrestore(&rq->lock, flags); 8003 + raw_spin_unlock_irqrestore(&rq->lock, flags); 8007 8004 8008 8005 if (old_rd) 8009 8006 free_rootdomain(old_rd); ··· 9360 9357 #ifdef CONFIG_SMP 9361 9358 rt_rq->rt_nr_migratory = 0; 9362 9359 rt_rq->overloaded = 0; 9363 - plist_head_init(&rt_rq->pushable_tasks, &rq->lock); 9360 + plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock); 9364 9361 #endif 9365 9362 9366 9363 rt_rq->rt_time = 0; 9367 9364 rt_rq->rt_throttled = 0; 9368 9365 rt_rq->rt_runtime = 0; 9369 - spin_lock_init(&rt_rq->rt_runtime_lock); 9366 + raw_spin_lock_init(&rt_rq->rt_runtime_lock); 9370 9367 9371 9368 #ifdef CONFIG_RT_GROUP_SCHED 
9372 9369 rt_rq->rt_nr_boosted = 0; ··· 9526 9523 struct rq *rq; 9527 9524 9528 9525 rq = cpu_rq(i); 9529 - spin_lock_init(&rq->lock); 9526 + raw_spin_lock_init(&rq->lock); 9530 9527 rq->nr_running = 0; 9531 9528 rq->calc_load_active = 0; 9532 9529 rq->calc_load_update = jiffies + LOAD_FREQ; ··· 9624 9621 #endif 9625 9622 9626 9623 #ifdef CONFIG_RT_MUTEXES 9627 - plist_head_init(&init_task.pi_waiters, &init_task.pi_lock); 9624 + plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock); 9628 9625 #endif 9629 9626 9630 9627 /* ··· 9749 9746 continue; 9750 9747 } 9751 9748 9752 - spin_lock(&p->pi_lock); 9749 + raw_spin_lock(&p->pi_lock); 9753 9750 rq = __task_rq_lock(p); 9754 9751 9755 9752 normalize_task(rq, p); 9756 9753 9757 9754 __task_rq_unlock(rq); 9758 - spin_unlock(&p->pi_lock); 9755 + raw_spin_unlock(&p->pi_lock); 9759 9756 } while_each_thread(g, p); 9760 9757 9761 9758 read_unlock_irqrestore(&tasklist_lock, flags); ··· 10118 10115 struct rq *rq = cfs_rq->rq; 10119 10116 unsigned long flags; 10120 10117 10121 - spin_lock_irqsave(&rq->lock, flags); 10118 + raw_spin_lock_irqsave(&rq->lock, flags); 10122 10119 __set_se_shares(se, shares); 10123 - spin_unlock_irqrestore(&rq->lock, flags); 10120 + raw_spin_unlock_irqrestore(&rq->lock, flags); 10124 10121 } 10125 10122 10126 10123 static DEFINE_MUTEX(shares_mutex); ··· 10305 10302 if (err) 10306 10303 goto unlock; 10307 10304 10308 - spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); 10305 + raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); 10309 10306 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); 10310 10307 tg->rt_bandwidth.rt_runtime = rt_runtime; 10311 10308 10312 10309 for_each_possible_cpu(i) { 10313 10310 struct rt_rq *rt_rq = tg->rt_rq[i]; 10314 10311 10315 - spin_lock(&rt_rq->rt_runtime_lock); 10312 + raw_spin_lock(&rt_rq->rt_runtime_lock); 10316 10313 rt_rq->rt_runtime = rt_runtime; 10317 - spin_unlock(&rt_rq->rt_runtime_lock); 10314 + raw_spin_unlock(&rt_rq->rt_runtime_lock); 10318 10315 } 10319 - spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); 10316 + raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock); 10320 10317 unlock: 10321 10318 read_unlock(&tasklist_lock); 10322 10319 mutex_unlock(&rt_constraints_mutex); ··· 10421 10418 if (sysctl_sched_rt_runtime == 0) 10422 10419 return -EBUSY; 10423 10420 10424 - spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); 10421 + raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); 10425 10422 for_each_possible_cpu(i) { 10426 10423 struct rt_rq *rt_rq = &cpu_rq(i)->rt; 10427 10424 10428 - spin_lock(&rt_rq->rt_runtime_lock); 10425 + raw_spin_lock(&rt_rq->rt_runtime_lock); 10429 10426 rt_rq->rt_runtime = global_rt_runtime(); 10430 - spin_unlock(&rt_rq->rt_runtime_lock); 10427 + raw_spin_unlock(&rt_rq->rt_runtime_lock); 10431 10428 } 10432 - spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); 10429 + raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); 10433 10430 10434 10431 return 0; 10435 10432 } ··· 10720 10717 /* 10721 10718 * Take rq->lock to make 64-bit read safe on 32-bit platforms. 10722 10719 */ 10723 - spin_lock_irq(&cpu_rq(cpu)->lock); 10720 + raw_spin_lock_irq(&cpu_rq(cpu)->lock); 10724 10721 data = *cpuusage; 10725 - spin_unlock_irq(&cpu_rq(cpu)->lock); 10722 + raw_spin_unlock_irq(&cpu_rq(cpu)->lock); 10726 10723 #else 10727 10724 data = *cpuusage; 10728 10725 #endif ··· 10738 10735 /* 10739 10736 * Take rq->lock to make 64-bit write safe on 32-bit platforms. 
10740 10737 */ 10741 - spin_lock_irq(&cpu_rq(cpu)->lock); 10738 + raw_spin_lock_irq(&cpu_rq(cpu)->lock); 10742 10739 *cpuusage = val; 10743 - spin_unlock_irq(&cpu_rq(cpu)->lock); 10740 + raw_spin_unlock_irq(&cpu_rq(cpu)->lock); 10744 10741 #else 10745 10742 *cpuusage = val; 10746 10743 #endif ··· 10974 10971 init_completion(&req->done); 10975 10972 req->task = NULL; 10976 10973 req->dest_cpu = RCU_MIGRATION_NEED_QS; 10977 - spin_lock_irqsave(&rq->lock, flags); 10974 + raw_spin_lock_irqsave(&rq->lock, flags); 10978 10975 list_add(&req->list, &rq->migration_queue); 10979 - spin_unlock_irqrestore(&rq->lock, flags); 10976 + raw_spin_unlock_irqrestore(&rq->lock, flags); 10980 10977 wake_up_process(rq->migration_thread); 10981 10978 } 10982 10979 for_each_online_cpu(cpu) { ··· 10984 10981 req = &per_cpu(rcu_migration_req, cpu); 10985 10982 rq = cpu_rq(cpu); 10986 10983 wait_for_completion(&req->done); 10987 - spin_lock_irqsave(&rq->lock, flags); 10984 + raw_spin_lock_irqsave(&rq->lock, flags); 10988 10985 if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC)) 10989 10986 need_full_sync = 1; 10990 10987 req->dest_cpu = RCU_MIGRATION_IDLE; 10991 - spin_unlock_irqrestore(&rq->lock, flags); 10988 + raw_spin_unlock_irqrestore(&rq->lock, flags); 10992 10989 } 10993 10990 rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE; 10994 10991 synchronize_sched_expedited_count++;
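A pattern worth calling out in the scheduler hunks is how two runqueue locks are taken together without deadlocking: the double-lock helpers above order the raw locks by address and take the second one with raw_spin_lock_nested(..., SINGLE_DEPTH_NESTING) so lockdep accepts the nesting. Reduced to just the two locks; my_double_lock() is an illustrative name, not a kernel interface:

	#include <linux/bug.h>
	#include <linux/irqflags.h>
	#include <linux/lockdep.h>
	#include <linux/spinlock.h>

	static void my_double_lock(raw_spinlock_t *a, raw_spinlock_t *b)
	{
		BUG_ON(!irqs_disabled());
		if (a == b) {
			raw_spin_lock(a);               /* only one lock to take */
		} else if (a < b) {                     /* lower address locked first */
			raw_spin_lock(a);
			raw_spin_lock_nested(b, SINGLE_DEPTH_NESTING);
		} else {
			raw_spin_lock(b);
			raw_spin_lock_nested(a, SINGLE_DEPTH_NESTING);
		}
	}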
+5 -5
kernel/sched_cpupri.c
··· 135 135 if (likely(newpri != CPUPRI_INVALID)) { 136 136 struct cpupri_vec *vec = &cp->pri_to_cpu[newpri]; 137 137 138 - spin_lock_irqsave(&vec->lock, flags); 138 + raw_spin_lock_irqsave(&vec->lock, flags); 139 139 140 140 cpumask_set_cpu(cpu, vec->mask); 141 141 vec->count++; 142 142 if (vec->count == 1) 143 143 set_bit(newpri, cp->pri_active); 144 144 145 - spin_unlock_irqrestore(&vec->lock, flags); 145 + raw_spin_unlock_irqrestore(&vec->lock, flags); 146 146 } 147 147 if (likely(oldpri != CPUPRI_INVALID)) { 148 148 struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri]; 149 149 150 - spin_lock_irqsave(&vec->lock, flags); 150 + raw_spin_lock_irqsave(&vec->lock, flags); 151 151 152 152 vec->count--; 153 153 if (!vec->count) 154 154 clear_bit(oldpri, cp->pri_active); 155 155 cpumask_clear_cpu(cpu, vec->mask); 156 156 157 - spin_unlock_irqrestore(&vec->lock, flags); 157 + raw_spin_unlock_irqrestore(&vec->lock, flags); 158 158 } 159 159 160 160 *currpri = newpri; ··· 180 180 for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) { 181 181 struct cpupri_vec *vec = &cp->pri_to_cpu[i]; 182 182 183 - spin_lock_init(&vec->lock); 183 + raw_spin_lock_init(&vec->lock); 184 184 vec->count = 0; 185 185 if (!zalloc_cpumask_var(&vec->mask, gfp)) 186 186 goto cleanup;
+1 -1
kernel/sched_cpupri.h
··· 12 12 /* values 2-101 are RT priorities 0-99 */ 13 13 14 14 struct cpupri_vec { 15 - spinlock_t lock; 15 + raw_spinlock_t lock; 16 16 int count; 17 17 cpumask_var_t mask; 18 18 };
+2 -2
kernel/sched_debug.c
··· 184 184 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock", 185 185 SPLIT_NS(cfs_rq->exec_clock)); 186 186 187 - spin_lock_irqsave(&rq->lock, flags); 187 + raw_spin_lock_irqsave(&rq->lock, flags); 188 188 if (cfs_rq->rb_leftmost) 189 189 MIN_vruntime = (__pick_next_entity(cfs_rq))->vruntime; 190 190 last = __pick_last_entity(cfs_rq); ··· 192 192 max_vruntime = last->vruntime; 193 193 min_vruntime = cfs_rq->min_vruntime; 194 194 rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime; 195 - spin_unlock_irqrestore(&rq->lock, flags); 195 + raw_spin_unlock_irqrestore(&rq->lock, flags); 196 196 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime", 197 197 SPLIT_NS(MIN_vruntime)); 198 198 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime",
+2 -2
kernel/sched_fair.c
··· 1955 1955 struct rq *rq = this_rq(); 1956 1956 unsigned long flags; 1957 1957 1958 - spin_lock_irqsave(&rq->lock, flags); 1958 + raw_spin_lock_irqsave(&rq->lock, flags); 1959 1959 1960 1960 if (unlikely(task_cpu(p) != this_cpu)) 1961 1961 __set_task_cpu(p, this_cpu); ··· 1975 1975 resched_task(rq->curr); 1976 1976 } 1977 1977 1978 - spin_unlock_irqrestore(&rq->lock, flags); 1978 + raw_spin_unlock_irqrestore(&rq->lock, flags); 1979 1979 } 1980 1980 1981 1981 /*
+2 -2
kernel/sched_idletask.c
··· 34 34 static void 35 35 dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep) 36 36 { 37 - spin_unlock_irq(&rq->lock); 37 + raw_spin_unlock_irq(&rq->lock); 38 38 printk(KERN_ERR "bad: scheduling from the idle thread!\n"); 39 39 dump_stack(); 40 - spin_lock_irq(&rq->lock); 40 + raw_spin_lock_irq(&rq->lock); 41 41 } 42 42 43 43 static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
+30 -30
kernel/sched_rt.c
··· 327 327 328 328 weight = cpumask_weight(rd->span); 329 329 330 - spin_lock(&rt_b->rt_runtime_lock); 330 + raw_spin_lock(&rt_b->rt_runtime_lock); 331 331 rt_period = ktime_to_ns(rt_b->rt_period); 332 332 for_each_cpu(i, rd->span) { 333 333 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); ··· 336 336 if (iter == rt_rq) 337 337 continue; 338 338 339 - spin_lock(&iter->rt_runtime_lock); 339 + raw_spin_lock(&iter->rt_runtime_lock); 340 340 /* 341 341 * Either all rqs have inf runtime and there's nothing to steal 342 342 * or __disable_runtime() below sets a specific rq to inf to ··· 358 358 rt_rq->rt_runtime += diff; 359 359 more = 1; 360 360 if (rt_rq->rt_runtime == rt_period) { 361 - spin_unlock(&iter->rt_runtime_lock); 361 + raw_spin_unlock(&iter->rt_runtime_lock); 362 362 break; 363 363 } 364 364 } 365 365 next: 366 - spin_unlock(&iter->rt_runtime_lock); 366 + raw_spin_unlock(&iter->rt_runtime_lock); 367 367 } 368 - spin_unlock(&rt_b->rt_runtime_lock); 368 + raw_spin_unlock(&rt_b->rt_runtime_lock); 369 369 370 370 return more; 371 371 } ··· 386 386 s64 want; 387 387 int i; 388 388 389 - spin_lock(&rt_b->rt_runtime_lock); 390 - spin_lock(&rt_rq->rt_runtime_lock); 389 + raw_spin_lock(&rt_b->rt_runtime_lock); 390 + raw_spin_lock(&rt_rq->rt_runtime_lock); 391 391 /* 392 392 * Either we're all inf and nobody needs to borrow, or we're 393 393 * already disabled and thus have nothing to do, or we have ··· 396 396 if (rt_rq->rt_runtime == RUNTIME_INF || 397 397 rt_rq->rt_runtime == rt_b->rt_runtime) 398 398 goto balanced; 399 - spin_unlock(&rt_rq->rt_runtime_lock); 399 + raw_spin_unlock(&rt_rq->rt_runtime_lock); 400 400 401 401 /* 402 402 * Calculate the difference between what we started out with ··· 418 418 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF) 419 419 continue; 420 420 421 - spin_lock(&iter->rt_runtime_lock); 421 + raw_spin_lock(&iter->rt_runtime_lock); 422 422 if (want > 0) { 423 423 diff = min_t(s64, iter->rt_runtime, want); 424 424 iter->rt_runtime -= diff; ··· 427 427 iter->rt_runtime -= want; 428 428 want -= want; 429 429 } 430 - spin_unlock(&iter->rt_runtime_lock); 430 + raw_spin_unlock(&iter->rt_runtime_lock); 431 431 432 432 if (!want) 433 433 break; 434 434 } 435 435 436 - spin_lock(&rt_rq->rt_runtime_lock); 436 + raw_spin_lock(&rt_rq->rt_runtime_lock); 437 437 /* 438 438 * We cannot be left wanting - that would mean some runtime 439 439 * leaked out of the system. ··· 445 445 * runtime - in which case borrowing doesn't make sense. 
446 446 */ 447 447 rt_rq->rt_runtime = RUNTIME_INF; 448 - spin_unlock(&rt_rq->rt_runtime_lock); 449 - spin_unlock(&rt_b->rt_runtime_lock); 448 + raw_spin_unlock(&rt_rq->rt_runtime_lock); 449 + raw_spin_unlock(&rt_b->rt_runtime_lock); 450 450 } 451 451 } 452 452 ··· 454 454 { 455 455 unsigned long flags; 456 456 457 - spin_lock_irqsave(&rq->lock, flags); 457 + raw_spin_lock_irqsave(&rq->lock, flags); 458 458 __disable_runtime(rq); 459 - spin_unlock_irqrestore(&rq->lock, flags); 459 + raw_spin_unlock_irqrestore(&rq->lock, flags); 460 460 } 461 461 462 462 static void __enable_runtime(struct rq *rq) ··· 472 472 for_each_leaf_rt_rq(rt_rq, rq) { 473 473 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); 474 474 475 - spin_lock(&rt_b->rt_runtime_lock); 476 - spin_lock(&rt_rq->rt_runtime_lock); 475 + raw_spin_lock(&rt_b->rt_runtime_lock); 476 + raw_spin_lock(&rt_rq->rt_runtime_lock); 477 477 rt_rq->rt_runtime = rt_b->rt_runtime; 478 478 rt_rq->rt_time = 0; 479 479 rt_rq->rt_throttled = 0; 480 - spin_unlock(&rt_rq->rt_runtime_lock); 481 - spin_unlock(&rt_b->rt_runtime_lock); 480 + raw_spin_unlock(&rt_rq->rt_runtime_lock); 481 + raw_spin_unlock(&rt_b->rt_runtime_lock); 482 482 } 483 483 } 484 484 ··· 486 486 { 487 487 unsigned long flags; 488 488 489 - spin_lock_irqsave(&rq->lock, flags); 489 + raw_spin_lock_irqsave(&rq->lock, flags); 490 490 __enable_runtime(rq); 491 - spin_unlock_irqrestore(&rq->lock, flags); 491 + raw_spin_unlock_irqrestore(&rq->lock, flags); 492 492 } 493 493 494 494 static int balance_runtime(struct rt_rq *rt_rq) ··· 496 496 int more = 0; 497 497 498 498 if (rt_rq->rt_time > rt_rq->rt_runtime) { 499 - spin_unlock(&rt_rq->rt_runtime_lock); 499 + raw_spin_unlock(&rt_rq->rt_runtime_lock); 500 500 more = do_balance_runtime(rt_rq); 501 - spin_lock(&rt_rq->rt_runtime_lock); 501 + raw_spin_lock(&rt_rq->rt_runtime_lock); 502 502 } 503 503 504 504 return more; ··· 524 524 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i); 525 525 struct rq *rq = rq_of_rt_rq(rt_rq); 526 526 527 - spin_lock(&rq->lock); 527 + raw_spin_lock(&rq->lock); 528 528 if (rt_rq->rt_time) { 529 529 u64 runtime; 530 530 531 - spin_lock(&rt_rq->rt_runtime_lock); 531 + raw_spin_lock(&rt_rq->rt_runtime_lock); 532 532 if (rt_rq->rt_throttled) 533 533 balance_runtime(rt_rq); 534 534 runtime = rt_rq->rt_runtime; ··· 539 539 } 540 540 if (rt_rq->rt_time || rt_rq->rt_nr_running) 541 541 idle = 0; 542 - spin_unlock(&rt_rq->rt_runtime_lock); 542 + raw_spin_unlock(&rt_rq->rt_runtime_lock); 543 543 } else if (rt_rq->rt_nr_running) 544 544 idle = 0; 545 545 546 546 if (enqueue) 547 547 sched_rt_rq_enqueue(rt_rq); 548 - spin_unlock(&rq->lock); 548 + raw_spin_unlock(&rq->lock); 549 549 } 550 550 551 551 return idle; ··· 624 624 rt_rq = rt_rq_of_se(rt_se); 625 625 626 626 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) { 627 - spin_lock(&rt_rq->rt_runtime_lock); 627 + raw_spin_lock(&rt_rq->rt_runtime_lock); 628 628 rt_rq->rt_time += delta_exec; 629 629 if (sched_rt_runtime_exceeded(rt_rq)) 630 630 resched_task(curr); 631 - spin_unlock(&rt_rq->rt_runtime_lock); 631 + raw_spin_unlock(&rt_rq->rt_runtime_lock); 632 632 } 633 633 } 634 634 } ··· 1246 1246 task_running(rq, task) || 1247 1247 !task->se.on_rq)) { 1248 1248 1249 - spin_unlock(&lowest_rq->lock); 1249 + raw_spin_unlock(&lowest_rq->lock); 1250 1250 lowest_rq = NULL; 1251 1251 break; 1252 1252 }
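
One detail worth noticing in the sched_rt.c hunks is the lock nesting: the per-domain rt_b->rt_runtime_lock is taken before any per-runqueue rt_rq->rt_runtime_lock, both when borrowing runtime in do_balance_runtime() and when enabling or disabling it. A compressed sketch of that ordering follows; the *_like structures and the reset_runtime() helper are illustrative names and the lock macros are stubs, not kernel code.

#include <stdio.h>

typedef struct { int locked; } raw_spinlock_t;      /* stub */
#define raw_spin_lock(l)    ((l)->locked = 1)
#define raw_spin_unlock(l)  ((l)->locked = 0)

struct rt_bandwidth_like { raw_spinlock_t rt_runtime_lock; long long rt_runtime; };
struct rt_rq_like        { raw_spinlock_t rt_runtime_lock; long long rt_runtime; };

/* Mirrors the shape of __enable_runtime(): outer bandwidth lock, then the per-rq lock. */
static void reset_runtime(struct rt_bandwidth_like *rt_b, struct rt_rq_like *rt_rq)
{
	raw_spin_lock(&rt_b->rt_runtime_lock);
	raw_spin_lock(&rt_rq->rt_runtime_lock);
	rt_rq->rt_runtime = rt_b->rt_runtime;
	raw_spin_unlock(&rt_rq->rt_runtime_lock);
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}

int main(void)
{
	struct rt_bandwidth_like rt_b = { .rt_runtime = 950000000LL };
	struct rt_rq_like rt_rq = { .rt_runtime = 0 };

	reset_runtime(&rt_b, &rt_rq);
	printf("rt_runtime = %lld ns\n", rt_rq.rt_runtime);
	return 0;
}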
+16 -16
kernel/smp.c
··· 16 16 17 17 static struct { 18 18 struct list_head queue; 19 - spinlock_t lock; 19 + raw_spinlock_t lock; 20 20 } call_function __cacheline_aligned_in_smp = 21 21 { 22 22 .queue = LIST_HEAD_INIT(call_function.queue), 23 - .lock = __SPIN_LOCK_UNLOCKED(call_function.lock), 23 + .lock = __RAW_SPIN_LOCK_UNLOCKED(call_function.lock), 24 24 }; 25 25 26 26 enum { ··· 35 35 36 36 struct call_single_queue { 37 37 struct list_head list; 38 - spinlock_t lock; 38 + raw_spinlock_t lock; 39 39 }; 40 40 41 41 static DEFINE_PER_CPU(struct call_function_data, cfd_data); ··· 80 80 for_each_possible_cpu(i) { 81 81 struct call_single_queue *q = &per_cpu(call_single_queue, i); 82 82 83 - spin_lock_init(&q->lock); 83 + raw_spin_lock_init(&q->lock); 84 84 INIT_LIST_HEAD(&q->list); 85 85 } 86 86 ··· 141 141 unsigned long flags; 142 142 int ipi; 143 143 144 - spin_lock_irqsave(&dst->lock, flags); 144 + raw_spin_lock_irqsave(&dst->lock, flags); 145 145 ipi = list_empty(&dst->list); 146 146 list_add_tail(&data->list, &dst->list); 147 - spin_unlock_irqrestore(&dst->lock, flags); 147 + raw_spin_unlock_irqrestore(&dst->lock, flags); 148 148 149 149 /* 150 150 * The list addition should be visible before sending the IPI ··· 201 201 refs = atomic_dec_return(&data->refs); 202 202 WARN_ON(refs < 0); 203 203 if (!refs) { 204 - spin_lock(&call_function.lock); 204 + raw_spin_lock(&call_function.lock); 205 205 list_del_rcu(&data->csd.list); 206 - spin_unlock(&call_function.lock); 206 + raw_spin_unlock(&call_function.lock); 207 207 } 208 208 209 209 if (refs) ··· 229 229 */ 230 230 WARN_ON_ONCE(!cpu_online(smp_processor_id())); 231 231 232 - spin_lock(&q->lock); 232 + raw_spin_lock(&q->lock); 233 233 list_replace_init(&q->list, &list); 234 - spin_unlock(&q->lock); 234 + raw_spin_unlock(&q->lock); 235 235 236 236 while (!list_empty(&list)) { 237 237 struct call_single_data *data; ··· 448 448 cpumask_clear_cpu(this_cpu, data->cpumask); 449 449 atomic_set(&data->refs, cpumask_weight(data->cpumask)); 450 450 451 - spin_lock_irqsave(&call_function.lock, flags); 451 + raw_spin_lock_irqsave(&call_function.lock, flags); 452 452 /* 453 453 * Place entry at the _HEAD_ of the list, so that any cpu still 454 454 * observing the entry in generic_smp_call_function_interrupt() 455 455 * will not miss any other list entries: 456 456 */ 457 457 list_add_rcu(&data->csd.list, &call_function.queue); 458 - spin_unlock_irqrestore(&call_function.lock, flags); 458 + raw_spin_unlock_irqrestore(&call_function.lock, flags); 459 459 460 460 /* 461 461 * Make the list addition visible before sending the ipi. ··· 500 500 501 501 void ipi_call_lock(void) 502 502 { 503 - spin_lock(&call_function.lock); 503 + raw_spin_lock(&call_function.lock); 504 504 } 505 505 506 506 void ipi_call_unlock(void) 507 507 { 508 - spin_unlock(&call_function.lock); 508 + raw_spin_unlock(&call_function.lock); 509 509 } 510 510 511 511 void ipi_call_lock_irq(void) 512 512 { 513 - spin_lock_irq(&call_function.lock); 513 + raw_spin_lock_irq(&call_function.lock); 514 514 } 515 515 516 516 void ipi_call_unlock_irq(void) 517 517 { 518 - spin_unlock_irq(&call_function.lock); 518 + raw_spin_unlock_irq(&call_function.lock); 519 519 }
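
kernel/smp.c shows both initialization styles for the converted locks: a static designated initializer via __RAW_SPIN_LOCK_UNLOCKED() for the global call_function structure, and raw_spin_lock_init() at runtime for each per-CPU call_single_queue. Here is a small stand-alone sketch of the two forms; the lock type and macros are stubs and the *_like names are illustrative, not kernel symbols.

#include <stdio.h>
#include <string.h>

typedef struct { int locked; } raw_spinlock_t;                 /* stub */
#define __RAW_SPIN_LOCK_UNLOCKED(name) { .locked = 0 }         /* stub */
#define raw_spin_lock_init(l)          memset((l), 0, sizeof(*(l)))

/* Static initialization, as in the call_function structure above. */
static struct {
	raw_spinlock_t lock;
	int queued;
} call_function_like = {
	.lock   = __RAW_SPIN_LOCK_UNLOCKED(call_function_like.lock),
	.queued = 0,
};

/* Runtime initialization, as in the per-CPU call_single_queue setup. */
struct call_single_queue_like {
	raw_spinlock_t lock;
	int pending;
};

int main(void)
{
	struct call_single_queue_like q[4];
	int i;

	for (i = 0; i < 4; i++) {
		raw_spin_lock_init(&q[i].lock);
		q[i].pending = 0;
	}
	printf("static lock: %d, per-cpu lock 0: %d\n",
	       call_function_like.lock.locked, q[0].lock.locked);
	return 0;
}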
+249 -247
kernel/spinlock.c
··· 32 32 * include/linux/spinlock_api_smp.h 33 33 */ 34 34 #else 35 + #define raw_read_can_lock(l) read_can_lock(l) 36 + #define raw_write_can_lock(l) write_can_lock(l) 35 37 /* 36 38 * We build the __lock_function inlines here. They are too large for 37 39 * inlining all over the place, but here is only one user per function ··· 44 42 * towards that other CPU that it should break the lock ASAP. 45 43 */ 46 44 #define BUILD_LOCK_OPS(op, locktype) \ 47 - void __lockfunc __##op##_lock(locktype##_t *lock) \ 45 + void __lockfunc __raw_##op##_lock(locktype##_t *lock) \ 48 46 { \ 49 47 for (;;) { \ 50 48 preempt_disable(); \ 51 - if (likely(_raw_##op##_trylock(lock))) \ 49 + if (likely(do_raw_##op##_trylock(lock))) \ 52 50 break; \ 53 51 preempt_enable(); \ 54 52 \ 55 53 if (!(lock)->break_lock) \ 56 54 (lock)->break_lock = 1; \ 57 - while (!op##_can_lock(lock) && (lock)->break_lock) \ 58 - _raw_##op##_relax(&lock->raw_lock); \ 55 + while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\ 56 + arch_##op##_relax(&lock->raw_lock); \ 59 57 } \ 60 58 (lock)->break_lock = 0; \ 61 59 } \ 62 60 \ 63 - unsigned long __lockfunc __##op##_lock_irqsave(locktype##_t *lock) \ 61 + unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \ 64 62 { \ 65 63 unsigned long flags; \ 66 64 \ 67 65 for (;;) { \ 68 66 preempt_disable(); \ 69 67 local_irq_save(flags); \ 70 - if (likely(_raw_##op##_trylock(lock))) \ 68 + if (likely(do_raw_##op##_trylock(lock))) \ 71 69 break; \ 72 70 local_irq_restore(flags); \ 73 71 preempt_enable(); \ 74 72 \ 75 73 if (!(lock)->break_lock) \ 76 74 (lock)->break_lock = 1; \ 77 - while (!op##_can_lock(lock) && (lock)->break_lock) \ 78 - _raw_##op##_relax(&lock->raw_lock); \ 75 + while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\ 76 + arch_##op##_relax(&lock->raw_lock); \ 79 77 } \ 80 78 (lock)->break_lock = 0; \ 81 79 return flags; \ 82 80 } \ 83 81 \ 84 - void __lockfunc __##op##_lock_irq(locktype##_t *lock) \ 82 + void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock) \ 85 83 { \ 86 - _##op##_lock_irqsave(lock); \ 84 + _raw_##op##_lock_irqsave(lock); \ 87 85 } \ 88 86 \ 89 - void __lockfunc __##op##_lock_bh(locktype##_t *lock) \ 87 + void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \ 90 88 { \ 91 89 unsigned long flags; \ 92 90 \ ··· 95 93 /* irq-disabling. 
We use the generic preemption-aware */ \ 96 94 /* function: */ \ 97 95 /**/ \ 98 - flags = _##op##_lock_irqsave(lock); \ 96 + flags = _raw_##op##_lock_irqsave(lock); \ 99 97 local_bh_disable(); \ 100 98 local_irq_restore(flags); \ 101 99 } \ ··· 109 107 * __[spin|read|write]_lock_irqsave() 110 108 * __[spin|read|write]_lock_bh() 111 109 */ 112 - BUILD_LOCK_OPS(spin, spinlock); 110 + BUILD_LOCK_OPS(spin, raw_spinlock); 113 111 BUILD_LOCK_OPS(read, rwlock); 114 112 BUILD_LOCK_OPS(write, rwlock); 115 113 116 114 #endif 117 115 116 + #ifndef CONFIG_INLINE_SPIN_TRYLOCK 117 + int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock) 118 + { 119 + return __raw_spin_trylock(lock); 120 + } 121 + EXPORT_SYMBOL(_raw_spin_trylock); 122 + #endif 123 + 124 + #ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH 125 + int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock) 126 + { 127 + return __raw_spin_trylock_bh(lock); 128 + } 129 + EXPORT_SYMBOL(_raw_spin_trylock_bh); 130 + #endif 131 + 132 + #ifndef CONFIG_INLINE_SPIN_LOCK 133 + void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) 134 + { 135 + __raw_spin_lock(lock); 136 + } 137 + EXPORT_SYMBOL(_raw_spin_lock); 138 + #endif 139 + 140 + #ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE 141 + unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock) 142 + { 143 + return __raw_spin_lock_irqsave(lock); 144 + } 145 + EXPORT_SYMBOL(_raw_spin_lock_irqsave); 146 + #endif 147 + 148 + #ifndef CONFIG_INLINE_SPIN_LOCK_IRQ 149 + void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock) 150 + { 151 + __raw_spin_lock_irq(lock); 152 + } 153 + EXPORT_SYMBOL(_raw_spin_lock_irq); 154 + #endif 155 + 156 + #ifndef CONFIG_INLINE_SPIN_LOCK_BH 157 + void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) 158 + { 159 + __raw_spin_lock_bh(lock); 160 + } 161 + EXPORT_SYMBOL(_raw_spin_lock_bh); 162 + #endif 163 + 164 + #ifndef CONFIG_INLINE_SPIN_UNLOCK 165 + void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) 166 + { 167 + __raw_spin_unlock(lock); 168 + } 169 + EXPORT_SYMBOL(_raw_spin_unlock); 170 + #endif 171 + 172 + #ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE 173 + void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) 174 + { 175 + __raw_spin_unlock_irqrestore(lock, flags); 176 + } 177 + EXPORT_SYMBOL(_raw_spin_unlock_irqrestore); 178 + #endif 179 + 180 + #ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ 181 + void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) 182 + { 183 + __raw_spin_unlock_irq(lock); 184 + } 185 + EXPORT_SYMBOL(_raw_spin_unlock_irq); 186 + #endif 187 + 188 + #ifndef CONFIG_INLINE_SPIN_UNLOCK_BH 189 + void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) 190 + { 191 + __raw_spin_unlock_bh(lock); 192 + } 193 + EXPORT_SYMBOL(_raw_spin_unlock_bh); 194 + #endif 195 + 196 + #ifndef CONFIG_INLINE_READ_TRYLOCK 197 + int __lockfunc _raw_read_trylock(rwlock_t *lock) 198 + { 199 + return __raw_read_trylock(lock); 200 + } 201 + EXPORT_SYMBOL(_raw_read_trylock); 202 + #endif 203 + 204 + #ifndef CONFIG_INLINE_READ_LOCK 205 + void __lockfunc _raw_read_lock(rwlock_t *lock) 206 + { 207 + __raw_read_lock(lock); 208 + } 209 + EXPORT_SYMBOL(_raw_read_lock); 210 + #endif 211 + 212 + #ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE 213 + unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock) 214 + { 215 + return __raw_read_lock_irqsave(lock); 216 + } 217 + EXPORT_SYMBOL(_raw_read_lock_irqsave); 218 + #endif 219 + 220 + #ifndef CONFIG_INLINE_READ_LOCK_IRQ 221 + void __lockfunc _raw_read_lock_irq(rwlock_t *lock) 222 + { 223 + __raw_read_lock_irq(lock); 224 + 
} 225 + EXPORT_SYMBOL(_raw_read_lock_irq); 226 + #endif 227 + 228 + #ifndef CONFIG_INLINE_READ_LOCK_BH 229 + void __lockfunc _raw_read_lock_bh(rwlock_t *lock) 230 + { 231 + __raw_read_lock_bh(lock); 232 + } 233 + EXPORT_SYMBOL(_raw_read_lock_bh); 234 + #endif 235 + 236 + #ifndef CONFIG_INLINE_READ_UNLOCK 237 + void __lockfunc _raw_read_unlock(rwlock_t *lock) 238 + { 239 + __raw_read_unlock(lock); 240 + } 241 + EXPORT_SYMBOL(_raw_read_unlock); 242 + #endif 243 + 244 + #ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE 245 + void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) 246 + { 247 + __raw_read_unlock_irqrestore(lock, flags); 248 + } 249 + EXPORT_SYMBOL(_raw_read_unlock_irqrestore); 250 + #endif 251 + 252 + #ifndef CONFIG_INLINE_READ_UNLOCK_IRQ 253 + void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) 254 + { 255 + __raw_read_unlock_irq(lock); 256 + } 257 + EXPORT_SYMBOL(_raw_read_unlock_irq); 258 + #endif 259 + 260 + #ifndef CONFIG_INLINE_READ_UNLOCK_BH 261 + void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) 262 + { 263 + __raw_read_unlock_bh(lock); 264 + } 265 + EXPORT_SYMBOL(_raw_read_unlock_bh); 266 + #endif 267 + 268 + #ifndef CONFIG_INLINE_WRITE_TRYLOCK 269 + int __lockfunc _raw_write_trylock(rwlock_t *lock) 270 + { 271 + return __raw_write_trylock(lock); 272 + } 273 + EXPORT_SYMBOL(_raw_write_trylock); 274 + #endif 275 + 276 + #ifndef CONFIG_INLINE_WRITE_LOCK 277 + void __lockfunc _raw_write_lock(rwlock_t *lock) 278 + { 279 + __raw_write_lock(lock); 280 + } 281 + EXPORT_SYMBOL(_raw_write_lock); 282 + #endif 283 + 284 + #ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE 285 + unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock) 286 + { 287 + return __raw_write_lock_irqsave(lock); 288 + } 289 + EXPORT_SYMBOL(_raw_write_lock_irqsave); 290 + #endif 291 + 292 + #ifndef CONFIG_INLINE_WRITE_LOCK_IRQ 293 + void __lockfunc _raw_write_lock_irq(rwlock_t *lock) 294 + { 295 + __raw_write_lock_irq(lock); 296 + } 297 + EXPORT_SYMBOL(_raw_write_lock_irq); 298 + #endif 299 + 300 + #ifndef CONFIG_INLINE_WRITE_LOCK_BH 301 + void __lockfunc _raw_write_lock_bh(rwlock_t *lock) 302 + { 303 + __raw_write_lock_bh(lock); 304 + } 305 + EXPORT_SYMBOL(_raw_write_lock_bh); 306 + #endif 307 + 308 + #ifndef CONFIG_INLINE_WRITE_UNLOCK 309 + void __lockfunc _raw_write_unlock(rwlock_t *lock) 310 + { 311 + __raw_write_unlock(lock); 312 + } 313 + EXPORT_SYMBOL(_raw_write_unlock); 314 + #endif 315 + 316 + #ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE 317 + void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) 318 + { 319 + __raw_write_unlock_irqrestore(lock, flags); 320 + } 321 + EXPORT_SYMBOL(_raw_write_unlock_irqrestore); 322 + #endif 323 + 324 + #ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ 325 + void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) 326 + { 327 + __raw_write_unlock_irq(lock); 328 + } 329 + EXPORT_SYMBOL(_raw_write_unlock_irq); 330 + #endif 331 + 332 + #ifndef CONFIG_INLINE_WRITE_UNLOCK_BH 333 + void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) 334 + { 335 + __raw_write_unlock_bh(lock); 336 + } 337 + EXPORT_SYMBOL(_raw_write_unlock_bh); 338 + #endif 339 + 118 340 #ifdef CONFIG_DEBUG_LOCK_ALLOC 119 341 120 - void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) 342 + void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) 121 343 { 122 344 preempt_disable(); 123 345 spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); 124 - LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); 346 + LOCK_CONTENDED(lock, 
do_raw_spin_trylock, do_raw_spin_lock); 125 347 } 126 - EXPORT_SYMBOL(_spin_lock_nested); 348 + EXPORT_SYMBOL(_raw_spin_lock_nested); 127 349 128 - unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, 350 + unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, 129 351 int subclass) 130 352 { 131 353 unsigned long flags; ··· 357 131 local_irq_save(flags); 358 132 preempt_disable(); 359 133 spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); 360 - LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock, 361 - _raw_spin_lock_flags, &flags); 134 + LOCK_CONTENDED_FLAGS(lock, do_raw_spin_trylock, do_raw_spin_lock, 135 + do_raw_spin_lock_flags, &flags); 362 136 return flags; 363 137 } 364 - EXPORT_SYMBOL(_spin_lock_irqsave_nested); 138 + EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested); 365 139 366 - void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, 140 + void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock, 367 141 struct lockdep_map *nest_lock) 368 142 { 369 143 preempt_disable(); 370 144 spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_); 371 - LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); 145 + LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); 372 146 } 373 - EXPORT_SYMBOL(_spin_lock_nest_lock); 147 + EXPORT_SYMBOL(_raw_spin_lock_nest_lock); 374 148 375 - #endif 376 - 377 - #ifndef CONFIG_INLINE_SPIN_TRYLOCK 378 - int __lockfunc _spin_trylock(spinlock_t *lock) 379 - { 380 - return __spin_trylock(lock); 381 - } 382 - EXPORT_SYMBOL(_spin_trylock); 383 - #endif 384 - 385 - #ifndef CONFIG_INLINE_READ_TRYLOCK 386 - int __lockfunc _read_trylock(rwlock_t *lock) 387 - { 388 - return __read_trylock(lock); 389 - } 390 - EXPORT_SYMBOL(_read_trylock); 391 - #endif 392 - 393 - #ifndef CONFIG_INLINE_WRITE_TRYLOCK 394 - int __lockfunc _write_trylock(rwlock_t *lock) 395 - { 396 - return __write_trylock(lock); 397 - } 398 - EXPORT_SYMBOL(_write_trylock); 399 - #endif 400 - 401 - #ifndef CONFIG_INLINE_READ_LOCK 402 - void __lockfunc _read_lock(rwlock_t *lock) 403 - { 404 - __read_lock(lock); 405 - } 406 - EXPORT_SYMBOL(_read_lock); 407 - #endif 408 - 409 - #ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE 410 - unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) 411 - { 412 - return __spin_lock_irqsave(lock); 413 - } 414 - EXPORT_SYMBOL(_spin_lock_irqsave); 415 - #endif 416 - 417 - #ifndef CONFIG_INLINE_SPIN_LOCK_IRQ 418 - void __lockfunc _spin_lock_irq(spinlock_t *lock) 419 - { 420 - __spin_lock_irq(lock); 421 - } 422 - EXPORT_SYMBOL(_spin_lock_irq); 423 - #endif 424 - 425 - #ifndef CONFIG_INLINE_SPIN_LOCK_BH 426 - void __lockfunc _spin_lock_bh(spinlock_t *lock) 427 - { 428 - __spin_lock_bh(lock); 429 - } 430 - EXPORT_SYMBOL(_spin_lock_bh); 431 - #endif 432 - 433 - #ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE 434 - unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) 435 - { 436 - return __read_lock_irqsave(lock); 437 - } 438 - EXPORT_SYMBOL(_read_lock_irqsave); 439 - #endif 440 - 441 - #ifndef CONFIG_INLINE_READ_LOCK_IRQ 442 - void __lockfunc _read_lock_irq(rwlock_t *lock) 443 - { 444 - __read_lock_irq(lock); 445 - } 446 - EXPORT_SYMBOL(_read_lock_irq); 447 - #endif 448 - 449 - #ifndef CONFIG_INLINE_READ_LOCK_BH 450 - void __lockfunc _read_lock_bh(rwlock_t *lock) 451 - { 452 - __read_lock_bh(lock); 453 - } 454 - EXPORT_SYMBOL(_read_lock_bh); 455 - #endif 456 - 457 - #ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE 458 - unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) 459 - { 460 - return __write_lock_irqsave(lock); 
461 - } 462 - EXPORT_SYMBOL(_write_lock_irqsave); 463 - #endif 464 - 465 - #ifndef CONFIG_INLINE_WRITE_LOCK_IRQ 466 - void __lockfunc _write_lock_irq(rwlock_t *lock) 467 - { 468 - __write_lock_irq(lock); 469 - } 470 - EXPORT_SYMBOL(_write_lock_irq); 471 - #endif 472 - 473 - #ifndef CONFIG_INLINE_WRITE_LOCK_BH 474 - void __lockfunc _write_lock_bh(rwlock_t *lock) 475 - { 476 - __write_lock_bh(lock); 477 - } 478 - EXPORT_SYMBOL(_write_lock_bh); 479 - #endif 480 - 481 - #ifndef CONFIG_INLINE_SPIN_LOCK 482 - void __lockfunc _spin_lock(spinlock_t *lock) 483 - { 484 - __spin_lock(lock); 485 - } 486 - EXPORT_SYMBOL(_spin_lock); 487 - #endif 488 - 489 - #ifndef CONFIG_INLINE_WRITE_LOCK 490 - void __lockfunc _write_lock(rwlock_t *lock) 491 - { 492 - __write_lock(lock); 493 - } 494 - EXPORT_SYMBOL(_write_lock); 495 - #endif 496 - 497 - #ifndef CONFIG_INLINE_SPIN_UNLOCK 498 - void __lockfunc _spin_unlock(spinlock_t *lock) 499 - { 500 - __spin_unlock(lock); 501 - } 502 - EXPORT_SYMBOL(_spin_unlock); 503 - #endif 504 - 505 - #ifndef CONFIG_INLINE_WRITE_UNLOCK 506 - void __lockfunc _write_unlock(rwlock_t *lock) 507 - { 508 - __write_unlock(lock); 509 - } 510 - EXPORT_SYMBOL(_write_unlock); 511 - #endif 512 - 513 - #ifndef CONFIG_INLINE_READ_UNLOCK 514 - void __lockfunc _read_unlock(rwlock_t *lock) 515 - { 516 - __read_unlock(lock); 517 - } 518 - EXPORT_SYMBOL(_read_unlock); 519 - #endif 520 - 521 - #ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE 522 - void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) 523 - { 524 - __spin_unlock_irqrestore(lock, flags); 525 - } 526 - EXPORT_SYMBOL(_spin_unlock_irqrestore); 527 - #endif 528 - 529 - #ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ 530 - void __lockfunc _spin_unlock_irq(spinlock_t *lock) 531 - { 532 - __spin_unlock_irq(lock); 533 - } 534 - EXPORT_SYMBOL(_spin_unlock_irq); 535 - #endif 536 - 537 - #ifndef CONFIG_INLINE_SPIN_UNLOCK_BH 538 - void __lockfunc _spin_unlock_bh(spinlock_t *lock) 539 - { 540 - __spin_unlock_bh(lock); 541 - } 542 - EXPORT_SYMBOL(_spin_unlock_bh); 543 - #endif 544 - 545 - #ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE 546 - void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) 547 - { 548 - __read_unlock_irqrestore(lock, flags); 549 - } 550 - EXPORT_SYMBOL(_read_unlock_irqrestore); 551 - #endif 552 - 553 - #ifndef CONFIG_INLINE_READ_UNLOCK_IRQ 554 - void __lockfunc _read_unlock_irq(rwlock_t *lock) 555 - { 556 - __read_unlock_irq(lock); 557 - } 558 - EXPORT_SYMBOL(_read_unlock_irq); 559 - #endif 560 - 561 - #ifndef CONFIG_INLINE_READ_UNLOCK_BH 562 - void __lockfunc _read_unlock_bh(rwlock_t *lock) 563 - { 564 - __read_unlock_bh(lock); 565 - } 566 - EXPORT_SYMBOL(_read_unlock_bh); 567 - #endif 568 - 569 - #ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE 570 - void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) 571 - { 572 - __write_unlock_irqrestore(lock, flags); 573 - } 574 - EXPORT_SYMBOL(_write_unlock_irqrestore); 575 - #endif 576 - 577 - #ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ 578 - void __lockfunc _write_unlock_irq(rwlock_t *lock) 579 - { 580 - __write_unlock_irq(lock); 581 - } 582 - EXPORT_SYMBOL(_write_unlock_irq); 583 - #endif 584 - 585 - #ifndef CONFIG_INLINE_WRITE_UNLOCK_BH 586 - void __lockfunc _write_unlock_bh(rwlock_t *lock) 587 - { 588 - __write_unlock_bh(lock); 589 - } 590 - EXPORT_SYMBOL(_write_unlock_bh); 591 - #endif 592 - 593 - #ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH 594 - int __lockfunc _spin_trylock_bh(spinlock_t *lock) 595 - { 596 - return 
__spin_trylock_bh(lock); 597 - } 598 - EXPORT_SYMBOL(_spin_trylock_bh); 599 149 #endif 600 150 601 151 notrace int in_lock_functions(unsigned long addr)
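
The bulk of kernel/spinlock.c is the name-space cleanup: callers now reach _raw_spin_lock(), _raw_read_lock() and friends, and each of those entry points is either inlined through the matching CONFIG_INLINE_* option or emitted here as a tiny exported out-of-line wrapper around the __raw_* implementation, which in turn bottoms out in do_raw_*()/arch_*() on the underlying arch_spinlock_t. Below is a stand-alone sketch of that "#ifndef CONFIG_INLINE_*" shape; MY_INLINE_LOCK and the my_* names are purely illustrative.

#include <stdio.h>

typedef struct { volatile int taken; } my_lock_t;

static inline void __my_lock(my_lock_t *l)   { while (l->taken) ; l->taken = 1; }
static inline void __my_unlock(my_lock_t *l) { l->taken = 0; }

#ifdef MY_INLINE_LOCK
/* "Inline" configuration: callers get the body directly. */
#define my_lock(l)   __my_lock(l)
#define my_unlock(l) __my_unlock(l)
#else
/* Out-of-line configuration: one tiny wrapper per operation, as above. */
void my_lock(my_lock_t *l)   { __my_lock(l); }
void my_unlock(my_lock_t *l) { __my_unlock(l); }
#endif

int main(void)
{
	my_lock_t l = { 0 };

	my_lock(&l);
	printf("taken = %d\n", l.taken);
	my_unlock(&l);
	return 0;
}

In the kernel version each out-of-line wrapper is additionally exported with EXPORT_SYMBOL(), so modules link against whichever variant the configuration selected.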
+7 -7
kernel/time/clockevents.c
··· 30 30 static RAW_NOTIFIER_HEAD(clockevents_chain); 31 31 32 32 /* Protection for the above */ 33 - static DEFINE_SPINLOCK(clockevents_lock); 33 + static DEFINE_RAW_SPINLOCK(clockevents_lock); 34 34 35 35 /** 36 36 * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds ··· 141 141 unsigned long flags; 142 142 int ret; 143 143 144 - spin_lock_irqsave(&clockevents_lock, flags); 144 + raw_spin_lock_irqsave(&clockevents_lock, flags); 145 145 ret = raw_notifier_chain_register(&clockevents_chain, nb); 146 - spin_unlock_irqrestore(&clockevents_lock, flags); 146 + raw_spin_unlock_irqrestore(&clockevents_lock, flags); 147 147 148 148 return ret; 149 149 } ··· 185 185 BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); 186 186 BUG_ON(!dev->cpumask); 187 187 188 - spin_lock_irqsave(&clockevents_lock, flags); 188 + raw_spin_lock_irqsave(&clockevents_lock, flags); 189 189 190 190 list_add(&dev->list, &clockevent_devices); 191 191 clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev); 192 192 clockevents_notify_released(); 193 193 194 - spin_unlock_irqrestore(&clockevents_lock, flags); 194 + raw_spin_unlock_irqrestore(&clockevents_lock, flags); 195 195 } 196 196 EXPORT_SYMBOL_GPL(clockevents_register_device); 197 197 ··· 241 241 struct list_head *node, *tmp; 242 242 unsigned long flags; 243 243 244 - spin_lock_irqsave(&clockevents_lock, flags); 244 + raw_spin_lock_irqsave(&clockevents_lock, flags); 245 245 clockevents_do_notify(reason, arg); 246 246 247 247 switch (reason) { ··· 256 256 default: 257 257 break; 258 258 } 259 - spin_unlock_irqrestore(&clockevents_lock, flags); 259 + raw_spin_unlock_irqrestore(&clockevents_lock, flags); 260 260 } 261 261 EXPORT_SYMBOL_GPL(clockevents_notify); 262 262 #endif
+21 -21
kernel/time/tick-broadcast.c
··· 31 31 /* FIXME: Use cpumask_var_t. */ 32 32 static DECLARE_BITMAP(tick_broadcast_mask, NR_CPUS); 33 33 static DECLARE_BITMAP(tmpmask, NR_CPUS); 34 - static DEFINE_SPINLOCK(tick_broadcast_lock); 34 + static DEFINE_RAW_SPINLOCK(tick_broadcast_lock); 35 35 static int tick_broadcast_force; 36 36 37 37 #ifdef CONFIG_TICK_ONESHOT ··· 96 96 unsigned long flags; 97 97 int ret = 0; 98 98 99 - spin_lock_irqsave(&tick_broadcast_lock, flags); 99 + raw_spin_lock_irqsave(&tick_broadcast_lock, flags); 100 100 101 101 /* 102 102 * Devices might be registered with both periodic and oneshot ··· 122 122 tick_broadcast_clear_oneshot(cpu); 123 123 } 124 124 } 125 - spin_unlock_irqrestore(&tick_broadcast_lock, flags); 125 + raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); 126 126 return ret; 127 127 } 128 128 ··· 161 161 */ 162 162 static void tick_do_periodic_broadcast(void) 163 163 { 164 - spin_lock(&tick_broadcast_lock); 164 + raw_spin_lock(&tick_broadcast_lock); 165 165 166 166 cpumask_and(to_cpumask(tmpmask), 167 167 cpu_online_mask, tick_get_broadcast_mask()); 168 168 tick_do_broadcast(to_cpumask(tmpmask)); 169 169 170 - spin_unlock(&tick_broadcast_lock); 170 + raw_spin_unlock(&tick_broadcast_lock); 171 171 } 172 172 173 173 /* ··· 212 212 unsigned long flags; 213 213 int cpu, bc_stopped; 214 214 215 - spin_lock_irqsave(&tick_broadcast_lock, flags); 215 + raw_spin_lock_irqsave(&tick_broadcast_lock, flags); 216 216 217 217 cpu = smp_processor_id(); 218 218 td = &per_cpu(tick_cpu_device, cpu); ··· 263 263 tick_broadcast_setup_oneshot(bc); 264 264 } 265 265 out: 266 - spin_unlock_irqrestore(&tick_broadcast_lock, flags); 266 + raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); 267 267 } 268 268 269 269 /* ··· 299 299 unsigned long flags; 300 300 unsigned int cpu = *cpup; 301 301 302 - spin_lock_irqsave(&tick_broadcast_lock, flags); 302 + raw_spin_lock_irqsave(&tick_broadcast_lock, flags); 303 303 304 304 bc = tick_broadcast_device.evtdev; 305 305 cpumask_clear_cpu(cpu, tick_get_broadcast_mask()); ··· 309 309 clockevents_shutdown(bc); 310 310 } 311 311 312 - spin_unlock_irqrestore(&tick_broadcast_lock, flags); 312 + raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); 313 313 } 314 314 315 315 void tick_suspend_broadcast(void) ··· 317 317 struct clock_event_device *bc; 318 318 unsigned long flags; 319 319 320 - spin_lock_irqsave(&tick_broadcast_lock, flags); 320 + raw_spin_lock_irqsave(&tick_broadcast_lock, flags); 321 321 322 322 bc = tick_broadcast_device.evtdev; 323 323 if (bc) 324 324 clockevents_shutdown(bc); 325 325 326 - spin_unlock_irqrestore(&tick_broadcast_lock, flags); 326 + raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); 327 327 } 328 328 329 329 int tick_resume_broadcast(void) ··· 332 332 unsigned long flags; 333 333 int broadcast = 0; 334 334 335 - spin_lock_irqsave(&tick_broadcast_lock, flags); 335 + raw_spin_lock_irqsave(&tick_broadcast_lock, flags); 336 336 337 337 bc = tick_broadcast_device.evtdev; 338 338 ··· 351 351 break; 352 352 } 353 353 } 354 - spin_unlock_irqrestore(&tick_broadcast_lock, flags); 354 + raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); 355 355 356 356 return broadcast; 357 357 } ··· 405 405 ktime_t now, next_event; 406 406 int cpu; 407 407 408 - spin_lock(&tick_broadcast_lock); 408 + raw_spin_lock(&tick_broadcast_lock); 409 409 again: 410 410 dev->next_event.tv64 = KTIME_MAX; 411 411 next_event.tv64 = KTIME_MAX; ··· 443 443 if (tick_broadcast_set_event(next_event, 0)) 444 444 goto again; 445 445 } 446 - 
spin_unlock(&tick_broadcast_lock); 446 + raw_spin_unlock(&tick_broadcast_lock); 447 447 } 448 448 449 449 /* ··· 457 457 unsigned long flags; 458 458 int cpu; 459 459 460 - spin_lock_irqsave(&tick_broadcast_lock, flags); 460 + raw_spin_lock_irqsave(&tick_broadcast_lock, flags); 461 461 462 462 /* 463 463 * Periodic mode does not care about the enter/exit of power ··· 492 492 } 493 493 494 494 out: 495 - spin_unlock_irqrestore(&tick_broadcast_lock, flags); 495 + raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); 496 496 } 497 497 498 498 /* ··· 563 563 struct clock_event_device *bc; 564 564 unsigned long flags; 565 565 566 - spin_lock_irqsave(&tick_broadcast_lock, flags); 566 + raw_spin_lock_irqsave(&tick_broadcast_lock, flags); 567 567 568 568 tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT; 569 569 bc = tick_broadcast_device.evtdev; 570 570 if (bc) 571 571 tick_broadcast_setup_oneshot(bc); 572 - spin_unlock_irqrestore(&tick_broadcast_lock, flags); 572 + raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); 573 573 } 574 574 575 575 ··· 581 581 unsigned long flags; 582 582 unsigned int cpu = *cpup; 583 583 584 - spin_lock_irqsave(&tick_broadcast_lock, flags); 584 + raw_spin_lock_irqsave(&tick_broadcast_lock, flags); 585 585 586 586 /* 587 587 * Clear the broadcast mask flag for the dead cpu, but do not ··· 589 589 */ 590 590 cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask()); 591 591 592 - spin_unlock_irqrestore(&tick_broadcast_lock, flags); 592 + raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); 593 593 } 594 594 595 595 /*
+10 -10
kernel/time/tick-common.c
··· 34 34 ktime_t tick_next_period; 35 35 ktime_t tick_period; 36 36 int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT; 37 - DEFINE_SPINLOCK(tick_device_lock); 37 + static DEFINE_RAW_SPINLOCK(tick_device_lock); 38 38 39 39 /* 40 40 * Debugging: see timer_list.c ··· 209 209 int cpu, ret = NOTIFY_OK; 210 210 unsigned long flags; 211 211 212 - spin_lock_irqsave(&tick_device_lock, flags); 212 + raw_spin_lock_irqsave(&tick_device_lock, flags); 213 213 214 214 cpu = smp_processor_id(); 215 215 if (!cpumask_test_cpu(cpu, newdev->cpumask)) ··· 268 268 if (newdev->features & CLOCK_EVT_FEAT_ONESHOT) 269 269 tick_oneshot_notify(); 270 270 271 - spin_unlock_irqrestore(&tick_device_lock, flags); 271 + raw_spin_unlock_irqrestore(&tick_device_lock, flags); 272 272 return NOTIFY_STOP; 273 273 274 274 out_bc: ··· 278 278 if (tick_check_broadcast_device(newdev)) 279 279 ret = NOTIFY_STOP; 280 280 281 - spin_unlock_irqrestore(&tick_device_lock, flags); 281 + raw_spin_unlock_irqrestore(&tick_device_lock, flags); 282 282 283 283 return ret; 284 284 } ··· 311 311 struct clock_event_device *dev = td->evtdev; 312 312 unsigned long flags; 313 313 314 - spin_lock_irqsave(&tick_device_lock, flags); 314 + raw_spin_lock_irqsave(&tick_device_lock, flags); 315 315 td->mode = TICKDEV_MODE_PERIODIC; 316 316 if (dev) { 317 317 /* ··· 322 322 clockevents_exchange_device(dev, NULL); 323 323 td->evtdev = NULL; 324 324 } 325 - spin_unlock_irqrestore(&tick_device_lock, flags); 325 + raw_spin_unlock_irqrestore(&tick_device_lock, flags); 326 326 } 327 327 328 328 static void tick_suspend(void) ··· 330 330 struct tick_device *td = &__get_cpu_var(tick_cpu_device); 331 331 unsigned long flags; 332 332 333 - spin_lock_irqsave(&tick_device_lock, flags); 333 + raw_spin_lock_irqsave(&tick_device_lock, flags); 334 334 clockevents_shutdown(td->evtdev); 335 - spin_unlock_irqrestore(&tick_device_lock, flags); 335 + raw_spin_unlock_irqrestore(&tick_device_lock, flags); 336 336 } 337 337 338 338 static void tick_resume(void) ··· 341 341 unsigned long flags; 342 342 int broadcast = tick_resume_broadcast(); 343 343 344 - spin_lock_irqsave(&tick_device_lock, flags); 344 + raw_spin_lock_irqsave(&tick_device_lock, flags); 345 345 clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME); 346 346 347 347 if (!broadcast) { ··· 350 350 else 351 351 tick_resume_oneshot(); 352 352 } 353 - spin_unlock_irqrestore(&tick_device_lock, flags); 353 + raw_spin_unlock_irqrestore(&tick_device_lock, flags); 354 354 } 355 355 356 356 /*
-1
kernel/time/tick-internal.h
··· 6 6 #define TICK_DO_TIMER_BOOT -2 7 7 8 8 DECLARE_PER_CPU(struct tick_device, tick_cpu_device); 9 - extern spinlock_t tick_device_lock; 10 9 extern ktime_t tick_next_period; 11 10 extern ktime_t tick_period; 12 11 extern int tick_do_timer_cpu __read_mostly;
+3 -3
kernel/time/timer_list.c
··· 84 84 85 85 next_one: 86 86 i = 0; 87 - spin_lock_irqsave(&base->cpu_base->lock, flags); 87 + raw_spin_lock_irqsave(&base->cpu_base->lock, flags); 88 88 89 89 curr = base->first; 90 90 /* ··· 100 100 101 101 timer = rb_entry(curr, struct hrtimer, node); 102 102 tmp = *timer; 103 - spin_unlock_irqrestore(&base->cpu_base->lock, flags); 103 + raw_spin_unlock_irqrestore(&base->cpu_base->lock, flags); 104 104 105 105 print_timer(m, timer, &tmp, i, now); 106 106 next++; 107 107 goto next_one; 108 108 } 109 - spin_unlock_irqrestore(&base->cpu_base->lock, flags); 109 + raw_spin_unlock_irqrestore(&base->cpu_base->lock, flags); 110 110 } 111 111 112 112 static void
+9 -8
kernel/time/timer_stats.c
··· 86 86 /* 87 87 * Per-CPU lookup locks for fast hash lookup: 88 88 */ 89 - static DEFINE_PER_CPU(spinlock_t, tstats_lookup_lock); 89 + static DEFINE_PER_CPU(raw_spinlock_t, tstats_lookup_lock); 90 90 91 91 /* 92 92 * Mutex to serialize state changes with show-stats activities: ··· 238 238 /* 239 239 * It doesnt matter which lock we take: 240 240 */ 241 - spinlock_t *lock; 241 + raw_spinlock_t *lock; 242 242 struct entry *entry, input; 243 243 unsigned long flags; 244 244 ··· 253 253 input.pid = pid; 254 254 input.timer_flag = timer_flag; 255 255 256 - spin_lock_irqsave(lock, flags); 256 + raw_spin_lock_irqsave(lock, flags); 257 257 if (!timer_stats_active) 258 258 goto out_unlock; 259 259 ··· 264 264 atomic_inc(&overflow_count); 265 265 266 266 out_unlock: 267 - spin_unlock_irqrestore(lock, flags); 267 + raw_spin_unlock_irqrestore(lock, flags); 268 268 } 269 269 270 270 static void print_name_offset(struct seq_file *m, unsigned long addr) ··· 348 348 int cpu; 349 349 350 350 for_each_online_cpu(cpu) { 351 - spinlock_t *lock = &per_cpu(tstats_lookup_lock, cpu); 352 - spin_lock_irqsave(lock, flags); 351 + raw_spinlock_t *lock = &per_cpu(tstats_lookup_lock, cpu); 352 + 353 + raw_spin_lock_irqsave(lock, flags); 353 354 /* nothing */ 354 - spin_unlock_irqrestore(lock, flags); 355 + raw_spin_unlock_irqrestore(lock, flags); 355 356 } 356 357 } 357 358 ··· 410 409 int cpu; 411 410 412 411 for_each_possible_cpu(cpu) 413 - spin_lock_init(&per_cpu(tstats_lookup_lock, cpu)); 412 + raw_spin_lock_init(&per_cpu(tstats_lookup_lock, cpu)); 414 413 } 415 414 416 415 static int __init init_tstats_procfs(void)
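
timer_stats.c converts its per-CPU lookup locks; the pattern is a DEFINE_PER_CPU(raw_spinlock_t, ...) declaration plus a for_each_possible_cpu() loop that calls raw_spin_lock_init() once per lock. A rough stand-alone sketch follows, with per-CPU data emulated as a plain array; NR_CPUS_DEMO, the account() helper and the stubbed lock macros are illustrative, not kernel symbols.

#include <stdio.h>

#define NR_CPUS_DEMO 4

typedef struct { int locked; } raw_spinlock_t;        /* stub */
#define raw_spin_lock_init(l)  ((l)->locked = 0)
#define raw_spin_lock(l)       ((l)->locked = 1)
#define raw_spin_unlock(l)     ((l)->locked = 0)

/* Emulates DEFINE_PER_CPU(raw_spinlock_t, tstats_lookup_lock). */
static raw_spinlock_t tstats_lookup_lock[NR_CPUS_DEMO];
static unsigned long hits[NR_CPUS_DEMO];

static void account(int cpu)
{
	raw_spinlock_t *lock = &tstats_lookup_lock[cpu];

	raw_spin_lock(lock);
	hits[cpu]++;
	raw_spin_unlock(lock);
}

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS_DEMO; cpu++)
		raw_spin_lock_init(&tstats_lookup_lock[cpu]);

	account(2);
	printf("cpu 2 hits = %lu\n", hits[2]);
	return 0;
}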
+8 -8
kernel/trace/ring_buffer.c
··· 423 423 int cpu; 424 424 struct ring_buffer *buffer; 425 425 spinlock_t reader_lock; /* serialize readers */ 426 - raw_spinlock_t lock; 426 + arch_spinlock_t lock; 427 427 struct lock_class_key lock_key; 428 428 struct list_head *pages; 429 429 struct buffer_page *head_page; /* read from head */ ··· 998 998 cpu_buffer->buffer = buffer; 999 999 spin_lock_init(&cpu_buffer->reader_lock); 1000 1000 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); 1001 - cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 1001 + cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; 1002 1002 1003 1003 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), 1004 1004 GFP_KERNEL, cpu_to_node(cpu)); ··· 2834 2834 int ret; 2835 2835 2836 2836 local_irq_save(flags); 2837 - __raw_spin_lock(&cpu_buffer->lock); 2837 + arch_spin_lock(&cpu_buffer->lock); 2838 2838 2839 2839 again: 2840 2840 /* ··· 2923 2923 goto again; 2924 2924 2925 2925 out: 2926 - __raw_spin_unlock(&cpu_buffer->lock); 2926 + arch_spin_unlock(&cpu_buffer->lock); 2927 2927 local_irq_restore(flags); 2928 2928 2929 2929 return reader; ··· 3286 3286 synchronize_sched(); 3287 3287 3288 3288 spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 3289 - __raw_spin_lock(&cpu_buffer->lock); 3289 + arch_spin_lock(&cpu_buffer->lock); 3290 3290 rb_iter_reset(iter); 3291 - __raw_spin_unlock(&cpu_buffer->lock); 3291 + arch_spin_unlock(&cpu_buffer->lock); 3292 3292 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 3293 3293 3294 3294 return iter; ··· 3408 3408 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) 3409 3409 goto out; 3410 3410 3411 - __raw_spin_lock(&cpu_buffer->lock); 3411 + arch_spin_lock(&cpu_buffer->lock); 3412 3412 3413 3413 rb_reset_cpu(cpu_buffer); 3414 3414 3415 - __raw_spin_unlock(&cpu_buffer->lock); 3415 + arch_spin_unlock(&cpu_buffer->lock); 3416 3416 3417 3417 out: 3418 3418 spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+25 -25
kernel/trace/trace.c
··· 493 493 * protected by per_cpu spinlocks. But the action of the swap 494 494 * needs its own lock. 495 495 * 496 - * This is defined as a raw_spinlock_t in order to help 496 + * This is defined as a arch_spinlock_t in order to help 497 497 * with performance when lockdep debugging is enabled. 498 498 * 499 499 * It is also used in other places outside the update_max_tr 500 500 * so it needs to be defined outside of the 501 501 * CONFIG_TRACER_MAX_TRACE. 502 502 */ 503 - static raw_spinlock_t ftrace_max_lock = 504 - (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 503 + static arch_spinlock_t ftrace_max_lock = 504 + (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; 505 505 506 506 #ifdef CONFIG_TRACER_MAX_TRACE 507 507 unsigned long __read_mostly tracing_max_latency; ··· 555 555 return; 556 556 557 557 WARN_ON_ONCE(!irqs_disabled()); 558 - __raw_spin_lock(&ftrace_max_lock); 558 + arch_spin_lock(&ftrace_max_lock); 559 559 560 560 tr->buffer = max_tr.buffer; 561 561 max_tr.buffer = buf; 562 562 563 563 __update_max_tr(tr, tsk, cpu); 564 - __raw_spin_unlock(&ftrace_max_lock); 564 + arch_spin_unlock(&ftrace_max_lock); 565 565 } 566 566 567 567 /** ··· 581 581 return; 582 582 583 583 WARN_ON_ONCE(!irqs_disabled()); 584 - __raw_spin_lock(&ftrace_max_lock); 584 + arch_spin_lock(&ftrace_max_lock); 585 585 586 586 ftrace_disable_cpu(); 587 587 ··· 603 603 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); 604 604 605 605 __update_max_tr(tr, tsk, cpu); 606 - __raw_spin_unlock(&ftrace_max_lock); 606 + arch_spin_unlock(&ftrace_max_lock); 607 607 } 608 608 #endif /* CONFIG_TRACER_MAX_TRACE */ 609 609 ··· 802 802 static unsigned map_cmdline_to_pid[SAVED_CMDLINES]; 803 803 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN]; 804 804 static int cmdline_idx; 805 - static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED; 805 + static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED; 806 806 807 807 /* temporary disable recording */ 808 808 static atomic_t trace_record_cmdline_disabled __read_mostly; ··· 915 915 * nor do we want to disable interrupts, 916 916 * so if we miss here, then better luck next time. 
917 917 */ 918 - if (!__raw_spin_trylock(&trace_cmdline_lock)) 918 + if (!arch_spin_trylock(&trace_cmdline_lock)) 919 919 return; 920 920 921 921 idx = map_pid_to_cmdline[tsk->pid]; ··· 940 940 941 941 memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN); 942 942 943 - __raw_spin_unlock(&trace_cmdline_lock); 943 + arch_spin_unlock(&trace_cmdline_lock); 944 944 } 945 945 946 946 void trace_find_cmdline(int pid, char comm[]) ··· 958 958 } 959 959 960 960 preempt_disable(); 961 - __raw_spin_lock(&trace_cmdline_lock); 961 + arch_spin_lock(&trace_cmdline_lock); 962 962 map = map_pid_to_cmdline[pid]; 963 963 if (map != NO_CMDLINE_MAP) 964 964 strcpy(comm, saved_cmdlines[map]); 965 965 else 966 966 strcpy(comm, "<...>"); 967 967 968 - __raw_spin_unlock(&trace_cmdline_lock); 968 + arch_spin_unlock(&trace_cmdline_lock); 969 969 preempt_enable(); 970 970 } 971 971 ··· 1251 1251 */ 1252 1252 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) 1253 1253 { 1254 - static raw_spinlock_t trace_buf_lock = 1255 - (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 1254 + static arch_spinlock_t trace_buf_lock = 1255 + (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; 1256 1256 static u32 trace_buf[TRACE_BUF_SIZE]; 1257 1257 1258 1258 struct ftrace_event_call *call = &event_bprint; ··· 1283 1283 1284 1284 /* Lockdep uses trace_printk for lock tracing */ 1285 1285 local_irq_save(flags); 1286 - __raw_spin_lock(&trace_buf_lock); 1286 + arch_spin_lock(&trace_buf_lock); 1287 1287 len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args); 1288 1288 1289 1289 if (len > TRACE_BUF_SIZE || len < 0) ··· 1304 1304 ring_buffer_unlock_commit(buffer, event); 1305 1305 1306 1306 out_unlock: 1307 - __raw_spin_unlock(&trace_buf_lock); 1307 + arch_spin_unlock(&trace_buf_lock); 1308 1308 local_irq_restore(flags); 1309 1309 1310 1310 out: ··· 1334 1334 int trace_array_vprintk(struct trace_array *tr, 1335 1335 unsigned long ip, const char *fmt, va_list args) 1336 1336 { 1337 - static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED; 1337 + static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED; 1338 1338 static char trace_buf[TRACE_BUF_SIZE]; 1339 1339 1340 1340 struct ftrace_event_call *call = &event_print; ··· 1360 1360 1361 1361 pause_graph_tracing(); 1362 1362 raw_local_irq_save(irq_flags); 1363 - __raw_spin_lock(&trace_buf_lock); 1363 + arch_spin_lock(&trace_buf_lock); 1364 1364 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); 1365 1365 1366 1366 size = sizeof(*entry) + len + 1; ··· 1378 1378 ring_buffer_unlock_commit(buffer, event); 1379 1379 1380 1380 out_unlock: 1381 - __raw_spin_unlock(&trace_buf_lock); 1381 + arch_spin_unlock(&trace_buf_lock); 1382 1382 raw_local_irq_restore(irq_flags); 1383 1383 unpause_graph_tracing(); 1384 1384 out: ··· 2279 2279 mutex_lock(&tracing_cpumask_update_lock); 2280 2280 2281 2281 local_irq_disable(); 2282 - __raw_spin_lock(&ftrace_max_lock); 2282 + arch_spin_lock(&ftrace_max_lock); 2283 2283 for_each_tracing_cpu(cpu) { 2284 2284 /* 2285 2285 * Increase/decrease the disabled counter if we are ··· 2294 2294 atomic_dec(&global_trace.data[cpu]->disabled); 2295 2295 } 2296 2296 } 2297 - __raw_spin_unlock(&ftrace_max_lock); 2297 + arch_spin_unlock(&ftrace_max_lock); 2298 2298 local_irq_enable(); 2299 2299 2300 2300 cpumask_copy(tracing_cpumask, tracing_cpumask_new); ··· 4307 4307 4308 4308 static void __ftrace_dump(bool disable_tracing) 4309 4309 { 4310 - static raw_spinlock_t ftrace_dump_lock = 4311 - (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 4310 + static 
arch_spinlock_t ftrace_dump_lock = 4311 + (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; 4312 4312 /* use static because iter can be a bit big for the stack */ 4313 4313 static struct trace_iterator iter; 4314 4314 unsigned int old_userobj; ··· 4318 4318 4319 4319 /* only one dump */ 4320 4320 local_irq_save(flags); 4321 - __raw_spin_lock(&ftrace_dump_lock); 4321 + arch_spin_lock(&ftrace_dump_lock); 4322 4322 if (dump_ran) 4323 4323 goto out; 4324 4324 ··· 4393 4393 } 4394 4394 4395 4395 out: 4396 - __raw_spin_unlock(&ftrace_dump_lock); 4396 + arch_spin_unlock(&ftrace_dump_lock); 4397 4397 local_irq_restore(flags); 4398 4398 } 4399 4399
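
The tracing hunks are the other half of the rename: the type that used to be spelled raw_spinlock_t at the lowest level is now arch_spinlock_t, initialized with __ARCH_SPIN_LOCK_UNLOCKED and taken with arch_spin_lock()/arch_spin_unlock(), while the freed-up raw_ names belong to the new lockdep-aware layer. That is why ftrace, as the comment above notes, keeps using the arch-level lock directly for its hottest paths. After the series the layering is spinlock_t -> raw_spinlock_t -> arch_spinlock_t. A stand-alone sketch of the arch-level usage follows, with the type and macros stubbed so it builds outside the kernel.

#include <stdio.h>

typedef struct { volatile int slock; } arch_spinlock_t;        /* stub */
#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }                        /* stub */
#define arch_spin_lock(l)   do { while ((l)->slock) ; (l)->slock = 1; } while (0)
#define arch_spin_unlock(l) ((l)->slock = 0)

static arch_spinlock_t buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static char buf[64];

/* Mimics the trace_buf usage above: serialize writers to a shared buffer. */
static void emit(const char *msg)
{
	arch_spin_lock(&buf_lock);
	snprintf(buf, sizeof(buf), "%s", msg);
	arch_spin_unlock(&buf_lock);
}

int main(void)
{
	emit("hello");
	printf("%s\n", buf);
	return 0;
}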
+4 -4
kernel/trace/trace_clock.c
··· 71 71 /* keep prev_time and lock in the same cacheline. */ 72 72 static struct { 73 73 u64 prev_time; 74 - raw_spinlock_t lock; 74 + arch_spinlock_t lock; 75 75 } trace_clock_struct ____cacheline_aligned_in_smp = 76 76 { 77 - .lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED, 77 + .lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED, 78 78 }; 79 79 80 80 u64 notrace trace_clock_global(void) ··· 94 94 if (unlikely(in_nmi())) 95 95 goto out; 96 96 97 - __raw_spin_lock(&trace_clock_struct.lock); 97 + arch_spin_lock(&trace_clock_struct.lock); 98 98 99 99 /* 100 100 * TODO: if this happens often then maybe we should reset ··· 106 106 107 107 trace_clock_struct.prev_time = now; 108 108 109 - __raw_spin_unlock(&trace_clock_struct.lock); 109 + arch_spin_unlock(&trace_clock_struct.lock); 110 110 111 111 out: 112 112 raw_local_irq_restore(flags);
+8 -8
kernel/trace/trace_sched_wakeup.c
··· 28 28 static unsigned wakeup_prio = -1; 29 29 static int wakeup_rt; 30 30 31 - static raw_spinlock_t wakeup_lock = 32 - (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 31 + static arch_spinlock_t wakeup_lock = 32 + (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; 33 33 34 34 static void __wakeup_reset(struct trace_array *tr); 35 35 ··· 143 143 goto out; 144 144 145 145 local_irq_save(flags); 146 - __raw_spin_lock(&wakeup_lock); 146 + arch_spin_lock(&wakeup_lock); 147 147 148 148 /* We could race with grabbing wakeup_lock */ 149 149 if (unlikely(!tracer_enabled || next != wakeup_task)) ··· 169 169 170 170 out_unlock: 171 171 __wakeup_reset(wakeup_trace); 172 - __raw_spin_unlock(&wakeup_lock); 172 + arch_spin_unlock(&wakeup_lock); 173 173 local_irq_restore(flags); 174 174 out: 175 175 atomic_dec(&wakeup_trace->data[cpu]->disabled); ··· 193 193 tracing_reset_online_cpus(tr); 194 194 195 195 local_irq_save(flags); 196 - __raw_spin_lock(&wakeup_lock); 196 + arch_spin_lock(&wakeup_lock); 197 197 __wakeup_reset(tr); 198 - __raw_spin_unlock(&wakeup_lock); 198 + arch_spin_unlock(&wakeup_lock); 199 199 local_irq_restore(flags); 200 200 } 201 201 ··· 225 225 goto out; 226 226 227 227 /* interrupts should be off from try_to_wake_up */ 228 - __raw_spin_lock(&wakeup_lock); 228 + arch_spin_lock(&wakeup_lock); 229 229 230 230 /* check for races. */ 231 231 if (!tracer_enabled || p->prio >= wakeup_prio) ··· 255 255 trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc); 256 256 257 257 out_locked: 258 - __raw_spin_unlock(&wakeup_lock); 258 + arch_spin_unlock(&wakeup_lock); 259 259 out: 260 260 atomic_dec(&wakeup_trace->data[cpu]->disabled); 261 261 }
+2 -2
kernel/trace/trace_selftest.c
··· 67 67 68 68 /* Don't allow flipping of max traces now */ 69 69 local_irq_save(flags); 70 - __raw_spin_lock(&ftrace_max_lock); 70 + arch_spin_lock(&ftrace_max_lock); 71 71 72 72 cnt = ring_buffer_entries(tr->buffer); 73 73 ··· 85 85 break; 86 86 } 87 87 tracing_on(); 88 - __raw_spin_unlock(&ftrace_max_lock); 88 + arch_spin_unlock(&ftrace_max_lock); 89 89 local_irq_restore(flags); 90 90 91 91 if (count)
+8 -8
kernel/trace/trace_stack.c
··· 27 27 }; 28 28 29 29 static unsigned long max_stack_size; 30 - static raw_spinlock_t max_stack_lock = 31 - (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 30 + static arch_spinlock_t max_stack_lock = 31 + (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; 32 32 33 33 static int stack_trace_disabled __read_mostly; 34 34 static DEFINE_PER_CPU(int, trace_active); ··· 54 54 return; 55 55 56 56 local_irq_save(flags); 57 - __raw_spin_lock(&max_stack_lock); 57 + arch_spin_lock(&max_stack_lock); 58 58 59 59 /* a race could have already updated it */ 60 60 if (this_size <= max_stack_size) ··· 103 103 } 104 104 105 105 out: 106 - __raw_spin_unlock(&max_stack_lock); 106 + arch_spin_unlock(&max_stack_lock); 107 107 local_irq_restore(flags); 108 108 } 109 109 ··· 171 171 return ret; 172 172 173 173 local_irq_save(flags); 174 - __raw_spin_lock(&max_stack_lock); 174 + arch_spin_lock(&max_stack_lock); 175 175 *ptr = val; 176 - __raw_spin_unlock(&max_stack_lock); 176 + arch_spin_unlock(&max_stack_lock); 177 177 local_irq_restore(flags); 178 178 179 179 return count; ··· 207 207 static void *t_start(struct seq_file *m, loff_t *pos) 208 208 { 209 209 local_irq_disable(); 210 - __raw_spin_lock(&max_stack_lock); 210 + arch_spin_lock(&max_stack_lock); 211 211 212 212 if (*pos == 0) 213 213 return SEQ_START_TOKEN; ··· 217 217 218 218 static void t_stop(struct seq_file *m, void *p) 219 219 { 220 - __raw_spin_unlock(&max_stack_lock); 220 + arch_spin_unlock(&max_stack_lock); 221 221 local_irq_enable(); 222 222 } 223 223
+37 -37
lib/debugobjects.c
··· 26 26 27 27 struct debug_bucket { 28 28 struct hlist_head list; 29 - spinlock_t lock; 29 + raw_spinlock_t lock; 30 30 }; 31 31 32 32 static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE]; 33 33 34 34 static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE] __initdata; 35 35 36 - static DEFINE_SPINLOCK(pool_lock); 36 + static DEFINE_RAW_SPINLOCK(pool_lock); 37 37 38 38 static HLIST_HEAD(obj_pool); 39 39 ··· 96 96 if (!new) 97 97 return obj_pool_free; 98 98 99 - spin_lock_irqsave(&pool_lock, flags); 99 + raw_spin_lock_irqsave(&pool_lock, flags); 100 100 hlist_add_head(&new->node, &obj_pool); 101 101 obj_pool_free++; 102 - spin_unlock_irqrestore(&pool_lock, flags); 102 + raw_spin_unlock_irqrestore(&pool_lock, flags); 103 103 } 104 104 return obj_pool_free; 105 105 } ··· 133 133 { 134 134 struct debug_obj *obj = NULL; 135 135 136 - spin_lock(&pool_lock); 136 + raw_spin_lock(&pool_lock); 137 137 if (obj_pool.first) { 138 138 obj = hlist_entry(obj_pool.first, typeof(*obj), node); 139 139 ··· 152 152 if (obj_pool_free < obj_pool_min_free) 153 153 obj_pool_min_free = obj_pool_free; 154 154 } 155 - spin_unlock(&pool_lock); 155 + raw_spin_unlock(&pool_lock); 156 156 157 157 return obj; 158 158 } ··· 165 165 struct debug_obj *obj; 166 166 unsigned long flags; 167 167 168 - spin_lock_irqsave(&pool_lock, flags); 168 + raw_spin_lock_irqsave(&pool_lock, flags); 169 169 while (obj_pool_free > ODEBUG_POOL_SIZE) { 170 170 obj = hlist_entry(obj_pool.first, typeof(*obj), node); 171 171 hlist_del(&obj->node); ··· 174 174 * We release pool_lock across kmem_cache_free() to 175 175 * avoid contention on pool_lock. 176 176 */ 177 - spin_unlock_irqrestore(&pool_lock, flags); 177 + raw_spin_unlock_irqrestore(&pool_lock, flags); 178 178 kmem_cache_free(obj_cache, obj); 179 - spin_lock_irqsave(&pool_lock, flags); 179 + raw_spin_lock_irqsave(&pool_lock, flags); 180 180 } 181 - spin_unlock_irqrestore(&pool_lock, flags); 181 + raw_spin_unlock_irqrestore(&pool_lock, flags); 182 182 } 183 183 184 184 /* ··· 190 190 unsigned long flags; 191 191 int sched = 0; 192 192 193 - spin_lock_irqsave(&pool_lock, flags); 193 + raw_spin_lock_irqsave(&pool_lock, flags); 194 194 /* 195 195 * schedule work when the pool is filled and the cache is 196 196 * initialized: ··· 200 200 hlist_add_head(&obj->node, &obj_pool); 201 201 obj_pool_free++; 202 202 obj_pool_used--; 203 - spin_unlock_irqrestore(&pool_lock, flags); 203 + raw_spin_unlock_irqrestore(&pool_lock, flags); 204 204 if (sched) 205 205 schedule_work(&debug_obj_work); 206 206 } ··· 221 221 printk(KERN_WARNING "ODEBUG: Out of memory. 
ODEBUG disabled\n"); 222 222 223 223 for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) { 224 - spin_lock_irqsave(&db->lock, flags); 224 + raw_spin_lock_irqsave(&db->lock, flags); 225 225 hlist_move_list(&db->list, &freelist); 226 - spin_unlock_irqrestore(&db->lock, flags); 226 + raw_spin_unlock_irqrestore(&db->lock, flags); 227 227 228 228 /* Now free them */ 229 229 hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { ··· 303 303 304 304 db = get_bucket((unsigned long) addr); 305 305 306 - spin_lock_irqsave(&db->lock, flags); 306 + raw_spin_lock_irqsave(&db->lock, flags); 307 307 308 308 obj = lookup_object(addr, db); 309 309 if (!obj) { 310 310 obj = alloc_object(addr, db, descr); 311 311 if (!obj) { 312 312 debug_objects_enabled = 0; 313 - spin_unlock_irqrestore(&db->lock, flags); 313 + raw_spin_unlock_irqrestore(&db->lock, flags); 314 314 debug_objects_oom(); 315 315 return; 316 316 } ··· 327 327 case ODEBUG_STATE_ACTIVE: 328 328 debug_print_object(obj, "init"); 329 329 state = obj->state; 330 - spin_unlock_irqrestore(&db->lock, flags); 330 + raw_spin_unlock_irqrestore(&db->lock, flags); 331 331 debug_object_fixup(descr->fixup_init, addr, state); 332 332 return; 333 333 ··· 338 338 break; 339 339 } 340 340 341 - spin_unlock_irqrestore(&db->lock, flags); 341 + raw_spin_unlock_irqrestore(&db->lock, flags); 342 342 } 343 343 344 344 /** ··· 385 385 386 386 db = get_bucket((unsigned long) addr); 387 387 388 - spin_lock_irqsave(&db->lock, flags); 388 + raw_spin_lock_irqsave(&db->lock, flags); 389 389 390 390 obj = lookup_object(addr, db); 391 391 if (obj) { ··· 398 398 case ODEBUG_STATE_ACTIVE: 399 399 debug_print_object(obj, "activate"); 400 400 state = obj->state; 401 - spin_unlock_irqrestore(&db->lock, flags); 401 + raw_spin_unlock_irqrestore(&db->lock, flags); 402 402 debug_object_fixup(descr->fixup_activate, addr, state); 403 403 return; 404 404 ··· 408 408 default: 409 409 break; 410 410 } 411 - spin_unlock_irqrestore(&db->lock, flags); 411 + raw_spin_unlock_irqrestore(&db->lock, flags); 412 412 return; 413 413 } 414 414 415 - spin_unlock_irqrestore(&db->lock, flags); 415 + raw_spin_unlock_irqrestore(&db->lock, flags); 416 416 /* 417 417 * This happens when a static object is activated. 
We 418 418 * let the type specific code decide whether this is ··· 438 438 439 439 db = get_bucket((unsigned long) addr); 440 440 441 - spin_lock_irqsave(&db->lock, flags); 441 + raw_spin_lock_irqsave(&db->lock, flags); 442 442 443 443 obj = lookup_object(addr, db); 444 444 if (obj) { ··· 463 463 debug_print_object(&o, "deactivate"); 464 464 } 465 465 466 - spin_unlock_irqrestore(&db->lock, flags); 466 + raw_spin_unlock_irqrestore(&db->lock, flags); 467 467 } 468 468 469 469 /** ··· 483 483 484 484 db = get_bucket((unsigned long) addr); 485 485 486 - spin_lock_irqsave(&db->lock, flags); 486 + raw_spin_lock_irqsave(&db->lock, flags); 487 487 488 488 obj = lookup_object(addr, db); 489 489 if (!obj) ··· 498 498 case ODEBUG_STATE_ACTIVE: 499 499 debug_print_object(obj, "destroy"); 500 500 state = obj->state; 501 - spin_unlock_irqrestore(&db->lock, flags); 501 + raw_spin_unlock_irqrestore(&db->lock, flags); 502 502 debug_object_fixup(descr->fixup_destroy, addr, state); 503 503 return; 504 504 ··· 509 509 break; 510 510 } 511 511 out_unlock: 512 - spin_unlock_irqrestore(&db->lock, flags); 512 + raw_spin_unlock_irqrestore(&db->lock, flags); 513 513 } 514 514 515 515 /** ··· 529 529 530 530 db = get_bucket((unsigned long) addr); 531 531 532 - spin_lock_irqsave(&db->lock, flags); 532 + raw_spin_lock_irqsave(&db->lock, flags); 533 533 534 534 obj = lookup_object(addr, db); 535 535 if (!obj) ··· 539 539 case ODEBUG_STATE_ACTIVE: 540 540 debug_print_object(obj, "free"); 541 541 state = obj->state; 542 - spin_unlock_irqrestore(&db->lock, flags); 542 + raw_spin_unlock_irqrestore(&db->lock, flags); 543 543 debug_object_fixup(descr->fixup_free, addr, state); 544 544 return; 545 545 default: 546 546 hlist_del(&obj->node); 547 - spin_unlock_irqrestore(&db->lock, flags); 547 + raw_spin_unlock_irqrestore(&db->lock, flags); 548 548 free_object(obj); 549 549 return; 550 550 } 551 551 out_unlock: 552 - spin_unlock_irqrestore(&db->lock, flags); 552 + raw_spin_unlock_irqrestore(&db->lock, flags); 553 553 } 554 554 555 555 #ifdef CONFIG_DEBUG_OBJECTS_FREE ··· 575 575 576 576 repeat: 577 577 cnt = 0; 578 - spin_lock_irqsave(&db->lock, flags); 578 + raw_spin_lock_irqsave(&db->lock, flags); 579 579 hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) { 580 580 cnt++; 581 581 oaddr = (unsigned long) obj->object; ··· 587 587 debug_print_object(obj, "free"); 588 588 descr = obj->descr; 589 589 state = obj->state; 590 - spin_unlock_irqrestore(&db->lock, flags); 590 + raw_spin_unlock_irqrestore(&db->lock, flags); 591 591 debug_object_fixup(descr->fixup_free, 592 592 (void *) oaddr, state); 593 593 goto repeat; ··· 597 597 break; 598 598 } 599 599 } 600 - spin_unlock_irqrestore(&db->lock, flags); 600 + raw_spin_unlock_irqrestore(&db->lock, flags); 601 601 602 602 /* Now free them */ 603 603 hlist_for_each_entry_safe(obj, node, tmp, &freelist, node) { ··· 783 783 784 784 db = get_bucket((unsigned long) addr); 785 785 786 - spin_lock_irqsave(&db->lock, flags); 786 + raw_spin_lock_irqsave(&db->lock, flags); 787 787 788 788 obj = lookup_object(addr, db); 789 789 if (!obj && state != ODEBUG_STATE_NONE) { ··· 807 807 } 808 808 res = 0; 809 809 out: 810 - spin_unlock_irqrestore(&db->lock, flags); 810 + raw_spin_unlock_irqrestore(&db->lock, flags); 811 811 if (res) 812 812 debug_objects_enabled = 0; 813 813 return res; ··· 907 907 int i; 908 908 909 909 for (i = 0; i < ODEBUG_HASH_SIZE; i++) 910 - spin_lock_init(&obj_hash[i].lock); 910 + raw_spin_lock_init(&obj_hash[i].lock); 911 911 912 912 for (i = 0; i < ODEBUG_POOL_SIZE; 
i++) 913 913 hlist_add_head(&obj_static_pool[i].node, &obj_pool);
+11 -11
lib/kernel_lock.c
··· 23 23 * 24 24 * Don't use in new code. 25 25 */ 26 - static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag); 26 + static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(kernel_flag); 27 27 28 28 29 29 /* ··· 36 36 * If it successfully gets the lock, it should increment 37 37 * the preemption count like any spinlock does. 38 38 * 39 - * (This works on UP too - _raw_spin_trylock will never 39 + * (This works on UP too - do_raw_spin_trylock will never 40 40 * return false in that case) 41 41 */ 42 42 int __lockfunc __reacquire_kernel_lock(void) 43 43 { 44 - while (!_raw_spin_trylock(&kernel_flag)) { 44 + while (!do_raw_spin_trylock(&kernel_flag)) { 45 45 if (need_resched()) 46 46 return -EAGAIN; 47 47 cpu_relax(); ··· 52 52 53 53 void __lockfunc __release_kernel_lock(void) 54 54 { 55 - _raw_spin_unlock(&kernel_flag); 55 + do_raw_spin_unlock(&kernel_flag); 56 56 preempt_enable_no_resched(); 57 57 } 58 58 59 59 /* 60 60 * These are the BKL spinlocks - we try to be polite about preemption. 61 61 * If SMP is not on (ie UP preemption), this all goes away because the 62 - * _raw_spin_trylock() will always succeed. 62 + * do_raw_spin_trylock() will always succeed. 63 63 */ 64 64 #ifdef CONFIG_PREEMPT 65 65 static inline void __lock_kernel(void) 66 66 { 67 67 preempt_disable(); 68 - if (unlikely(!_raw_spin_trylock(&kernel_flag))) { 68 + if (unlikely(!do_raw_spin_trylock(&kernel_flag))) { 69 69 /* 70 70 * If preemption was disabled even before this 71 71 * was called, there's nothing we can be polite 72 72 * about - just spin. 73 73 */ 74 74 if (preempt_count() > 1) { 75 - _raw_spin_lock(&kernel_flag); 75 + do_raw_spin_lock(&kernel_flag); 76 76 return; 77 77 } 78 78 ··· 82 82 */ 83 83 do { 84 84 preempt_enable(); 85 - while (spin_is_locked(&kernel_flag)) 85 + while (raw_spin_is_locked(&kernel_flag)) 86 86 cpu_relax(); 87 87 preempt_disable(); 88 - } while (!_raw_spin_trylock(&kernel_flag)); 88 + } while (!do_raw_spin_trylock(&kernel_flag)); 89 89 } 90 90 } 91 91 ··· 96 96 */ 97 97 static inline void __lock_kernel(void) 98 98 { 99 - _raw_spin_lock(&kernel_flag); 99 + do_raw_spin_lock(&kernel_flag); 100 100 } 101 101 #endif 102 102 ··· 106 106 * the BKL is not covered by lockdep, so we open-code the 107 107 * unlocking sequence (and thus avoid the dep-chain ops): 108 108 */ 109 - _raw_spin_unlock(&kernel_flag); 109 + do_raw_spin_unlock(&kernel_flag); 110 110 preempt_enable(); 111 111 } 112 112
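The BKL hunk keeps its open-coded "polite" acquisition, only retargeted at the renamed do_raw_* helpers (the BKL deliberately bypasses lockdep, as the comment in the file notes). The same idiom can be expressed against the ordinary raw_spinlock_t API; a hedged sketch follows, with the helper name made up for illustration.

	#include <linux/spinlock.h>

	/*
	 * Contended path: poll without holding the lock so the CPU stays
	 * preemptible, then acquire via trylock. raw_spin_trylock() itself
	 * disables preemption on success and re-enables it on failure, so
	 * the inner wait loop runs with preemption on.
	 */
	static void polite_raw_spin_lock(raw_spinlock_t *lock)
	{
		while (!raw_spin_trylock(lock)) {
			while (raw_spin_is_locked(lock))
				cpu_relax();
		}
	}

As the hunk's own comment says, this only matters with preemption enabled; on UP without preemption the trylock always succeeds.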
+5 -3
lib/plist.c
··· 54 54 55 55 static void plist_check_head(struct plist_head *head) 56 56 { 57 - WARN_ON(!head->lock); 58 - if (head->lock) 59 - WARN_ON_SMP(!spin_is_locked(head->lock)); 57 + WARN_ON(!head->rawlock && !head->spinlock); 58 + if (head->rawlock) 59 + WARN_ON_SMP(!raw_spin_is_locked(head->rawlock)); 60 + if (head->spinlock) 61 + WARN_ON_SMP(!spin_is_locked(head->spinlock)); 60 62 plist_check_list(&head->prio_list); 61 63 plist_check_list(&head->node_list); 62 64 }
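A plist head can now sit in a structure protected by either lock type, so the debug check above inspects whichever pointer is set. The sketch below shows the two initialisation paths; it assumes the companion plist.h change from the same series (plist_head_init_raw() for raw_spinlock_t protected lists, plist_head_init() for spinlock_t ones), and the surrounding structure names are illustrative.

	#include <linux/plist.h>
	#include <linux/spinlock.h>

	struct rt_side {				/* e.g. an rtmutex-style wait list */
		raw_spinlock_t		lock;
		struct plist_head	waiters;
	};

	struct normal_side {
		spinlock_t		lock;
		struct plist_head	waiters;
	};

	static void sides_init(struct rt_side *rt, struct normal_side *n)
	{
		raw_spin_lock_init(&rt->lock);
		plist_head_init_raw(&rt->waiters, &rt->lock);	/* debug checks head->rawlock */

		spin_lock_init(&n->lock);
		plist_head_init(&n->waiters, &n->lock);		/* debug checks head->spinlock */
	}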
+32 -32
lib/spinlock_debug.c
··· 13 13 #include <linux/delay.h> 14 14 #include <linux/module.h> 15 15 16 - void __spin_lock_init(spinlock_t *lock, const char *name, 17 - struct lock_class_key *key) 16 + void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, 17 + struct lock_class_key *key) 18 18 { 19 19 #ifdef CONFIG_DEBUG_LOCK_ALLOC 20 20 /* ··· 23 23 debug_check_no_locks_freed((void *)lock, sizeof(*lock)); 24 24 lockdep_init_map(&lock->dep_map, name, key, 0); 25 25 #endif 26 - lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 26 + lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; 27 27 lock->magic = SPINLOCK_MAGIC; 28 28 lock->owner = SPINLOCK_OWNER_INIT; 29 29 lock->owner_cpu = -1; 30 30 } 31 31 32 - EXPORT_SYMBOL(__spin_lock_init); 32 + EXPORT_SYMBOL(__raw_spin_lock_init); 33 33 34 34 void __rwlock_init(rwlock_t *lock, const char *name, 35 35 struct lock_class_key *key) ··· 41 41 debug_check_no_locks_freed((void *)lock, sizeof(*lock)); 42 42 lockdep_init_map(&lock->dep_map, name, key, 0); 43 43 #endif 44 - lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED; 44 + lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED; 45 45 lock->magic = RWLOCK_MAGIC; 46 46 lock->owner = SPINLOCK_OWNER_INIT; 47 47 lock->owner_cpu = -1; ··· 49 49 50 50 EXPORT_SYMBOL(__rwlock_init); 51 51 52 - static void spin_bug(spinlock_t *lock, const char *msg) 52 + static void spin_bug(raw_spinlock_t *lock, const char *msg) 53 53 { 54 54 struct task_struct *owner = NULL; 55 55 ··· 73 73 #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg) 74 74 75 75 static inline void 76 - debug_spin_lock_before(spinlock_t *lock) 76 + debug_spin_lock_before(raw_spinlock_t *lock) 77 77 { 78 78 SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); 79 79 SPIN_BUG_ON(lock->owner == current, lock, "recursion"); ··· 81 81 lock, "cpu recursion"); 82 82 } 83 83 84 - static inline void debug_spin_lock_after(spinlock_t *lock) 84 + static inline void debug_spin_lock_after(raw_spinlock_t *lock) 85 85 { 86 86 lock->owner_cpu = raw_smp_processor_id(); 87 87 lock->owner = current; 88 88 } 89 89 90 - static inline void debug_spin_unlock(spinlock_t *lock) 90 + static inline void debug_spin_unlock(raw_spinlock_t *lock) 91 91 { 92 92 SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic"); 93 - SPIN_BUG_ON(!spin_is_locked(lock), lock, "already unlocked"); 93 + SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked"); 94 94 SPIN_BUG_ON(lock->owner != current, lock, "wrong owner"); 95 95 SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(), 96 96 lock, "wrong CPU"); ··· 98 98 lock->owner_cpu = -1; 99 99 } 100 100 101 - static void __spin_lock_debug(spinlock_t *lock) 101 + static void __spin_lock_debug(raw_spinlock_t *lock) 102 102 { 103 103 u64 i; 104 104 u64 loops = loops_per_jiffy * HZ; ··· 106 106 107 107 for (;;) { 108 108 for (i = 0; i < loops; i++) { 109 - if (__raw_spin_trylock(&lock->raw_lock)) 109 + if (arch_spin_trylock(&lock->raw_lock)) 110 110 return; 111 111 __delay(1); 112 112 } ··· 125 125 } 126 126 } 127 127 128 - void _raw_spin_lock(spinlock_t *lock) 128 + void do_raw_spin_lock(raw_spinlock_t *lock) 129 129 { 130 130 debug_spin_lock_before(lock); 131 - if (unlikely(!__raw_spin_trylock(&lock->raw_lock))) 131 + if (unlikely(!arch_spin_trylock(&lock->raw_lock))) 132 132 __spin_lock_debug(lock); 133 133 debug_spin_lock_after(lock); 134 134 } 135 135 136 - int _raw_spin_trylock(spinlock_t *lock) 136 + int do_raw_spin_trylock(raw_spinlock_t *lock) 137 137 { 138 - int ret = 
__raw_spin_trylock(&lock->raw_lock); 138 + int ret = arch_spin_trylock(&lock->raw_lock); 139 139 140 140 if (ret) 141 141 debug_spin_lock_after(lock); ··· 148 148 return ret; 149 149 } 150 150 151 - void _raw_spin_unlock(spinlock_t *lock) 151 + void do_raw_spin_unlock(raw_spinlock_t *lock) 152 152 { 153 153 debug_spin_unlock(lock); 154 - __raw_spin_unlock(&lock->raw_lock); 154 + arch_spin_unlock(&lock->raw_lock); 155 155 } 156 156 157 157 static void rwlock_bug(rwlock_t *lock, const char *msg) ··· 176 176 177 177 for (;;) { 178 178 for (i = 0; i < loops; i++) { 179 - if (__raw_read_trylock(&lock->raw_lock)) 179 + if (arch_read_trylock(&lock->raw_lock)) 180 180 return; 181 181 __delay(1); 182 182 } ··· 193 193 } 194 194 #endif 195 195 196 - void _raw_read_lock(rwlock_t *lock) 196 + void do_raw_read_lock(rwlock_t *lock) 197 197 { 198 198 RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); 199 - __raw_read_lock(&lock->raw_lock); 199 + arch_read_lock(&lock->raw_lock); 200 200 } 201 201 202 - int _raw_read_trylock(rwlock_t *lock) 202 + int do_raw_read_trylock(rwlock_t *lock) 203 203 { 204 - int ret = __raw_read_trylock(&lock->raw_lock); 204 + int ret = arch_read_trylock(&lock->raw_lock); 205 205 206 206 #ifndef CONFIG_SMP 207 207 /* ··· 212 212 return ret; 213 213 } 214 214 215 - void _raw_read_unlock(rwlock_t *lock) 215 + void do_raw_read_unlock(rwlock_t *lock) 216 216 { 217 217 RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic"); 218 - __raw_read_unlock(&lock->raw_lock); 218 + arch_read_unlock(&lock->raw_lock); 219 219 } 220 220 221 221 static inline void debug_write_lock_before(rwlock_t *lock) ··· 251 251 252 252 for (;;) { 253 253 for (i = 0; i < loops; i++) { 254 - if (__raw_write_trylock(&lock->raw_lock)) 254 + if (arch_write_trylock(&lock->raw_lock)) 255 255 return; 256 256 __delay(1); 257 257 } ··· 268 268 } 269 269 #endif 270 270 271 - void _raw_write_lock(rwlock_t *lock) 271 + void do_raw_write_lock(rwlock_t *lock) 272 272 { 273 273 debug_write_lock_before(lock); 274 - __raw_write_lock(&lock->raw_lock); 274 + arch_write_lock(&lock->raw_lock); 275 275 debug_write_lock_after(lock); 276 276 } 277 277 278 - int _raw_write_trylock(rwlock_t *lock) 278 + int do_raw_write_trylock(rwlock_t *lock) 279 279 { 280 - int ret = __raw_write_trylock(&lock->raw_lock); 280 + int ret = arch_write_trylock(&lock->raw_lock); 281 281 282 282 if (ret) 283 283 debug_write_lock_after(lock); ··· 290 290 return ret; 291 291 } 292 292 293 - void _raw_write_unlock(rwlock_t *lock) 293 + void do_raw_write_unlock(rwlock_t *lock) 294 294 { 295 295 debug_write_unlock(lock); 296 - __raw_write_unlock(&lock->raw_lock); 296 + arch_write_unlock(&lock->raw_lock); 297 297 }
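spinlock_debug.c is where all the layers of the new name space meet, so it is a convenient place to summarise the renames this series performs. The block below is an illustrative summary of the layering after the merge, not new API; the demo function name is made up.

	#include <linux/spinlock.h>

	/*
	 * Layering after the series:
	 *
	 *   spin_lock(spinlock_t *)            - lockdep-tracked kernel API
	 *   raw_spin_lock(raw_spinlock_t *)    - always a spinning lock; ends up in
	 *                                        do_raw_spin_lock() when spinlock
	 *                                        debugging/lockdep is enabled
	 *   do_raw_spin_lock()                 - the debug core defined above
	 *                                        (was _raw_spin_lock)
	 *   arch_spin_lock(arch_spinlock_t *)  - the per-architecture backend
	 *                                        (was __raw_spin_lock on raw_spinlock_t)
	 */
	static DEFINE_SPINLOCK(plain_lock);		/* spinlock_t */
	static DEFINE_RAW_SPINLOCK(core_lock);		/* raw_spinlock_t */

	static void name_space_demo(void)
	{
		spin_lock(&plain_lock);
		spin_unlock(&plain_lock);

		raw_spin_lock(&core_lock);
		raw_spin_unlock(&core_lock);
	}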