Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (27 commits)
rtmutex: Add missing rcu_read_unlock() in debug_rt_mutex_print_deadlock()
lockdep: Comment all warnings
lib: atomic64: Change the type of local lock to raw_spinlock_t
locking, lib/atomic64: Annotate atomic64_lock::lock as raw
locking, x86, iommu: Annotate qi->q_lock as raw
locking, x86, iommu: Annotate irq_2_ir_lock as raw
locking, x86, iommu: Annotate iommu->register_lock as raw
locking, dma, ipu: Annotate bank_lock as raw
locking, ARM: Annotate low level hw locks as raw
locking, drivers/dca: Annotate dca_lock as raw
locking, powerpc: Annotate uic->lock as raw
locking, x86: mce: Annotate cmci_discover_lock as raw
locking, ACPI: Annotate c3_lock as raw
locking, oprofile: Annotate oprofilefs lock as raw
locking, video: Annotate vga console lock as raw
locking, latencytop: Annotate latency_lock as raw
locking, timer_stats: Annotate table_lock as raw
locking, rwsem: Annotate inner lock as raw
locking, semaphores: Annotate inner lock as raw
locking, sched: Annotate thread_group_cputimer as raw
...

Fix up conflicts in kernel/posix-cpu-timers.c manually: making
cputimer->cputime a raw lock conflicted with the ABBA fix in commit
bcd5cff7216f ("cputimer: Cure lock inversion").

+670 -573
+13 -13
arch/arm/common/gic.c
··· 33 33 #include <asm/mach/irq.h> 34 34 #include <asm/hardware/gic.h> 35 35 36 - static DEFINE_SPINLOCK(irq_controller_lock); 36 + static DEFINE_RAW_SPINLOCK(irq_controller_lock); 37 37 38 38 /* Address of GIC 0 CPU interface */ 39 39 void __iomem *gic_cpu_base_addr __read_mostly; ··· 82 82 { 83 83 u32 mask = 1 << (d->irq % 32); 84 84 85 - spin_lock(&irq_controller_lock); 85 + raw_spin_lock(&irq_controller_lock); 86 86 writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4); 87 87 if (gic_arch_extn.irq_mask) 88 88 gic_arch_extn.irq_mask(d); 89 - spin_unlock(&irq_controller_lock); 89 + raw_spin_unlock(&irq_controller_lock); 90 90 } 91 91 92 92 static void gic_unmask_irq(struct irq_data *d) 93 93 { 94 94 u32 mask = 1 << (d->irq % 32); 95 95 96 - spin_lock(&irq_controller_lock); 96 + raw_spin_lock(&irq_controller_lock); 97 97 if (gic_arch_extn.irq_unmask) 98 98 gic_arch_extn.irq_unmask(d); 99 99 writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4); 100 - spin_unlock(&irq_controller_lock); 100 + raw_spin_unlock(&irq_controller_lock); 101 101 } 102 102 103 103 static void gic_eoi_irq(struct irq_data *d) 104 104 { 105 105 if (gic_arch_extn.irq_eoi) { 106 - spin_lock(&irq_controller_lock); 106 + raw_spin_lock(&irq_controller_lock); 107 107 gic_arch_extn.irq_eoi(d); 108 - spin_unlock(&irq_controller_lock); 108 + raw_spin_unlock(&irq_controller_lock); 109 109 } 110 110 111 111 writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI); ··· 129 129 if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING) 130 130 return -EINVAL; 131 131 132 - spin_lock(&irq_controller_lock); 132 + raw_spin_lock(&irq_controller_lock); 133 133 134 134 if (gic_arch_extn.irq_set_type) 135 135 gic_arch_extn.irq_set_type(d, type); ··· 154 154 if (enabled) 155 155 writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff); 156 156 157 - spin_unlock(&irq_controller_lock); 157 + raw_spin_unlock(&irq_controller_lock); 158 158 159 159 return 0; 160 160 } ··· 182 182 mask = 0xff << shift; 183 183 bit = 1 << (cpu_logical_map(cpu) + shift); 184 184 185 - spin_lock(&irq_controller_lock); 185 + raw_spin_lock(&irq_controller_lock); 186 186 val = readl_relaxed(reg) & ~mask; 187 187 writel_relaxed(val | bit, reg); 188 - spin_unlock(&irq_controller_lock); 188 + raw_spin_unlock(&irq_controller_lock); 189 189 190 190 return IRQ_SET_MASK_OK; 191 191 } ··· 215 215 216 216 chained_irq_enter(chip, desc); 217 217 218 - spin_lock(&irq_controller_lock); 218 + raw_spin_lock(&irq_controller_lock); 219 219 status = readl_relaxed(chip_data->cpu_base + GIC_CPU_INTACK); 220 - spin_unlock(&irq_controller_lock); 220 + raw_spin_unlock(&irq_controller_lock); 221 221 222 222 gic_irq = (status & 0x3ff); 223 223 if (gic_irq == 1023)
+3 -3
arch/arm/include/asm/dma.h
··· 34 34 #define DMA_MODE_CASCADE 0xc0 35 35 #define DMA_AUTOINIT 0x10 36 36 37 - extern spinlock_t dma_spin_lock; 37 + extern raw_spinlock_t dma_spin_lock; 38 38 39 39 static inline unsigned long claim_dma_lock(void) 40 40 { 41 41 unsigned long flags; 42 - spin_lock_irqsave(&dma_spin_lock, flags); 42 + raw_spin_lock_irqsave(&dma_spin_lock, flags); 43 43 return flags; 44 44 } 45 45 46 46 static inline void release_dma_lock(unsigned long flags) 47 47 { 48 - spin_unlock_irqrestore(&dma_spin_lock, flags); 48 + raw_spin_unlock_irqrestore(&dma_spin_lock, flags); 49 49 } 50 50 51 51 /* Clear the 'DMA Pointer Flip Flop'.
+2 -2
arch/arm/include/asm/mmu.h
··· 6 6 typedef struct { 7 7 #ifdef CONFIG_CPU_HAS_ASID 8 8 unsigned int id; 9 - spinlock_t id_lock; 9 + raw_spinlock_t id_lock; 10 10 #endif 11 11 unsigned int kvm_seq; 12 12 } mm_context_t; ··· 16 16 17 17 /* init_mm.context.id_lock should be initialized. */ 18 18 #define INIT_MM_CONTEXT(name) \ 19 - .context.id_lock = __SPIN_LOCK_UNLOCKED(name.context.id_lock), 19 + .context.id_lock = __RAW_SPIN_LOCK_UNLOCKED(name.context.id_lock), 20 20 #else 21 21 #define ASID(mm) (0) 22 22 #endif
+1 -1
arch/arm/kernel/dma.c
··· 23 23 24 24 #include <asm/mach/dma.h> 25 25 26 - DEFINE_SPINLOCK(dma_spin_lock); 26 + DEFINE_RAW_SPINLOCK(dma_spin_lock); 27 27 EXPORT_SYMBOL(dma_spin_lock); 28 28 29 29 static dma_t *dma_chan[MAX_DMA_CHANNELS];
+3 -3
arch/arm/kernel/smp.c
··· 566 566 } 567 567 #endif 568 568 569 - static DEFINE_SPINLOCK(stop_lock); 569 + static DEFINE_RAW_SPINLOCK(stop_lock); 570 570 571 571 /* 572 572 * ipi_cpu_stop - handle IPI from smp_send_stop() ··· 575 575 { 576 576 if (system_state == SYSTEM_BOOTING || 577 577 system_state == SYSTEM_RUNNING) { 578 - spin_lock(&stop_lock); 578 + raw_spin_lock(&stop_lock); 579 579 printk(KERN_CRIT "CPU%u: stopping\n", cpu); 580 580 dump_stack(); 581 - spin_unlock(&stop_lock); 581 + raw_spin_unlock(&stop_lock); 582 582 } 583 583 584 584 set_cpu_online(cpu, false);
+10 -10
arch/arm/kernel/traps.c
··· 257 257 return ret; 258 258 } 259 259 260 - static DEFINE_SPINLOCK(die_lock); 260 + static DEFINE_RAW_SPINLOCK(die_lock); 261 261 262 262 /* 263 263 * This function is protected against re-entrancy. ··· 269 269 270 270 oops_enter(); 271 271 272 - spin_lock_irq(&die_lock); 272 + raw_spin_lock_irq(&die_lock); 273 273 console_verbose(); 274 274 bust_spinlocks(1); 275 275 if (!user_mode(regs)) ··· 281 281 282 282 bust_spinlocks(0); 283 283 add_taint(TAINT_DIE); 284 - spin_unlock_irq(&die_lock); 284 + raw_spin_unlock_irq(&die_lock); 285 285 oops_exit(); 286 286 287 287 if (in_interrupt()) ··· 324 324 #endif 325 325 326 326 static LIST_HEAD(undef_hook); 327 - static DEFINE_SPINLOCK(undef_lock); 327 + static DEFINE_RAW_SPINLOCK(undef_lock); 328 328 329 329 void register_undef_hook(struct undef_hook *hook) 330 330 { 331 331 unsigned long flags; 332 332 333 - spin_lock_irqsave(&undef_lock, flags); 333 + raw_spin_lock_irqsave(&undef_lock, flags); 334 334 list_add(&hook->node, &undef_hook); 335 - spin_unlock_irqrestore(&undef_lock, flags); 335 + raw_spin_unlock_irqrestore(&undef_lock, flags); 336 336 } 337 337 338 338 void unregister_undef_hook(struct undef_hook *hook) 339 339 { 340 340 unsigned long flags; 341 341 342 - spin_lock_irqsave(&undef_lock, flags); 342 + raw_spin_lock_irqsave(&undef_lock, flags); 343 343 list_del(&hook->node); 344 - spin_unlock_irqrestore(&undef_lock, flags); 344 + raw_spin_unlock_irqrestore(&undef_lock, flags); 345 345 } 346 346 347 347 static int call_undef_hook(struct pt_regs *regs, unsigned int instr) ··· 350 350 unsigned long flags; 351 351 int (*fn)(struct pt_regs *regs, unsigned int instr) = NULL; 352 352 353 - spin_lock_irqsave(&undef_lock, flags); 353 + raw_spin_lock_irqsave(&undef_lock, flags); 354 354 list_for_each_entry(hook, &undef_hook, node) 355 355 if ((instr & hook->instr_mask) == hook->instr_val && 356 356 (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val) 357 357 fn = hook->fn; 358 - spin_unlock_irqrestore(&undef_lock, flags); 358 + raw_spin_unlock_irqrestore(&undef_lock, flags); 359 359 360 360 return fn ? fn(regs, instr) : 1; 361 361 }
+1 -1
arch/arm/mach-footbridge/include/mach/hardware.h
··· 93 93 #define CPLD_FLASH_WR_ENABLE 1 94 94 95 95 #ifndef __ASSEMBLY__ 96 - extern spinlock_t nw_gpio_lock; 96 + extern raw_spinlock_t nw_gpio_lock; 97 97 extern void nw_gpio_modify_op(unsigned int mask, unsigned int set); 98 98 extern void nw_gpio_modify_io(unsigned int mask, unsigned int in); 99 99 extern unsigned int nw_gpio_read(void);
+7 -7
arch/arm/mach-footbridge/netwinder-hw.c
··· 68 68 /* 69 69 * This is a lock for accessing ports GP1_IO_BASE and GP2_IO_BASE 70 70 */ 71 - DEFINE_SPINLOCK(nw_gpio_lock); 71 + DEFINE_RAW_SPINLOCK(nw_gpio_lock); 72 72 EXPORT_SYMBOL(nw_gpio_lock); 73 73 74 74 static unsigned int current_gpio_op; ··· 327 327 /* 328 328 * Set Group1/Group2 outputs 329 329 */ 330 - spin_lock_irqsave(&nw_gpio_lock, flags); 330 + raw_spin_lock_irqsave(&nw_gpio_lock, flags); 331 331 nw_gpio_modify_op(-1, GPIO_RED_LED | GPIO_FAN); 332 - spin_unlock_irqrestore(&nw_gpio_lock, flags); 332 + raw_spin_unlock_irqrestore(&nw_gpio_lock, flags); 333 333 } 334 334 335 335 /* ··· 390 390 { 391 391 unsigned long flags; 392 392 393 - spin_lock_irqsave(&nw_gpio_lock, flags); 393 + raw_spin_lock_irqsave(&nw_gpio_lock, flags); 394 394 nw_cpld_modify(-1, CPLD_UNMUTE | CPLD_7111_DISABLE); 395 - spin_unlock_irqrestore(&nw_gpio_lock, flags); 395 + raw_spin_unlock_irqrestore(&nw_gpio_lock, flags); 396 396 } 397 397 398 398 static unsigned char rwa_unlock[] __initdata = ··· 616 616 cpld_init(); 617 617 rwa010_init(); 618 618 619 - spin_lock_irqsave(&nw_gpio_lock, flags); 619 + raw_spin_lock_irqsave(&nw_gpio_lock, flags); 620 620 nw_gpio_modify_op(GPIO_RED_LED|GPIO_GREEN_LED, DEFAULT_LEDS); 621 - spin_unlock_irqrestore(&nw_gpio_lock, flags); 621 + raw_spin_unlock_irqrestore(&nw_gpio_lock, flags); 622 622 } 623 623 return 0; 624 624 }
+5 -5
arch/arm/mach-footbridge/netwinder-leds.c
··· 31 31 static char led_state; 32 32 static char hw_led_state; 33 33 34 - static DEFINE_SPINLOCK(leds_lock); 34 + static DEFINE_RAW_SPINLOCK(leds_lock); 35 35 36 36 static void netwinder_leds_event(led_event_t evt) 37 37 { 38 38 unsigned long flags; 39 39 40 - spin_lock_irqsave(&leds_lock, flags); 40 + raw_spin_lock_irqsave(&leds_lock, flags); 41 41 42 42 switch (evt) { 43 43 case led_start: ··· 117 117 break; 118 118 } 119 119 120 - spin_unlock_irqrestore(&leds_lock, flags); 120 + raw_spin_unlock_irqrestore(&leds_lock, flags); 121 121 122 122 if (led_state & LED_STATE_ENABLED) { 123 - spin_lock_irqsave(&nw_gpio_lock, flags); 123 + raw_spin_lock_irqsave(&nw_gpio_lock, flags); 124 124 nw_gpio_modify_op(GPIO_RED_LED | GPIO_GREEN_LED, hw_led_state); 125 - spin_unlock_irqrestore(&nw_gpio_lock, flags); 125 + raw_spin_unlock_irqrestore(&nw_gpio_lock, flags); 126 126 } 127 127 } 128 128
+3 -3
arch/arm/mach-integrator/core.c
··· 209 209 210 210 #define CM_CTRL IO_ADDRESS(INTEGRATOR_HDR_CTRL) 211 211 212 - static DEFINE_SPINLOCK(cm_lock); 212 + static DEFINE_RAW_SPINLOCK(cm_lock); 213 213 214 214 /** 215 215 * cm_control - update the CM_CTRL register. ··· 221 221 unsigned long flags; 222 222 u32 val; 223 223 224 - spin_lock_irqsave(&cm_lock, flags); 224 + raw_spin_lock_irqsave(&cm_lock, flags); 225 225 val = readl(CM_CTRL) & ~mask; 226 226 writel(val | set, CM_CTRL); 227 - spin_unlock_irqrestore(&cm_lock, flags); 227 + raw_spin_unlock_irqrestore(&cm_lock, flags); 228 228 } 229 229 230 230 EXPORT_SYMBOL(cm_control);
+7 -7
arch/arm/mach-integrator/pci_v3.c
··· 163 163 * 7:2 register number 164 164 * 165 165 */ 166 - static DEFINE_SPINLOCK(v3_lock); 166 + static DEFINE_RAW_SPINLOCK(v3_lock); 167 167 168 168 #define PCI_BUS_NONMEM_START 0x00000000 169 169 #define PCI_BUS_NONMEM_SIZE SZ_256M ··· 284 284 unsigned long flags; 285 285 u32 v; 286 286 287 - spin_lock_irqsave(&v3_lock, flags); 287 + raw_spin_lock_irqsave(&v3_lock, flags); 288 288 addr = v3_open_config_window(bus, devfn, where); 289 289 290 290 switch (size) { ··· 302 302 } 303 303 304 304 v3_close_config_window(); 305 - spin_unlock_irqrestore(&v3_lock, flags); 305 + raw_spin_unlock_irqrestore(&v3_lock, flags); 306 306 307 307 *val = v; 308 308 return PCIBIOS_SUCCESSFUL; ··· 314 314 unsigned long addr; 315 315 unsigned long flags; 316 316 317 - spin_lock_irqsave(&v3_lock, flags); 317 + raw_spin_lock_irqsave(&v3_lock, flags); 318 318 addr = v3_open_config_window(bus, devfn, where); 319 319 320 320 switch (size) { ··· 335 335 } 336 336 337 337 v3_close_config_window(); 338 - spin_unlock_irqrestore(&v3_lock, flags); 338 + raw_spin_unlock_irqrestore(&v3_lock, flags); 339 339 340 340 return PCIBIOS_SUCCESSFUL; 341 341 } ··· 513 513 hook_fault_code(8, v3_pci_fault, SIGBUS, 0, "external abort on non-linefetch"); 514 514 hook_fault_code(10, v3_pci_fault, SIGBUS, 0, "external abort on non-linefetch"); 515 515 516 - spin_lock_irqsave(&v3_lock, flags); 516 + raw_spin_lock_irqsave(&v3_lock, flags); 517 517 518 518 /* 519 519 * Unlock V3 registers, but only if they were previously locked. ··· 586 586 printk(KERN_ERR "PCI: unable to grab PCI error " 587 587 "interrupt: %d\n", ret); 588 588 589 - spin_unlock_irqrestore(&v3_lock, flags); 589 + raw_spin_unlock_irqrestore(&v3_lock, flags); 590 590 } 591 591 592 592 void __init pci_v3_postinit(void)
+11 -11
arch/arm/mach-ixp4xx/common-pci.c
··· 54 54 * these transactions are atomic or we will end up 55 55 * with corrupt data on the bus or in a driver. 56 56 */ 57 - static DEFINE_SPINLOCK(ixp4xx_pci_lock); 57 + static DEFINE_RAW_SPINLOCK(ixp4xx_pci_lock); 58 58 59 59 /* 60 60 * Read from PCI config space ··· 62 62 static void crp_read(u32 ad_cbe, u32 *data) 63 63 { 64 64 unsigned long flags; 65 - spin_lock_irqsave(&ixp4xx_pci_lock, flags); 65 + raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags); 66 66 *PCI_CRP_AD_CBE = ad_cbe; 67 67 *data = *PCI_CRP_RDATA; 68 - spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); 68 + raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); 69 69 } 70 70 71 71 /* ··· 74 74 static void crp_write(u32 ad_cbe, u32 data) 75 75 { 76 76 unsigned long flags; 77 - spin_lock_irqsave(&ixp4xx_pci_lock, flags); 77 + raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags); 78 78 *PCI_CRP_AD_CBE = CRP_AD_CBE_WRITE | ad_cbe; 79 79 *PCI_CRP_WDATA = data; 80 - spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); 80 + raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); 81 81 } 82 82 83 83 static inline int check_master_abort(void) ··· 101 101 int retval = 0; 102 102 int i; 103 103 104 - spin_lock_irqsave(&ixp4xx_pci_lock, flags); 104 + raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags); 105 105 106 106 *PCI_NP_AD = addr; 107 107 ··· 118 118 if(check_master_abort()) 119 119 retval = 1; 120 120 121 - spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); 121 + raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); 122 122 return retval; 123 123 } 124 124 ··· 127 127 unsigned long flags; 128 128 int retval = 0; 129 129 130 - spin_lock_irqsave(&ixp4xx_pci_lock, flags); 130 + raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags); 131 131 132 132 *PCI_NP_AD = addr; 133 133 ··· 140 140 if(check_master_abort()) 141 141 retval = 1; 142 142 143 - spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); 143 + raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); 144 144 return retval; 145 145 } 146 146 ··· 149 149 unsigned long flags; 150 150 int retval = 0; 151 151 152 - spin_lock_irqsave(&ixp4xx_pci_lock, flags); 152 + raw_spin_lock_irqsave(&ixp4xx_pci_lock, flags); 153 153 154 154 *PCI_NP_AD = addr; 155 155 ··· 162 162 if(check_master_abort()) 163 163 retval = 1; 164 164 165 - spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); 165 + raw_spin_unlock_irqrestore(&ixp4xx_pci_lock, flags); 166 166 return retval; 167 167 } 168 168
+3 -3
arch/arm/mach-shark/leds.c
··· 36 36 static short hw_led_state; 37 37 static short saved_state; 38 38 39 - static DEFINE_SPINLOCK(leds_lock); 39 + static DEFINE_RAW_SPINLOCK(leds_lock); 40 40 41 41 short sequoia_read(int addr) { 42 42 outw(addr,0x24); ··· 52 52 { 53 53 unsigned long flags; 54 54 55 - spin_lock_irqsave(&leds_lock, flags); 55 + raw_spin_lock_irqsave(&leds_lock, flags); 56 56 57 57 hw_led_state = sequoia_read(0x09); 58 58 ··· 144 144 if (led_state & LED_STATE_ENABLED) 145 145 sequoia_write(hw_led_state,0x09); 146 146 147 - spin_unlock_irqrestore(&leds_lock, flags); 147 + raw_spin_unlock_irqrestore(&leds_lock, flags); 148 148 } 149 149 150 150 static int __init leds_init(void)
+23 -23
arch/arm/mm/cache-l2x0.c
··· 29 29 #define CACHE_LINE_SIZE 32 30 30 31 31 static void __iomem *l2x0_base; 32 - static DEFINE_SPINLOCK(l2x0_lock); 32 + static DEFINE_RAW_SPINLOCK(l2x0_lock); 33 33 static uint32_t l2x0_way_mask; /* Bitmask of active ways */ 34 34 static uint32_t l2x0_size; 35 35 ··· 126 126 { 127 127 unsigned long flags; 128 128 129 - spin_lock_irqsave(&l2x0_lock, flags); 129 + raw_spin_lock_irqsave(&l2x0_lock, flags); 130 130 cache_sync(); 131 - spin_unlock_irqrestore(&l2x0_lock, flags); 131 + raw_spin_unlock_irqrestore(&l2x0_lock, flags); 132 132 } 133 133 134 134 static void __l2x0_flush_all(void) ··· 145 145 unsigned long flags; 146 146 147 147 /* clean all ways */ 148 - spin_lock_irqsave(&l2x0_lock, flags); 148 + raw_spin_lock_irqsave(&l2x0_lock, flags); 149 149 __l2x0_flush_all(); 150 - spin_unlock_irqrestore(&l2x0_lock, flags); 150 + raw_spin_unlock_irqrestore(&l2x0_lock, flags); 151 151 } 152 152 153 153 static void l2x0_clean_all(void) ··· 155 155 unsigned long flags; 156 156 157 157 /* clean all ways */ 158 - spin_lock_irqsave(&l2x0_lock, flags); 158 + raw_spin_lock_irqsave(&l2x0_lock, flags); 159 159 writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY); 160 160 cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask); 161 161 cache_sync(); 162 - spin_unlock_irqrestore(&l2x0_lock, flags); 162 + raw_spin_unlock_irqrestore(&l2x0_lock, flags); 163 163 } 164 164 165 165 static void l2x0_inv_all(void) ··· 167 167 unsigned long flags; 168 168 169 169 /* invalidate all ways */ 170 - spin_lock_irqsave(&l2x0_lock, flags); 170 + raw_spin_lock_irqsave(&l2x0_lock, flags); 171 171 /* Invalidating when L2 is enabled is a nono */ 172 172 BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1); 173 173 writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY); 174 174 cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask); 175 175 cache_sync(); 176 - spin_unlock_irqrestore(&l2x0_lock, flags); 176 + raw_spin_unlock_irqrestore(&l2x0_lock, flags); 177 177 } 178 178 179 179 static void l2x0_inv_range(unsigned long start, unsigned long end) ··· 181 181 void __iomem *base = l2x0_base; 182 182 unsigned long flags; 183 183 184 - spin_lock_irqsave(&l2x0_lock, flags); 184 + raw_spin_lock_irqsave(&l2x0_lock, flags); 185 185 if (start & (CACHE_LINE_SIZE - 1)) { 186 186 start &= ~(CACHE_LINE_SIZE - 1); 187 187 debug_writel(0x03); ··· 206 206 } 207 207 208 208 if (blk_end < end) { 209 - spin_unlock_irqrestore(&l2x0_lock, flags); 210 - spin_lock_irqsave(&l2x0_lock, flags); 209 + raw_spin_unlock_irqrestore(&l2x0_lock, flags); 210 + raw_spin_lock_irqsave(&l2x0_lock, flags); 211 211 } 212 212 } 213 213 cache_wait(base + L2X0_INV_LINE_PA, 1); 214 214 cache_sync(); 215 - spin_unlock_irqrestore(&l2x0_lock, flags); 215 + raw_spin_unlock_irqrestore(&l2x0_lock, flags); 216 216 } 217 217 218 218 static void l2x0_clean_range(unsigned long start, unsigned long end) ··· 225 225 return; 226 226 } 227 227 228 - spin_lock_irqsave(&l2x0_lock, flags); 228 + raw_spin_lock_irqsave(&l2x0_lock, flags); 229 229 start &= ~(CACHE_LINE_SIZE - 1); 230 230 while (start < end) { 231 231 unsigned long blk_end = start + min(end - start, 4096UL); ··· 236 236 } 237 237 238 238 if (blk_end < end) { 239 - spin_unlock_irqrestore(&l2x0_lock, flags); 240 - spin_lock_irqsave(&l2x0_lock, flags); 239 + raw_spin_unlock_irqrestore(&l2x0_lock, flags); 240 + raw_spin_lock_irqsave(&l2x0_lock, flags); 241 241 } 242 242 } 243 243 cache_wait(base + L2X0_CLEAN_LINE_PA, 1); 244 244 cache_sync(); 245 - spin_unlock_irqrestore(&l2x0_lock, flags); 245 + raw_spin_unlock_irqrestore(&l2x0_lock, flags); 246 246 } 247 247 248 248 static void l2x0_flush_range(unsigned long start, unsigned long end) ··· 255 255 return; 256 256 } 257 257 258 - spin_lock_irqsave(&l2x0_lock, flags); 258 + raw_spin_lock_irqsave(&l2x0_lock, flags); 259 259 start &= ~(CACHE_LINE_SIZE - 1); 260 260 while (start < end) { 261 261 unsigned long blk_end = start + min(end - start, 4096UL); ··· 268 268 debug_writel(0x00); 269 269 270 270 if (blk_end < end) { 271 - spin_unlock_irqrestore(&l2x0_lock, flags); 272 - spin_lock_irqsave(&l2x0_lock, flags); 271 + raw_spin_unlock_irqrestore(&l2x0_lock, flags); 272 + raw_spin_lock_irqsave(&l2x0_lock, flags); 273 273 } 274 274 } 275 275 cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); 276 276 cache_sync(); 277 - spin_unlock_irqrestore(&l2x0_lock, flags); 277 + raw_spin_unlock_irqrestore(&l2x0_lock, flags); 278 278 } 279 279 280 280 static void l2x0_disable(void) 281 281 { 282 282 unsigned long flags; 283 283 284 - spin_lock_irqsave(&l2x0_lock, flags); 284 + raw_spin_lock_irqsave(&l2x0_lock, flags); 285 285 __l2x0_flush_all(); 286 286 writel_relaxed(0, l2x0_base + L2X0_CTRL); 287 287 dsb(); 288 - spin_unlock_irqrestore(&l2x0_lock, flags); 288 + raw_spin_unlock_irqrestore(&l2x0_lock, flags); 289 289 } 290 290 291 291 static void l2x0_unlock(__u32 cache_id)
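
One detail worth noting in the cache-l2x0.c conversion above: the range operations keep their existing latency-bounding idiom, releasing and immediately retaking the lock between 4096-byte blocks so interrupts are not held off across an arbitrarily large range; with the lock now raw, this remains the only preemption point even on RT. A condensed sketch of that idiom (the per-line maintenance operation is elided):

    static void l2x0_range_op_sketch(unsigned long start, unsigned long end)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&l2x0_lock, flags);
            while (start < end) {
                    /* bound the IRQ-off section to one 4K block at a time */
                    unsigned long blk_end = start + min(end - start, 4096UL);

                    while (start < blk_end) {
                            /* issue the per-line cache maintenance op here */
                            start += CACHE_LINE_SIZE;
                    }

                    if (blk_end < end) {
                            /* brief window for pending interrupts */
                            raw_spin_unlock_irqrestore(&l2x0_lock, flags);
                            raw_spin_lock_irqsave(&l2x0_lock, flags);
                    }
            }
            raw_spin_unlock_irqrestore(&l2x0_lock, flags);
    }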
+7 -7
arch/arm/mm/context.c
··· 16 16 #include <asm/mmu_context.h> 17 17 #include <asm/tlbflush.h> 18 18 19 - static DEFINE_SPINLOCK(cpu_asid_lock); 19 + static DEFINE_RAW_SPINLOCK(cpu_asid_lock); 20 20 unsigned int cpu_last_asid = ASID_FIRST_VERSION; 21 21 #ifdef CONFIG_SMP 22 22 DEFINE_PER_CPU(struct mm_struct *, current_mm); ··· 31 31 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm) 32 32 { 33 33 mm->context.id = 0; 34 - spin_lock_init(&mm->context.id_lock); 34 + raw_spin_lock_init(&mm->context.id_lock); 35 35 } 36 36 37 37 static void flush_context(void) ··· 58 58 * the broadcast. This function is also called via IPI so the 59 59 * mm->context.id_lock has to be IRQ-safe. 60 60 */ 61 - spin_lock_irqsave(&mm->context.id_lock, flags); 61 + raw_spin_lock_irqsave(&mm->context.id_lock, flags); 62 62 if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) { 63 63 /* 64 64 * Old version of ASID found. Set the new one and ··· 67 67 mm->context.id = asid; 68 68 cpumask_clear(mm_cpumask(mm)); 69 69 } 70 - spin_unlock_irqrestore(&mm->context.id_lock, flags); 70 + raw_spin_unlock_irqrestore(&mm->context.id_lock, flags); 71 71 72 72 /* 73 73 * Set the mm_cpumask(mm) bit for the current CPU. ··· 117 117 { 118 118 unsigned int asid; 119 119 120 - spin_lock(&cpu_asid_lock); 120 + raw_spin_lock(&cpu_asid_lock); 121 121 #ifdef CONFIG_SMP 122 122 /* 123 123 * Check the ASID again, in case the change was broadcast from ··· 125 125 */ 126 126 if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) { 127 127 cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); 128 - spin_unlock(&cpu_asid_lock); 128 + raw_spin_unlock(&cpu_asid_lock); 129 129 return; 130 130 } 131 131 #endif ··· 153 153 } 154 154 155 155 set_mm_context(mm, asid); 156 - spin_unlock(&cpu_asid_lock); 156 + raw_spin_unlock(&cpu_asid_lock); 157 157 }
+3 -3
arch/arm/mm/copypage-v4mc.c
··· 30 30 #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \ 31 31 L_PTE_MT_MINICACHE) 32 32 33 - static DEFINE_SPINLOCK(minicache_lock); 33 + static DEFINE_RAW_SPINLOCK(minicache_lock); 34 34 35 35 /* 36 36 * ARMv4 mini-dcache optimised copy_user_highpage ··· 76 76 if (!test_and_set_bit(PG_dcache_clean, &from->flags)) 77 77 __flush_dcache_page(page_mapping(from), from); 78 78 79 - spin_lock(&minicache_lock); 79 + raw_spin_lock(&minicache_lock); 80 80 81 81 set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0); 82 82 flush_tlb_kernel_page(0xffff8000); 83 83 84 84 mc_copy_user_page((void *)0xffff8000, kto); 85 85 86 - spin_unlock(&minicache_lock); 86 + raw_spin_unlock(&minicache_lock); 87 87 88 88 kunmap_atomic(kto, KM_USER1); 89 89 }
+5 -5
arch/arm/mm/copypage-v6.c
··· 27 27 #define from_address (0xffff8000) 28 28 #define to_address (0xffffc000) 29 29 30 - static DEFINE_SPINLOCK(v6_lock); 30 + static DEFINE_RAW_SPINLOCK(v6_lock); 31 31 32 32 /* 33 33 * Copy the user page. No aliasing to deal with so we can just ··· 88 88 * Now copy the page using the same cache colour as the 89 89 * pages ultimate destination. 90 90 */ 91 - spin_lock(&v6_lock); 91 + raw_spin_lock(&v6_lock); 92 92 93 93 set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0); 94 94 set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0); ··· 101 101 102 102 copy_page((void *)kto, (void *)kfrom); 103 103 104 - spin_unlock(&v6_lock); 104 + raw_spin_unlock(&v6_lock); 105 105 } 106 106 107 107 /* ··· 121 121 * Now clear the page using the same cache colour as 122 122 * the pages ultimate destination. 123 123 */ 124 - spin_lock(&v6_lock); 124 + raw_spin_lock(&v6_lock); 125 125 126 126 set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0); 127 127 flush_tlb_kernel_page(to); 128 128 clear_page((void *)to); 129 129 130 - spin_unlock(&v6_lock); 130 + raw_spin_unlock(&v6_lock); 131 131 } 132 132 133 133 struct cpu_user_fns v6_user_fns __initdata = {
+3 -3
arch/arm/mm/copypage-xscale.c
··· 32 32 #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \ 33 33 L_PTE_MT_MINICACHE) 34 34 35 - static DEFINE_SPINLOCK(minicache_lock); 35 + static DEFINE_RAW_SPINLOCK(minicache_lock); 36 36 37 37 /* 38 38 * XScale mini-dcache optimised copy_user_highpage ··· 98 98 if (!test_and_set_bit(PG_dcache_clean, &from->flags)) 99 99 __flush_dcache_page(page_mapping(from), from); 100 100 101 - spin_lock(&minicache_lock); 101 + raw_spin_lock(&minicache_lock); 102 102 103 103 set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0); 104 104 flush_tlb_kernel_page(COPYPAGE_MINICACHE); 105 105 106 106 mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto); 107 107 108 - spin_unlock(&minicache_lock); 108 + raw_spin_unlock(&minicache_lock); 109 109 110 110 kunmap_atomic(kto, KM_USER1); 111 111 }
+12 -12
arch/powerpc/sysdev/uic.c
··· 47 47 int index; 48 48 int dcrbase; 49 49 50 - spinlock_t lock; 50 + raw_spinlock_t lock; 51 51 52 52 /* The remapper for this UIC */ 53 53 struct irq_host *irqhost; ··· 61 61 u32 er, sr; 62 62 63 63 sr = 1 << (31-src); 64 - spin_lock_irqsave(&uic->lock, flags); 64 + raw_spin_lock_irqsave(&uic->lock, flags); 65 65 /* ack level-triggered interrupts here */ 66 66 if (irqd_is_level_type(d)) 67 67 mtdcr(uic->dcrbase + UIC_SR, sr); 68 68 er = mfdcr(uic->dcrbase + UIC_ER); 69 69 er |= sr; 70 70 mtdcr(uic->dcrbase + UIC_ER, er); 71 - spin_unlock_irqrestore(&uic->lock, flags); 71 + raw_spin_unlock_irqrestore(&uic->lock, flags); 72 72 } 73 73 74 74 static void uic_mask_irq(struct irq_data *d) ··· 78 78 unsigned long flags; 79 79 u32 er; 80 80 81 - spin_lock_irqsave(&uic->lock, flags); 81 + raw_spin_lock_irqsave(&uic->lock, flags); 82 82 er = mfdcr(uic->dcrbase + UIC_ER); 83 83 er &= ~(1 << (31 - src)); 84 84 mtdcr(uic->dcrbase + UIC_ER, er); 85 - spin_unlock_irqrestore(&uic->lock, flags); 85 + raw_spin_unlock_irqrestore(&uic->lock, flags); 86 86 } 87 87 88 88 static void uic_ack_irq(struct irq_data *d) ··· 91 91 unsigned int src = irqd_to_hwirq(d); 92 92 unsigned long flags; 93 93 94 - spin_lock_irqsave(&uic->lock, flags); 94 + raw_spin_lock_irqsave(&uic->lock, flags); 95 95 mtdcr(uic->dcrbase + UIC_SR, 1 << (31-src)); 96 - spin_unlock_irqrestore(&uic->lock, flags); 96 + raw_spin_unlock_irqrestore(&uic->lock, flags); 97 97 } 98 98 99 99 static void uic_mask_ack_irq(struct irq_data *d) ··· 104 104 u32 er, sr; 105 105 106 106 sr = 1 << (31-src); 107 - spin_lock_irqsave(&uic->lock, flags); 107 + raw_spin_lock_irqsave(&uic->lock, flags); 108 108 er = mfdcr(uic->dcrbase + UIC_ER); 109 109 er &= ~sr; 110 110 mtdcr(uic->dcrbase + UIC_ER, er); ··· 118 118 */ 119 119 if (!irqd_is_level_type(d)) 120 120 mtdcr(uic->dcrbase + UIC_SR, sr); 121 - spin_unlock_irqrestore(&uic->lock, flags); 121 + raw_spin_unlock_irqrestore(&uic->lock, flags); 122 122 } 123 123 124 124 static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type) ··· 152 152 153 153 mask = ~(1 << (31 - src)); 154 154 155 - spin_lock_irqsave(&uic->lock, flags); 155 + raw_spin_lock_irqsave(&uic->lock, flags); 156 156 tr = mfdcr(uic->dcrbase + UIC_TR); 157 157 pr = mfdcr(uic->dcrbase + UIC_PR); 158 158 tr = (tr & mask) | (trigger << (31-src)); ··· 161 161 mtdcr(uic->dcrbase + UIC_PR, pr); 162 162 mtdcr(uic->dcrbase + UIC_TR, tr); 163 163 164 - spin_unlock_irqrestore(&uic->lock, flags); 164 + raw_spin_unlock_irqrestore(&uic->lock, flags); 165 165 166 166 return 0; 167 167 } ··· 254 254 if (! uic) 255 255 return NULL; /* FIXME: panic? */ 256 256 257 - spin_lock_init(&uic->lock); 257 + raw_spin_lock_init(&uic->lock); 258 258 indexp = of_get_property(node, "cell-index", &len); 259 259 if (!indexp || (len != sizeof(u32))) { 260 260 printk(KERN_ERR "uic: Device node %s has missing or invalid "
+5 -5
arch/x86/kernel/cpu/mcheck/mce_intel.c
··· 28 28 * cmci_discover_lock protects against parallel discovery attempts 29 29 * which could race against each other. 30 30 */ 31 - static DEFINE_SPINLOCK(cmci_discover_lock); 31 + static DEFINE_RAW_SPINLOCK(cmci_discover_lock); 32 32 33 33 #define CMCI_THRESHOLD 1 34 34 ··· 85 85 int hdr = 0; 86 86 int i; 87 87 88 - spin_lock_irqsave(&cmci_discover_lock, flags); 88 + raw_spin_lock_irqsave(&cmci_discover_lock, flags); 89 89 for (i = 0; i < banks; i++) { 90 90 u64 val; 91 91 ··· 116 116 WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks))); 117 117 } 118 118 } 119 - spin_unlock_irqrestore(&cmci_discover_lock, flags); 119 + raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); 120 120 if (hdr) 121 121 printk(KERN_CONT "\n"); 122 122 } ··· 150 150 151 151 if (!cmci_supported(&banks)) 152 152 return; 153 - spin_lock_irqsave(&cmci_discover_lock, flags); 153 + raw_spin_lock_irqsave(&cmci_discover_lock, flags); 154 154 for (i = 0; i < banks; i++) { 155 155 if (!test_bit(i, __get_cpu_var(mce_banks_owned))) 156 156 continue; ··· 160 160 wrmsrl(MSR_IA32_MCx_CTL2(i), val); 161 161 __clear_bit(i, __get_cpu_var(mce_banks_owned)); 162 162 } 163 - spin_unlock_irqrestore(&cmci_discover_lock, flags); 163 + raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); 164 164 } 165 165 166 166 /*
+2 -2
arch/x86/oprofile/nmi_int.c
··· 355 355 int cpu = smp_processor_id(); 356 356 struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu); 357 357 nmi_cpu_save_registers(msrs); 358 - spin_lock(&oprofilefs_lock); 358 + raw_spin_lock(&oprofilefs_lock); 359 359 model->setup_ctrs(model, msrs); 360 360 nmi_cpu_setup_mux(cpu, msrs); 361 - spin_unlock(&oprofilefs_lock); 361 + raw_spin_unlock(&oprofilefs_lock); 362 362 per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC); 363 363 apic_write(APIC_LVTPC, APIC_DM_NMI); 364 364 }
+5 -5
drivers/acpi/processor_idle.c
··· 852 852 } 853 853 854 854 static int c3_cpu_count; 855 - static DEFINE_SPINLOCK(c3_lock); 855 + static DEFINE_RAW_SPINLOCK(c3_lock); 856 856 857 857 /** 858 858 * acpi_idle_enter_bm - enters C3 with proper BM handling ··· 930 930 * without doing anything. 931 931 */ 932 932 if (pr->flags.bm_check && pr->flags.bm_control) { 933 - spin_lock(&c3_lock); 933 + raw_spin_lock(&c3_lock); 934 934 c3_cpu_count++; 935 935 /* Disable bus master arbitration when all CPUs are in C3 */ 936 936 if (c3_cpu_count == num_online_cpus()) 937 937 acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1); 938 - spin_unlock(&c3_lock); 938 + raw_spin_unlock(&c3_lock); 939 939 } else if (!pr->flags.bm_check) { 940 940 ACPI_FLUSH_CPU_CACHE(); 941 941 } ··· 944 944 945 945 /* Re-enable bus master arbitration */ 946 946 if (pr->flags.bm_check && pr->flags.bm_control) { 947 - spin_lock(&c3_lock); 947 + raw_spin_lock(&c3_lock); 948 948 acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0); 949 949 c3_cpu_count--; 950 - spin_unlock(&c3_lock); 950 + raw_spin_unlock(&c3_lock); 951 951 } 952 952 kt2 = ktime_get_real(); 953 953 idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
+44 -34
drivers/dca/dca-core.c
··· 35 35 MODULE_LICENSE("GPL"); 36 36 MODULE_AUTHOR("Intel Corporation"); 37 37 38 - static DEFINE_SPINLOCK(dca_lock); 38 + static DEFINE_RAW_SPINLOCK(dca_lock); 39 39 40 40 static LIST_HEAD(dca_domains); 41 41 ··· 101 101 102 102 INIT_LIST_HEAD(&unregistered_providers); 103 103 104 - spin_lock_irqsave(&dca_lock, flags); 104 + raw_spin_lock_irqsave(&dca_lock, flags); 105 105 106 106 if (list_empty(&dca_domains)) { 107 - spin_unlock_irqrestore(&dca_lock, flags); 107 + raw_spin_unlock_irqrestore(&dca_lock, flags); 108 108 return; 109 109 } 110 110 ··· 116 116 117 117 dca_free_domain(domain); 118 118 119 - spin_unlock_irqrestore(&dca_lock, flags); 119 + raw_spin_unlock_irqrestore(&dca_lock, flags); 120 120 121 121 list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) { 122 122 dca_sysfs_remove_provider(dca); ··· 144 144 domain = dca_find_domain(rc); 145 145 146 146 if (!domain) { 147 - if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains)) { 147 + if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains)) 148 148 dca_providers_blocked = 1; 149 - } else { 150 - domain = dca_allocate_domain(rc); 151 - if (domain) 152 - list_add(&domain->node, &dca_domains); 153 - } 154 149 } 155 150 156 151 return domain; ··· 193 198 if (!dev) 194 199 return -EFAULT; 195 200 196 - spin_lock_irqsave(&dca_lock, flags); 201 + raw_spin_lock_irqsave(&dca_lock, flags); 197 202 198 203 /* check if the requester has not been added already */ 199 204 dca = dca_find_provider_by_dev(dev); 200 205 if (dca) { 201 - spin_unlock_irqrestore(&dca_lock, flags); 206 + raw_spin_unlock_irqrestore(&dca_lock, flags); 202 207 return -EEXIST; 203 208 } 204 209 205 210 pci_rc = dca_pci_rc_from_dev(dev); 206 211 domain = dca_find_domain(pci_rc); 207 212 if (!domain) { 208 - spin_unlock_irqrestore(&dca_lock, flags); 213 + raw_spin_unlock_irqrestore(&dca_lock, flags); 209 214 return -ENODEV; 210 215 } 211 216 ··· 215 220 break; 216 221 } 217 222 218 - spin_unlock_irqrestore(&dca_lock, flags); 223 + raw_spin_unlock_irqrestore(&dca_lock, flags); 219 224 220 225 if (slot < 0) 221 226 return slot; 222 227 223 228 err = dca_sysfs_add_req(dca, dev, slot); 224 229 if (err) { 225 - spin_lock_irqsave(&dca_lock, flags); 230 + raw_spin_lock_irqsave(&dca_lock, flags); 226 231 if (dca == dca_find_provider_by_dev(dev)) 227 232 dca->ops->remove_requester(dca, dev); 228 - spin_unlock_irqrestore(&dca_lock, flags); 233 + raw_spin_unlock_irqrestore(&dca_lock, flags); 229 234 return err; 230 235 } 231 236 ··· 246 251 if (!dev) 247 252 return -EFAULT; 248 253 249 - spin_lock_irqsave(&dca_lock, flags); 254 + raw_spin_lock_irqsave(&dca_lock, flags); 250 255 dca = dca_find_provider_by_dev(dev); 251 256 if (!dca) { 252 - spin_unlock_irqrestore(&dca_lock, flags); 257 + raw_spin_unlock_irqrestore(&dca_lock, flags); 253 258 return -ENODEV; 254 259 } 255 260 slot = dca->ops->remove_requester(dca, dev); 256 - spin_unlock_irqrestore(&dca_lock, flags); 261 + raw_spin_unlock_irqrestore(&dca_lock, flags); 257 262 258 263 if (slot < 0) 259 264 return slot; ··· 275 280 u8 tag; 276 281 unsigned long flags; 277 282 278 - spin_lock_irqsave(&dca_lock, flags); 283 + raw_spin_lock_irqsave(&dca_lock, flags); 279 284 280 285 dca = dca_find_provider_by_dev(dev); 281 286 if (!dca) { 282 - spin_unlock_irqrestore(&dca_lock, flags); 287 + raw_spin_unlock_irqrestore(&dca_lock, flags); 283 288 return -ENODEV; 284 289 } 285 290 tag = dca->ops->get_tag(dca, dev, cpu); 286 291 287 - spin_unlock_irqrestore(&dca_lock, flags); 292 + raw_spin_unlock_irqrestore(&dca_lock, flags); 288 293 return tag; 289 294 } 290 295 ··· 355 360 { 356 361 int err; 357 362 unsigned long flags; 358 - struct dca_domain *domain; 363 + struct dca_domain *domain, *newdomain = NULL; 359 364 360 - spin_lock_irqsave(&dca_lock, flags); 365 + raw_spin_lock_irqsave(&dca_lock, flags); 361 366 if (dca_providers_blocked) { 362 - spin_unlock_irqrestore(&dca_lock, flags); 367 + raw_spin_unlock_irqrestore(&dca_lock, flags); 363 368 return -ENODEV; 364 369 } 365 - spin_unlock_irqrestore(&dca_lock, flags); 370 + raw_spin_unlock_irqrestore(&dca_lock, flags); 366 371 367 372 err = dca_sysfs_add_provider(dca, dev); 368 373 if (err) 369 374 return err; 370 375 371 - spin_lock_irqsave(&dca_lock, flags); 376 + raw_spin_lock_irqsave(&dca_lock, flags); 372 377 domain = dca_get_domain(dev); 373 378 if (!domain) { 379 + struct pci_bus *rc; 380 + 374 381 if (dca_providers_blocked) { 375 - spin_unlock_irqrestore(&dca_lock, flags); 382 + raw_spin_unlock_irqrestore(&dca_lock, flags); 376 383 dca_sysfs_remove_provider(dca); 377 384 unregister_dca_providers(); 378 - } else { 379 - spin_unlock_irqrestore(&dca_lock, flags); 385 + return -ENODEV; 380 386 } 381 - return -ENODEV; 387 + 388 + raw_spin_unlock_irqrestore(&dca_lock, flags); 389 + rc = dca_pci_rc_from_dev(dev); 390 + newdomain = dca_allocate_domain(rc); 391 + if (!newdomain) 392 + return -ENODEV; 393 + raw_spin_lock_irqsave(&dca_lock, flags); 394 + /* Recheck, we might have raced after dropping the lock */ 395 + domain = dca_get_domain(dev); 396 + if (!domain) { 397 + domain = newdomain; 398 + newdomain = NULL; 399 + list_add(&domain->node, &dca_domains); 400 + } 382 401 } 383 402 list_add(&dca->node, &domain->dca_providers); 384 - spin_unlock_irqrestore(&dca_lock, flags); 403 + raw_spin_unlock_irqrestore(&dca_lock, flags); 385 404 386 405 blocking_notifier_call_chain(&dca_provider_chain, 387 406 DCA_PROVIDER_ADD, NULL); 407 + kfree(newdomain); 388 408 return 0; 389 409 } 390 410 EXPORT_SYMBOL_GPL(register_dca_provider); ··· 417 407 blocking_notifier_call_chain(&dca_provider_chain, 418 408 DCA_PROVIDER_REMOVE, NULL); 419 409 420 - spin_lock_irqsave(&dca_lock, flags); 410 + raw_spin_lock_irqsave(&dca_lock, flags); 421 411 422 412 list_del(&dca->node); 423 413 ··· 426 416 if (list_empty(&domain->dca_providers)) 427 417 dca_free_domain(domain); 428 418 429 - spin_unlock_irqrestore(&dca_lock, flags); 419 + raw_spin_unlock_irqrestore(&dca_lock, flags); 430 420 431 421 dca_sysfs_remove_provider(dca); 432 422 }
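
The register_dca_provider() hunk above is the one place in this series where the conversion is more than mechanical: memory must not be allocated while a raw spinlock is held (on PREEMPT_RT even atomic allocations can end up in sleeping locks inside the allocator), so the domain is now allocated in an unlocked window and the lookup repeated once the lock is retaken, freeing the spare copy if another CPU installed one in the meantime. A generic sketch of that idiom; struct obj and obj_find() are hypothetical stand-ins, not part of the driver:

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct obj {
            struct list_head node;
    };

    static struct obj *obj_find(struct list_head *list); /* lock-held lookup */

    static struct obj *obj_get_or_create(struct list_head *list,
                                         raw_spinlock_t *lock)
    {
            struct obj *o, *spare = NULL;
            unsigned long flags;

            raw_spin_lock_irqsave(lock, flags);
            o = obj_find(list);
            if (!o) {
                    /* allocation is not allowed under a raw lock: drop it */
                    raw_spin_unlock_irqrestore(lock, flags);
                    spare = kzalloc(sizeof(*spare), GFP_KERNEL);
                    if (!spare)
                            return NULL;
                    raw_spin_lock_irqsave(lock, flags);
                    /* recheck: another CPU may have won the race meanwhile */
                    o = obj_find(list);
                    if (!o) {
                            o = spare;
                            spare = NULL;
                            list_add(&o->node, list);
                    }
            }
            raw_spin_unlock_irqrestore(lock, flags);
            kfree(spare);   /* no-op unless we lost the race */
            return o;
    }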
+24 -24
drivers/dma/ipu/ipu_irq.c
··· 81 81 /* Protects allocations from the above array of maps */ 82 82 static DEFINE_MUTEX(map_lock); 83 83 /* Protects register accesses and individual mappings */ 84 - static DEFINE_SPINLOCK(bank_lock); 84 + static DEFINE_RAW_SPINLOCK(bank_lock); 85 85 86 86 static struct ipu_irq_map *src2map(unsigned int src) 87 87 { ··· 101 101 uint32_t reg; 102 102 unsigned long lock_flags; 103 103 104 - spin_lock_irqsave(&bank_lock, lock_flags); 104 + raw_spin_lock_irqsave(&bank_lock, lock_flags); 105 105 106 106 bank = map->bank; 107 107 if (!bank) { 108 - spin_unlock_irqrestore(&bank_lock, lock_flags); 108 + raw_spin_unlock_irqrestore(&bank_lock, lock_flags); 109 109 pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq); 110 110 return; 111 111 } ··· 114 114 reg |= (1UL << (map->source & 31)); 115 115 ipu_write_reg(bank->ipu, reg, bank->control); 116 116 117 - spin_unlock_irqrestore(&bank_lock, lock_flags); 117 + raw_spin_unlock_irqrestore(&bank_lock, lock_flags); 118 118 } 119 119 120 120 static void ipu_irq_mask(struct irq_data *d) ··· 124 124 uint32_t reg; 125 125 unsigned long lock_flags; 126 126 127 - spin_lock_irqsave(&bank_lock, lock_flags); 127 + raw_spin_lock_irqsave(&bank_lock, lock_flags); 128 128 129 129 bank = map->bank; 130 130 if (!bank) { 131 - spin_unlock_irqrestore(&bank_lock, lock_flags); 131 + raw_spin_unlock_irqrestore(&bank_lock, lock_flags); 132 132 pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq); 133 133 return; 134 134 } ··· 137 137 reg &= ~(1UL << (map->source & 31)); 138 138 ipu_write_reg(bank->ipu, reg, bank->control); 139 139 140 - spin_unlock_irqrestore(&bank_lock, lock_flags); 140 + raw_spin_unlock_irqrestore(&bank_lock, lock_flags); 141 141 } 142 142 143 143 static void ipu_irq_ack(struct irq_data *d) ··· 146 146 struct ipu_irq_bank *bank; 147 147 unsigned long lock_flags; 148 148 149 - spin_lock_irqsave(&bank_lock, lock_flags); 149 + raw_spin_lock_irqsave(&bank_lock, lock_flags); 150 150 151 151 bank = map->bank; 152 152 if (!bank) { 153 - spin_unlock_irqrestore(&bank_lock, lock_flags); 153 + raw_spin_unlock_irqrestore(&bank_lock, lock_flags); 154 154 pr_err("IPU: %s(%u) - unmapped!\n", __func__, d->irq); 155 155 return; 156 156 } 157 157 158 158 ipu_write_reg(bank->ipu, 1UL << (map->source & 31), bank->status); 159 - spin_unlock_irqrestore(&bank_lock, lock_flags); 159 + raw_spin_unlock_irqrestore(&bank_lock, lock_flags); 160 160 } 161 161 162 162 /** ··· 172 172 unsigned long lock_flags; 173 173 bool ret; 174 174 175 - spin_lock_irqsave(&bank_lock, lock_flags); 175 + raw_spin_lock_irqsave(&bank_lock, lock_flags); 176 176 bank = map->bank; 177 177 ret = bank && ipu_read_reg(bank->ipu, bank->status) & 178 178 (1UL << (map->source & 31)); 179 - spin_unlock_irqrestore(&bank_lock, lock_flags); 179 + raw_spin_unlock_irqrestore(&bank_lock, lock_flags); 180 180 181 181 return ret; 182 182 } ··· 213 213 if (irq_map[i].source < 0) { 214 214 unsigned long lock_flags; 215 215 216 - spin_lock_irqsave(&bank_lock, lock_flags); 216 + raw_spin_lock_irqsave(&bank_lock, lock_flags); 217 217 irq_map[i].source = source; 218 218 irq_map[i].bank = irq_bank + source / 32; 219 - spin_unlock_irqrestore(&bank_lock, lock_flags); 219 + raw_spin_unlock_irqrestore(&bank_lock, lock_flags); 220 220 221 221 ret = irq_map[i].irq; 222 222 pr_debug("IPU: mapped source %u to IRQ %u\n", ··· 252 252 pr_debug("IPU: unmapped source %u from IRQ %u\n", 253 253 source, irq_map[i].irq); 254 254 255 - spin_lock_irqsave(&bank_lock, lock_flags); 255 + raw_spin_lock_irqsave(&bank_lock, lock_flags); 256 256 irq_map[i].source = -EINVAL; 257 257 irq_map[i].bank = NULL; 258 - spin_unlock_irqrestore(&bank_lock, lock_flags); 258 + raw_spin_unlock_irqrestore(&bank_lock, lock_flags); 259 259 260 260 ret = 0; 261 261 break; ··· 276 276 for (i = IPU_IRQ_NR_FN_BANKS; i < IPU_IRQ_NR_BANKS; i++) { 277 277 struct ipu_irq_bank *bank = irq_bank + i; 278 278 279 - spin_lock(&bank_lock); 279 + raw_spin_lock(&bank_lock); 280 280 status = ipu_read_reg(ipu, bank->status); 281 281 /* 282 282 * Don't think we have to clear all interrupts here, they will ··· 284 284 * might want to clear unhandled interrupts after the loop... 285 285 */ 286 286 status &= ipu_read_reg(ipu, bank->control); 287 - spin_unlock(&bank_lock); 287 + raw_spin_unlock(&bank_lock); 288 288 while ((line = ffs(status))) { 289 289 struct ipu_irq_map *map; 290 290 291 291 line--; 292 292 status &= ~(1UL << line); 293 293 294 - spin_lock(&bank_lock); 294 + raw_spin_lock(&bank_lock); 295 295 map = src2map(32 * i + line); 296 296 if (map) 297 297 irq = map->irq; 298 - spin_unlock(&bank_lock); 298 + raw_spin_unlock(&bank_lock); 299 299 300 300 if (!map) { 301 301 pr_err("IPU: Interrupt on unmapped source %u bank %d\n", ··· 317 317 for (i = 0; i < IPU_IRQ_NR_FN_BANKS; i++) { 318 318 struct ipu_irq_bank *bank = irq_bank + i; 319 319 320 - spin_lock(&bank_lock); 320 + raw_spin_lock(&bank_lock); 321 321 status = ipu_read_reg(ipu, bank->status); 322 322 /* Not clearing all interrupts, see above */ 323 323 status &= ipu_read_reg(ipu, bank->control); 324 - spin_unlock(&bank_lock); 324 + raw_spin_unlock(&bank_lock); 325 325 while ((line = ffs(status))) { 326 326 struct ipu_irq_map *map; 327 327 328 328 line--; 329 329 status &= ~(1UL << line); 330 330 331 - spin_lock(&bank_lock); 331 + raw_spin_lock(&bank_lock); 332 332 map = src2map(32 * i + line); 333 333 if (map) 334 334 irq = map->irq; 335 - spin_unlock(&bank_lock); 335 + raw_spin_unlock(&bank_lock); 336 336 337 337 if (!map) { 338 338 pr_err("IPU: Interrupt on unmapped source %u bank %d\n",
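
The demux handlers in ipu_irq.c also show how little the raw lock is allowed to cover: it guards only the status/control register reads and the source-to-IRQ map lookup, while the dispatched handler itself runs with the lock dropped. A sketch condensed from ipu_irq_handler() above, with the surrounding chained-handler plumbing elided:

    static void ipu_demux_bank_sketch(struct ipu_irq_bank *bank, int bank_nr)
    {
            u32 status;
            int line;

            /* sample pending-and-enabled bits under the raw lock */
            raw_spin_lock(&bank_lock);
            status = ipu_read_reg(bank->ipu, bank->status) &
                     ipu_read_reg(bank->ipu, bank->control);
            raw_spin_unlock(&bank_lock);

            while ((line = ffs(status))) {
                    struct ipu_irq_map *map;
                    unsigned int irq = 0;

                    line--;
                    status &= ~(1UL << line);

                    /* only the map lookup needs the lock... */
                    raw_spin_lock(&bank_lock);
                    map = src2map(32 * bank_nr + line);
                    if (map)
                            irq = map->irq;
                    raw_spin_unlock(&bank_lock);

                    /* ...the handler itself runs unlocked */
                    if (map)
                            generic_handle_irq(irq);
            }
    }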
+24 -24
drivers/iommu/dmar.c
··· 652 652 (unsigned long long)iommu->cap, 653 653 (unsigned long long)iommu->ecap); 654 654 655 - spin_lock_init(&iommu->register_lock); 655 + raw_spin_lock_init(&iommu->register_lock); 656 656 657 657 drhd->iommu = iommu; 658 658 return 0; ··· 771 771 restart: 772 772 rc = 0; 773 773 774 - spin_lock_irqsave(&qi->q_lock, flags); 774 + raw_spin_lock_irqsave(&qi->q_lock, flags); 775 775 while (qi->free_cnt < 3) { 776 - spin_unlock_irqrestore(&qi->q_lock, flags); 776 + raw_spin_unlock_irqrestore(&qi->q_lock, flags); 777 777 cpu_relax(); 778 - spin_lock_irqsave(&qi->q_lock, flags); 778 + raw_spin_lock_irqsave(&qi->q_lock, flags); 779 779 } 780 780 781 781 index = qi->free_head; ··· 815 815 if (rc) 816 816 break; 817 817 818 - spin_unlock(&qi->q_lock); 818 + raw_spin_unlock(&qi->q_lock); 819 819 cpu_relax(); 820 - spin_lock(&qi->q_lock); 820 + raw_spin_lock(&qi->q_lock); 821 821 } 822 822 823 823 qi->desc_status[index] = QI_DONE; 824 824 825 825 reclaim_free_desc(qi); 826 - spin_unlock_irqrestore(&qi->q_lock, flags); 826 + raw_spin_unlock_irqrestore(&qi->q_lock, flags); 827 827 828 828 if (rc == -EAGAIN) 829 829 goto restart; ··· 912 912 if (!ecap_qis(iommu->ecap)) 913 913 return; 914 914 915 - spin_lock_irqsave(&iommu->register_lock, flags); 915 + raw_spin_lock_irqsave(&iommu->register_lock, flags); 916 916 917 917 sts = dmar_readq(iommu->reg + DMAR_GSTS_REG); 918 918 if (!(sts & DMA_GSTS_QIES)) ··· 932 932 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, 933 933 !(sts & DMA_GSTS_QIES), sts); 934 934 end: 935 - spin_unlock_irqrestore(&iommu->register_lock, flags); 935 + raw_spin_unlock_irqrestore(&iommu->register_lock, flags); 936 936 } 937 937 938 938 /* ··· 947 947 qi->free_head = qi->free_tail = 0; 948 948 qi->free_cnt = QI_LENGTH; 949 949 950 - spin_lock_irqsave(&iommu->register_lock, flags); 950 + raw_spin_lock_irqsave(&iommu->register_lock, flags); 951 951 952 952 /* write zero to the tail reg */ 953 953 writel(0, iommu->reg + DMAR_IQT_REG); ··· 960 960 /* Make sure hardware complete it */ 961 961 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts); 962 962 963 - spin_unlock_irqrestore(&iommu->register_lock, flags); 963 + raw_spin_unlock_irqrestore(&iommu->register_lock, flags); 964 964 } 965 965 966 966 /* ··· 1009 1009 qi->free_head = qi->free_tail = 0; 1010 1010 qi->free_cnt = QI_LENGTH; 1011 1011 1012 - spin_lock_init(&qi->q_lock); 1012 + raw_spin_lock_init(&qi->q_lock); 1013 1013 1014 1014 __dmar_enable_qi(iommu); 1015 1015 ··· 1075 1075 unsigned long flag; 1076 1076 1077 1077 /* unmask it */ 1078 - spin_lock_irqsave(&iommu->register_lock, flag); 1078 + raw_spin_lock_irqsave(&iommu->register_lock, flag); 1079 1079 writel(0, iommu->reg + DMAR_FECTL_REG); 1080 1080 /* Read a reg to force flush the post write */ 1081 1081 readl(iommu->reg + DMAR_FECTL_REG); 1082 - spin_unlock_irqrestore(&iommu->register_lock, flag); 1082 + raw_spin_unlock_irqrestore(&iommu->register_lock, flag); 1083 1083 } 1084 1084 1085 1085 void dmar_msi_mask(struct irq_data *data) ··· 1088 1088 struct intel_iommu *iommu = irq_data_get_irq_handler_data(data); 1089 1089 1090 1090 /* mask it */ 1091 - spin_lock_irqsave(&iommu->register_lock, flag); 1091 + raw_spin_lock_irqsave(&iommu->register_lock, flag); 1092 1092 writel(DMA_FECTL_IM, iommu->reg + DMAR_FECTL_REG); 1093 1093 /* Read a reg to force flush the post write */ 1094 1094 readl(iommu->reg + DMAR_FECTL_REG); 1095 - spin_unlock_irqrestore(&iommu->register_lock, flag); 1095 + raw_spin_unlock_irqrestore(&iommu->register_lock, flag); 1096 1096 } 1097 1097 1098 1098 void dmar_msi_write(int irq, struct msi_msg *msg) ··· 1100 1100 struct intel_iommu *iommu = irq_get_handler_data(irq); 1101 1101 unsigned long flag; 1102 1102 1103 - spin_lock_irqsave(&iommu->register_lock, flag); 1103 + raw_spin_lock_irqsave(&iommu->register_lock, flag); 1104 1104 writel(msg->data, iommu->reg + DMAR_FEDATA_REG); 1105 1105 writel(msg->address_lo, iommu->reg + DMAR_FEADDR_REG); 1106 1106 writel(msg->address_hi, iommu->reg + DMAR_FEUADDR_REG); 1107 - spin_unlock_irqrestore(&iommu->register_lock, flag); 1107 + raw_spin_unlock_irqrestore(&iommu->register_lock, flag); 1108 1108 } 1109 1109 1110 1110 void dmar_msi_read(int irq, struct msi_msg *msg) ··· 1112 1112 struct intel_iommu *iommu = irq_get_handler_data(irq); 1113 1113 unsigned long flag; 1114 1114 1115 - spin_lock_irqsave(&iommu->register_lock, flag); 1115 + raw_spin_lock_irqsave(&iommu->register_lock, flag); 1116 1116 msg->data = readl(iommu->reg + DMAR_FEDATA_REG); 1117 1117 msg->address_lo = readl(iommu->reg + DMAR_FEADDR_REG); 1118 1118 msg->address_hi = readl(iommu->reg + DMAR_FEUADDR_REG); 1119 - spin_unlock_irqrestore(&iommu->register_lock, flag); 1119 + raw_spin_unlock_irqrestore(&iommu->register_lock, flag); 1120 1120 } 1121 1121 1122 1122 static int dmar_fault_do_one(struct intel_iommu *iommu, int type, ··· 1153 1153 u32 fault_status; 1154 1154 unsigned long flag; 1155 1155 1156 - spin_lock_irqsave(&iommu->register_lock, flag); 1156 + raw_spin_lock_irqsave(&iommu->register_lock, flag); 1157 1157 fault_status = readl(iommu->reg + DMAR_FSTS_REG); 1158 1158 if (fault_status) 1159 1159 printk(KERN_ERR "DRHD: handling fault status reg %x\n", ··· 1192 1192 writel(DMA_FRCD_F, iommu->reg + reg + 1193 1193 fault_index * PRIMARY_FAULT_REG_LEN + 12); 1194 1194 1195 - spin_unlock_irqrestore(&iommu->register_lock, flag); 1195 + raw_spin_unlock_irqrestore(&iommu->register_lock, flag); 1196 1196 1197 1197 dmar_fault_do_one(iommu, type, fault_reason, 1198 1198 source_id, guest_addr); ··· 1200 1200 fault_index++; 1201 1201 if (fault_index >= cap_num_fault_regs(iommu->cap)) 1202 1202 fault_index = 0; 1203 - spin_lock_irqsave(&iommu->register_lock, flag); 1203 + raw_spin_lock_irqsave(&iommu->register_lock, flag); 1204 1204 } 1205 1205 clear_rest: 1206 1206 /* clear all the other faults */ 1207 1207 fault_status = readl(iommu->reg + DMAR_FSTS_REG); 1208 1208 writel(fault_status, iommu->reg + DMAR_FSTS_REG); 1209 1209 1210 - spin_unlock_irqrestore(&iommu->register_lock, flag); 1210 + raw_spin_unlock_irqrestore(&iommu->register_lock, flag); 1211 1211 return IRQ_HANDLED; 1212 1212 }
+18 -18
drivers/iommu/intel-iommu.c
··· 939 939 940 940 addr = iommu->root_entry; 941 941 942 - spin_lock_irqsave(&iommu->register_lock, flag); 942 + raw_spin_lock_irqsave(&iommu->register_lock, flag); 943 943 dmar_writeq(iommu->reg + DMAR_RTADDR_REG, virt_to_phys(addr)); 944 944 945 945 writel(iommu->gcmd | DMA_GCMD_SRTP, iommu->reg + DMAR_GCMD_REG); ··· 948 948 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, 949 949 readl, (sts & DMA_GSTS_RTPS), sts); 950 950 951 - spin_unlock_irqrestore(&iommu->register_lock, flag); 951 + raw_spin_unlock_irqrestore(&iommu->register_lock, flag); 952 952 } 953 953 954 954 static void iommu_flush_write_buffer(struct intel_iommu *iommu) ··· 959 959 if (!rwbf_quirk && !cap_rwbf(iommu->cap)) 960 960 return; 961 961 962 - spin_lock_irqsave(&iommu->register_lock, flag); 962 + raw_spin_lock_irqsave(&iommu->register_lock, flag); 963 963 writel(iommu->gcmd | DMA_GCMD_WBF, iommu->reg + DMAR_GCMD_REG); 964 964 965 965 /* Make sure hardware complete it */ 966 966 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, 967 967 readl, (!(val & DMA_GSTS_WBFS)), val); 968 968 969 - spin_unlock_irqrestore(&iommu->register_lock, flag); 969 + raw_spin_unlock_irqrestore(&iommu->register_lock, flag); 970 970 } 971 971 972 972 /* return value determine if we need a write buffer flush */ ··· 993 993 } 994 994 val |= DMA_CCMD_ICC; 995 995 996 - spin_lock_irqsave(&iommu->register_lock, flag); 996 + raw_spin_lock_irqsave(&iommu->register_lock, flag); 997 997 dmar_writeq(iommu->reg + DMAR_CCMD_REG, val); 998 998 999 999 /* Make sure hardware complete it */ 1000 1000 IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG, 1001 1001 dmar_readq, (!(val & DMA_CCMD_ICC)), val); 1002 1002 1003 - spin_unlock_irqrestore(&iommu->register_lock, flag); 1003 + raw_spin_unlock_irqrestore(&iommu->register_lock, flag); 1004 1004 } 1005 1005 1006 1006 /* return value determine if we need a write buffer flush */ ··· 1039 1039 if (cap_write_drain(iommu->cap)) 1040 1040 val |= DMA_TLB_WRITE_DRAIN; 1041 1041 1042 - spin_lock_irqsave(&iommu->register_lock, flag); 1042 + raw_spin_lock_irqsave(&iommu->register_lock, flag); 1043 1043 /* Note: Only uses first TLB reg currently */ 1044 1044 if (val_iva) 1045 1045 dmar_writeq(iommu->reg + tlb_offset, val_iva); ··· 1049 1049 IOMMU_WAIT_OP(iommu, tlb_offset + 8, 1050 1050 dmar_readq, (!(val & DMA_TLB_IVT)), val); 1051 1051 1052 - spin_unlock_irqrestore(&iommu->register_lock, flag); 1052 + raw_spin_unlock_irqrestore(&iommu->register_lock, flag); 1053 1053 1054 1054 /* check IOTLB invalidation granularity */ 1055 1055 if (DMA_TLB_IAIG(val) == 0) ··· 1165 1165 u32 pmen; 1166 1166 unsigned long flags; 1167 1167 1168 - spin_lock_irqsave(&iommu->register_lock, flags); 1168 + raw_spin_lock_irqsave(&iommu->register_lock, flags); 1169 1169 pmen = readl(iommu->reg + DMAR_PMEN_REG); 1170 1170 pmen &= ~DMA_PMEN_EPM; 1171 1171 writel(pmen, iommu->reg + DMAR_PMEN_REG); ··· 1174 1174 IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG, 1175 1175 readl, !(pmen & DMA_PMEN_PRS), pmen); 1176 1176 1177 - spin_unlock_irqrestore(&iommu->register_lock, flags); 1177 + raw_spin_unlock_irqrestore(&iommu->register_lock, flags); 1178 1178 } 1179 1179 1180 1180 static int iommu_enable_translation(struct intel_iommu *iommu) ··· 1182 1182 u32 sts; 1183 1183 unsigned long flags; 1184 1184 1185 - spin_lock_irqsave(&iommu->register_lock, flags); 1185 + raw_spin_lock_irqsave(&iommu->register_lock, flags); 1186 1186 iommu->gcmd |= DMA_GCMD_TE; 1187 1187 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); 1188 1188 1189 1189 /* Make sure hardware complete it */ 1190 1190 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, 1191 1191 readl, (sts & DMA_GSTS_TES), sts); 1192 1192 1193 - spin_unlock_irqrestore(&iommu->register_lock, flags); 1193 + raw_spin_unlock_irqrestore(&iommu->register_lock, flags); 1194 1194 return 0; 1195 1195 } 1196 1196 ··· 1199 1199 u32 sts; 1200 1200 unsigned long flag; 1201 1201 1202 - spin_lock_irqsave(&iommu->register_lock, flag); 1202 + raw_spin_lock_irqsave(&iommu->register_lock, flag); 1203 1203 iommu->gcmd &= ~DMA_GCMD_TE; 1204 1204 writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); 1205 1205 ··· 1207 1207 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, 1208 1208 readl, (!(sts & DMA_GSTS_TES)), sts); 1209 1209 1210 - spin_unlock_irqrestore(&iommu->register_lock, flag); 1210 + raw_spin_unlock_irqrestore(&iommu->register_lock, flag); 1211 1211 return 0; 1212 1212 } 1213 1213 ··· 3329 3329 for_each_active_iommu(iommu, drhd) { 3330 3330 iommu_disable_translation(iommu); 3331 3331 3332 - spin_lock_irqsave(&iommu->register_lock, flag); 3332 + raw_spin_lock_irqsave(&iommu->register_lock, flag); 3333 3333 3334 3334 iommu->iommu_state[SR_DMAR_FECTL_REG] = 3335 3335 readl(iommu->reg + DMAR_FECTL_REG); ··· 3340 3340 iommu->iommu_state[SR_DMAR_FEUADDR_REG] = 3341 3341 readl(iommu->reg + DMAR_FEUADDR_REG); 3342 3342 3343 - spin_unlock_irqrestore(&iommu->register_lock, flag); 3343 + raw_spin_unlock_irqrestore(&iommu->register_lock, flag); 3344 3344 } 3345 3345 return 0; 3346 3346 ··· 3367 3367 3368 3368 for_each_active_iommu(iommu, drhd) { 3369 3369 3370 - spin_lock_irqsave(&iommu->register_lock, flag); 3370 + raw_spin_lock_irqsave(&iommu->register_lock, flag); 3371 3371 3372 3372 writel(iommu->iommu_state[SR_DMAR_FECTL_REG], 3373 3373 iommu->reg + DMAR_FECTL_REG); ··· 3378 3378 writel(iommu->iommu_state[SR_DMAR_FEUADDR_REG], 3379 3379 iommu->reg + DMAR_FEUADDR_REG); 3380 3380 3381 - spin_unlock_irqrestore(&iommu->register_lock, flag); 3381 + raw_spin_unlock_irqrestore(&iommu->register_lock, flag); 3382 3382 } 3383 3383 3384 3384 for_each_active_iommu(iommu, drhd)
+20 -20
drivers/iommu/intr_remapping.c
··· 54 54 } 55 55 early_param("intremap", setup_intremap); 56 56 57 - static DEFINE_SPINLOCK(irq_2_ir_lock); 57 + static DEFINE_RAW_SPINLOCK(irq_2_ir_lock); 58 58 59 59 static struct irq_2_iommu *irq_2_iommu(unsigned int irq) 60 60 { ··· 71 71 if (!entry || !irq_iommu) 72 72 return -1; 73 73 74 - spin_lock_irqsave(&irq_2_ir_lock, flags); 74 + raw_spin_lock_irqsave(&irq_2_ir_lock, flags); 75 75 76 76 index = irq_iommu->irte_index + irq_iommu->sub_handle; 77 77 *entry = *(irq_iommu->iommu->ir_table->base + index); 78 78 79 - spin_unlock_irqrestore(&irq_2_ir_lock, flags); 79 + raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); 80 80 return 0; 81 81 } 82 82 ··· 110 110 return -1; 111 111 } 112 112 113 - spin_lock_irqsave(&irq_2_ir_lock, flags); 113 + raw_spin_lock_irqsave(&irq_2_ir_lock, flags); 114 114 do { 115 115 for (i = index; i < index + count; i++) 116 116 if (table->base[i].present) ··· 122 122 index = (index + count) % INTR_REMAP_TABLE_ENTRIES; 123 123 124 124 if (index == start_index) { 125 - spin_unlock_irqrestore(&irq_2_ir_lock, flags); 125 + raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); 126 126 printk(KERN_ERR "can't allocate an IRTE\n"); 127 127 return -1; 128 128 } ··· 136 136 irq_iommu->sub_handle = 0; 137 137 irq_iommu->irte_mask = mask; 138 138 139 - spin_unlock_irqrestore(&irq_2_ir_lock, flags); 139 + raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); 140 140 141 141 return index; 142 142 } ··· 161 161 if (!irq_iommu) 162 162 return -1; 163 163 164 - spin_lock_irqsave(&irq_2_ir_lock, flags); 164 + raw_spin_lock_irqsave(&irq_2_ir_lock, flags); 165 165 *sub_handle = irq_iommu->sub_handle; 166 166 index = irq_iommu->irte_index; 167 - spin_unlock_irqrestore(&irq_2_ir_lock, flags); 167 + raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); 168 168 return index; 169 169 } 170 170 ··· 176 176 if (!irq_iommu) 177 177 return -1; 178 178 179 - spin_lock_irqsave(&irq_2_ir_lock, flags); 179 + raw_spin_lock_irqsave(&irq_2_ir_lock, flags); 180 180 181 181 irq_iommu->iommu = iommu; 182 182 irq_iommu->irte_index = index; 183 183 irq_iommu->sub_handle = subhandle; 184 184 irq_iommu->irte_mask = 0; 185 185 186 - spin_unlock_irqrestore(&irq_2_ir_lock, flags); 186 + raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); 187 187 188 188 return 0; 189 189 } ··· 199 199 if (!irq_iommu) 200 200 return -1; 201 201 202 - spin_lock_irqsave(&irq_2_ir_lock, flags); 202 + raw_spin_lock_irqsave(&irq_2_ir_lock, flags); 203 203 204 204 iommu = irq_iommu->iommu; 205 205 ··· 211 211 __iommu_flush_cache(iommu, irte, sizeof(*irte)); 212 212 213 213 rc = qi_flush_iec(iommu, index, 0); 214 - spin_unlock_irqrestore(&irq_2_ir_lock, flags); 214 + raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); 215 215 216 216 return rc; 217 217 } ··· 279 279 if (!irq_iommu) 280 280 return -1; 281 281 282 - spin_lock_irqsave(&irq_2_ir_lock, flags); 282 + raw_spin_lock_irqsave(&irq_2_ir_lock, flags); 283 283 284 284 rc = clear_entries(irq_iommu); 285 285 ··· 288 288 irq_iommu->sub_handle = 0; 289 289 irq_iommu->irte_mask = 0; 290 290 291 - spin_unlock_irqrestore(&irq_2_ir_lock, flags); 291 + raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags); 292 292 293 293 return rc; 294 294 } ··· 418 418 419 419 addr = virt_to_phys((void *)iommu->ir_table->base); 420 420 421 - spin_lock_irqsave(&iommu->register_lock, flags); 421 + raw_spin_lock_irqsave(&iommu->register_lock, flags); 422 422 423 423 dmar_writeq(iommu->reg + DMAR_IRTA_REG, 424 424 (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE); ··· 429 429 430 430 IOMMU_WAIT_OP(iommu, 
DMAR_GSTS_REG, 431 431 readl, (sts & DMA_GSTS_IRTPS), sts); 432 - spin_unlock_irqrestore(&iommu->register_lock, flags); 432 + raw_spin_unlock_irqrestore(&iommu->register_lock, flags); 433 433 434 434 /* 435 435 * global invalidation of interrupt entry cache before enabling ··· 437 437 */ 438 438 qi_global_iec(iommu); 439 439 440 - spin_lock_irqsave(&iommu->register_lock, flags); 440 + raw_spin_lock_irqsave(&iommu->register_lock, flags); 441 441 442 442 /* Enable interrupt-remapping */ 443 443 iommu->gcmd |= DMA_GCMD_IRE; ··· 446 446 IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, 447 447 readl, (sts & DMA_GSTS_IRES), sts); 448 448 449 - spin_unlock_irqrestore(&iommu->register_lock, flags); 449 + raw_spin_unlock_irqrestore(&iommu->register_lock, flags); 450 450 } 451 451 452 452 ··· 494 494 */ 495 495 qi_global_iec(iommu); 496 496 497 - spin_lock_irqsave(&iommu->register_lock, flags); 497 + raw_spin_lock_irqsave(&iommu->register_lock, flags); 498 498 499 499 sts = dmar_readq(iommu->reg + DMAR_GSTS_REG); 500 500 if (!(sts & DMA_GSTS_IRES)) ··· 507 507 readl, !(sts & DMA_GSTS_IRES), sts); 508 508 509 509 end: 510 - spin_unlock_irqrestore(&iommu->register_lock, flags); 510 + raw_spin_unlock_irqrestore(&iommu->register_lock, flags); 511 511 } 512 512 513 513 static int __init dmar_x2apic_optout(void)
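Every hunk in this series follows the same annotation pattern: on PREEMPT_RT a plain spinlock_t turns into a sleeping lock, so a lock that is taken from hard-IRQ context or inside an interrupts-disabled region has to become a raw_spinlock_t, which always busy-waits. A minimal sketch of the pattern; demo_lock and demo_poke_hw() are hypothetical, not part of this patch:

#include <linux/spinlock.h>

/* was: static DEFINE_SPINLOCK(demo_lock); */
static DEFINE_RAW_SPINLOCK(demo_lock);

static void demo_poke_hw(void)
{
	unsigned long flags;

	/* raw_spin_lock_irqsave() keeps spinning even on PREEMPT_RT */
	raw_spin_lock_irqsave(&demo_lock, flags);
	/* ... touch hardware state that must stay atomic ... */
	raw_spin_unlock_irqrestore(&demo_lock, flags);
}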
+2 -2
drivers/oprofile/event_buffer.c
··· 82 82 { 83 83 unsigned long flags; 84 84 85 - spin_lock_irqsave(&oprofilefs_lock, flags); 85 + raw_spin_lock_irqsave(&oprofilefs_lock, flags); 86 86 buffer_size = oprofile_buffer_size; 87 87 buffer_watershed = oprofile_buffer_watershed; 88 - spin_unlock_irqrestore(&oprofilefs_lock, flags); 88 + raw_spin_unlock_irqrestore(&oprofilefs_lock, flags); 89 89 90 90 if (buffer_watershed >= buffer_size) 91 91 return -EINVAL;
+2 -2
drivers/oprofile/oprofile_perf.c
··· 160 160 161 161 static int oprofile_perf_setup(void) 162 162 { 163 - spin_lock(&oprofilefs_lock); 163 + raw_spin_lock(&oprofilefs_lock); 164 164 op_perf_setup(); 165 - spin_unlock(&oprofilefs_lock); 165 + raw_spin_unlock(&oprofilefs_lock); 166 166 return 0; 167 167 } 168 168
+3 -3
drivers/oprofile/oprofilefs.c
··· 21 21 22 22 #define OPROFILEFS_MAGIC 0x6f70726f 23 23 24 - DEFINE_SPINLOCK(oprofilefs_lock); 24 + DEFINE_RAW_SPINLOCK(oprofilefs_lock); 25 25 26 26 static struct inode *oprofilefs_get_inode(struct super_block *sb, int mode) 27 27 { ··· 76 76 if (copy_from_user(tmpbuf, buf, count)) 77 77 return -EFAULT; 78 78 79 - spin_lock_irqsave(&oprofilefs_lock, flags); 79 + raw_spin_lock_irqsave(&oprofilefs_lock, flags); 80 80 *val = simple_strtoul(tmpbuf, NULL, 0); 81 - spin_unlock_irqrestore(&oprofilefs_lock, flags); 81 + raw_spin_unlock_irqrestore(&oprofilefs_lock, flags); 82 82 return 0; 83 83 } 84 84
+21 -21
drivers/video/console/vgacon.c
··· 50 50 #include <video/vga.h> 51 51 #include <asm/io.h> 52 52 53 - static DEFINE_SPINLOCK(vga_lock); 53 + static DEFINE_RAW_SPINLOCK(vga_lock); 54 54 static int cursor_size_lastfrom; 55 55 static int cursor_size_lastto; 56 56 static u32 vgacon_xres; ··· 157 157 * ddprintk might set the console position from interrupt 158 158 * handlers, thus the write has to be IRQ-atomic. 159 159 */ 160 - spin_lock_irqsave(&vga_lock, flags); 160 + raw_spin_lock_irqsave(&vga_lock, flags); 161 161 162 162 #ifndef SLOW_VGA 163 163 v1 = reg + (val & 0xff00); ··· 170 170 outb_p(reg + 1, vga_video_port_reg); 171 171 outb_p(val & 0xff, vga_video_port_val); 172 172 #endif 173 - spin_unlock_irqrestore(&vga_lock, flags); 173 + raw_spin_unlock_irqrestore(&vga_lock, flags); 174 174 } 175 175 176 176 static inline void vga_set_mem_top(struct vc_data *c) ··· 664 664 cursor_size_lastfrom = from; 665 665 cursor_size_lastto = to; 666 666 667 - spin_lock_irqsave(&vga_lock, flags); 667 + raw_spin_lock_irqsave(&vga_lock, flags); 668 668 if (vga_video_type >= VIDEO_TYPE_VGAC) { 669 669 outb_p(VGA_CRTC_CURSOR_START, vga_video_port_reg); 670 670 curs = inb_p(vga_video_port_val); ··· 682 682 outb_p(curs, vga_video_port_val); 683 683 outb_p(VGA_CRTC_CURSOR_END, vga_video_port_reg); 684 684 outb_p(cure, vga_video_port_val); 685 - spin_unlock_irqrestore(&vga_lock, flags); 685 + raw_spin_unlock_irqrestore(&vga_lock, flags); 686 686 } 687 687 688 688 static void vgacon_cursor(struct vc_data *c, int mode) ··· 757 757 unsigned int scanlines = height * c->vc_font.height; 758 758 u8 scanlines_lo = 0, r7 = 0, vsync_end = 0, mode, max_scan; 759 759 760 - spin_lock_irqsave(&vga_lock, flags); 760 + raw_spin_lock_irqsave(&vga_lock, flags); 761 761 762 762 vgacon_xres = width * VGA_FONTWIDTH; 763 763 vgacon_yres = height * c->vc_font.height; ··· 808 808 outb_p(vsync_end, vga_video_port_val); 809 809 } 810 810 811 - spin_unlock_irqrestore(&vga_lock, flags); 811 + raw_spin_unlock_irqrestore(&vga_lock, flags); 812 812 return 0; 813 813 } 814 814 ··· 891 891 { 892 892 /* save original values of VGA controller registers */ 893 893 if (!vga_vesa_blanked) { 894 - spin_lock_irq(&vga_lock); 894 + raw_spin_lock_irq(&vga_lock); 895 895 vga_state.SeqCtrlIndex = vga_r(state->vgabase, VGA_SEQ_I); 896 896 vga_state.CrtCtrlIndex = inb_p(vga_video_port_reg); 897 897 vga_state.CrtMiscIO = vga_r(state->vgabase, VGA_MIS_R); 898 - spin_unlock_irq(&vga_lock); 898 + raw_spin_unlock_irq(&vga_lock); 899 899 900 900 outb_p(0x00, vga_video_port_reg); /* HorizontalTotal */ 901 901 vga_state.HorizontalTotal = inb_p(vga_video_port_val); ··· 918 918 919 919 /* assure that video is enabled */ 920 920 /* "0x20" is VIDEO_ENABLE_bit in register 01 of sequencer */ 921 - spin_lock_irq(&vga_lock); 921 + raw_spin_lock_irq(&vga_lock); 922 922 vga_wseq(state->vgabase, VGA_SEQ_CLOCK_MODE, vga_state.ClockingMode | 0x20); 923 923 924 924 /* test for vertical retrace in process....
*/ ··· 954 954 /* restore both index registers */ 955 955 vga_w(state->vgabase, VGA_SEQ_I, vga_state.SeqCtrlIndex); 956 956 outb_p(vga_state.CrtCtrlIndex, vga_video_port_reg); 957 - spin_unlock_irq(&vga_lock); 957 + raw_spin_unlock_irq(&vga_lock); 958 958 } 959 959 960 960 static void vga_vesa_unblank(struct vgastate *state) 961 961 { 962 962 /* restore original values of VGA controller registers */ 963 - spin_lock_irq(&vga_lock); 963 + raw_spin_lock_irq(&vga_lock); 964 964 vga_w(state->vgabase, VGA_MIS_W, vga_state.CrtMiscIO); 965 965 966 966 outb_p(0x00, vga_video_port_reg); /* HorizontalTotal */ ··· 985 985 /* restore index/control registers */ 986 986 vga_w(state->vgabase, VGA_SEQ_I, vga_state.SeqCtrlIndex); 987 987 outb_p(vga_state.CrtCtrlIndex, vga_video_port_reg); 988 - spin_unlock_irq(&vga_lock); 988 + raw_spin_unlock_irq(&vga_lock); 989 989 } 990 990 991 991 static void vga_pal_blank(struct vgastate *state) ··· 1104 1104 charmap += 4 * cmapsz; 1105 1105 #endif 1106 1106 1107 - spin_lock_irq(&vga_lock); 1107 + raw_spin_lock_irq(&vga_lock); 1108 1108 /* First, the Sequencer */ 1109 1109 vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x1); 1110 1110 /* CPU writes only to map 2 */ ··· 1120 1120 vga_wgfx(state->vgabase, VGA_GFX_MODE, 0x00); 1121 1121 /* map start at A000:0000 */ 1122 1122 vga_wgfx(state->vgabase, VGA_GFX_MISC, 0x00); 1123 - spin_unlock_irq(&vga_lock); 1123 + raw_spin_unlock_irq(&vga_lock); 1124 1124 1125 1125 if (arg) { 1126 1126 if (set) ··· 1147 1147 } 1148 1148 } 1149 1149 1150 - spin_lock_irq(&vga_lock); 1150 + raw_spin_lock_irq(&vga_lock); 1151 1151 /* First, the sequencer, Synchronous reset */ 1152 1152 vga_wseq(state->vgabase, VGA_SEQ_RESET, 0x01); 1153 1153 /* CPU writes to maps 0 and 1 */ ··· 1186 1186 inb_p(video_port_status); 1187 1187 vga_wattr(state->vgabase, VGA_AR_ENABLE_DISPLAY, 0); 1188 1188 } 1189 - spin_unlock_irq(&vga_lock); 1189 + raw_spin_unlock_irq(&vga_lock); 1190 1190 return 0; 1191 1191 } 1192 1192 ··· 1211 1211 registers; they are write-only on EGA, but it appears that they 1212 1212 are all don't care bits on EGA, so I guess it doesn't matter. */ 1213 1213 1214 - spin_lock_irq(&vga_lock); 1214 + raw_spin_lock_irq(&vga_lock); 1215 1215 outb_p(0x07, vga_video_port_reg); /* CRTC overflow register */ 1216 1216 ovr = inb_p(vga_video_port_val); 1217 1217 outb_p(0x09, vga_video_port_reg); /* Font size register */ 1218 1218 fsr = inb_p(vga_video_port_val); 1219 - spin_unlock_irq(&vga_lock); 1219 + raw_spin_unlock_irq(&vga_lock); 1220 1220 1221 1221 vde = maxscan & 0xff; /* Vertical display end reg */ 1222 1222 ovr = (ovr & 0xbd) + /* Overflow register */ 1223 1223 ((maxscan & 0x100) >> 7) + ((maxscan & 0x200) >> 3); 1224 1224 fsr = (fsr & 0xe0) + (fontheight - 1); /* Font size register */ 1225 1225 1226 - spin_lock_irq(&vga_lock); 1226 + raw_spin_lock_irq(&vga_lock); 1227 1227 outb_p(0x07, vga_video_port_reg); /* CRTC overflow register */ 1228 1228 outb_p(ovr, vga_video_port_val); 1229 1229 outb_p(0x09, vga_video_port_reg); /* Font size */ 1230 1230 outb_p(fsr, vga_video_port_val); 1231 1231 outb_p(0x12, vga_video_port_reg); /* Vertical display limit */ 1232 1232 outb_p(vde, vga_video_port_val); 1233 - spin_unlock_irq(&vga_lock); 1233 + raw_spin_unlock_irq(&vga_lock); 1234 1234 vga_video_font_height = fontheight; 1235 1235 1236 1236 for (i = 0; i < MAX_NR_CONSOLES; i++) {
+1 -1
include/linux/init_task.h
··· 42 42 .cputimer = { \ 43 43 .cputime = INIT_CPUTIME, \ 44 44 .running = 0, \ 45 - .lock = __SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \ 45 + .lock = __RAW_SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \ 46 46 }, \ 47 47 .cred_guard_mutex = \ 48 48 __MUTEX_INITIALIZER(sig.cred_guard_mutex), \
+2 -2
include/linux/intel-iommu.h
··· 271 271 }; 272 272 273 273 struct q_inval { 274 - spinlock_t q_lock; 274 + raw_spinlock_t q_lock; 275 275 struct qi_desc *desc; /* invalidation queue */ 276 276 int *desc_status; /* desc status */ 277 277 int free_head; /* first free entry */ ··· 311 311 u64 cap; 312 312 u64 ecap; 313 313 u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */ 314 - spinlock_t register_lock; /* protect register handling */ 314 + raw_spinlock_t register_lock; /* protect register handling */ 315 315 int seq_id; /* sequence id of the iommu */ 316 316 int agaw; /* agaw of this iommu */ 317 317 int msagaw; /* max sagaw of this iommu */
+1 -1
include/linux/kprobes.h
··· 181 181 int nmissed; 182 182 size_t data_size; 183 183 struct hlist_head free_instances; 184 - spinlock_t lock; 184 + raw_spinlock_t lock; 185 185 }; 186 186 187 187 struct kretprobe_instance {
+1 -1
include/linux/oprofile.h
··· 166 166 int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, size_t count); 167 167 168 168 /** lock for read/write safety */ 169 - extern spinlock_t oprofilefs_lock; 169 + extern raw_spinlock_t oprofilefs_lock; 170 170 171 171 /** 172 172 * Add the contents of a circular buffer to the event buffer.
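This declaration completes the oprofile conversion begun in the three hunks above: because spinlock_t and raw_spinlock_t are distinct types, the definition, the extern declaration, and every locking call site have to change in the same patch or the build fails. The full set, in miniature (all names here are the real ones from the patch):

/* oprofilefs.c - the definition */
DEFINE_RAW_SPINLOCK(oprofilefs_lock);

/* oprofile.h - the declaration, which must match the definition's type */
extern raw_spinlock_t oprofilefs_lock;

/* event_buffer.c, oprofile_perf.c, ... - every call site */
raw_spin_lock_irqsave(&oprofilefs_lock, flags);
/* ... read or update the oprofilefs state ... */
raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);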
+1 -1
include/linux/percpu_counter.h
··· 16 16 #ifdef CONFIG_SMP 17 17 18 18 struct percpu_counter { 19 - spinlock_t lock; 19 + raw_spinlock_t lock; 20 20 s64 count; 21 21 #ifdef CONFIG_HOTPLUG_CPU 22 22 struct list_head list; /* All percpu_counters are on a list */
+3 -3
include/linux/proportions.h
··· 58 58 */ 59 59 int shift; 60 60 unsigned long period; 61 - spinlock_t lock; /* protect the snapshot state */ 61 + raw_spinlock_t lock; /* protect the snapshot state */ 62 62 }; 63 63 64 64 int prop_local_init_percpu(struct prop_local_percpu *pl); ··· 106 106 */ 107 107 unsigned long period; 108 108 int shift; 109 - spinlock_t lock; /* protect the snapshot state */ 109 + raw_spinlock_t lock; /* protect the snapshot state */ 110 110 }; 111 111 112 112 #define INIT_PROP_LOCAL_SINGLE(name) \ 113 - { .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ 113 + { .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ 114 114 } 115 115 116 116 int prop_local_init_single(struct prop_local_single *pl);
+3 -3
include/linux/ratelimit.h
··· 8 8 #define DEFAULT_RATELIMIT_BURST 10 9 9 10 10 struct ratelimit_state { 11 - spinlock_t lock; /* protect the state */ 11 + raw_spinlock_t lock; /* protect the state */ 12 12 13 13 int interval; 14 14 int burst; ··· 20 20 #define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \ 21 21 \ 22 22 struct ratelimit_state name = { \ 23 - .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ 23 + .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ 24 24 .interval = interval_init, \ 25 25 .burst = burst_init, \ 26 26 } ··· 28 28 static inline void ratelimit_state_init(struct ratelimit_state *rs, 29 29 int interval, int burst) 30 30 { 31 - spin_lock_init(&rs->lock); 31 + raw_spin_lock_init(&rs->lock); 32 32 rs->interval = interval; 33 33 rs->burst = burst; 34 34 rs->printed = 0;
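ratelimit.h is the compact illustration of what a full conversion touches: the field type, the static initializer, and the runtime initializer all move to their raw_ counterparts. The same triple recurs in init_task.h and proportions.h above and in rwsem.h and semaphore.h below. Sketched on a hypothetical demo_state, not a struct from this patch:

#include <linux/spinlock.h>

struct demo_state {
	raw_spinlock_t lock;	/* 1. the field type */
	int hits;
};

/* 2. the static initializer */
#define DEFINE_DEMO_STATE(name) \
	struct demo_state name = { \
		.lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
		.hits = 0, \
	}

/* 3. the runtime initializer */
static inline void demo_state_init(struct demo_state *s)
{
	raw_spin_lock_init(&s->lock);
	s->hits = 0;
}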
+1 -1
include/linux/rwsem-spinlock.h
··· 22 22 */ 23 23 struct rw_semaphore { 24 24 __s32 activity; 25 - spinlock_t wait_lock; 25 + raw_spinlock_t wait_lock; 26 26 struct list_head wait_list; 27 27 #ifdef CONFIG_DEBUG_LOCK_ALLOC 28 28 struct lockdep_map dep_map;
+6 -4
include/linux/rwsem.h
··· 25 25 /* All arch specific implementations share the same struct */ 26 26 struct rw_semaphore { 27 27 long count; 28 - spinlock_t wait_lock; 28 + raw_spinlock_t wait_lock; 29 29 struct list_head wait_list; 30 30 #ifdef CONFIG_DEBUG_LOCK_ALLOC 31 31 struct lockdep_map dep_map; ··· 56 56 # define __RWSEM_DEP_MAP_INIT(lockname) 57 57 #endif 58 58 59 - #define __RWSEM_INITIALIZER(name) \ 60 - { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED(name.wait_lock), \ 61 - LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) } 59 + #define __RWSEM_INITIALIZER(name) \ 60 + { RWSEM_UNLOCKED_VALUE, \ 61 + __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ 62 + LIST_HEAD_INIT((name).wait_list) \ 63 + __RWSEM_DEP_MAP_INIT(name) } 62 64 63 65 #define DECLARE_RWSEM(name) \ 64 66 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
+2 -2
include/linux/sched.h
··· 510 510 struct thread_group_cputimer { 511 511 struct task_cputime cputime; 512 512 int running; 513 - spinlock_t lock; 513 + raw_spinlock_t lock; 514 514 }; 515 515 516 516 #include <linux/rwsem.h> ··· 2566 2566 2567 2567 static inline void thread_group_cputime_init(struct signal_struct *sig) 2568 2568 { 2569 - spin_lock_init(&sig->cputimer.lock); 2569 + raw_spin_lock_init(&sig->cputimer.lock); 2570 2570 } 2571 2571 2572 2572 /*
+2 -2
include/linux/semaphore.h
··· 14 14 15 15 /* Please don't access any members of this structure directly */ 16 16 struct semaphore { 17 - spinlock_t lock; 17 + raw_spinlock_t lock; 18 18 unsigned int count; 19 19 struct list_head wait_list; 20 20 }; 21 21 22 22 #define __SEMAPHORE_INITIALIZER(name, n) \ 23 23 { \ 24 - .lock = __SPIN_LOCK_UNLOCKED((name).lock), \ 24 + .lock = __RAW_SPIN_LOCK_UNLOCKED((name).lock), \ 25 25 .count = n, \ 26 26 .wait_list = LIST_HEAD_INIT((name).wait_list), \ 27 27 }
+9 -9
kernel/cgroup.c
··· 265 265 /* the list of cgroups eligible for automatic release. Protected by 266 266 * release_list_lock */ 267 267 static LIST_HEAD(release_list); 268 - static DEFINE_SPINLOCK(release_list_lock); 268 + static DEFINE_RAW_SPINLOCK(release_list_lock); 269 269 static void cgroup_release_agent(struct work_struct *work); 270 270 static DECLARE_WORK(release_agent_work, cgroup_release_agent); 271 271 static void check_for_release(struct cgroup *cgrp); ··· 4014 4014 finish_wait(&cgroup_rmdir_waitq, &wait); 4015 4015 clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags); 4016 4016 4017 - spin_lock(&release_list_lock); 4017 + raw_spin_lock(&release_list_lock); 4018 4018 set_bit(CGRP_REMOVED, &cgrp->flags); 4019 4019 if (!list_empty(&cgrp->release_list)) 4020 4020 list_del_init(&cgrp->release_list); 4021 - spin_unlock(&release_list_lock); 4021 + raw_spin_unlock(&release_list_lock); 4022 4022 4023 4023 cgroup_lock_hierarchy(cgrp->root); 4024 4024 /* delete this cgroup from parent->children */ ··· 4671 4671 * already queued for a userspace notification, queue 4672 4672 * it now */ 4673 4673 int need_schedule_work = 0; 4674 - spin_lock(&release_list_lock); 4674 + raw_spin_lock(&release_list_lock); 4675 4675 if (!cgroup_is_removed(cgrp) && 4676 4676 list_empty(&cgrp->release_list)) { 4677 4677 list_add(&cgrp->release_list, &release_list); 4678 4678 need_schedule_work = 1; 4679 4679 } 4680 - spin_unlock(&release_list_lock); 4680 + raw_spin_unlock(&release_list_lock); 4681 4681 if (need_schedule_work) 4682 4682 schedule_work(&release_agent_work); 4683 4683 } ··· 4729 4729 { 4730 4730 BUG_ON(work != &release_agent_work); 4731 4731 mutex_lock(&cgroup_mutex); 4732 - spin_lock(&release_list_lock); 4732 + raw_spin_lock(&release_list_lock); 4733 4733 while (!list_empty(&release_list)) { 4734 4734 char *argv[3], *envp[3]; 4735 4735 int i; ··· 4738 4738 struct cgroup, 4739 4739 release_list); 4740 4740 list_del_init(&cgrp->release_list); 4741 - spin_unlock(&release_list_lock); 4741 + raw_spin_unlock(&release_list_lock); 4742 4742 pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL); 4743 4743 if (!pathbuf) 4744 4744 goto continue_free; ··· 4768 4768 continue_free: 4769 4769 kfree(pathbuf); 4770 4770 kfree(agentbuf); 4771 - spin_lock(&release_list_lock); 4771 + raw_spin_lock(&release_list_lock); 4772 4772 } 4773 - spin_unlock(&release_list_lock); 4773 + raw_spin_unlock(&release_list_lock); 4774 4774 mutex_unlock(&cgroup_mutex); 4775 4775 } 4776 4776
+17 -17
kernel/kprobes.c
··· 78 78 static DEFINE_MUTEX(kprobe_mutex); 79 79 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL; 80 80 static struct { 81 - spinlock_t lock ____cacheline_aligned_in_smp; 81 + raw_spinlock_t lock ____cacheline_aligned_in_smp; 82 82 } kretprobe_table_locks[KPROBE_TABLE_SIZE]; 83 83 84 - static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash) 84 + static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash) 85 85 { 86 86 return &(kretprobe_table_locks[hash].lock); 87 87 } ··· 1013 1013 hlist_del(&ri->hlist); 1014 1014 INIT_HLIST_NODE(&ri->hlist); 1015 1015 if (likely(rp)) { 1016 - spin_lock(&rp->lock); 1016 + raw_spin_lock(&rp->lock); 1017 1017 hlist_add_head(&ri->hlist, &rp->free_instances); 1018 - spin_unlock(&rp->lock); 1018 + raw_spin_unlock(&rp->lock); 1019 1019 } else 1020 1020 /* Unregistering */ 1021 1021 hlist_add_head(&ri->hlist, head); ··· 1026 1026 __acquires(hlist_lock) 1027 1027 { 1028 1028 unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS); 1029 - spinlock_t *hlist_lock; 1029 + raw_spinlock_t *hlist_lock; 1030 1030 1031 1031 *head = &kretprobe_inst_table[hash]; 1032 1032 hlist_lock = kretprobe_table_lock_ptr(hash); 1033 - spin_lock_irqsave(hlist_lock, *flags); 1033 + raw_spin_lock_irqsave(hlist_lock, *flags); 1034 1034 } 1035 1035 1036 1036 static void __kprobes kretprobe_table_lock(unsigned long hash, 1037 1037 unsigned long *flags) 1038 1038 __acquires(hlist_lock) 1039 1039 { 1040 - spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); 1041 - spin_lock_irqsave(hlist_lock, *flags); 1040 + raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); 1041 + raw_spin_lock_irqsave(hlist_lock, *flags); 1042 1042 } 1043 1043 1044 1044 void __kprobes kretprobe_hash_unlock(struct task_struct *tsk, ··· 1046 1046 __releases(hlist_lock) 1047 1047 { 1048 1048 unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS); 1049 - spinlock_t *hlist_lock; 1049 + raw_spinlock_t *hlist_lock; 1050 1050 1051 1051 hlist_lock = kretprobe_table_lock_ptr(hash); 1052 - spin_unlock_irqrestore(hlist_lock, *flags); 1052 + raw_spin_unlock_irqrestore(hlist_lock, *flags); 1053 1053 } 1054 1054 1055 1055 static void __kprobes kretprobe_table_unlock(unsigned long hash, 1056 1056 unsigned long *flags) 1057 1057 __releases(hlist_lock) 1058 1058 { 1059 - spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); 1060 - spin_unlock_irqrestore(hlist_lock, *flags); 1059 + raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); 1060 + raw_spin_unlock_irqrestore(hlist_lock, *flags); 1061 1061 } 1062 1062 1063 1063 /* ··· 1663 1663 1664 1664 /*TODO: consider to only swap the RA after the last pre_handler fired */ 1665 1665 hash = hash_ptr(current, KPROBE_HASH_BITS); 1666 - spin_lock_irqsave(&rp->lock, flags); 1666 + raw_spin_lock_irqsave(&rp->lock, flags); 1667 1667 if (!hlist_empty(&rp->free_instances)) { 1668 1668 ri = hlist_entry(rp->free_instances.first, 1669 1669 struct kretprobe_instance, hlist); 1670 1670 hlist_del(&ri->hlist); 1671 - spin_unlock_irqrestore(&rp->lock, flags); 1671 + raw_spin_unlock_irqrestore(&rp->lock, flags); 1672 1672 1673 1673 ri->rp = rp; 1674 1674 ri->task = current; ··· 1685 1685 kretprobe_table_unlock(hash, &flags); 1686 1686 } else { 1687 1687 rp->nmissed++; 1688 - spin_unlock_irqrestore(&rp->lock, flags); 1688 + raw_spin_unlock_irqrestore(&rp->lock, flags); 1689 1689 } 1690 1690 return 0; 1691 1691 } ··· 1721 1721 rp->maxactive = num_possible_cpus(); 1722 1722 #endif 1723 1723 } 1724 - spin_lock_init(&rp->lock); 1724 + raw_spin_lock_init(&rp->lock);
1725 1725 INIT_HLIST_HEAD(&rp->free_instances); 1726 1726 for (i = 0; i < rp->maxactive; i++) { 1727 1727 inst = kmalloc(sizeof(struct kretprobe_instance) + ··· 1959 1959 for (i = 0; i < KPROBE_TABLE_SIZE; i++) { 1960 1960 INIT_HLIST_HEAD(&kprobe_table[i]); 1961 1961 INIT_HLIST_HEAD(&kretprobe_inst_table[i]); 1962 - spin_lock_init(&(kretprobe_table_locks[i].lock)); 1962 + raw_spin_lock_init(&(kretprobe_table_locks[i].lock)); 1963 1963 } 1964 1964 1965 1965 /*
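The kretprobe hunks also show the pointer form of the conversion: kretprobe_table_lock_ptr() now hands back a raw_spinlock_t *, so every caller's local variable changes type as well. The bucket-lock pattern itself, sketched with hypothetical names (hash_ptr() and ____cacheline_aligned_in_smp are the real kernel facilities):

#include <linux/spinlock.h>
#include <linux/hash.h>
#include <linux/cache.h>

#define DEMO_HASH_BITS	6
#define DEMO_TABLE_SIZE	(1 << DEMO_HASH_BITS)

/* one padded raw lock per hash bucket, so buckets don't false-share */
static struct {
	raw_spinlock_t lock ____cacheline_aligned_in_smp;
} demo_table_locks[DEMO_TABLE_SIZE];

static raw_spinlock_t *demo_lock_ptr(void *obj)
{
	return &demo_table_locks[hash_ptr(obj, DEMO_HASH_BITS)].lock;
}

/* usage:
 *	unsigned long flags;
 *	raw_spin_lock_irqsave(demo_lock_ptr(task), flags);
 *	...
 *	raw_spin_unlock_irqrestore(demo_lock_ptr(task), flags);
 */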
+7 -7
kernel/latencytop.c
··· 58 58 #include <linux/list.h> 59 59 #include <linux/stacktrace.h> 60 60 61 - static DEFINE_SPINLOCK(latency_lock); 61 + static DEFINE_RAW_SPINLOCK(latency_lock); 62 62 63 63 #define MAXLR 128 64 64 static struct latency_record latency_record[MAXLR]; ··· 72 72 if (!latencytop_enabled) 73 73 return; 74 74 75 - spin_lock_irqsave(&latency_lock, flags); 75 + raw_spin_lock_irqsave(&latency_lock, flags); 76 76 memset(&p->latency_record, 0, sizeof(p->latency_record)); 77 77 p->latency_record_count = 0; 78 - spin_unlock_irqrestore(&latency_lock, flags); 78 + raw_spin_unlock_irqrestore(&latency_lock, flags); 79 79 } 80 80 81 81 static void clear_global_latency_tracing(void) 82 82 { 83 83 unsigned long flags; 84 84 85 - spin_lock_irqsave(&latency_lock, flags); 85 + raw_spin_lock_irqsave(&latency_lock, flags); 86 86 memset(&latency_record, 0, sizeof(latency_record)); 87 - spin_unlock_irqrestore(&latency_lock, flags); 87 + raw_spin_unlock_irqrestore(&latency_lock, flags); 88 88 } 89 89 90 90 static void __sched ··· 190 190 lat.max = usecs; 191 191 store_stacktrace(tsk, &lat); 192 192 193 - spin_lock_irqsave(&latency_lock, flags); 193 + raw_spin_lock_irqsave(&latency_lock, flags); 194 194 195 195 account_global_scheduler_latency(tsk, &lat); 196 196 ··· 231 231 memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record)); 232 232 233 233 out_unlock: 234 - spin_unlock_irqrestore(&latency_lock, flags); 234 + raw_spin_unlock_irqrestore(&latency_lock, flags); 235 235 } 236 236 237 237 static int lstats_show(struct seq_file *m, void *v)
+147 -9
kernel/lockdep.c
··· 96 96 97 97 static inline int graph_unlock(void) 98 98 { 99 - if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) 99 + if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) { 100 + /* 101 + * The lockdep graph lock isn't locked while we expect it to 102 + * be, we're confused now, bye! 103 + */ 100 104 return DEBUG_LOCKS_WARN_ON(1); 105 + } 101 106 102 107 current->lockdep_recursion--; 103 108 arch_spin_unlock(&lockdep_lock); ··· 139 134 static inline struct lock_class *hlock_class(struct held_lock *hlock) 140 135 { 141 136 if (!hlock->class_idx) { 137 + /* 138 + * Someone passed in garbage, we give up. 139 + */ 142 140 DEBUG_LOCKS_WARN_ON(1); 143 141 return NULL; 144 142 } ··· 695 687 */ 696 688 list_for_each_entry(class, hash_head, hash_entry) { 697 689 if (class->key == key) { 690 + /* 691 + * Huh! same key, different name? Did someone trample 692 + * on some memory? We're most confused. 693 + */ 698 694 WARN_ON_ONCE(class->name != lock->name); 699 695 return class; 700 696 } ··· 812 800 else if (subclass < NR_LOCKDEP_CACHING_CLASSES) 813 801 lock->class_cache[subclass] = class; 814 802 803 + /* 804 + * Hash collision, did we smoke some? We found a class with a matching 805 + * hash but the subclass -- which is hashed in -- didn't match. 806 + */ 815 807 if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass)) 816 808 return NULL; 817 809 ··· 942 926 unsigned long nr; 943 927 944 928 nr = lock - list_entries; 945 - WARN_ON(nr >= nr_list_entries); 929 + WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */ 946 930 lock->parent = parent; 947 931 lock->class->dep_gen_id = lockdep_dependency_gen_id; 948 932 } ··· 952 936 unsigned long nr; 953 937 954 938 nr = lock - list_entries; 955 - WARN_ON(nr >= nr_list_entries); 939 + WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */ 956 940 return lock->class->dep_gen_id == lockdep_dependency_gen_id; 957 941 } 958 942 ··· 1212 1196 if (!debug_locks_off_graph_unlock()) 1213 1197 return 0; 1214 1198 1199 + /* 1200 + * Breadth-first-search failed, graph got corrupted? 1201 + */ 1215 1202 WARN(1, "lockdep bfs error:%d\n", ret); 1216 1203 1217 1204 return 0; ··· 1963 1944 if (!debug_locks_off_graph_unlock()) 1964 1945 return 0; 1965 1946 1947 + /* 1948 + * Clearly we all shouldn't be here, but since we made it we 1949 + * can reliable say we messed up our state. See the above two 1950 + * gotos for reasons why we could possibly end up here. 1951 + */ 1966 1952 WARN_ON(1); 1967 1953 1968 1954 return 0; ··· 1999 1975 struct held_lock *hlock_curr, *hlock_next; 2000 1976 int i, j; 2001 1977 1978 + /* 1979 + * We might need to take the graph lock, ensure we've got IRQs 1980 + * disabled to make this an IRQ-safe lock.. for recursion reasons 1981 + * lockdep won't complain about its own locking errors. 1982 + */ 2002 1983 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 2003 1984 return 0; 2004 1985 /* ··· 2155 2126 hlock = curr->held_locks + i; 2156 2127 if (chain_key != hlock->prev_chain_key) { 2157 2128 debug_locks_off(); 2129 + /* 2130 + * We got mighty confused, our chain keys don't match 2131 + * with what we expect, someone trample on our task state? 2132 + */ 2158 2133 WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n", 2159 2134 curr->lockdep_depth, i, 2160 2135 (unsigned long long)chain_key, ··· 2166 2133 return; 2167 2134 } 2168 2135 id = hlock->class_idx - 1; 2136 + /* 2137 + * Whoops ran out of static storage again?
2138 + */ 2169 2139 if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS)) 2170 2140 return; 2171 2141 ··· 2180 2144 } 2181 2145 if (chain_key != curr->curr_chain_key) { 2182 2146 debug_locks_off(); 2147 + /* 2148 + * More smoking hash instead of calculating it, damn see these 2149 + * numbers float.. I bet that a pink elephant stepped on my memory. 2150 + */ 2183 2151 WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n", 2184 2152 curr->lockdep_depth, i, 2185 2153 (unsigned long long)chain_key, ··· 2565 2525 return; 2566 2526 } 2567 2527 2528 + /* 2529 + * We're enabling irqs and according to our state above irqs weren't 2530 + * already enabled, yet we find the hardware thinks they are in fact 2531 + * enabled.. someone messed up their IRQ state tracing. 2532 + */ 2568 2533 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 2569 2534 return; 2570 2535 2536 + /* 2537 + * See the fine text that goes along with this variable definition. 2538 + */ 2571 2539 if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled))) 2572 2540 return; 2573 2541 2542 + /* 2543 + * Can't allow enabling interrupts while in an interrupt handler, 2544 + * that's general bad form and such. Recursion, limited stack etc.. 2545 + */ 2574 2546 if (DEBUG_LOCKS_WARN_ON(current->hardirq_context)) 2575 2547 return; 2576 2548 ··· 2610 2558 if (unlikely(!debug_locks || current->lockdep_recursion)) 2611 2559 return; 2612 2560 2561 + /* 2562 + * So we're supposed to get called after you mask local IRQs, but for 2563 + * some reason the hardware doesn't quite think you did a proper job. 2564 + */ 2613 2565 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 2614 2566 return; 2615 2567 ··· 2646 2590 if (unlikely(!debug_locks || current->lockdep_recursion)) 2647 2591 return; 2648 2592 2593 + /* 2594 + * We fancy IRQs being disabled here, see softirq.c, avoids 2595 + * funny state and nesting things. 2596 + */ 2649 2597 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 2650 2598 return; 2651 2599 ··· 2686 2626 if (unlikely(!debug_locks || current->lockdep_recursion)) 2687 2627 return; 2688 2628 2629 + /* 2630 + * We fancy IRQs being disabled here, see softirq.c 2631 + */ 2689 2632 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 2690 2633 return; 2691 2634 ··· 2700 2637 curr->softirq_disable_ip = ip; 2701 2638 curr->softirq_disable_event = ++curr->irq_events; 2702 2639 debug_atomic_inc(softirqs_off_events); 2640 + /* 2641 + * Whoops, we wanted softirqs off, so why aren't they? 2642 + */ 2703 2643 DEBUG_LOCKS_WARN_ON(!softirq_count()); 2704 2644 } else 2705 2645 debug_atomic_inc(redundant_softirqs_off); ··· 2727 2661 if (!(gfp_mask & __GFP_FS)) 2728 2662 return; 2729 2663 2664 + /* 2665 + * Oi! Can't be having __GFP_FS allocations with IRQs disabled. 2666 + */ 2730 2667 if (DEBUG_LOCKS_WARN_ON(irqs_disabled_flags(flags))) 2731 2668 return; 2732 2669 ··· 2842 2773 return 0; 2843 2774 } 2844 2775 2845 - #else 2776 + #else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */ 2846 2777 2847 2778 static inline 2848 2779 int mark_lock_irq(struct task_struct *curr, struct held_lock *this, 2849 2780 enum lock_usage_bit new_bit) 2850 2781 { 2851 - WARN_ON(1); 2782 + WARN_ON(1); /* Impossible innit?
when we don't have TRACE_IRQFLAG */ 2852 2783 return 1; 2853 2784 } 2854 2785 ··· 2868 2799 { 2869 2800 } 2870 2801 2871 - #endif 2802 + #endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */ 2872 2803 2873 2804 /* 2874 2805 * Mark a lock with a usage bit, and validate the state transition: ··· 2949 2880 lock->cpu = raw_smp_processor_id(); 2950 2881 #endif 2951 2882 2883 + /* 2884 + * Can't be having no nameless bastards around this place! 2885 + */ 2952 2886 if (DEBUG_LOCKS_WARN_ON(!name)) { 2953 2887 lock->name = "NULL"; 2954 2888 return; ··· 2959 2887 2960 2888 lock->name = name; 2961 2889 2890 + /* 2891 + * No key, no joy, we need to hash something. 2892 + */ 2962 2893 if (DEBUG_LOCKS_WARN_ON(!key)) 2963 2894 return; 2964 2895 /* ··· 2969 2894 */ 2970 2895 if (!static_obj(key)) { 2971 2896 printk("BUG: key %p not in .data!\n", key); 2897 + /* 2898 + * What it says above ^^^^^, I suggest you read it. 2899 + */ 2972 2900 DEBUG_LOCKS_WARN_ON(1); 2973 2901 return; 2974 2902 } ··· 3010 2932 if (unlikely(!debug_locks)) 3011 2933 return 0; 3012 2934 2935 + /* 2936 + * Lockdep should run with IRQs disabled, otherwise we could 2937 + * get an interrupt which would want to take locks, which would 2938 + * end up in lockdep and have you got a head-ache already? 2939 + */ 3013 2940 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 3014 2941 return 0; 3015 2942 ··· 3046 2963 * dependency checks are done) 3047 2964 */ 3048 2965 depth = curr->lockdep_depth; 2966 + /* 2967 + * Ran out of static storage for our per-task lock stack again have we? 2968 + */ 3049 2969 if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH)) 3050 2970 return 0; 3051 2971 ··· 3067 2981 } 3068 2982 3069 2983 hlock = curr->held_locks + depth; 2984 + /* 2985 + * Plain impossible, we just registered it and checked it weren't no 2986 + * NULL like.. I bet this mushroom I ate was good! 2987 + */ 3070 2988 if (DEBUG_LOCKS_WARN_ON(!class)) 3071 2989 return 0; 3072 2990 hlock->class_idx = class_idx; ··· 3105 3015 * the hash, not class->key. 3106 3016 */ 3107 3017 id = class - lock_classes; 3018 + /* 3019 + * Whoops, we did it again.. ran straight out of our static allocation. 3020 + */ 3108 3021 if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS)) 3109 3022 return 0; 3110 3023 3111 3024 chain_key = curr->curr_chain_key; 3112 3025 if (!depth) { 3026 + /* 3027 + * How can we have a chain hash when we ain't got no keys?! 3028 + */ 3113 3029 if (DEBUG_LOCKS_WARN_ON(chain_key != 0)) 3114 3030 return 0; 3115 3031 chain_head = 1; ··· 3187 3091 { 3188 3092 if (unlikely(!debug_locks)) 3189 3093 return 0; 3094 + /* 3095 + * Lockdep should run with IRQs disabled, recursion, head-ache, etc.. 3096 + */ 3190 3097 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) 3191 3098 return 0; 3192 3099 ··· 3219 3120 if (!class) 3220 3121 return 0; 3221 3122 3123 + /* 3124 + * References, but not a lock we're actually ref-counting? 3125 + * State got messed up, follow the sites that change ->references 3126 + * and try to make sense of it. 3127 + */ 3222 3128 if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock)) 3223 3129 return 0; 3224 3130 ··· 3246 3142 int i; 3247 3143 3248 3144 depth = curr->lockdep_depth; 3145 + /* 3146 + * This function is about (re)setting the class of a held lock, 3147 + * yet we're not actually holding any locks. Naughty user!
3148 + */ 3249 3149 if (DEBUG_LOCKS_WARN_ON(!depth)) 3250 3150 return 0; 3251 3151 ··· 3285 3177 return 0; 3286 3178 } 3287 3179 3180 + /* 3181 + * I took it apart and put it back together again, except now I have 3182 + * these 'spare' parts.. where shall I put them. 3183 + */ 3288 3184 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth)) 3289 3185 return 0; 3290 3186 return 1; ··· 3313 3201 * of held locks: 3314 3202 */ 3315 3203 depth = curr->lockdep_depth; 3204 + /* 3205 + * So we're all set to release this lock.. wait what lock? We don't 3206 + * own any locks, you've been drinking again? 3207 + */ 3316 3208 if (DEBUG_LOCKS_WARN_ON(!depth)) 3317 3209 return 0; 3318 3210 ··· 3369 3253 return 0; 3370 3254 } 3371 3255 3256 + /* 3257 + * We had N bottles of beer on the wall, we drank one, but now 3258 + * there's not N-1 bottles of beer left on the wall... 3259 + */ 3372 3260 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1)) 3373 3261 return 0; 3374 3262 return 1; ··· 3403 3283 return lock_release_non_nested(curr, lock, ip); 3404 3284 curr->lockdep_depth--; 3405 3285 3286 + /* 3287 + * No more locks, but somehow we've got hash left over, who left it? 3288 + */ 3406 3289 if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0))) 3407 3290 return 0; 3408 3291 ··· 3488 3365 * check if not in hardirq contexts: 3489 3366 */ 3490 3367 if (!hardirq_count()) { 3491 - if (softirq_count()) 3368 + if (softirq_count()) { 3369 + /* like the above, but with softirqs */ 3492 3370 DEBUG_LOCKS_WARN_ON(current->softirqs_enabled); 3493 - else 3371 + } else { 3372 + /* lick the above, does it taste good? */ 3494 3373 DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); 3374 + } 3495 3375 } 3496 3376 3497 3377 if (!debug_locks) ··· 3632 3506 int i, contention_point, contending_point; 3633 3507 3634 3508 depth = curr->lockdep_depth; 3509 + /* 3510 + * Whee, we contended on this lock, except it seems we're not 3511 + * actually trying to acquire anything much at all.. 3512 + */ 3635 3513 if (DEBUG_LOCKS_WARN_ON(!depth)) 3636 3514 return; 3637 3515 ··· 3685 3555 int i, cpu; 3686 3556 3687 3557 depth = curr->lockdep_depth; 3558 + /* 3559 + * Yay, we acquired ownership of this lock we didn't try to 3560 + * acquire, how the heck did that happen? 3561 + */ 3688 3562 if (DEBUG_LOCKS_WARN_ON(!depth)) 3689 3563 return; 3690 3564 ··· 3893 3759 match |= class == lock->class_cache[j]; 3894 3760 3895 3761 if (unlikely(match)) { 3896 - if (debug_locks_off_graph_unlock()) 3762 + if (debug_locks_off_graph_unlock()) { 3763 + /* 3764 + * We all just reset everything, how did it match? 3765 + */ 3897 3766 WARN_ON(1); 3767 + } 3898 3768 goto out_restore; 3899 3769 } 3900 3770 }
+7 -7
kernel/posix-cpu-timers.c
··· 282 282 * it. 283 283 */ 284 284 thread_group_cputime(tsk, &sum); 285 - spin_lock_irqsave(&cputimer->lock, flags); 285 + raw_spin_lock_irqsave(&cputimer->lock, flags); 286 286 cputimer->running = 1; 287 287 update_gt_cputime(&cputimer->cputime, &sum); 288 288 } else 289 - spin_lock_irqsave(&cputimer->lock, flags); 289 + raw_spin_lock_irqsave(&cputimer->lock, flags); 290 290 *times = cputimer->cputime; 291 - spin_unlock_irqrestore(&cputimer->lock, flags); 291 + raw_spin_unlock_irqrestore(&cputimer->lock, flags); 292 292 } 293 293 294 294 /* ··· 999 999 struct thread_group_cputimer *cputimer = &sig->cputimer; 1000 1000 unsigned long flags; 1001 1001 1002 - spin_lock_irqsave(&cputimer->lock, flags); 1002 + raw_spin_lock_irqsave(&cputimer->lock, flags); 1003 1003 cputimer->running = 0; 1004 - spin_unlock_irqrestore(&cputimer->lock, flags); 1004 + raw_spin_unlock_irqrestore(&cputimer->lock, flags); 1005 1005 } 1006 1006 1007 1007 static u32 onecputick; ··· 1291 1291 if (sig->cputimer.running) { 1292 1292 struct task_cputime group_sample; 1293 1293 1294 - spin_lock(&sig->cputimer.lock); 1294 + raw_spin_lock(&sig->cputimer.lock); 1295 1295 group_sample = sig->cputimer.cputime; 1296 - spin_unlock(&sig->cputimer.lock); 1296 + raw_spin_unlock(&sig->cputimer.lock); 1297 1297 1298 1298 if (task_cputime_expired(&group_sample, &sig->cputime_expires)) 1299 1299 return 1;
+23 -23
kernel/printk.c
··· 100 100 * It is also used in interesting ways to provide interlocking in 101 101 * console_unlock();. 102 102 */ 103 - static DEFINE_SPINLOCK(logbuf_lock); 103 + static DEFINE_RAW_SPINLOCK(logbuf_lock); 104 104 105 105 #define LOG_BUF_MASK (log_buf_len-1) 106 106 #define LOG_BUF(idx) (log_buf[(idx) & LOG_BUF_MASK]) ··· 212 212 return; 213 213 } 214 214 215 - spin_lock_irqsave(&logbuf_lock, flags); 215 + raw_spin_lock_irqsave(&logbuf_lock, flags); 216 216 log_buf_len = new_log_buf_len; 217 217 log_buf = new_log_buf; 218 218 new_log_buf_len = 0; ··· 230 230 log_start -= offset; 231 231 con_start -= offset; 232 232 log_end -= offset; 233 - spin_unlock_irqrestore(&logbuf_lock, flags); 233 + raw_spin_unlock_irqrestore(&logbuf_lock, flags); 234 234 235 235 pr_info("log_buf_len: %d\n", log_buf_len); 236 236 pr_info("early log buf free: %d(%d%%)\n", ··· 365 365 if (error) 366 366 goto out; 367 367 i = 0; 368 - spin_lock_irq(&logbuf_lock); 368 + raw_spin_lock_irq(&logbuf_lock); 369 369 while (!error && (log_start != log_end) && i < len) { 370 370 c = LOG_BUF(log_start); 371 371 log_start++; 372 - spin_unlock_irq(&logbuf_lock); 372 + raw_spin_unlock_irq(&logbuf_lock); 373 373 error = __put_user(c,buf); 374 374 buf++; 375 375 i++; 376 376 cond_resched(); 377 - spin_lock_irq(&logbuf_lock); 377 + raw_spin_lock_irq(&logbuf_lock); 378 378 } 379 - spin_unlock_irq(&logbuf_lock); 379 + raw_spin_unlock_irq(&logbuf_lock); 380 380 if (!error) 381 381 error = i; 382 382 break; ··· 399 399 count = len; 400 400 if (count > log_buf_len) 401 401 count = log_buf_len; 402 - spin_lock_irq(&logbuf_lock); 402 + raw_spin_lock_irq(&logbuf_lock); 403 403 if (count > logged_chars) 404 404 count = logged_chars; 405 405 if (do_clear) ··· 416 416 if (j + log_buf_len < log_end) 417 417 break; 418 418 c = LOG_BUF(j); 419 - spin_unlock_irq(&logbuf_lock); 419 + raw_spin_unlock_irq(&logbuf_lock); 420 420 error = __put_user(c,&buf[count-1-i]); 421 421 cond_resched(); 422 - spin_lock_irq(&logbuf_lock); 422 + raw_spin_lock_irq(&logbuf_lock); 423 423 } 424 - spin_unlock_irq(&logbuf_lock); 424 + raw_spin_unlock_irq(&logbuf_lock); 425 425 if (error) 426 426 break; 427 427 error = i; ··· 689 689 oops_timestamp = jiffies; 690 690 691 691 /* If a crash is occurring, make sure we can't deadlock */ 692 - spin_lock_init(&logbuf_lock); 692 + raw_spin_lock_init(&logbuf_lock); 693 693 /* And make sure that we print immediately */ 694 694 sema_init(&console_sem, 1); 695 695 } ··· 802 802 } 803 803 } 804 804 printk_cpu = UINT_MAX; 805 - spin_unlock(&logbuf_lock); 806 805 if (wake) 807 806 up(&console_sem); 807 + raw_spin_unlock(&logbuf_lock); 808 808 return retval; 809 809 } 810 810 static const char recursion_bug_msg [] = ··· 864 864 } 865 865 866 866 lockdep_off(); 867 - spin_lock(&logbuf_lock); 867 + raw_spin_lock(&logbuf_lock); 868 868 printk_cpu = this_cpu; 869 869 870 870 if (recursion_bug) { ··· 1257 1257 1258 1258 again: 1259 1259 for ( ; ; ) { 1260 - spin_lock_irqsave(&logbuf_lock, flags); 1260 + raw_spin_lock_irqsave(&logbuf_lock, flags); 1261 1261 wake_klogd |= log_start - log_end; 1262 1262 if (con_start == log_end) 1263 1263 break; /* Nothing to print */ 1264 1264 _con_start = con_start; 1265 1265 _log_end = log_end; 1266 1266 con_start = log_end; /* Flush */ 1267 - spin_unlock(&logbuf_lock); 1267 + raw_spin_unlock(&logbuf_lock); 1268 1268 stop_critical_timings(); /* don't trace print latency */ 1269 1269 call_console_drivers(_con_start, _log_end); 1270 1270 start_critical_timings(); ··· 1276 1276 if (unlikely(exclusive_console))
1277 1277 exclusive_console = NULL; 1278 1278 1279 - spin_unlock(&logbuf_lock); 1279 + raw_spin_unlock(&logbuf_lock); 1280 1280 1281 1281 up(&console_sem); 1282 1282 ··· 1286 1286 * there's a new owner and the console_unlock() from them will do the 1287 1287 * flush, no worries. 1288 1288 */ 1289 - spin_lock(&logbuf_lock); 1289 + raw_spin_lock(&logbuf_lock); 1290 1290 if (con_start != log_end) 1291 1291 retry = 1; 1292 - spin_unlock_irqrestore(&logbuf_lock, flags); 1293 1292 if (retry && console_trylock()) 1294 1293 goto again; 1295 1294 1295 + raw_spin_unlock_irqrestore(&logbuf_lock, flags); 1296 1296 if (wake_klogd) 1297 1297 wake_up_klogd(); 1298 1298 } ··· 1522 1522 * console_unlock(); will print out the buffered messages 1523 1523 * for us. 1524 1524 */ 1525 - spin_lock_irqsave(&logbuf_lock, flags); 1525 + raw_spin_lock_irqsave(&logbuf_lock, flags); 1526 1526 con_start = log_start; 1527 - spin_unlock_irqrestore(&logbuf_lock, flags); 1527 + raw_spin_unlock_irqrestore(&logbuf_lock, flags); 1528 1528 /* 1529 1529 * We're about to replay the log buffer. Only do this to the 1530 1530 * just-registered console to avoid excessive message spam to ··· 1731 1731 /* Theoretically, the log could move on after we do this, but 1732 1732 there's not a lot we can do about that. The new messages 1733 1733 will overwrite the start of what we dump. */ 1734 - spin_lock_irqsave(&logbuf_lock, flags); 1734 + raw_spin_lock_irqsave(&logbuf_lock, flags); 1735 1735 end = log_end & LOG_BUF_MASK; 1736 1736 chars = logged_chars; 1737 - spin_unlock_irqrestore(&logbuf_lock, flags); 1737 + raw_spin_unlock_irqrestore(&logbuf_lock, flags); 1738 1738 1739 1739 if (chars > end) { 1740 1740 s1 = log_buf + log_buf_len - chars + end;
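Two printk hunks do more than swap lock types: in the trylock path logbuf_lock is now released after up(&console_sem), and in console_unlock() it is released after the retry/console_trylock() check. With semaphores now built on a raw inner lock (see semaphore.c below), up() is safe to call under logbuf_lock, and holding the lock across the retry decision appears intended to close the window in which a message logged between the emptiness check and the unlock could be stranded with nobody left to flush it. The reordering, paraphrased:

/*
 * before:					after:
 *	retry = (con_start != log_end);		retry = (con_start != log_end);
 *	raw_spin_unlock_irqrestore(...);	if (retry && console_trylock())
 *	if (retry && console_trylock())			goto again;
 *		goto again;			raw_spin_unlock_irqrestore(...);
 */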
+12 -65
kernel/rtmutex-debug.c
··· 29 29 30 30 #include "rtmutex_common.h" 31 31 32 - # define TRACE_WARN_ON(x) WARN_ON(x) 33 - # define TRACE_BUG_ON(x) BUG_ON(x) 34 - 35 - # define TRACE_OFF() \ 36 - do { \ 37 - if (rt_trace_on) { \ 38 - rt_trace_on = 0; \ 39 - console_verbose(); \ 40 - if (raw_spin_is_locked(&current->pi_lock)) \ 41 - raw_spin_unlock(&current->pi_lock); \ 42 - } \ 43 - } while (0) 44 - 45 - # define TRACE_OFF_NOLOCK() \ 46 - do { \ 47 - if (rt_trace_on) { \ 48 - rt_trace_on = 0; \ 49 - console_verbose(); \ 50 - } \ 51 - } while (0) 52 - 53 - # define TRACE_BUG_LOCKED() \ 54 - do { \ 55 - TRACE_OFF(); \ 56 - BUG(); \ 57 - } while (0) 58 - 59 - # define TRACE_WARN_ON_LOCKED(c) \ 60 - do { \ 61 - if (unlikely(c)) { \ 62 - TRACE_OFF(); \ 63 - WARN_ON(1); \ 64 - } \ 65 - } while (0) 66 - 67 - # define TRACE_BUG_ON_LOCKED(c) \ 68 - do { \ 69 - if (unlikely(c)) \ 70 - TRACE_BUG_LOCKED(); \ 71 - } while (0) 72 - 73 - #ifdef CONFIG_SMP 74 - # define SMP_TRACE_BUG_ON_LOCKED(c) TRACE_BUG_ON_LOCKED(c) 75 - #else 76 - # define SMP_TRACE_BUG_ON_LOCKED(c) do { } while (0) 77 - #endif 78 - 79 - /* 80 - * deadlock detection flag. We turn it off when we detect 81 - * the first problem because we dont want to recurse back 82 - * into the tracing code when doing error printk or 83 - * executing a BUG(): 84 - */ 85 - static int rt_trace_on = 1; 86 - 87 32 static void printk_task(struct task_struct *p) 88 33 { 89 34 if (p) ··· 56 111 57 112 void rt_mutex_debug_task_free(struct task_struct *task) 58 113 { 59 - WARN_ON(!plist_head_empty(&task->pi_waiters)); 60 - WARN_ON(task->pi_blocked_on); 114 + DEBUG_LOCKS_WARN_ON(!plist_head_empty(&task->pi_waiters)); 115 + DEBUG_LOCKS_WARN_ON(task->pi_blocked_on); 61 116 } 62 117 63 118 /* ··· 70 125 { 71 126 struct task_struct *task; 72 127 73 - if (!rt_trace_on || detect || !act_waiter) 128 + if (!debug_locks || detect || !act_waiter) 74 129 return; 75 130 76 131 task = rt_mutex_owner(act_waiter->lock); ··· 84 139 { 85 140 struct task_struct *task; 86 141 87 - if (!waiter->deadlock_lock || !rt_trace_on) 142 + if (!waiter->deadlock_lock || !debug_locks) 88 143 return; 89 144 90 145 rcu_read_lock(); ··· 94 149 return; 95 150 } 96 151 97 - TRACE_OFF_NOLOCK(); 152 + if (!debug_locks_off()) { 153 + rcu_read_unlock(); 154 + return; 155 + } 98 156 99 157 printk("\n============================================\n"); 100 158 printk( "[ BUG: circular locking deadlock detected! ]\n"); ··· 128 180 129 181 printk("[ turning off deadlock detection." 130 182 "Please report this trace.
]\n\n"); 131 - local_irq_disable(); 132 183 } 133 184 134 185 void debug_rt_mutex_lock(struct rt_mutex *lock) ··· 136 189 137 190 void debug_rt_mutex_unlock(struct rt_mutex *lock) 138 191 { 139 - TRACE_WARN_ON_LOCKED(rt_mutex_owner(lock) != current); 192 + DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current); 140 193 } 141 194 142 195 void ··· 146 199 147 200 void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock) 148 201 { 149 - TRACE_WARN_ON_LOCKED(!rt_mutex_owner(lock)); 202 + DEBUG_LOCKS_WARN_ON(!rt_mutex_owner(lock)); 150 203 } 151 204 152 205 void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) ··· 160 213 void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter) 161 214 { 162 215 put_pid(waiter->deadlock_task_pid); 163 - TRACE_WARN_ON(!plist_node_empty(&waiter->list_entry)); 164 - TRACE_WARN_ON(!plist_node_empty(&waiter->pi_list_entry)); 216 + DEBUG_LOCKS_WARN_ON(!plist_node_empty(&waiter->list_entry)); 217 + DEBUG_LOCKS_WARN_ON(!plist_node_empty(&waiter->pi_list_entry)); 165 218 memset(waiter, 0x22, sizeof(*waiter)); 166 219 } 167 220
+6 -6
kernel/sched_stats.h
··· 282 282 if (!cputimer->running) 283 283 return; 284 284 285 - spin_lock(&cputimer->lock); 285 + raw_spin_lock(&cputimer->lock); 286 286 cputimer->cputime.utime = 287 287 cputime_add(cputimer->cputime.utime, cputime); 288 - spin_unlock(&cputimer->lock); 288 + raw_spin_unlock(&cputimer->lock); 289 289 } 290 290 291 291 /** ··· 306 306 if (!cputimer->running) 307 307 return; 308 308 309 - spin_lock(&cputimer->lock); 309 + raw_spin_lock(&cputimer->lock); 310 310 cputimer->cputime.stime = 311 311 cputime_add(cputimer->cputime.stime, cputime); 312 - spin_unlock(&cputimer->lock); 312 + raw_spin_unlock(&cputimer->lock); 313 313 } 314 314 315 315 /** ··· 330 330 if (!cputimer->running) 331 331 return; 332 332 333 - spin_lock(&cputimer->lock); 333 + raw_spin_lock(&cputimer->lock); 334 334 cputimer->cputime.sum_exec_runtime += ns; 335 - spin_unlock(&cputimer->lock); 335 + raw_spin_unlock(&cputimer->lock); 336 336 }
+14 -14
kernel/semaphore.c
··· 54 54 { 55 55 unsigned long flags; 56 56 57 - spin_lock_irqsave(&sem->lock, flags); 57 + raw_spin_lock_irqsave(&sem->lock, flags); 58 58 if (likely(sem->count > 0)) 59 59 sem->count--; 60 60 else 61 61 __down(sem); 62 - spin_unlock_irqrestore(&sem->lock, flags); 62 + raw_spin_unlock_irqrestore(&sem->lock, flags); 63 63 } 64 64 EXPORT_SYMBOL(down); 65 65 ··· 77 77 unsigned long flags; 78 78 int result = 0; 79 79 80 - spin_lock_irqsave(&sem->lock, flags); 80 + raw_spin_lock_irqsave(&sem->lock, flags); 81 81 if (likely(sem->count > 0)) 82 82 sem->count--; 83 83 else 84 84 result = __down_interruptible(sem); 85 - spin_unlock_irqrestore(&sem->lock, flags); 85 + raw_spin_unlock_irqrestore(&sem->lock, flags); 86 86 87 87 return result; 88 88 } ··· 103 103 unsigned long flags; 104 104 int result = 0; 105 105 106 - spin_lock_irqsave(&sem->lock, flags); 106 + raw_spin_lock_irqsave(&sem->lock, flags); 107 107 if (likely(sem->count > 0)) 108 108 sem->count--; 109 109 else 110 110 result = __down_killable(sem); 111 - spin_unlock_irqrestore(&sem->lock, flags); 111 + raw_spin_unlock_irqrestore(&sem->lock, flags); 112 112 113 113 return result; 114 114 } ··· 132 132 unsigned long flags; 133 133 int count; 134 134 135 - spin_lock_irqsave(&sem->lock, flags); 135 + raw_spin_lock_irqsave(&sem->lock, flags); 136 136 count = sem->count - 1; 137 137 if (likely(count >= 0)) 138 138 sem->count = count; 139 - spin_unlock_irqrestore(&sem->lock, flags); 139 + raw_spin_unlock_irqrestore(&sem->lock, flags); 140 140 141 141 return (count < 0); 142 142 } ··· 157 157 unsigned long flags; 158 158 int result = 0; 159 159 160 - spin_lock_irqsave(&sem->lock, flags); 160 + raw_spin_lock_irqsave(&sem->lock, flags); 161 161 if (likely(sem->count > 0)) 162 162 sem->count--; 163 163 else 164 164 result = __down_timeout(sem, jiffies); 165 - spin_unlock_irqrestore(&sem->lock, flags); 165 + raw_spin_unlock_irqrestore(&sem->lock, flags); 166 166 167 167 return result; 168 168 } ··· 179 179 { 180 180 unsigned long flags; 181 181 182 - spin_lock_irqsave(&sem->lock, flags); 182 + raw_spin_lock_irqsave(&sem->lock, flags); 183 183 if (likely(list_empty(&sem->wait_list))) 184 184 sem->count++; 185 185 else 186 186 __up(sem); 187 - spin_unlock_irqrestore(&sem->lock, flags); 187 + raw_spin_unlock_irqrestore(&sem->lock, flags); 188 188 } 189 189 EXPORT_SYMBOL(up); 190 190 ··· 217 217 if (timeout <= 0) 218 218 goto timed_out; 219 219 __set_task_state(task, state); 220 - spin_unlock_irq(&sem->lock); 220 + raw_spin_unlock_irq(&sem->lock); 221 221 timeout = schedule_timeout(timeout); 222 - spin_lock_irq(&sem->lock); 222 + raw_spin_lock_irq(&sem->lock); 223 223 if (waiter.up) 224 224 return 0; 225 225 }
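Annotating a sleeping primitive's inner lock as raw is safe because of the shape visible in the last hunk above: the lock only guards short, bounded bookkeeping, and it is always dropped before the task schedules and retaken afterwards. A toy wait loop in the style of __down_common(), simplified (the real code hands the semaphore over through a waiter structure instead of re-checking the count); the caller is assumed to hold sem->lock with IRQs disabled:

#include <linux/semaphore.h>
#include <linux/sched.h>
#include <linux/errno.h>

static int demo_down_common(struct semaphore *sem, long state, long timeout)
{
	for (;;) {
		if (signal_pending_state(state, current))
			return -EINTR;		/* interrupted */
		if (timeout <= 0)
			return -ETIME;		/* timed out */
		__set_task_state(current, state);
		raw_spin_unlock_irq(&sem->lock);	/* never sleep with a raw lock held */
		timeout = schedule_timeout(timeout);
		raw_spin_lock_irq(&sem->lock);		/* retake only to re-check state */
		if (sem->count > 0) {
			sem->count--;
			return 0;
		}
	}
}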
+3 -3
kernel/time/timer_stats.c
··· 81 81 /* 82 82 * Spinlock protecting the tables - not taken during lookup: 83 83 */ 84 - static DEFINE_SPINLOCK(table_lock); 84 + static DEFINE_RAW_SPINLOCK(table_lock); 85 85 86 86 /* 87 87 * Per-CPU lookup locks for fast hash lookup: ··· 188 188 prev = NULL; 189 189 curr = *head; 190 190 191 - spin_lock(&table_lock); 191 + raw_spin_lock(&table_lock); 192 192 /* 193 193 * Make sure we have not raced with another CPU: 194 194 */ ··· 215 215 *head = curr; 216 216 } 217 217 out_unlock: 218 - spin_unlock(&table_lock); 218 + raw_spin_unlock(&table_lock); 219 219 220 220 return curr; 221 221 }
+26 -26
kernel/trace/ring_buffer.c
··· 478 478 int cpu; 479 479 atomic_t record_disabled; 480 480 struct ring_buffer *buffer; 481 - spinlock_t reader_lock; /* serialize readers */ 481 + raw_spinlock_t reader_lock; /* serialize readers */ 482 482 arch_spinlock_t lock; 483 483 struct lock_class_key lock_key; 484 484 struct list_head *pages; ··· 1062 1062 1063 1063 cpu_buffer->cpu = cpu; 1064 1064 cpu_buffer->buffer = buffer; 1065 - spin_lock_init(&cpu_buffer->reader_lock); 1065 + raw_spin_lock_init(&cpu_buffer->reader_lock); 1066 1066 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); 1067 1067 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; 1068 1068 ··· 1259 1259 struct list_head *p; 1260 1260 unsigned i; 1261 1261 1262 - spin_lock_irq(&cpu_buffer->reader_lock); 1262 + raw_spin_lock_irq(&cpu_buffer->reader_lock); 1263 1263 rb_head_page_deactivate(cpu_buffer); 1264 1264 1265 1265 for (i = 0; i < nr_pages; i++) { ··· 1277 1277 rb_check_pages(cpu_buffer); 1278 1278 1279 1279 out: 1280 - spin_unlock_irq(&cpu_buffer->reader_lock); 1280 + raw_spin_unlock_irq(&cpu_buffer->reader_lock); 1281 1281 } 1282 1282 1283 1283 static void ··· 1288 1288 struct list_head *p; 1289 1289 unsigned i; 1290 1290 1291 - spin_lock_irq(&cpu_buffer->reader_lock); 1291 + raw_spin_lock_irq(&cpu_buffer->reader_lock); 1292 1292 rb_head_page_deactivate(cpu_buffer); 1293 1293 1294 1294 for (i = 0; i < nr_pages; i++) { ··· 1303 1303 rb_check_pages(cpu_buffer); 1304 1304 1305 1305 out: 1306 - spin_unlock_irq(&cpu_buffer->reader_lock); 1306 + raw_spin_unlock_irq(&cpu_buffer->reader_lock); 1307 1307 } 1308 1308 1309 1309 /** ··· 2804 2804 2805 2805 cpu_buffer = iter->cpu_buffer; 2806 2806 2807 - spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 2807 + raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 2808 2808 rb_iter_reset(iter); 2809 - spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 2809 + raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 2810 2810 } 2811 2811 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset); 2812 2812 ··· 3265 3265 again: 3266 3266 local_irq_save(flags); 3267 3267 if (dolock) 3268 - spin_lock(&cpu_buffer->reader_lock); 3268 + raw_spin_lock(&cpu_buffer->reader_lock); 3269 3269 event = rb_buffer_peek(cpu_buffer, ts, lost_events); 3270 3270 if (event && event->type_len == RINGBUF_TYPE_PADDING) 3271 3271 rb_advance_reader(cpu_buffer); 3272 3272 if (dolock) 3273 - spin_unlock(&cpu_buffer->reader_lock); 3273 + raw_spin_unlock(&cpu_buffer->reader_lock); 3274 3274 local_irq_restore(flags); 3275 3275 3276 3276 if (event && event->type_len == RINGBUF_TYPE_PADDING) ··· 3295 3295 unsigned long flags; 3296 3296 3297 3297 again: 3298 - spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 3298 + raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 3299 3299 event = rb_iter_peek(iter, ts); 3300 - spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 3300 + raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 3301 3301 3302 3302 if (event && event->type_len == RINGBUF_TYPE_PADDING) 3303 3303 goto again; ··· 3337 3337 cpu_buffer = buffer->buffers[cpu]; 3338 3338 local_irq_save(flags); 3339 3339 if (dolock) 3340 - spin_lock(&cpu_buffer->reader_lock); 3340 + raw_spin_lock(&cpu_buffer->reader_lock); 3341 3341 3342 3342 event = rb_buffer_peek(cpu_buffer, ts, lost_events); 3343 3343 if (event) { ··· 3346 3346 } 3347 3347 3348 3348 if (dolock) 3349 - spin_unlock(&cpu_buffer->reader_lock); 3349 + raw_spin_unlock(&cpu_buffer->reader_lock); 3350 3350 local_irq_restore(flags); 3351 3351 3352 3352 out:
··· 3438 3438 3439 3439 cpu_buffer = iter->cpu_buffer; 3440 3440 3441 - spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 3441 + raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 3442 3442 arch_spin_lock(&cpu_buffer->lock); 3443 3443 rb_iter_reset(iter); 3444 3444 arch_spin_unlock(&cpu_buffer->lock); 3445 - spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 3445 + raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 3446 3446 } 3447 3447 EXPORT_SYMBOL_GPL(ring_buffer_read_start); 3448 3448 ··· 3477 3477 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; 3478 3478 unsigned long flags; 3479 3479 3480 - spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 3480 + raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 3481 3481 again: 3482 3482 event = rb_iter_peek(iter, ts); 3483 3483 if (!event) ··· 3488 3488 3489 3489 rb_advance_iter(iter); 3490 3490 out: 3491 - spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 3491 + raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 3492 3492 3493 3493 return event; 3494 3494 } ··· 3557 3557 3558 3558 atomic_inc(&cpu_buffer->record_disabled); 3559 3559 3560 - spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 3560 + raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 3561 3561 3562 3562 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) 3563 3563 goto out; ··· 3569 3569 arch_spin_unlock(&cpu_buffer->lock); 3570 3570 3571 3571 out: 3572 - spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 3572 + raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 3573 3573 3574 3574 atomic_dec(&cpu_buffer->record_disabled); 3575 3575 } ··· 3607 3607 cpu_buffer = buffer->buffers[cpu]; 3608 3608 local_irq_save(flags); 3609 3609 if (dolock) 3610 - spin_lock(&cpu_buffer->reader_lock); 3610 + raw_spin_lock(&cpu_buffer->reader_lock); 3611 3611 ret = rb_per_cpu_empty(cpu_buffer); 3612 3612 if (dolock) 3613 - spin_unlock(&cpu_buffer->reader_lock); 3613 + raw_spin_unlock(&cpu_buffer->reader_lock); 3614 3614 local_irq_restore(flags); 3615 3615 3616 3616 if (!ret) ··· 3641 3641 cpu_buffer = buffer->buffers[cpu]; 3642 3642 local_irq_save(flags); 3643 3643 if (dolock) 3644 - spin_lock(&cpu_buffer->reader_lock); 3644 + raw_spin_lock(&cpu_buffer->reader_lock); 3645 3645 ret = rb_per_cpu_empty(cpu_buffer); 3646 3646 if (dolock) 3647 - spin_unlock(&cpu_buffer->reader_lock); 3647 + raw_spin_unlock(&cpu_buffer->reader_lock); 3648 3648 local_irq_restore(flags); 3649 3649 3650 3650 return ret; ··· 3841 3841 if (!bpage) 3842 3842 goto out; 3843 3843 3844 - spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 3844 + raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); 3845 3845 3846 3846 reader = rb_get_reader_page(cpu_buffer); 3847 3847 if (!reader) ··· 3964 3964 memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit); 3965 3965 3966 3966 out_unlock: 3967 - spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 3967 + raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); 3968 3968 3969 3969 out: 3970 3970 return ret;
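After this change the ring buffer's per-CPU struct shows all three locking flavors in one place, which is a useful summary of the hierarchy this series works within. Side by side, on a demo struct rather than anything from the patch:

struct demo_locks {
	spinlock_t	normal;	/* becomes a sleeping lock on PREEMPT_RT      */
	raw_spinlock_t	raw;	/* always busy-waits; still lockdep-tracked   */
	arch_spinlock_t	bare;	/* raw arch primitive: no lockdep, no debug   */
};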
+5 -5
kernel/trace/trace.c
··· 341 341 TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE; 342 342 343 343 static int trace_stop_count; 344 - static DEFINE_SPINLOCK(tracing_start_lock); 344 + static DEFINE_RAW_SPINLOCK(tracing_start_lock); 345 345 346 346 static void wakeup_work_handler(struct work_struct *work) 347 347 { ··· 960 960 if (tracing_disabled) 961 961 return; 962 962 963 - spin_lock_irqsave(&tracing_start_lock, flags); 963 + raw_spin_lock_irqsave(&tracing_start_lock, flags); 964 964 if (--trace_stop_count) { 965 965 if (trace_stop_count < 0) { 966 966 /* Someone screwed up their debugging */ ··· 985 985 986 986 ftrace_start(); 987 987 out: 988 - spin_unlock_irqrestore(&tracing_start_lock, flags); 988 + raw_spin_unlock_irqrestore(&tracing_start_lock, flags); 989 989 } 990 990 991 991 /** ··· 1000 1000 unsigned long flags; 1001 1001 1002 1002 ftrace_stop(); 1003 - spin_lock_irqsave(&tracing_start_lock, flags); 1003 + raw_spin_lock_irqsave(&tracing_start_lock, flags); 1004 1004 if (trace_stop_count++) 1005 1005 goto out; 1006 1006 ··· 1018 1018 arch_spin_unlock(&ftrace_max_lock); 1019 1019 1020 1020 out: 1021 - spin_unlock_irqrestore(&tracing_start_lock, flags); 1021 + raw_spin_unlock_irqrestore(&tracing_start_lock, flags); 1022 1022 } 1023 1023 1024 1024 void trace_stop_cmdline_recording(void);
+3 -3
kernel/trace/trace_irqsoff.c
··· 23 23 24 24 static DEFINE_PER_CPU(int, tracing_cpu); 25 25 26 - static DEFINE_SPINLOCK(max_trace_lock); 26 + static DEFINE_RAW_SPINLOCK(max_trace_lock); 27 27 28 28 enum { 29 29 TRACER_IRQS_OFF = (1 << 1), ··· 321 321 if (!report_latency(delta)) 322 322 goto out; 323 323 324 - spin_lock_irqsave(&max_trace_lock, flags); 324 + raw_spin_lock_irqsave(&max_trace_lock, flags); 325 325 326 326 /* check if we are still the max latency */ 327 327 if (!report_latency(delta)) ··· 344 344 max_sequence++; 345 345 346 346 out_unlock: 347 - spin_unlock_irqrestore(&max_trace_lock, flags); 347 + raw_spin_unlock_irqrestore(&max_trace_lock, flags); 348 348 349 349 out: 350 350 data->critical_sequence = max_sequence;
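Note the double check of report_latency() in the hunk above: once unlocked as a cheap filter, then again after taking max_trace_lock, because another CPU may have recorded a larger latency while this one was waiting for the lock. The same idiom, sketched with hypothetical names:

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_max_lock);
static unsigned long demo_max_latency;

static void demo_update_max(unsigned long delta)
{
    unsigned long flags;

    if (delta <= demo_max_latency)      /* unlocked early-out */
        return;

    raw_spin_lock_irqsave(&demo_max_lock, flags);
    if (delta > demo_max_latency)       /* re-check under the lock */
        demo_max_latency = delta;
    raw_spin_unlock_irqrestore(&demo_max_lock, flags);
}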
+33 -33
lib/atomic64.c
··· 29 29 * Ensure each lock is in a separate cacheline. 30 30 */ 31 31 static union { 32 - spinlock_t lock; 32 + raw_spinlock_t lock; 33 33 char pad[L1_CACHE_BYTES]; 34 34 } atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp; 35 35 36 - static inline spinlock_t *lock_addr(const atomic64_t *v) 36 + static inline raw_spinlock_t *lock_addr(const atomic64_t *v) 37 37 { 38 38 unsigned long addr = (unsigned long) v; 39 39 ··· 45 45 long long atomic64_read(const atomic64_t *v) 46 46 { 47 47 unsigned long flags; 48 - spinlock_t *lock = lock_addr(v); 48 + raw_spinlock_t *lock = lock_addr(v); 49 49 long long val; 50 50 51 - spin_lock_irqsave(lock, flags); 51 + raw_spin_lock_irqsave(lock, flags); 52 52 val = v->counter; 53 - spin_unlock_irqrestore(lock, flags); 53 + raw_spin_unlock_irqrestore(lock, flags); 54 54 return val; 55 55 } 56 56 EXPORT_SYMBOL(atomic64_read); ··· 58 58 void atomic64_set(atomic64_t *v, long long i) 59 59 { 60 60 unsigned long flags; 61 - spinlock_t *lock = lock_addr(v); 61 + raw_spinlock_t *lock = lock_addr(v); 62 62 63 - spin_lock_irqsave(lock, flags); 63 + raw_spin_lock_irqsave(lock, flags); 64 64 v->counter = i; 65 - spin_unlock_irqrestore(lock, flags); 65 + raw_spin_unlock_irqrestore(lock, flags); 66 66 } 67 67 EXPORT_SYMBOL(atomic64_set); 68 68 69 69 void atomic64_add(long long a, atomic64_t *v) 70 70 { 71 71 unsigned long flags; 72 - spinlock_t *lock = lock_addr(v); 72 + raw_spinlock_t *lock = lock_addr(v); 73 73 74 - spin_lock_irqsave(lock, flags); 74 + raw_spin_lock_irqsave(lock, flags); 75 75 v->counter += a; 76 - spin_unlock_irqrestore(lock, flags); 76 + raw_spin_unlock_irqrestore(lock, flags); 77 77 } 78 78 EXPORT_SYMBOL(atomic64_add); 79 79 80 80 long long atomic64_add_return(long long a, atomic64_t *v) 81 81 { 82 82 unsigned long flags; 83 - spinlock_t *lock = lock_addr(v); 83 + raw_spinlock_t *lock = lock_addr(v); 84 84 long long val; 85 85 86 - spin_lock_irqsave(lock, flags); 86 + raw_spin_lock_irqsave(lock, flags); 87 87 val = v->counter += a; 88 - spin_unlock_irqrestore(lock, flags); 88 + raw_spin_unlock_irqrestore(lock, flags); 89 89 return val; 90 90 } 91 91 EXPORT_SYMBOL(atomic64_add_return); ··· 93 93 void atomic64_sub(long long a, atomic64_t *v) 94 94 { 95 95 unsigned long flags; 96 - spinlock_t *lock = lock_addr(v); 96 + raw_spinlock_t *lock = lock_addr(v); 97 97 98 - spin_lock_irqsave(lock, flags); 98 + raw_spin_lock_irqsave(lock, flags); 99 99 v->counter -= a; 100 - spin_unlock_irqrestore(lock, flags); 100 + raw_spin_unlock_irqrestore(lock, flags); 101 101 } 102 102 EXPORT_SYMBOL(atomic64_sub); 103 103 104 104 long long atomic64_sub_return(long long a, atomic64_t *v) 105 105 { 106 106 unsigned long flags; 107 - spinlock_t *lock = lock_addr(v); 107 + raw_spinlock_t *lock = lock_addr(v); 108 108 long long val; 109 109 110 - spin_lock_irqsave(lock, flags); 110 + raw_spin_lock_irqsave(lock, flags); 111 111 val = v->counter -= a; 112 - spin_unlock_irqrestore(lock, flags); 112 + raw_spin_unlock_irqrestore(lock, flags); 113 113 return val; 114 114 } 115 115 EXPORT_SYMBOL(atomic64_sub_return); ··· 117 117 long long atomic64_dec_if_positive(atomic64_t *v) 118 118 { 119 119 unsigned long flags; 120 - spinlock_t *lock = lock_addr(v); 120 + raw_spinlock_t *lock = lock_addr(v); 121 121 long long val; 122 122 123 - spin_lock_irqsave(lock, flags); 123 + raw_spin_lock_irqsave(lock, flags); 124 124 val = v->counter - 1; 125 125 if (val >= 0) 126 126 v->counter = val; 127 - spin_unlock_irqrestore(lock, flags); 127 + raw_spin_unlock_irqrestore(lock, flags); 128 128 return val; 129 129 } 130 130 EXPORT_SYMBOL(atomic64_dec_if_positive); ··· 132 132 long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n) 133 133 { 134 134 unsigned long flags; 135 - spinlock_t *lock = lock_addr(v); 135 + raw_spinlock_t *lock = lock_addr(v); 136 136 long long val; 137 137 138 - spin_lock_irqsave(lock, flags); 138 + raw_spin_lock_irqsave(lock, flags); 139 139 val = v->counter; 140 140 if (val == o) 141 141 v->counter = n; 142 - spin_unlock_irqrestore(lock, flags); 142 + raw_spin_unlock_irqrestore(lock, flags); 143 143 return val; 144 144 } 145 145 EXPORT_SYMBOL(atomic64_cmpxchg); ··· 147 147 long long atomic64_xchg(atomic64_t *v, long long new) 148 148 { 149 149 unsigned long flags; 150 - spinlock_t *lock = lock_addr(v); 150 + raw_spinlock_t *lock = lock_addr(v); 151 151 long long val; 152 152 153 - spin_lock_irqsave(lock, flags); 153 + raw_spin_lock_irqsave(lock, flags); 154 154 val = v->counter; 155 155 v->counter = new; 156 - spin_unlock_irqrestore(lock, flags); 156 + raw_spin_unlock_irqrestore(lock, flags); 157 157 return val; 158 158 } 159 159 EXPORT_SYMBOL(atomic64_xchg); ··· 161 161 int atomic64_add_unless(atomic64_t *v, long long a, long long u) 162 162 { 163 163 unsigned long flags; 164 - spinlock_t *lock = lock_addr(v); 164 + raw_spinlock_t *lock = lock_addr(v); 165 165 int ret = 0; 166 166 167 - spin_lock_irqsave(lock, flags); 167 + raw_spin_lock_irqsave(lock, flags); 168 168 if (v->counter != u) { 169 169 v->counter += a; 170 170 ret = 1; 171 171 } 172 - spin_unlock_irqrestore(lock, flags); 172 + raw_spin_unlock_irqrestore(lock, flags); 173 173 return ret; 174 174 } 175 175 EXPORT_SYMBOL(atomic64_add_unless); ··· 179 179 int i; 180 180 181 181 for (i = 0; i < NR_LOCKS; ++i) 182 - spin_lock_init(&atomic64_lock[i].lock); 182 + raw_spin_lock_init(&atomic64_lock[i].lock); 183 183 return 0; 184 184 } 185 185
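lib/atomic64.c hashes each atomic64_t onto one of NR_LOCKS cacheline-padded raw spinlocks, so unrelated counters rarely contend while lock storage stays bounded. A condensed sketch of that hashing scheme (the demo_* names are hypothetical, but the shift/xor hash follows the file's own lock_addr()):

#include <linux/cache.h>
#include <linux/spinlock.h>

#define DEMO_NR_LOCKS 16        /* power of two, so masking works below */

static union {
    raw_spinlock_t lock;
    char pad[L1_CACHE_BYTES];   /* one lock per cacheline */
} demo_lock[DEMO_NR_LOCKS] __cacheline_aligned_in_smp;

static inline raw_spinlock_t *demo_lock_addr(const void *v)
{
    unsigned long addr = (unsigned long) v;

    addr >>= L1_CACHE_SHIFT;    /* objects sharing a line share a lock */
    addr ^= (addr >> 8) ^ (addr >> 16);
    return &demo_lock[addr & (DEMO_NR_LOCKS - 1)].lock;
}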
+9 -9
lib/percpu_counter.c
··· 59 59 { 60 60 int cpu; 61 61 62 - spin_lock(&fbc->lock); 62 + raw_spin_lock(&fbc->lock); 63 63 for_each_possible_cpu(cpu) { 64 64 s32 *pcount = per_cpu_ptr(fbc->counters, cpu); 65 65 *pcount = 0; 66 66 } 67 67 fbc->count = amount; 68 - spin_unlock(&fbc->lock); 68 + raw_spin_unlock(&fbc->lock); 69 69 } 70 70 EXPORT_SYMBOL(percpu_counter_set); 71 71 ··· 76 76 preempt_disable(); 77 77 count = __this_cpu_read(*fbc->counters) + amount; 78 78 if (count >= batch || count <= -batch) { 79 - spin_lock(&fbc->lock); 79 + raw_spin_lock(&fbc->lock); 80 80 fbc->count += count; 81 81 __this_cpu_write(*fbc->counters, 0); 82 - spin_unlock(&fbc->lock); 82 + raw_spin_unlock(&fbc->lock); 83 83 } else { 84 84 __this_cpu_write(*fbc->counters, count); 85 85 } ··· 96 96 s64 ret; 97 97 int cpu; 98 98 99 - spin_lock(&fbc->lock); 99 + raw_spin_lock(&fbc->lock); 100 100 ret = fbc->count; 101 101 for_each_online_cpu(cpu) { 102 102 s32 *pcount = per_cpu_ptr(fbc->counters, cpu); 103 103 ret += *pcount; 104 104 } 105 - spin_unlock(&fbc->lock); 105 + raw_spin_unlock(&fbc->lock); 106 106 return ret; 107 107 } 108 108 EXPORT_SYMBOL(__percpu_counter_sum); ··· 110 110 int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, 111 111 struct lock_class_key *key) 112 112 { 113 - spin_lock_init(&fbc->lock); 113 + raw_spin_lock_init(&fbc->lock); 114 114 lockdep_set_class(&fbc->lock, key); 115 115 fbc->count = amount; 116 116 fbc->counters = alloc_percpu(s32); ··· 173 173 s32 *pcount; 174 174 unsigned long flags; 175 175 176 - spin_lock_irqsave(&fbc->lock, flags); 176 + raw_spin_lock_irqsave(&fbc->lock, flags); 177 177 pcount = per_cpu_ptr(fbc->counters, cpu); 178 178 fbc->count += *pcount; 179 179 *pcount = 0; 180 - spin_unlock_irqrestore(&fbc->lock, flags); 180 + raw_spin_unlock_irqrestore(&fbc->lock, flags); 181 181 } 182 182 mutex_unlock(&percpu_counters_lock); 183 183 #endif
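percpu_counter keeps the hot path lock-free: each CPU accumulates into its own s32 slot and only folds into the shared fbc->count, under the now-raw fbc->lock, once the local delta crosses the batch threshold. The add path above, restated as a sketch (the function name is hypothetical; the body mirrors the __percpu_counter_add hunk):

#include <linux/percpu.h>
#include <linux/percpu_counter.h>

void demo_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
    s64 count;

    preempt_disable();
    count = __this_cpu_read(*fbc->counters) + amount;
    if (count >= batch || count <= -batch) {
        /* slow path: fold the local delta into the shared count */
        raw_spin_lock(&fbc->lock);
        fbc->count += count;
        __this_cpu_write(*fbc->counters, 0);
        raw_spin_unlock(&fbc->lock);
    } else {
        /* fast path: stay CPU-local, no shared lock taken */
        __this_cpu_write(*fbc->counters, count);
    }
    preempt_enable();
}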
+6 -6
lib/proportions.c
··· 190 190 191 191 int prop_local_init_percpu(struct prop_local_percpu *pl) 192 192 { 193 - spin_lock_init(&pl->lock); 193 + raw_spin_lock_init(&pl->lock); 194 194 pl->shift = 0; 195 195 pl->period = 0; 196 196 return percpu_counter_init(&pl->events, 0); ··· 226 226 if (pl->period == global_period) 227 227 return; 228 228 229 - spin_lock_irqsave(&pl->lock, flags); 229 + raw_spin_lock_irqsave(&pl->lock, flags); 230 230 prop_adjust_shift(&pl->shift, &pl->period, pg->shift); 231 231 232 232 /* ··· 247 247 percpu_counter_set(&pl->events, 0); 248 248 249 249 pl->period = global_period; 250 - spin_unlock_irqrestore(&pl->lock, flags); 250 + raw_spin_unlock_irqrestore(&pl->lock, flags); 251 251 } 252 252 253 253 /* ··· 324 324 325 325 int prop_local_init_single(struct prop_local_single *pl) 326 326 { 327 - spin_lock_init(&pl->lock); 327 + raw_spin_lock_init(&pl->lock); 328 328 pl->shift = 0; 329 329 pl->period = 0; 330 330 pl->events = 0; ··· 356 356 if (pl->period == global_period) 357 357 return; 358 358 359 - spin_lock_irqsave(&pl->lock, flags); 359 + raw_spin_lock_irqsave(&pl->lock, flags); 360 360 prop_adjust_shift(&pl->shift, &pl->period, pg->shift); 361 361 /* 362 362 * For each missed period, we half the local counter. ··· 367 367 else 368 368 pl->events = 0; 369 369 pl->period = global_period; 370 - spin_unlock_irqrestore(&pl->lock, flags); 370 + raw_spin_unlock_irqrestore(&pl->lock, flags); 371 371 } 372 372 373 373 /*
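The prop_norm_* paths above catch a local counter up with missed global periods: each missed period halves the surviving events, so the whole catch-up is a single shift taken under the per-local raw pl->lock. A simplified sketch with hypothetical names (the real code re-checks the period under the lock and also handles a percpu-counter variant):

#include <linux/bitops.h>
#include <linux/spinlock.h>

static void demo_norm(raw_spinlock_t *lock, unsigned long *events,
                      unsigned long *period, unsigned long global_period)
{
    unsigned long flags;
    unsigned long missed = global_period - *period;

    if (!missed)
        return;

    raw_spin_lock_irqsave(lock, flags);
    if (missed < BITS_PER_LONG)
        *events >>= missed;     /* halve once per missed period */
    else
        *events = 0;            /* everything has aged out */
    *period = global_period;
    raw_spin_unlock_irqrestore(lock, flags);
}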
+2 -2
lib/ratelimit.c
··· 39 39 * in addition to the one that will be printed by 40 40 * the entity that is holding the lock already: 41 41 */ 42 - if (!spin_trylock_irqsave(&rs->lock, flags)) 42 + if (!raw_spin_trylock_irqsave(&rs->lock, flags)) 43 43 return 0; 44 44 45 45 if (!rs->begin) ··· 60 60 rs->missed++; 61 61 ret = 0; 62 62 } 63 - spin_unlock_irqrestore(&rs->lock, flags); 63 + raw_spin_unlock_irqrestore(&rs->lock, flags); 64 64 65 65 return ret; 66 66 }
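___ratelimit() deliberately uses raw_spin_trylock_irqsave() instead of a blocking acquisition: it runs in printk-style paths where waiting on a contended rs->lock could recurse or deadlock, so on contention it simply tells the caller to suppress the message. The guard, sketched with hypothetical names:

#include <linux/spinlock.h>

static int demo_ratelimit(raw_spinlock_t *lock, int *missed)
{
    unsigned long flags;
    int ret = 1;

    if (!raw_spin_trylock_irqsave(lock, flags))
        return 0;       /* contended: don't spin, just drop the message */

    /* ... window accounting would decide ret here ... */
    if (!ret)
        (*missed)++;    /* remember how many messages were suppressed */

    raw_spin_unlock_irqrestore(lock, flags);
    return ret;
}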
+19 -19
lib/rwsem-spinlock.c
··· 22 22 int ret = 1; 23 23 unsigned long flags; 24 24 25 - if (spin_trylock_irqsave(&sem->wait_lock, flags)) { 25 + if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) { 26 26 ret = (sem->activity != 0); 27 - spin_unlock_irqrestore(&sem->wait_lock, flags); 27 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 28 28 } 29 29 return ret; 30 30 } ··· 44 44 lockdep_init_map(&sem->dep_map, name, key, 0); 45 45 #endif 46 46 sem->activity = 0; 47 - spin_lock_init(&sem->wait_lock); 47 + raw_spin_lock_init(&sem->wait_lock); 48 48 INIT_LIST_HEAD(&sem->wait_list); 49 49 } 50 50 EXPORT_SYMBOL(__init_rwsem); ··· 145 145 struct task_struct *tsk; 146 146 unsigned long flags; 147 147 148 - spin_lock_irqsave(&sem->wait_lock, flags); 148 + raw_spin_lock_irqsave(&sem->wait_lock, flags); 149 149 150 150 if (sem->activity >= 0 && list_empty(&sem->wait_list)) { 151 151 /* granted */ 152 152 sem->activity++; 153 - spin_unlock_irqrestore(&sem->wait_lock, flags); 153 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 154 154 goto out; 155 155 } 156 156 ··· 165 165 list_add_tail(&waiter.list, &sem->wait_list); 166 166 167 167 /* we don't need to touch the semaphore struct anymore */ 168 - spin_unlock_irqrestore(&sem->wait_lock, flags); 168 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 169 169 170 170 /* wait to be given the lock */ 171 171 for (;;) { ··· 189 189 int ret = 0; 190 190 191 191 192 - spin_lock_irqsave(&sem->wait_lock, flags); 192 + raw_spin_lock_irqsave(&sem->wait_lock, flags); 193 193 194 194 if (sem->activity >= 0 && list_empty(&sem->wait_list)) { 195 195 /* granted */ ··· 197 197 ret = 1; 198 198 } 199 199 200 - spin_unlock_irqrestore(&sem->wait_lock, flags); 200 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 201 201 202 202 return ret; 203 203 } ··· 212 212 struct task_struct *tsk; 213 213 unsigned long flags; 214 214 215 - spin_lock_irqsave(&sem->wait_lock, flags); 215 + raw_spin_lock_irqsave(&sem->wait_lock, flags); 216 216 217 217 if (sem->activity == 0 && list_empty(&sem->wait_list)) { 218 218 /* granted */ 219 219 sem->activity = -1; 220 - spin_unlock_irqrestore(&sem->wait_lock, flags); 220 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 221 221 goto out; 222 222 } 223 223 ··· 232 232 list_add_tail(&waiter.list, &sem->wait_list); 233 233 234 234 /* we don't need to touch the semaphore struct anymore */ 235 - spin_unlock_irqrestore(&sem->wait_lock, flags); 235 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 236 236 237 237 /* wait to be given the lock */ 238 238 for (;;) { ··· 260 260 unsigned long flags; 261 261 int ret = 0; 262 262 263 - spin_lock_irqsave(&sem->wait_lock, flags); 263 + raw_spin_lock_irqsave(&sem->wait_lock, flags); 264 264 265 265 if (sem->activity == 0 && list_empty(&sem->wait_list)) { 266 266 /* granted */ ··· 268 268 ret = 1; 269 269 } 270 270 271 - spin_unlock_irqrestore(&sem->wait_lock, flags); 271 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 272 272 273 273 return ret; 274 274 } ··· 280 280 { 281 281 unsigned long flags; 282 282 283 - spin_lock_irqsave(&sem->wait_lock, flags); 283 + raw_spin_lock_irqsave(&sem->wait_lock, flags); 284 284 285 285 if (--sem->activity == 0 && !list_empty(&sem->wait_list)) 286 286 sem = __rwsem_wake_one_writer(sem); 287 287 288 - spin_unlock_irqrestore(&sem->wait_lock, flags); 288 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 289 289 } 290 290 291 291 /* ··· 295 295 { 296 296 unsigned long flags; 297 297 298 - spin_lock_irqsave(&sem->wait_lock, flags); 298 + raw_spin_lock_irqsave(&sem->wait_lock, flags); 299 299 300 300 sem->activity = 0; 301 301 if (!list_empty(&sem->wait_list)) 302 302 sem = __rwsem_do_wake(sem, 1); 303 303 304 - spin_unlock_irqrestore(&sem->wait_lock, flags); 304 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 305 305 } 306 306 307 307 /* ··· 312 312 { 313 313 unsigned long flags; 314 314 315 - spin_lock_irqsave(&sem->wait_lock, flags); 315 + raw_spin_lock_irqsave(&sem->wait_lock, flags); 316 316 317 317 sem->activity = 1; 318 318 if (!list_empty(&sem->wait_list)) 319 319 sem = __rwsem_do_wake(sem, 0); 320 320 321 - spin_unlock_irqrestore(&sem->wait_lock, flags); 321 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 322 322
+7 -7
lib/rwsem.c
··· 22 22 lockdep_init_map(&sem->dep_map, name, key, 0); 23 23 #endif 24 24 sem->count = RWSEM_UNLOCKED_VALUE; 25 - spin_lock_init(&sem->wait_lock); 25 + raw_spin_lock_init(&sem->wait_lock); 26 26 INIT_LIST_HEAD(&sem->wait_list); 27 27 } 28 28 ··· 180 180 set_task_state(tsk, TASK_UNINTERRUPTIBLE); 181 181 182 182 /* set up my own style of waitqueue */ 183 - spin_lock_irq(&sem->wait_lock); 183 + raw_spin_lock_irq(&sem->wait_lock); 184 184 waiter.task = tsk; 185 185 waiter.flags = flags; 186 186 get_task_struct(tsk); ··· 204 204 adjustment == -RWSEM_ACTIVE_WRITE_BIAS) 205 205 sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED); 206 206 207 - spin_unlock_irq(&sem->wait_lock); 207 + raw_spin_unlock_irq(&sem->wait_lock); 208 208 209 209 /* wait to be given the lock */ 210 210 for (;;) { ··· 245 245 { 246 246 unsigned long flags; 247 247 248 - spin_lock_irqsave(&sem->wait_lock, flags); 248 + raw_spin_lock_irqsave(&sem->wait_lock, flags); 249 249 250 250 /* do nothing if list empty */ 251 251 if (!list_empty(&sem->wait_list)) 252 252 sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY); 253 253 254 - spin_unlock_irqrestore(&sem->wait_lock, flags); 254 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 255 255 256 256 return sem; 257 257 } ··· 265 265 { 266 266 unsigned long flags; 267 267 268 - spin_lock_irqsave(&sem->wait_lock, flags); 268 + raw_spin_lock_irqsave(&sem->wait_lock, flags); 269 269 270 270 /* do nothing if list empty */ 271 271 if (!list_empty(&sem->wait_list)) 272 272 sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED); 273 273 274 - spin_unlock_irqrestore(&sem->wait_lock, flags); 274 + raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 275 275 276 276 return sem; 277 277 }
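In both rwsem implementations only the inner wait_lock becomes raw: it protects the waiter list for short, strictly bounded critical sections, while the semaphore itself remains a sleeping lock. A hedged sketch of that inner-lock split, using hypothetical demo_* names:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_sem {
    long             count;      /* the sleeping-lock state */
    raw_spinlock_t   wait_lock;  /* raw: brief waiter-list protection only */
    struct list_head wait_list;
};

static void demo_sem_init(struct demo_sem *sem)
{
    sem->count = 0;
    raw_spin_lock_init(&sem->wait_lock);
    INIT_LIST_HEAD(&sem->wait_list);
}

static void demo_sem_wake(struct demo_sem *sem)
{
    unsigned long flags;

    raw_spin_lock_irqsave(&sem->wait_lock, flags);
    if (!list_empty(&sem->wait_list)) {
        /* ... pick a waiter off wait_list and wake it ... */
    }
    raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}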