Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

thermal/x86_pkg_temp: Make pkg_temp_lock a raw_spinlock_t

The spinlock pkg_temp_lock has the potential of being taken in atomic
context because it can be acquired from the thermal IRQ vector.
It's static and of limited scope, so go ahead and make it a raw spinlock.

Signed-off-by: Clark Williams <williams@redhat.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Zhang Rui <rui.zhang@intel.com>
Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
Link: https://lore.kernel.org/r/20191008110021.2j44ayunal7fkb7i@linutronix.de

Authored by Clark Williams and committed by Daniel Lezcano.
afa58b49 fd96a316

+14 -13
+12 -12
drivers/thermal/intel/x86_pkg_temp_thermal.c
··· 63 63 /* Array of zone pointers */ 64 64 static struct zone_device **zones; 65 65 /* Serializes interrupt notification, work and hotplug */ 66 - static DEFINE_SPINLOCK(pkg_temp_lock); 66 + static DEFINE_RAW_SPINLOCK(pkg_temp_lock); 67 67 /* Protects zone operation in the work function against hotplug removal */ 68 68 static DEFINE_MUTEX(thermal_zone_mutex); 69 69 ··· 266 266 u64 msr_val, wr_val; 267 267 268 268 mutex_lock(&thermal_zone_mutex); 269 - spin_lock_irq(&pkg_temp_lock); 269 + raw_spin_lock_irq(&pkg_temp_lock); 270 270 ++pkg_work_cnt; 271 271 272 272 zonedev = pkg_temp_thermal_get_dev(cpu); 273 273 if (!zonedev) { 274 - spin_unlock_irq(&pkg_temp_lock); 274 + raw_spin_unlock_irq(&pkg_temp_lock); 275 275 mutex_unlock(&thermal_zone_mutex); 276 276 return; 277 277 } ··· 285 285 } 286 286 287 287 enable_pkg_thres_interrupt(); 288 - spin_unlock_irq(&pkg_temp_lock); 288 + raw_spin_unlock_irq(&pkg_temp_lock); 289 289 290 290 /* 291 291 * If tzone is not NULL, then thermal_zone_mutex will prevent the ··· 310 310 struct zone_device *zonedev; 311 311 unsigned long flags; 312 312 313 - spin_lock_irqsave(&pkg_temp_lock, flags); 313 + raw_spin_lock_irqsave(&pkg_temp_lock, flags); 314 314 ++pkg_interrupt_cnt; 315 315 316 316 disable_pkg_thres_interrupt(); ··· 322 322 pkg_thermal_schedule_work(zonedev->cpu, &zonedev->work); 323 323 } 324 324 325 - spin_unlock_irqrestore(&pkg_temp_lock, flags); 325 + raw_spin_unlock_irqrestore(&pkg_temp_lock, flags); 326 326 return 0; 327 327 } 328 328 ··· 368 368 zonedev->msr_pkg_therm_high); 369 369 370 370 cpumask_set_cpu(cpu, &zonedev->cpumask); 371 - spin_lock_irq(&pkg_temp_lock); 371 + raw_spin_lock_irq(&pkg_temp_lock); 372 372 zones[id] = zonedev; 373 - spin_unlock_irq(&pkg_temp_lock); 373 + raw_spin_unlock_irq(&pkg_temp_lock); 374 374 return 0; 375 375 } 376 376 ··· 407 407 } 408 408 409 409 /* Protect against work and interrupts */ 410 - spin_lock_irq(&pkg_temp_lock); 410 + raw_spin_lock_irq(&pkg_temp_lock); 411 411 412 412 /* 
413 413 * Check whether this cpu was the current target and store the new ··· 439 439 * To cancel the work we need to drop the lock, otherwise 440 440 * we might deadlock if the work needs to be flushed. 441 441 */ 442 - spin_unlock_irq(&pkg_temp_lock); 442 + raw_spin_unlock_irq(&pkg_temp_lock); 443 443 cancel_delayed_work_sync(&zonedev->work); 444 - spin_lock_irq(&pkg_temp_lock); 444 + raw_spin_lock_irq(&pkg_temp_lock); 445 445 /* 446 446 * If this is not the last cpu in the package and the work 447 447 * did not run after we dropped the lock above, then we ··· 452 452 pkg_thermal_schedule_work(target, &zonedev->work); 453 453 } 454 454 455 - spin_unlock_irq(&pkg_temp_lock); 455 + raw_spin_unlock_irq(&pkg_temp_lock); 456 456 457 457 /* Final cleanup if this is the last cpu */ 458 458 if (lastcpu)
+2 -1
drivers/thermal/st/stm_thermal.c
··· 478 478 } 479 479 #endif /* CONFIG_PM_SLEEP */ 480 480 481 - SIMPLE_DEV_PM_OPS(stm_thermal_pm_ops, stm_thermal_suspend, stm_thermal_resume); 481 + static SIMPLE_DEV_PM_OPS(stm_thermal_pm_ops, 482 + stm_thermal_suspend, stm_thermal_resume); 482 483 483 484 static const struct thermal_zone_of_device_ops stm_tz_ops = { 484 485 .get_temp = stm_thermal_get_temp,