Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/evalenti/linux-soc-thermal

Pull thermal fixes from Eduardo Valentin:
"Still only very minor changes in this -rc:

- Lee Jones fixes compilation warning in sti thermal driver
- Markus Elfring removes unnecessary checks in exynos thermal driver
(as per coccinelle)
- Now we always update cpufreq policies, and thus (hopefully) stay
always in sync with cpufreq, thanks to Yadwinder"

* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/evalenti/linux-soc-thermal:
thermal: Exynos: Deletion of unnecessary checks before two function calls
thermal: sti: Ignore suspend/resume functions when !PM_SLEEP
thermal: cpu_cooling: Update always cpufreq policy with thermal constraints

Changed files
+27 -22
drivers
+21 -16
drivers/thermal/cpu_cooling.c
··· 50 unsigned int cpufreq_state; 51 unsigned int cpufreq_val; 52 struct cpumask allowed_cpus; 53 }; 54 static DEFINE_IDR(cpufreq_idr); 55 static DEFINE_MUTEX(cooling_cpufreq_lock); 56 57 static unsigned int cpufreq_dev_count; 58 59 - /* notify_table passes value to the CPUFREQ_ADJUST callback function. */ 60 - #define NOTIFY_INVALID NULL 61 - static struct cpufreq_cooling_device *notify_device; 62 63 /** 64 * get_idr - function to get a unique id. ··· 286 287 cpufreq_device->cpufreq_state = cooling_state; 288 cpufreq_device->cpufreq_val = clip_freq; 289 - notify_device = cpufreq_device; 290 291 for_each_cpu(cpuid, mask) { 292 if (is_cpufreq_valid(cpuid)) 293 cpufreq_update_policy(cpuid); 294 } 295 - 296 - notify_device = NOTIFY_INVALID; 297 298 return 0; 299 } ··· 312 { 313 struct cpufreq_policy *policy = data; 314 unsigned long max_freq = 0; 315 316 - if (event != CPUFREQ_ADJUST || notify_device == NOTIFY_INVALID) 317 return 0; 318 319 - if (cpumask_test_cpu(policy->cpu, &notify_device->allowed_cpus)) 320 - max_freq = notify_device->cpufreq_val; 321 - else 322 - return 0; 323 324 - /* Never exceed user_policy.max */ 325 - if (max_freq > policy->user_policy.max) 326 - max_freq = policy->user_policy.max; 327 328 - if (policy->max != max_freq) 329 - cpufreq_verify_within_limits(policy, 0, max_freq); 330 331 return 0; 332 } ··· 489 cpufreq_register_notifier(&thermal_cpufreq_notifier_block, 490 CPUFREQ_POLICY_NOTIFIER); 491 cpufreq_dev_count++; 492 493 mutex_unlock(&cooling_cpufreq_lock); 494 ··· 553 554 cpufreq_dev = cdev->devdata; 555 mutex_lock(&cooling_cpufreq_lock); 556 cpufreq_dev_count--; 557 558 /* Unregister the notifier for the last cpufreq cooling device */
··· 50 unsigned int cpufreq_state; 51 unsigned int cpufreq_val; 52 struct cpumask allowed_cpus; 53 + struct list_head node; 54 }; 55 static DEFINE_IDR(cpufreq_idr); 56 static DEFINE_MUTEX(cooling_cpufreq_lock); 57 58 static unsigned int cpufreq_dev_count; 59 60 + static LIST_HEAD(cpufreq_dev_list); 61 62 /** 63 * get_idr - function to get a unique id. ··· 287 288 cpufreq_device->cpufreq_state = cooling_state; 289 cpufreq_device->cpufreq_val = clip_freq; 290 291 for_each_cpu(cpuid, mask) { 292 if (is_cpufreq_valid(cpuid)) 293 cpufreq_update_policy(cpuid); 294 } 295 296 return 0; 297 } ··· 316 { 317 struct cpufreq_policy *policy = data; 318 unsigned long max_freq = 0; 319 + struct cpufreq_cooling_device *cpufreq_dev; 320 321 + if (event != CPUFREQ_ADJUST) 322 return 0; 323 324 + mutex_lock(&cooling_cpufreq_lock); 325 + list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) { 326 + if (!cpumask_test_cpu(policy->cpu, 327 + &cpufreq_dev->allowed_cpus)) 328 + continue; 329 330 + if (!cpufreq_dev->cpufreq_val) 331 + cpufreq_dev->cpufreq_val = get_cpu_frequency( 332 + cpumask_any(&cpufreq_dev->allowed_cpus), 333 + cpufreq_dev->cpufreq_state); 334 335 + max_freq = cpufreq_dev->cpufreq_val; 336 + 337 + if (policy->max != max_freq) 338 + cpufreq_verify_within_limits(policy, 0, max_freq); 339 + } 340 + mutex_unlock(&cooling_cpufreq_lock); 341 342 return 0; 343 } ··· 486 cpufreq_register_notifier(&thermal_cpufreq_notifier_block, 487 CPUFREQ_POLICY_NOTIFIER); 488 cpufreq_dev_count++; 489 + list_add(&cpufreq_dev->node, &cpufreq_dev_list); 490 491 mutex_unlock(&cooling_cpufreq_lock); 492 ··· 549 550 cpufreq_dev = cdev->devdata; 551 mutex_lock(&cooling_cpufreq_lock); 552 + list_del(&cpufreq_dev->node); 553 cpufreq_dev_count--; 554 555 /* Unregister the notifier for the last cpufreq cooling device */
+3 -6
drivers/thermal/samsung/exynos_thermal_common.c
··· 417 418 th_zone = sensor_conf->pzone_data; 419 420 - if (th_zone->therm_dev) 421 - thermal_zone_device_unregister(th_zone->therm_dev); 422 423 - for (i = 0; i < th_zone->cool_dev_size; i++) { 424 - if (th_zone->cool_dev[i]) 425 - cpufreq_cooling_unregister(th_zone->cool_dev[i]); 426 - } 427 428 dev_info(sensor_conf->dev, 429 "Exynos: Kernel Thermal management unregistered\n");
··· 417 418 th_zone = sensor_conf->pzone_data; 419 420 + thermal_zone_device_unregister(th_zone->therm_dev); 421 422 + for (i = 0; i < th_zone->cool_dev_size; ++i) 423 + cpufreq_cooling_unregister(th_zone->cool_dev[i]); 424 425 dev_info(sensor_conf->dev, 426 "Exynos: Kernel Thermal management unregistered\n");
+3
drivers/thermal/st/st_thermal.c
··· 275 } 276 EXPORT_SYMBOL_GPL(st_thermal_unregister); 277 278 static int st_thermal_suspend(struct device *dev) 279 { 280 struct platform_device *pdev = to_platform_device(dev); ··· 306 307 return 0; 308 } 309 SIMPLE_DEV_PM_OPS(st_thermal_pm_ops, st_thermal_suspend, st_thermal_resume); 310 EXPORT_SYMBOL_GPL(st_thermal_pm_ops); 311
··· 275 } 276 EXPORT_SYMBOL_GPL(st_thermal_unregister); 277 278 + #ifdef CONFIG_PM_SLEEP 279 static int st_thermal_suspend(struct device *dev) 280 { 281 struct platform_device *pdev = to_platform_device(dev); ··· 305 306 return 0; 307 } 308 + #endif 309 + 310 SIMPLE_DEV_PM_OPS(st_thermal_pm_ops, st_thermal_suspend, st_thermal_resume); 311 EXPORT_SYMBOL_GPL(st_thermal_pm_ops); 312