Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/evalenti/linux-soc-thermal into thermal-soc

Zhang Rui 53daf938 e2608180

+3361 -139
+23
Documentation/devicetree/bindings/thermal/hisilicon-thermal.txt
··· 1 + * Temperature Sensor on hisilicon SoCs 2 + 3 + ** Required properties : 4 + 5 + - compatible: "hisilicon,tsensor". 6 + - reg: physical base address of thermal sensor and length of memory mapped 7 + region. 8 + - interrupts: The interrupt number to the cpu. Defines the interrupt used 9 + by /SOCTHERM/tsensor. 10 + - clock-names: Input clock name, should be 'thermal_clk'. 11 + - clocks: phandles for clock specified in "clock-names" property. 12 + - #thermal-sensor-cells: Should be 1. See ./thermal.txt for a description. 13 + 14 + Example : 15 + 16 + tsensor: tsensor@0,f7030700 { 17 + compatible = "hisilicon,tsensor"; 18 + reg = <0x0 0xf7030700 0x0 0x1000>; 19 + interrupts = <0 7 0x4>; 20 + clocks = <&sys_ctrl HI6220_TSENSOR_CLK>; 21 + clock-names = "thermal_clk"; 22 + #thermal-sensor-cells = <1>; 23 + }
+57
Documentation/devicetree/bindings/thermal/qcom-spmi-temp-alarm.txt
··· 1 + Qualcomm QPNP PMIC Temperature Alarm 2 + 3 + QPNP temperature alarm peripherals are found inside of Qualcomm PMIC chips 4 + that utilize the Qualcomm SPMI implementation. These peripherals provide an 5 + interrupt signal and status register to identify high PMIC die temperature. 6 + 7 + Required properties: 8 + - compatible: Should contain "qcom,spmi-temp-alarm". 9 + - reg: Specifies the SPMI address and length of the controller's 10 + registers. 11 + - interrupts: PMIC temperature alarm interrupt. 12 + - #thermal-sensor-cells: Should be 0. See thermal.txt for a description. 13 + 14 + Optional properties: 15 + - io-channels: Should contain IIO channel specifier for the ADC channel, 16 + which report chip die temperature. 17 + - io-channel-names: Should contain "thermal". 18 + 19 + Example: 20 + 21 + pm8941_temp: thermal-alarm@2400 { 22 + compatible = "qcom,spmi-temp-alarm"; 23 + reg = <0x2400 0x100>; 24 + interrupts = <0 0x24 0 IRQ_TYPE_EDGE_RISING>; 25 + #thermal-sensor-cells = <0>; 26 + 27 + io-channels = <&pm8941_vadc VADC_DIE_TEMP>; 28 + io-channel-names = "thermal"; 29 + }; 30 + 31 + thermal-zones { 32 + pm8941 { 33 + polling-delay-passive = <250>; 34 + polling-delay = <1000>; 35 + 36 + thermal-sensors = <&pm8941_temp>; 37 + 38 + trips { 39 + passive { 40 + temperature = <1050000>; 41 + hysteresis = <2000>; 42 + type = "passive"; 43 + }; 44 + alert { 45 + temperature = <125000>; 46 + hysteresis = <2000>; 47 + type = "hot"; 48 + }; 49 + crit { 50 + temperature = <145000>; 51 + hysteresis = <2000>; 52 + type = "critical"; 53 + }; 54 + }; 55 + }; 56 + }; 57 +
+9
Documentation/devicetree/bindings/thermal/thermal.txt
··· 167 167 by means of sensor ID. Additional coefficients are 168 168 interpreted as constant offset. 169 169 170 + - sustainable-power: An estimate of the sustainable power (in mW) that the 171 + Type: unsigned thermal zone can dissipate at the desired 172 + Size: one cell control temperature. For reference, the 173 + sustainable power of a 4'' phone is typically 174 + 2000mW, while on a 10'' tablet is around 175 + 4500mW. 176 + 170 177 Note: The delay properties are bound to the maximum dT/dt (temperature 171 178 derivative over time) in two situations for a thermal zone: 172 179 (i) - when passive cooling is activated (polling-delay-passive); and ··· 552 545 * z = c1*x1 + c2*x2 + c3*x3 553 546 */ 554 547 coefficients = <1200 -345 890>; 548 + 549 + sustainable-power = <2500>; 555 550 556 551 trips { 557 552 /* Trips are based on resulting linear equation */
+155 -1
Documentation/thermal/cpu-cooling-api.txt
··· 36 36 np: pointer to the cooling device device tree node 37 37 clip_cpus: cpumask of cpus where the frequency constraints will happen. 38 38 39 - 1.1.3 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) 39 + 1.1.3 struct thermal_cooling_device *cpufreq_power_cooling_register( 40 + const struct cpumask *clip_cpus, u32 capacitance, 41 + get_static_t plat_static_func) 42 + 43 + Similar to cpufreq_cooling_register, this function registers a cpufreq 44 + cooling device. Using this function, the cooling device will 45 + implement the power extensions by using a simple cpu power model. The 46 + cpus must have registered their OPPs using the OPP library. 47 + 48 + The additional parameters are needed for the power model (See 2. Power 49 + models). "capacitance" is the dynamic power coefficient (See 2.1 50 + Dynamic power). "plat_static_func" is a function to calculate the 51 + static power consumed by these cpus (See 2.2 Static power). 52 + 53 + 1.1.4 struct thermal_cooling_device *of_cpufreq_power_cooling_register( 54 + struct device_node *np, const struct cpumask *clip_cpus, u32 capacitance, 55 + get_static_t plat_static_func) 56 + 57 + Similar to cpufreq_power_cooling_register, this function register a 58 + cpufreq cooling device with power extensions using the device tree 59 + information supplied by the np parameter. 60 + 61 + 1.1.5 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) 40 62 41 63 This interface function unregisters the "thermal-cpufreq-%x" cooling device. 42 64 43 65 cdev: Cooling device pointer which has to be unregistered. 66 + 67 + 2. Power models 68 + 69 + The power API registration functions provide a simple power model for 70 + CPUs. The current power is calculated as dynamic + (optionally) 71 + static power. 
This power model requires that the operating-points of 72 + the CPUs are registered using the kernel's opp library and the 73 + `cpufreq_frequency_table` is assigned to the `struct device` of the 74 + cpu. If you are using CONFIG_CPUFREQ_DT then the 75 + `cpufreq_frequency_table` should already be assigned to the cpu 76 + device. 77 + 78 + The `plat_static_func` parameter of `cpufreq_power_cooling_register()` 79 + and `of_cpufreq_power_cooling_register()` is optional. If you don't 80 + provide it, only dynamic power will be considered. 81 + 82 + 2.1 Dynamic power 83 + 84 + The dynamic power consumption of a processor depends on many factors. 85 + For a given processor implementation the primary factors are: 86 + 87 + - The time the processor spends running, consuming dynamic power, as 88 + compared to the time in idle states where dynamic consumption is 89 + negligible. Herein we refer to this as 'utilisation'. 90 + - The voltage and frequency levels as a result of DVFS. The DVFS 91 + level is a dominant factor governing power consumption. 92 + - In running time the 'execution' behaviour (instruction types, memory 93 + access patterns and so forth) causes, in most cases, a second order 94 + variation. In pathological cases this variation can be significant, 95 + but typically it is of a much lesser impact than the factors above. 96 + 97 + A high level dynamic power consumption model may then be represented as: 98 + 99 + Pdyn = f(run) * Voltage^2 * Frequency * Utilisation 100 + 101 + f(run) here represents the described execution behaviour and its 102 + result has units of Watts/Hz/Volt^2 (this is often expressed in 103 + mW/MHz/uVolt^2) 104 + 105 + The detailed behaviour for f(run) could be modelled on-line. However, 106 + in practice, such an on-line model has dependencies on a number of 107 + implementation specific processor support and characterisation 108 + factors. 
Therefore, in initial implementation that contribution is 109 + represented as a constant coefficient. This is a simplification 110 + consistent with the relative contribution to overall power variation. 111 + 112 + In this simplified representation our model becomes: 113 + 114 + Pdyn = Capacitance * Voltage^2 * Frequency * Utilisation 115 + 116 + Where `capacitance` is a constant that represents an indicative 117 + running time dynamic power coefficient in fundamental units of 118 + mW/MHz/uVolt^2. Typical values for mobile CPUs might lie in range 119 + from 100 to 500. For reference, the approximate values for the SoC in 120 + ARM's Juno Development Platform are 530 for the Cortex-A57 cluster and 121 + 140 for the Cortex-A53 cluster. 122 + 123 + 124 + 2.2 Static power 125 + 126 + Static leakage power consumption depends on a number of factors. For a 127 + given circuit implementation the primary factors are: 128 + 129 + - Time the circuit spends in each 'power state' 130 + - Temperature 131 + - Operating voltage 132 + - Process grade 133 + 134 + The time the circuit spends in each 'power state' for a given 135 + evaluation period at first order means OFF or ON. However, 136 + 'retention' states can also be supported that reduce power during 137 + inactive periods without loss of context. 138 + 139 + Note: The visibility of state entries to the OS can vary, according to 140 + platform specifics, and this can then impact the accuracy of a model 141 + based on OS state information alone. It might be possible in some 142 + cases to extract more accurate information from system resources. 143 + 144 + The temperature, operating voltage and process 'grade' (slow to fast) 145 + of the circuit are all significant factors in static leakage power 146 + consumption. All of these have complex relationships to static power. 
147 + 148 + Circuit implementation specific factors include the chosen silicon 149 + process as well as the type, number and size of transistors in both 150 + the logic gates and any RAM elements included. 151 + 152 + The static power consumption modelling must take into account the 153 + power managed regions that are implemented. Taking the example of an 154 + ARM processor cluster, the modelling would take into account whether 155 + each CPU can be powered OFF separately or if only a single power 156 + region is implemented for the complete cluster. 157 + 158 + In one view, there are others, a static power consumption model can 159 + then start from a set of reference values for each power managed 160 + region (e.g. CPU, Cluster/L2) in each state (e.g. ON, OFF) at an 161 + arbitrary process grade, voltage and temperature point. These values 162 + are then scaled for all of the following: the time in each state, the 163 + process grade, the current temperature and the operating voltage. 164 + However, since both implementation specific and complex relationships 165 + dominate the estimate, the appropriate interface to the model from the 166 + cpu cooling device is to provide a function callback that calculates 167 + the static power in this platform. When registering the cpu cooling 168 + device pass a function pointer that follows the `get_static_t` 169 + prototype: 170 + 171 + int plat_get_static(cpumask_t *cpumask, int interval, 172 + unsigned long voltage, u32 &power); 173 + 174 + `cpumask` is the cpumask of the cpus involved in the calculation. 175 + `voltage` is the voltage at which they are operating. The function 176 + should calculate the average static power for the last `interval` 177 + milliseconds. It returns 0 on success, -E* on error. If it 178 + succeeds, it should store the static power in `power`. 
Reading the 179 + temperature of the cpus described by `cpumask` is left for 180 + plat_get_static() to do as the platform knows best which thermal 181 + sensor is closest to the cpu. 182 + 183 + If `plat_static_func` is NULL, static power is considered to be 184 + negligible for this platform and only dynamic power is considered. 185 + 186 + The platform specific callback can then use any combination of tables 187 + and/or equations to permute the estimated value. Process grade 188 + information is not passed to the model since access to such data, from 189 + on-chip measurement capability or manufacture time data, is platform 190 + specific. 191 + 192 + Note: the significance of static power for CPUs in comparison to 193 + dynamic power is highly dependent on implementation. Given the 194 + potential complexity in implementation, the importance and accuracy of 195 + its inclusion when using cpu cooling devices should be assessed on a 196 + case by case basis. 197 +
+247
Documentation/thermal/power_allocator.txt
··· 1 + Power allocator governor tunables 2 + ================================= 3 + 4 + Trip points 5 + ----------- 6 + 7 + The governor requires the following two passive trip points: 8 + 9 + 1. "switch on" trip point: temperature above which the governor 10 + control loop starts operating. This is the first passive trip 11 + point of the thermal zone. 12 + 13 + 2. "desired temperature" trip point: it should be higher than the 14 + "switch on" trip point. This the target temperature the governor 15 + is controlling for. This is the last passive trip point of the 16 + thermal zone. 17 + 18 + PID Controller 19 + -------------- 20 + 21 + The power allocator governor implements a 22 + Proportional-Integral-Derivative controller (PID controller) with 23 + temperature as the control input and power as the controlled output: 24 + 25 + P_max = k_p * e + k_i * err_integral + k_d * diff_err + sustainable_power 26 + 27 + where 28 + e = desired_temperature - current_temperature 29 + err_integral is the sum of previous errors 30 + diff_err = e - previous_error 31 + 32 + It is similar to the one depicted below: 33 + 34 + k_d 35 + | 36 + current_temp | 37 + | v 38 + | +----------+ +---+ 39 + | +----->| diff_err |-->| X |------+ 40 + | | +----------+ +---+ | 41 + | | | tdp actor 42 + | | k_i | | get_requested_power() 43 + | | | | | | | 44 + | | | | | | | ... 45 + v | v v v v v 46 + +---+ | +-------+ +---+ +---+ +---+ +----------+ 47 + | S |-------+----->| sum e |----->| X |--->| S |-->| S |-->|power | 48 + +---+ | +-------+ +---+ +---+ +---+ |allocation| 49 + ^ | ^ +----------+ 50 + | | | | | 51 + | | +---+ | | | 52 + | +------->| X |-------------------+ v v 53 + | +---+ granted performance 54 + desired_temperature ^ 55 + | 56 + | 57 + k_po/k_pu 58 + 59 + Sustainable power 60 + ----------------- 61 + 62 + An estimate of the sustainable dissipatable power (in mW) should be 63 + provided while registering the thermal zone. 
This estimates the 64 + sustained power that can be dissipated at the desired control 65 + temperature. This is the maximum sustained power for allocation at 66 + the desired maximum temperature. The actual sustained power can vary 67 + for a number of reasons. The closed loop controller will take care of 68 + variations such as environmental conditions, and some factors related 69 + to the speed-grade of the silicon. `sustainable_power` is therefore 70 + simply an estimate, and may be tuned to affect the aggressiveness of 71 + the thermal ramp. For reference, the sustainable power of a 4" phone 72 + is typically 2000mW, while on a 10" tablet is around 4500mW (may vary 73 + depending on screen size). 74 + 75 + If you are using device tree, do add it as a property of the 76 + thermal-zone. For example: 77 + 78 + thermal-zones { 79 + soc_thermal { 80 + polling-delay = <1000>; 81 + polling-delay-passive = <100>; 82 + sustainable-power = <2500>; 83 + ... 84 + 85 + Instead, if the thermal zone is registered from the platform code, pass a 86 + `thermal_zone_params` that has a `sustainable_power`. If no 87 + `thermal_zone_params` were being passed, then something like below 88 + will suffice: 89 + 90 + static const struct thermal_zone_params tz_params = { 91 + .sustainable_power = 3500, 92 + }; 93 + 94 + and then pass `tz_params` as the 5th parameter to 95 + `thermal_zone_device_register()` 96 + 97 + k_po and k_pu 98 + ------------- 99 + 100 + The implementation of the PID controller in the power allocator 101 + thermal governor allows the configuration of two proportional term 102 + constants: `k_po` and `k_pu`. `k_po` is the proportional term 103 + constant during temperature overshoot periods (current temperature is 104 + above "desired temperature" trip point). Conversely, `k_pu` is the 105 + proportional term constant during temperature undershoot periods 106 + (current temperature below "desired temperature" trip point). 
107 + 108 + These controls are intended as the primary mechanism for configuring 109 + the permitted thermal "ramp" of the system. For instance, a lower 110 + `k_pu` value will provide a slower ramp, at the cost of capping 111 + available capacity at a low temperature. On the other hand, a high 112 + value of `k_pu` will result in the governor granting very high power 113 + whilst temperature is low, and may lead to temperature overshooting. 114 + 115 + The default value for `k_pu` is: 116 + 117 + 2 * sustainable_power / (desired_temperature - switch_on_temp) 118 + 119 + This means that at `switch_on_temp` the output of the controller's 120 + proportional term will be 2 * `sustainable_power`. The default value 121 + for `k_po` is: 122 + 123 + sustainable_power / (desired_temperature - switch_on_temp) 124 + 125 + Focusing on the proportional and feed forward values of the PID 126 + controller equation we have: 127 + 128 + P_max = k_p * e + sustainable_power 129 + 130 + The proportional term is proportional to the difference between the 131 + desired temperature and the current one. When the current temperature 132 + is the desired one, then the proportional component is zero and 133 + `P_max` = `sustainable_power`. That is, the system should operate in 134 + thermal equilibrium under constant load. `sustainable_power` is only 135 + an estimate, which is the reason for closed-loop control such as this. 
136 + 137 + Expanding `k_pu` we get: 138 + P_max = 2 * sustainable_power * (T_set - T) / (T_set - T_on) + 139 + sustainable_power 140 + 141 + where 142 + T_set is the desired temperature 143 + T is the current temperature 144 + T_on is the switch on temperature 145 + 146 + When the current temperature is the switch_on temperature, the above 147 + formula becomes: 148 + 149 + P_max = 2 * sustainable_power * (T_set - T_on) / (T_set - T_on) + 150 + sustainable_power = 2 * sustainable_power + sustainable_power = 151 + 3 * sustainable_power 152 + 153 + Therefore, the proportional term alone linearly decreases power from 154 + 3 * `sustainable_power` to `sustainable_power` as the temperature 155 + rises from the switch on temperature to the desired temperature. 156 + 157 + k_i and integral_cutoff 158 + ----------------------- 159 + 160 + `k_i` configures the PID loop's integral term constant. This term 161 + allows the PID controller to compensate for long term drift and for 162 + the quantized nature of the output control: cooling devices can't set 163 + the exact power that the governor requests. When the temperature 164 + error is below `integral_cutoff`, errors are accumulated in the 165 + integral term. This term is then multiplied by `k_i` and the result 166 + added to the output of the controller. Typically `k_i` is set low (1 167 + or 2) and `integral_cutoff` is 0. 168 + 169 + k_d 170 + --- 171 + 172 + `k_d` configures the PID loop's derivative term constant. It's 173 + recommended to leave it as the default: 0. 174 + 175 + Cooling device power API 176 + ======================== 177 + 178 + Cooling devices controlled by this governor must supply the additional 179 + "power" API in their `cooling_device_ops`. It consists of three ops: 180 + 181 + 1. 
int get_requested_power(struct thermal_cooling_device *cdev, 182 + struct thermal_zone_device *tz, u32 *power); 183 + @cdev: The `struct thermal_cooling_device` pointer 184 + @tz: thermal zone in which we are currently operating 185 + @power: pointer in which to store the calculated power 186 + 187 + `get_requested_power()` calculates the power requested by the device 188 + in milliwatts and stores it in @power . It should return 0 on 189 + success, -E* on failure. This is currently used by the power 190 + allocator governor to calculate how much power to give to each cooling 191 + device. 192 + 193 + 2. int state2power(struct thermal_cooling_device *cdev, struct 194 + thermal_zone_device *tz, unsigned long state, u32 *power); 195 + @cdev: The `struct thermal_cooling_device` pointer 196 + @tz: thermal zone in which we are currently operating 197 + @state: A cooling device state 198 + @power: pointer in which to store the equivalent power 199 + 200 + Convert cooling device state @state into power consumption in 201 + milliwatts and store it in @power. It should return 0 on success, -E* 202 + on failure. This is currently used by thermal core to calculate the 203 + maximum power that an actor can consume. 204 + 205 + 3. int power2state(struct thermal_cooling_device *cdev, u32 power, 206 + unsigned long *state); 207 + @cdev: The `struct thermal_cooling_device` pointer 208 + @power: power in milliwatts 209 + @state: pointer in which to store the resulting state 210 + 211 + Calculate a cooling device state that would make the device consume at 212 + most @power mW and store it in @state. It should return 0 on success, 213 + -E* on failure. This is currently used by the thermal core to convert 214 + a given power set by the power allocator governor to a state that the 215 + cooling device can set. It is a function because this conversion may 216 + depend on external factors that may change so this function should return the 217 + best conversion given "current circumstances". 
218 + 219 + Cooling device weights 220 + ---------------------- 221 + 222 + Weights are a mechanism to bias the allocation among cooling 223 + devices. They express the relative power efficiency of different 224 + cooling devices. Higher weight can be used to express higher power 225 + efficiency. Weighting is relative such that if each cooling device 226 + has a weight of one they are considered equal. This is particularly 227 + useful in heterogeneous systems where two cooling devices may perform 228 + the same kind of compute, but with different efficiency. For example, 229 + a system with two different types of processors. 230 + 231 + If the thermal zone is registered using 232 + `thermal_zone_device_register()` (i.e., platform code), then weights 233 + are passed as part of the thermal zone's `thermal_bind_parameters`. 234 + If the platform is registered using device tree, then they are passed 235 + as the `contribution` property of each map in the `cooling-maps` node. 236 + 237 + Limitations of the power allocator governor 238 + =========================================== 239 + 240 + The power allocator governor's PID controller works best if there is a 241 + periodic tick. If you have a driver that calls 242 + `thermal_zone_device_update()` (or anything that ends up calling the 243 + governor's `throttle()` function) repetitively, the governor response 244 + won't be very good. Note that this is not particular to this 245 + governor, step-wise will also misbehave if you call its throttle() 246 + faster than the normal thermal framework tick (due to interrupts for 247 + example) as it will overreact.
+94 -5
Documentation/thermal/sysfs-api.txt
··· 95 95 1.3 interface for binding a thermal zone device with a thermal cooling device 96 96 1.3.1 int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, 97 97 int trip, struct thermal_cooling_device *cdev, 98 - unsigned long upper, unsigned long lower); 98 + unsigned long upper, unsigned long lower, unsigned int weight); 99 99 100 100 This interface function bind a thermal cooling device to the certain trip 101 101 point of a thermal zone device. ··· 110 110 lower:the Minimum cooling state can be used for this trip point. 111 111 THERMAL_NO_LIMIT means no lower limit, 112 112 and the cooling device can be in cooling state 0. 113 + weight: the influence of this cooling device in this thermal 114 + zone. See 1.4.1 below for more information. 113 115 114 116 1.3.2 int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz, 115 117 int trip, struct thermal_cooling_device *cdev); ··· 129 127 This structure defines the following parameters that are used to bind 130 128 a zone with a cooling device for a particular trip point. 131 129 .cdev: The cooling device pointer 132 - .weight: The 'influence' of a particular cooling device on this zone. 133 - This is on a percentage scale. The sum of all these weights 134 - (for a particular zone) cannot exceed 100. 130 + .weight: The 'influence' of a particular cooling device on this 131 + zone. This is relative to the rest of the cooling 132 + devices. For example, if all cooling devices have a 133 + weight of 1, then they all contribute the same. You can 134 + use percentages if you want, but it's not mandatory. A 135 + weight of 0 means that this cooling device doesn't 136 + contribute to the cooling of this zone unless all cooling 137 + devices have a weight of 0. If all weights are 0, then 138 + they all contribute the same. 135 139 .trip_mask:This is a bit mask that gives the binding relation between 136 140 this thermal zone and cdev, for a particular trip point. 
137 141 If nth bit is set, then the cdev and thermal zone are bound ··· 184 176 |---trip_point_[0-*]_type: Trip point type 185 177 |---trip_point_[0-*]_hyst: Hysteresis value for this trip point 186 178 |---emul_temp: Emulated temperature set node 179 + |---sustainable_power: Sustainable dissipatable power 180 + |---k_po: Proportional term during temperature overshoot 181 + |---k_pu: Proportional term during temperature undershoot 182 + |---k_i: PID's integral term in the power allocator gov 183 + |---k_d: PID's derivative term in the power allocator 184 + |---integral_cutoff: Offset above which errors are accumulated 185 + |---slope: Slope constant applied as linear extrapolation 186 + |---offset: Offset constant applied as linear extrapolation 187 187 188 188 Thermal cooling device sys I/F, created once it's registered: 189 189 /sys/class/thermal/cooling_device[0-*]: ··· 208 192 /sys/class/thermal/thermal_zone[0-*]: 209 193 |---cdev[0-*]: [0-*]th cooling device in current thermal zone 210 194 |---cdev[0-*]_trip_point: Trip point that cdev[0-*] is associated with 195 + |---cdev[0-*]_weight: Influence of the cooling device in 196 + this thermal zone 211 197 212 198 Besides the thermal zone device sysfs I/F and cooling device sysfs I/F, 213 199 the generic thermal driver also creates a hwmon sysfs I/F for each _type_ ··· 283 265 point. 284 266 RO, Optional 285 267 268 + cdev[0-*]_weight 269 + The influence of cdev[0-*] in this thermal zone. This value 270 + is relative to the rest of cooling devices in the thermal 271 + zone. For example, if a cooling device has a weight double 272 + than that of other, it's twice as effective in cooling the 273 + thermal zone. 274 + RW, Optional 275 + 286 276 passive 287 277 Attribute is only present for zones in which the passive cooling 288 278 policy is not supported by native thermal driver. 
Default is zero ··· 314 288 WARNING: Be careful while enabling this option on production systems, 315 289 because userland can easily disable the thermal policy by simply 316 290 flooding this sysfs node with low temperature values. 291 + 292 + sustainable_power 293 + An estimate of the sustained power that can be dissipated by 294 + the thermal zone. Used by the power allocator governor. For 295 + more information see Documentation/thermal/power_allocator.txt 296 + Unit: milliwatts 297 + RW, Optional 298 + 299 + k_po 300 + The proportional term of the power allocator governor's PID 301 + controller during temperature overshoot. Temperature overshoot 302 + is when the current temperature is above the "desired 303 + temperature" trip point. For more information see 304 + Documentation/thermal/power_allocator.txt 305 + RW, Optional 306 + 307 + k_pu 308 + The proportional term of the power allocator governor's PID 309 + controller during temperature undershoot. Temperature undershoot 310 + is when the current temperature is below the "desired 311 + temperature" trip point. For more information see 312 + Documentation/thermal/power_allocator.txt 313 + RW, Optional 314 + 315 + k_i 316 + The integral term of the power allocator governor's PID 317 + controller. This term allows the PID controller to compensate 318 + for long term drift. For more information see 319 + Documentation/thermal/power_allocator.txt 320 + RW, Optional 321 + 322 + k_d 323 + The derivative term of the power allocator governor's PID 324 + controller. For more information see 325 + Documentation/thermal/power_allocator.txt 326 + RW, Optional 327 + 328 + integral_cutoff 329 + Temperature offset from the desired temperature trip point 330 + above which the integral term of the power allocator 331 + governor's PID controller starts accumulating errors. 
For 332 + example, if integral_cutoff is 0, then the integral term only 333 + accumulates error when temperature is above the desired 334 + temperature trip point. For more information see 335 + Documentation/thermal/power_allocator.txt 336 + RW, Optional 337 + 338 + slope 339 + The slope constant used in a linear extrapolation model 340 + to determine a hotspot temperature based off the sensor's 341 + raw readings. It is up to the device driver to determine 342 + the usage of these values. 343 + RW, Optional 344 + 345 + offset 346 + The offset constant used in a linear extrapolation model 347 + to determine a hotspot temperature based off the sensor's 348 + raw readings. It is up to the device driver to determine 349 + the usage of these values. 350 + RW, Optional 317 351 318 352 ***************************** 319 353 * Cooling device attributes * ··· 404 318 active[0] and active[1] at the same time, it may register itself as a 405 319 thermal_zone_device (thermal_zone1) with 4 trip points in all. 406 320 It has one processor and one fan, which are both registered as 407 - thermal_cooling_device. 321 + thermal_cooling_device. Both are considered to have the same 322 + effectiveness in cooling the thermal zone. 408 323 409 324 If the processor is listed in _PSL method, and the fan is listed in _AL0 410 325 method, the sys I/F structure will be built like this: ··· 427 340 |---trip_point_3_type: active1 428 341 |---cdev0: --->/sys/class/thermal/cooling_device0 429 342 |---cdev0_trip_point: 1 /* cdev0 can be used for passive */ 343 + |---cdev0_weight: 1024 430 344 |---cdev1: --->/sys/class/thermal/cooling_device3 431 345 |---cdev1_trip_point: 2 /* cdev1 can be used for active[0]*/ 346 + |---cdev1_weight: 1024 432 347 433 348 |cooling_device0: 434 349 |---type: Processor
+6 -3
drivers/acpi/thermal.c
··· 800 800 result = 801 801 thermal_zone_bind_cooling_device 802 802 (thermal, trip, cdev, 803 - THERMAL_NO_LIMIT, THERMAL_NO_LIMIT); 803 + THERMAL_NO_LIMIT, THERMAL_NO_LIMIT, 804 + THERMAL_WEIGHT_DEFAULT); 804 805 else 805 806 result = 806 807 thermal_zone_unbind_cooling_device ··· 825 824 if (bind) 826 825 result = thermal_zone_bind_cooling_device 827 826 (thermal, trip, cdev, 828 - THERMAL_NO_LIMIT, THERMAL_NO_LIMIT); 827 + THERMAL_NO_LIMIT, THERMAL_NO_LIMIT, 828 + THERMAL_WEIGHT_DEFAULT); 829 829 else 830 830 result = thermal_zone_unbind_cooling_device 831 831 (thermal, trip, cdev); ··· 843 841 result = thermal_zone_bind_cooling_device 844 842 (thermal, THERMAL_TRIPS_NONE, 845 843 cdev, THERMAL_NO_LIMIT, 846 - THERMAL_NO_LIMIT); 844 + THERMAL_NO_LIMIT, 845 + THERMAL_WEIGHT_DEFAULT); 847 846 else 848 847 result = thermal_zone_unbind_cooling_device 849 848 (thermal, THERMAL_TRIPS_NONE,
+2 -1
drivers/platform/x86/acerhdf.c
··· 372 372 return 0; 373 373 374 374 if (thermal_zone_bind_cooling_device(thermal, 0, cdev, 375 - THERMAL_NO_LIMIT, THERMAL_NO_LIMIT)) { 375 + THERMAL_NO_LIMIT, THERMAL_NO_LIMIT, 376 + THERMAL_WEIGHT_DEFAULT)) { 376 377 pr_err("error binding cooling dev\n"); 377 378 return -EINVAL; 378 379 }
+44
drivers/thermal/Kconfig
··· 42 42 Say 'Y' here if you need to build thermal infrastructure 43 43 based on device tree. 44 44 45 + config THERMAL_WRITABLE_TRIPS 46 + bool "Enable writable trip points" 47 + help 48 + This option allows the system integrator to choose whether 49 + trip temperatures can be changed from userspace. The 50 + writable trips need to be specified when setting up the 51 + thermal zone but the choice here takes precedence. 52 + 53 + Say 'Y' here if you would like to allow userspace tools to 54 + change trip temperatures. 55 + 45 56 choice 46 57 prompt "Default Thermal governor" 47 58 default THERMAL_DEFAULT_GOV_STEP_WISE ··· 82 71 Select this if you want to let the user space manage the 83 72 platform thermals. 84 73 74 + config THERMAL_DEFAULT_GOV_POWER_ALLOCATOR 75 + bool "power_allocator" 76 + select THERMAL_GOV_POWER_ALLOCATOR 77 + help 78 + Select this if you want to control temperature based on 79 + system and device power allocation. This governor can only 80 + operate on cooling devices that implement the power API. 81 + 85 82 endchoice 86 83 87 84 config THERMAL_GOV_FAIR_SHARE ··· 117 98 bool "User_space thermal governor" 118 99 help 119 100 Enable this to let the user space manage the platform thermals. 101 + 102 + config THERMAL_GOV_POWER_ALLOCATOR 103 + bool "Power allocator thermal governor" 104 + help 105 + Enable this to manage platform thermals by dynamically 106 + allocating and limiting power to devices. 120 107 121 108 config CPU_THERMAL 122 109 bool "generic cpu cooling support" ··· 160 135 WARNING: Be careful while enabling this option on production systems, 161 136 because userland can easily disable the thermal policy by simply 162 137 flooding this sysfs node with low temperature values. 138 + 139 + config HISI_THERMAL 140 + tristate "Hisilicon thermal driver" 141 + depends on ARCH_HISI && CPU_THERMAL && OF 142 + help 143 + Enable this to plug hisilicon's thermal sensor driver into the Linux 144 + thermal framework. 
cpufreq is used as the cooling device to throttle 145 + CPUs when the passive trip is crossed. 163 146 164 147 config IMX_THERMAL 165 148 tristate "Temperature sensor driver for Freescale i.MX SoCs" ··· 331 298 depends on ARCH_STI && OF 332 299 source "drivers/thermal/st/Kconfig" 333 300 endmenu 301 + 302 + config QCOM_SPMI_TEMP_ALARM 303 + tristate "Qualcomm SPMI PMIC Temperature Alarm" 304 + depends on OF && SPMI && IIO 305 + select REGMAP_SPMI 306 + help 307 + This enables a thermal sysfs driver for Qualcomm plug-and-play (QPNP) 308 + PMIC devices. It shows up in sysfs as a thermal sensor with multiple 309 + trip points. The temperature reported by the thermal sensor reflects the 310 + real time die temperature if an ADC is present or an estimate of the 311 + temperature based upon the over temperature stage value. 334 312 335 313 endif
+3
drivers/thermal/Makefile
··· 14 14 thermal_sys-$(CONFIG_THERMAL_GOV_BANG_BANG) += gov_bang_bang.o 15 15 thermal_sys-$(CONFIG_THERMAL_GOV_STEP_WISE) += step_wise.o 16 16 thermal_sys-$(CONFIG_THERMAL_GOV_USER_SPACE) += user_space.o 17 + thermal_sys-$(CONFIG_THERMAL_GOV_POWER_ALLOCATOR) += power_allocator.o 17 18 18 19 # cpufreq cooling 19 20 thermal_sys-$(CONFIG_CPU_THERMAL) += cpu_cooling.o ··· 23 22 thermal_sys-$(CONFIG_CLOCK_THERMAL) += clock_cooling.o 24 23 25 24 # platform thermal drivers 25 + obj-$(CONFIG_QCOM_SPMI_TEMP_ALARM) += qcom-spmi-temp-alarm.o 26 26 obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o 27 27 obj-$(CONFIG_ROCKCHIP_THERMAL) += rockchip_thermal.o 28 28 obj-$(CONFIG_RCAR_THERMAL) += rcar_thermal.o ··· 41 39 obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal/ 42 40 obj-$(CONFIG_ST_THERMAL) += st/ 43 41 obj-$(CONFIG_TEGRA_SOCTHERM) += tegra_soctherm.o 42 + obj-$(CONFIG_HISI_THERMAL) += hisi_thermal.o
+569 -18
drivers/thermal/cpu_cooling.c
··· 26 26 #include <linux/thermal.h> 27 27 #include <linux/cpufreq.h> 28 28 #include <linux/err.h> 29 + #include <linux/pm_opp.h> 29 30 #include <linux/slab.h> 30 31 #include <linux/cpu.h> 31 32 #include <linux/cpu_cooling.h> 33 + 34 + #include <trace/events/thermal.h> 32 35 33 36 /* 34 37 * Cooling state <-> CPUFreq frequency ··· 48 45 */ 49 46 50 47 /** 48 + * struct power_table - frequency to power conversion 49 + * @frequency: frequency in KHz 50 + * @power: power in mW 51 + * 52 + * This structure is built when the cooling device registers and helps 53 + * in translating frequency to power and viceversa. 54 + */ 55 + struct power_table { 56 + u32 frequency; 57 + u32 power; 58 + }; 59 + 60 + /** 51 61 * struct cpufreq_cooling_device - data for cooling device with cpufreq 52 62 * @id: unique integer value corresponding to each cpufreq_cooling_device 53 63 * registered. ··· 74 58 * cpufreq frequencies. 75 59 * @allowed_cpus: all the cpus involved for this cpufreq_cooling_device. 76 60 * @node: list_head to link all cpufreq_cooling_device together. 61 + * @last_load: load measured by the latest call to cpufreq_get_actual_power() 62 + * @time_in_idle: previous reading of the absolute time that this cpu was idle 63 + * @time_in_idle_timestamp: wall time of the last invocation of 64 + * get_cpu_idle_time_us() 65 + * @dyn_power_table: array of struct power_table for frequency to power 66 + * conversion, sorted in ascending order. 67 + * @dyn_power_table_entries: number of entries in the @dyn_power_table array 68 + * @cpu_dev: the first cpu_device from @allowed_cpus that has OPPs registered 69 + * @plat_get_static_power: callback to calculate the static power 77 70 * 78 71 * This structure is required for keeping information of each registered 79 72 * cpufreq_cooling_device. 
··· 96 71 unsigned int *freq_table; /* In descending order */ 97 72 struct cpumask allowed_cpus; 98 73 struct list_head node; 74 + u32 last_load; 75 + u64 *time_in_idle; 76 + u64 *time_in_idle_timestamp; 77 + struct power_table *dyn_power_table; 78 + int dyn_power_table_entries; 79 + struct device *cpu_dev; 80 + get_static_t plat_get_static_power; 99 81 }; 100 82 static DEFINE_IDR(cpufreq_idr); 101 83 static DEFINE_MUTEX(cooling_cpufreq_lock); ··· 218 186 unsigned long max_freq = 0; 219 187 struct cpufreq_cooling_device *cpufreq_dev; 220 188 221 - if (event != CPUFREQ_ADJUST) 222 - return 0; 189 + switch (event) { 223 190 224 - mutex_lock(&cooling_cpufreq_lock); 225 - list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) { 226 - if (!cpumask_test_cpu(policy->cpu, 227 - &cpufreq_dev->allowed_cpus)) 228 - continue; 191 + case CPUFREQ_ADJUST: 192 + mutex_lock(&cooling_cpufreq_lock); 193 + list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) { 194 + if (!cpumask_test_cpu(policy->cpu, 195 + &cpufreq_dev->allowed_cpus)) 196 + continue; 229 197 230 - max_freq = cpufreq_dev->cpufreq_val; 198 + max_freq = cpufreq_dev->cpufreq_val; 231 199 232 - if (policy->max != max_freq) 233 - cpufreq_verify_within_limits(policy, 0, max_freq); 200 + if (policy->max != max_freq) 201 + cpufreq_verify_within_limits(policy, 0, 202 + max_freq); 203 + } 204 + mutex_unlock(&cooling_cpufreq_lock); 205 + break; 206 + default: 207 + return NOTIFY_DONE; 234 208 } 235 - mutex_unlock(&cooling_cpufreq_lock); 236 209 237 - return 0; 210 + return NOTIFY_OK; 211 + } 212 + 213 + /** 214 + * build_dyn_power_table() - create a dynamic power to frequency table 215 + * @cpufreq_device: the cpufreq cooling device in which to store the table 216 + * @capacitance: dynamic power coefficient for these cpus 217 + * 218 + * Build a dynamic power to frequency table for this cpu and store it 219 + * in @cpufreq_device. 
This table will be used in cpu_power_to_freq() and 220 + * cpu_freq_to_power() to convert between power and frequency 221 + * efficiently. Power is stored in mW, frequency in KHz. The 222 + * resulting table is in ascending order. 223 + * 224 + * Return: 0 on success, -E* on error. 225 + */ 226 + static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device, 227 + u32 capacitance) 228 + { 229 + struct power_table *power_table; 230 + struct dev_pm_opp *opp; 231 + struct device *dev = NULL; 232 + int num_opps = 0, cpu, i, ret = 0; 233 + unsigned long freq; 234 + 235 + rcu_read_lock(); 236 + 237 + for_each_cpu(cpu, &cpufreq_device->allowed_cpus) { 238 + dev = get_cpu_device(cpu); 239 + if (!dev) { 240 + dev_warn(&cpufreq_device->cool_dev->device, 241 + "No cpu device for cpu %d\n", cpu); 242 + continue; 243 + } 244 + 245 + num_opps = dev_pm_opp_get_opp_count(dev); 246 + if (num_opps > 0) { 247 + break; 248 + } else if (num_opps < 0) { 249 + ret = num_opps; 250 + goto unlock; 251 + } 252 + } 253 + 254 + if (num_opps == 0) { 255 + ret = -EINVAL; 256 + goto unlock; 257 + } 258 + 259 + power_table = kcalloc(num_opps, sizeof(*power_table), GFP_KERNEL); 260 + if (!power_table) { 261 + ret = -ENOMEM; 262 + goto unlock; 263 + } 264 + 265 + for (freq = 0, i = 0; 266 + opp = dev_pm_opp_find_freq_ceil(dev, &freq), !IS_ERR(opp); 267 + freq++, i++) { 268 + u32 freq_mhz, voltage_mv; 269 + u64 power; 270 + 271 + freq_mhz = freq / 1000000; 272 + voltage_mv = dev_pm_opp_get_voltage(opp) / 1000; 273 + 274 + /* 275 + * Do the multiplication with MHz and millivolt so as 276 + * to not overflow. 
277 + */ 278 + power = (u64)capacitance * freq_mhz * voltage_mv * voltage_mv; 279 + do_div(power, 1000000000); 280 + 281 + /* frequency is stored in power_table in KHz */ 282 + power_table[i].frequency = freq / 1000; 283 + 284 + /* power is stored in mW */ 285 + power_table[i].power = power; 286 + } 287 + 288 + if (i == 0) { 289 + ret = PTR_ERR(opp); 290 + goto unlock; 291 + } 292 + 293 + cpufreq_device->cpu_dev = dev; 294 + cpufreq_device->dyn_power_table = power_table; 295 + cpufreq_device->dyn_power_table_entries = i; 296 + 297 + unlock: 298 + rcu_read_unlock(); 299 + return ret; 300 + } 301 + 302 + static u32 cpu_freq_to_power(struct cpufreq_cooling_device *cpufreq_device, 303 + u32 freq) 304 + { 305 + int i; 306 + struct power_table *pt = cpufreq_device->dyn_power_table; 307 + 308 + for (i = 1; i < cpufreq_device->dyn_power_table_entries; i++) 309 + if (freq < pt[i].frequency) 310 + break; 311 + 312 + return pt[i - 1].power; 313 + } 314 + 315 + static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_device, 316 + u32 power) 317 + { 318 + int i; 319 + struct power_table *pt = cpufreq_device->dyn_power_table; 320 + 321 + for (i = 1; i < cpufreq_device->dyn_power_table_entries; i++) 322 + if (power < pt[i].power) 323 + break; 324 + 325 + return pt[i - 1].frequency; 326 + } 327 + 328 + /** 329 + * get_load() - get load for a cpu since last updated 330 + * @cpufreq_device: &struct cpufreq_cooling_device for this cpu 331 + * @cpu: cpu number 332 + * 333 + * Return: The average load of cpu @cpu in percentage since this 334 + * function was last called. 
335 + */ 336 + static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu) 337 + { 338 + u32 load; 339 + u64 now, now_idle, delta_time, delta_idle; 340 + 341 + now_idle = get_cpu_idle_time(cpu, &now, 0); 342 + delta_idle = now_idle - cpufreq_device->time_in_idle[cpu]; 343 + delta_time = now - cpufreq_device->time_in_idle_timestamp[cpu]; 344 + 345 + if (delta_time <= delta_idle) 346 + load = 0; 347 + else 348 + load = div64_u64(100 * (delta_time - delta_idle), delta_time); 349 + 350 + cpufreq_device->time_in_idle[cpu] = now_idle; 351 + cpufreq_device->time_in_idle_timestamp[cpu] = now; 352 + 353 + return load; 354 + } 355 + 356 + /** 357 + * get_static_power() - calculate the static power consumed by the cpus 358 + * @cpufreq_device: struct &cpufreq_cooling_device for this cpu cdev 359 + * @tz: thermal zone device in which we're operating 360 + * @freq: frequency in KHz 361 + * @power: pointer in which to store the calculated static power 362 + * 363 + * Calculate the static power consumed by the cpus described by 364 + * @cpu_actor running at frequency @freq. This function relies on a 365 + * platform specific function that should have been provided when the 366 + * actor was registered. If it wasn't, the static power is assumed to 367 + * be negligible. The calculated static power is stored in @power. 368 + * 369 + * Return: 0 on success, -E* on failure. 
370 + */ 371 + static int get_static_power(struct cpufreq_cooling_device *cpufreq_device, 372 + struct thermal_zone_device *tz, unsigned long freq, 373 + u32 *power) 374 + { 375 + struct dev_pm_opp *opp; 376 + unsigned long voltage; 377 + struct cpumask *cpumask = &cpufreq_device->allowed_cpus; 378 + unsigned long freq_hz = freq * 1000; 379 + 380 + if (!cpufreq_device->plat_get_static_power || 381 + !cpufreq_device->cpu_dev) { 382 + *power = 0; 383 + return 0; 384 + } 385 + 386 + rcu_read_lock(); 387 + 388 + opp = dev_pm_opp_find_freq_exact(cpufreq_device->cpu_dev, freq_hz, 389 + true); 390 + voltage = dev_pm_opp_get_voltage(opp); 391 + 392 + rcu_read_unlock(); 393 + 394 + if (voltage == 0) { 395 + dev_warn_ratelimited(cpufreq_device->cpu_dev, 396 + "Failed to get voltage for frequency %lu: %ld\n", 397 + freq_hz, IS_ERR(opp) ? PTR_ERR(opp) : 0); 398 + return -EINVAL; 399 + } 400 + 401 + return cpufreq_device->plat_get_static_power(cpumask, tz->passive_delay, 402 + voltage, power); 403 + } 404 + 405 + /** 406 + * get_dynamic_power() - calculate the dynamic power 407 + * @cpufreq_device: &cpufreq_cooling_device for this cdev 408 + * @freq: current frequency 409 + * 410 + * Return: the dynamic power consumed by the cpus described by 411 + * @cpufreq_device. 
412 + */ 413 + static u32 get_dynamic_power(struct cpufreq_cooling_device *cpufreq_device, 414 + unsigned long freq) 415 + { 416 + u32 raw_cpu_power; 417 + 418 + raw_cpu_power = cpu_freq_to_power(cpufreq_device, freq); 419 + return (raw_cpu_power * cpufreq_device->last_load) / 100; 238 420 } 239 421 240 422 /* cpufreq cooling device callback functions are defined below */ ··· 526 280 return 0; 527 281 } 528 282 283 + /** 284 + * cpufreq_get_requested_power() - get the current power 285 + * @cdev: &thermal_cooling_device pointer 286 + * @tz: a valid thermal zone device pointer 287 + * @power: pointer in which to store the resulting power 288 + * 289 + * Calculate the current power consumption of the cpus in milliwatts 290 + * and store it in @power. This function should actually calculate 291 + * the requested power, but it's hard to get the frequency that 292 + * cpufreq would have assigned if there were no thermal limits. 293 + * Instead, we calculate the current power on the assumption that the 294 + * immediate future will look like the immediate past. 295 + * 296 + * We use the current frequency and the average load since this 297 + * function was last called. In reality, there could have been 298 + * multiple opps since this function was last called and that affects 299 + * the load calculation. While it's not perfectly accurate, this 300 + * simplification is good enough and works. REVISIT this, as more 301 + * complex code may be needed if experiments show that it's not 302 + * accurate enough. 303 + * 304 + * Return: 0 on success, -E* if getting the static power failed. 
305 + */ 306 + static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev, 307 + struct thermal_zone_device *tz, 308 + u32 *power) 309 + { 310 + unsigned long freq; 311 + int i = 0, cpu, ret; 312 + u32 static_power, dynamic_power, total_load = 0; 313 + struct cpufreq_cooling_device *cpufreq_device = cdev->devdata; 314 + u32 *load_cpu = NULL; 315 + 316 + cpu = cpumask_any_and(&cpufreq_device->allowed_cpus, cpu_online_mask); 317 + 318 + /* 319 + * All the CPUs are offline, thus the requested power by 320 + * the cdev is 0 321 + */ 322 + if (cpu >= nr_cpu_ids) { 323 + *power = 0; 324 + return 0; 325 + } 326 + 327 + freq = cpufreq_quick_get(cpu); 328 + 329 + if (trace_thermal_power_cpu_get_power_enabled()) { 330 + u32 ncpus = cpumask_weight(&cpufreq_device->allowed_cpus); 331 + 332 + load_cpu = devm_kcalloc(&cdev->device, ncpus, sizeof(*load_cpu), 333 + GFP_KERNEL); 334 + } 335 + 336 + for_each_cpu(cpu, &cpufreq_device->allowed_cpus) { 337 + u32 load; 338 + 339 + if (cpu_online(cpu)) 340 + load = get_load(cpufreq_device, cpu); 341 + else 342 + load = 0; 343 + 344 + total_load += load; 345 + if (trace_thermal_power_cpu_limit_enabled() && load_cpu) 346 + load_cpu[i] = load; 347 + 348 + i++; 349 + } 350 + 351 + cpufreq_device->last_load = total_load; 352 + 353 + dynamic_power = get_dynamic_power(cpufreq_device, freq); 354 + ret = get_static_power(cpufreq_device, tz, freq, &static_power); 355 + if (ret) { 356 + if (load_cpu) 357 + devm_kfree(&cdev->device, load_cpu); 358 + return ret; 359 + } 360 + 361 + if (load_cpu) { 362 + trace_thermal_power_cpu_get_power( 363 + &cpufreq_device->allowed_cpus, 364 + freq, load_cpu, i, dynamic_power, static_power); 365 + 366 + devm_kfree(&cdev->device, load_cpu); 367 + } 368 + 369 + *power = static_power + dynamic_power; 370 + return 0; 371 + } 372 + 373 + /** 374 + * cpufreq_state2power() - convert a cpu cdev state to power consumed 375 + * @cdev: &thermal_cooling_device pointer 376 + * @tz: a valid thermal zone device 
pointer 377 + * @state: cooling device state to be converted 378 + * @power: pointer in which to store the resulting power 379 + * 380 + * Convert cooling device state @state into power consumption in 381 + * milliwatts assuming 100% load. Store the calculated power in 382 + * @power. 383 + * 384 + * Return: 0 on success, -EINVAL if the cooling device state could not 385 + * be converted into a frequency or other -E* if there was an error 386 + * when calculating the static power. 387 + */ 388 + static int cpufreq_state2power(struct thermal_cooling_device *cdev, 389 + struct thermal_zone_device *tz, 390 + unsigned long state, u32 *power) 391 + { 392 + unsigned int freq, num_cpus; 393 + cpumask_t cpumask; 394 + u32 static_power, dynamic_power; 395 + int ret; 396 + struct cpufreq_cooling_device *cpufreq_device = cdev->devdata; 397 + 398 + cpumask_and(&cpumask, &cpufreq_device->allowed_cpus, cpu_online_mask); 399 + num_cpus = cpumask_weight(&cpumask); 400 + 401 + /* None of our cpus are online, so no power */ 402 + if (num_cpus == 0) { 403 + *power = 0; 404 + return 0; 405 + } 406 + 407 + freq = cpufreq_device->freq_table[state]; 408 + if (!freq) 409 + return -EINVAL; 410 + 411 + dynamic_power = cpu_freq_to_power(cpufreq_device, freq) * num_cpus; 412 + ret = get_static_power(cpufreq_device, tz, freq, &static_power); 413 + if (ret) 414 + return ret; 415 + 416 + *power = static_power + dynamic_power; 417 + return 0; 418 + } 419 + 420 + /** 421 + * cpufreq_power2state() - convert power to a cooling device state 422 + * @cdev: &thermal_cooling_device pointer 423 + * @tz: a valid thermal zone device pointer 424 + * @power: power in milliwatts to be converted 425 + * @state: pointer in which to store the resulting state 426 + * 427 + * Calculate a cooling device state for the cpus described by @cdev 428 + * that would allow them to consume at most @power mW and store it in 429 + * @state. 
Note that this calculation depends on external factors 430 + * such as the cpu load or the current static power. Calling this 431 + * function with the same power as input can yield different cooling 432 + * device states depending on those external factors. 433 + * 434 + * Return: 0 on success, -ENODEV if no cpus are online or -EINVAL if 435 + * the calculated frequency could not be converted to a valid state. 436 + * The latter should not happen unless the frequencies available to 437 + * cpufreq have changed since the initialization of the cpu cooling 438 + * device. 439 + */ 440 + static int cpufreq_power2state(struct thermal_cooling_device *cdev, 441 + struct thermal_zone_device *tz, u32 power, 442 + unsigned long *state) 443 + { 444 + unsigned int cpu, cur_freq, target_freq; 445 + int ret; 446 + s32 dyn_power; 447 + u32 last_load, normalised_power, static_power; 448 + struct cpufreq_cooling_device *cpufreq_device = cdev->devdata; 449 + 450 + cpu = cpumask_any_and(&cpufreq_device->allowed_cpus, cpu_online_mask); 451 + 452 + /* None of our cpus are online */ 453 + if (cpu >= nr_cpu_ids) 454 + return -ENODEV; 455 + 456 + cur_freq = cpufreq_quick_get(cpu); 457 + ret = get_static_power(cpufreq_device, tz, cur_freq, &static_power); 458 + if (ret) 459 + return ret; 460 + 461 + dyn_power = power - static_power; 462 + dyn_power = dyn_power > 0 ? 
dyn_power : 0; 463 + last_load = cpufreq_device->last_load ?: 1; 464 + normalised_power = (dyn_power * 100) / last_load; 465 + target_freq = cpu_power_to_freq(cpufreq_device, normalised_power); 466 + 467 + *state = cpufreq_cooling_get_level(cpu, target_freq); 468 + if (*state == THERMAL_CSTATE_INVALID) { 469 + dev_warn_ratelimited(&cdev->device, 470 + "Failed to convert %dKHz for cpu %d into a cdev state\n", 471 + target_freq, cpu); 472 + return -EINVAL; 473 + } 474 + 475 + trace_thermal_power_cpu_limit(&cpufreq_device->allowed_cpus, 476 + target_freq, *state, power); 477 + return 0; 478 + } 479 + 529 480 /* Bind cpufreq callbacks to thermal cooling device ops */ 530 - static struct thermal_cooling_device_ops const cpufreq_cooling_ops = { 481 + static struct thermal_cooling_device_ops cpufreq_cooling_ops = { 531 482 .get_max_state = cpufreq_get_max_state, 532 483 .get_cur_state = cpufreq_get_cur_state, 533 484 .set_cur_state = cpufreq_set_cur_state, ··· 754 311 * @np: a valid struct device_node to the cooling device device tree node 755 312 * @clip_cpus: cpumask of cpus where the frequency constraints will happen. 756 313 * Normally this should be same as cpufreq policy->related_cpus. 314 + * @capacitance: dynamic power coefficient for these cpus 315 + * @plat_static_func: function to calculate the static power consumed by these 316 + * cpus (optional) 757 317 * 758 318 * This interface function registers the cpufreq cooling device with the name 759 319 * "thermal-cpufreq-%x". 
This api can support multiple instances of cpufreq ··· 768 322 */ 769 323 static struct thermal_cooling_device * 770 324 __cpufreq_cooling_register(struct device_node *np, 771 - const struct cpumask *clip_cpus) 325 + const struct cpumask *clip_cpus, u32 capacitance, 326 + get_static_t plat_static_func) 772 327 { 773 328 struct thermal_cooling_device *cool_dev; 774 329 struct cpufreq_cooling_device *cpufreq_dev; 775 330 char dev_name[THERMAL_NAME_LENGTH]; 776 331 struct cpufreq_frequency_table *pos, *table; 777 - unsigned int freq, i; 332 + unsigned int freq, i, num_cpus; 778 333 int ret; 779 334 780 335 table = cpufreq_frequency_get_table(cpumask_first(clip_cpus)); ··· 788 341 if (!cpufreq_dev) 789 342 return ERR_PTR(-ENOMEM); 790 343 344 + num_cpus = cpumask_weight(clip_cpus); 345 + cpufreq_dev->time_in_idle = kcalloc(num_cpus, 346 + sizeof(*cpufreq_dev->time_in_idle), 347 + GFP_KERNEL); 348 + if (!cpufreq_dev->time_in_idle) { 349 + cool_dev = ERR_PTR(-ENOMEM); 350 + goto free_cdev; 351 + } 352 + 353 + cpufreq_dev->time_in_idle_timestamp = 354 + kcalloc(num_cpus, sizeof(*cpufreq_dev->time_in_idle_timestamp), 355 + GFP_KERNEL); 356 + if (!cpufreq_dev->time_in_idle_timestamp) { 357 + cool_dev = ERR_PTR(-ENOMEM); 358 + goto free_time_in_idle; 359 + } 360 + 791 361 /* Find max levels */ 792 362 cpufreq_for_each_valid_entry(pos, table) 793 363 cpufreq_dev->max_level++; ··· 813 349 cpufreq_dev->max_level, GFP_KERNEL); 814 350 if (!cpufreq_dev->freq_table) { 815 351 cool_dev = ERR_PTR(-ENOMEM); 816 - goto free_cdev; 352 + goto free_time_in_idle_timestamp; 817 353 } 818 354 819 355 /* max_level is an index, not a counter */ 820 356 cpufreq_dev->max_level--; 821 357 822 358 cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus); 359 + 360 + if (capacitance) { 361 + cpufreq_cooling_ops.get_requested_power = 362 + cpufreq_get_requested_power; 363 + cpufreq_cooling_ops.state2power = cpufreq_state2power; 364 + cpufreq_cooling_ops.power2state = cpufreq_power2state; 365 + 
cpufreq_dev->plat_get_static_power = plat_static_func; 366 + 367 + ret = build_dyn_power_table(cpufreq_dev, capacitance); 368 + if (ret) { 369 + cool_dev = ERR_PTR(ret); 370 + goto free_table; 371 + } 372 + } 823 373 824 374 ret = get_idr(&cpufreq_idr, &cpufreq_dev->id); 825 375 if (ret) { ··· 880 402 release_idr(&cpufreq_idr, cpufreq_dev->id); 881 403 free_table: 882 404 kfree(cpufreq_dev->freq_table); 405 + free_time_in_idle_timestamp: 406 + kfree(cpufreq_dev->time_in_idle_timestamp); 407 + free_time_in_idle: 408 + kfree(cpufreq_dev->time_in_idle); 883 409 free_cdev: 884 410 kfree(cpufreq_dev); 885 411 ··· 904 422 struct thermal_cooling_device * 905 423 cpufreq_cooling_register(const struct cpumask *clip_cpus) 906 424 { 907 - return __cpufreq_cooling_register(NULL, clip_cpus); 425 + return __cpufreq_cooling_register(NULL, clip_cpus, 0, NULL); 908 426 } 909 427 EXPORT_SYMBOL_GPL(cpufreq_cooling_register); 910 428 ··· 928 446 if (!np) 929 447 return ERR_PTR(-EINVAL); 930 448 931 - return __cpufreq_cooling_register(np, clip_cpus); 449 + return __cpufreq_cooling_register(np, clip_cpus, 0, NULL); 932 450 } 933 451 EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register); 452 + 453 + /** 454 + * cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions 455 + * @clip_cpus: cpumask of cpus where the frequency constraints will happen 456 + * @capacitance: dynamic power coefficient for these cpus 457 + * @plat_static_func: function to calculate the static power consumed by these 458 + * cpus (optional) 459 + * 460 + * This interface function registers the cpufreq cooling device with 461 + * the name "thermal-cpufreq-%x". This api can support multiple 462 + * instances of cpufreq cooling devices. Using this function, the 463 + * cooling device will implement the power extensions by using a 464 + * simple cpu power model. The cpus must have registered their OPPs 465 + * using the OPP library. 
466 + * 467 + * An optional @plat_static_func may be provided to calculate the 468 + * static power consumed by these cpus. If the platform's static 469 + * power consumption is unknown or negligible, make it NULL. 470 + * 471 + * Return: a valid struct thermal_cooling_device pointer on success, 472 + * on failure, it returns a corresponding ERR_PTR(). 473 + */ 474 + struct thermal_cooling_device * 475 + cpufreq_power_cooling_register(const struct cpumask *clip_cpus, u32 capacitance, 476 + get_static_t plat_static_func) 477 + { 478 + return __cpufreq_cooling_register(NULL, clip_cpus, capacitance, 479 + plat_static_func); 480 + } 481 + EXPORT_SYMBOL(cpufreq_power_cooling_register); 482 + 483 + /** 484 + * of_cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions 485 + * @np: a valid struct device_node to the cooling device device tree node 486 + * @clip_cpus: cpumask of cpus where the frequency constraints will happen 487 + * @capacitance: dynamic power coefficient for these cpus 488 + * @plat_static_func: function to calculate the static power consumed by these 489 + * cpus (optional) 490 + * 491 + * This interface function registers the cpufreq cooling device with 492 + * the name "thermal-cpufreq-%x". This api can support multiple 493 + * instances of cpufreq cooling devices. Using this API, the cpufreq 494 + * cooling device will be linked to the device tree node provided. 495 + * Using this function, the cooling device will implement the power 496 + * extensions by using a simple cpu power model. The cpus must have 497 + * registered their OPPs using the OPP library. 498 + * 499 + * An optional @plat_static_func may be provided to calculate the 500 + * static power consumed by these cpus. If the platform's static 501 + * power consumption is unknown or negligible, make it NULL. 502 + * 503 + * Return: a valid struct thermal_cooling_device pointer on success, 504 + * on failure, it returns a corresponding ERR_PTR(). 
505 + */ 506 + struct thermal_cooling_device * 507 + of_cpufreq_power_cooling_register(struct device_node *np, 508 + const struct cpumask *clip_cpus, 509 + u32 capacitance, 510 + get_static_t plat_static_func) 511 + { 512 + if (!np) 513 + return ERR_PTR(-EINVAL); 514 + 515 + return __cpufreq_cooling_register(np, clip_cpus, capacitance, 516 + plat_static_func); 517 + } 518 + EXPORT_SYMBOL(of_cpufreq_power_cooling_register); 934 519 935 520 /** 936 521 * cpufreq_cooling_unregister - function to remove cpufreq cooling device. ··· 1024 475 1025 476 thermal_cooling_device_unregister(cpufreq_dev->cool_dev); 1026 477 release_idr(&cpufreq_idr, cpufreq_dev->id); 478 + kfree(cpufreq_dev->time_in_idle_timestamp); 479 + kfree(cpufreq_dev->time_in_idle); 1027 480 kfree(cpufreq_dev->freq_table); 1028 481 kfree(cpufreq_dev); 1029 482 }
+1 -1
drivers/thermal/db8500_thermal.c
··· 76 76 upper = lower = i > max_state ? max_state : i; 77 77 78 78 ret = thermal_zone_bind_cooling_device(thermal, i, cdev, 79 - upper, lower); 79 + upper, lower, THERMAL_WEIGHT_DEFAULT); 80 80 81 81 dev_info(&cdev->device, "%s bind to %d: %d-%s\n", cdev->type, 82 82 i, ret, ret ? "fail" : "succeed");
+24 -19
drivers/thermal/fair_share.c
··· 59 59 } 60 60 61 61 static long get_target_state(struct thermal_zone_device *tz, 62 - struct thermal_cooling_device *cdev, int weight, int level) 62 + struct thermal_cooling_device *cdev, int percentage, int level) 63 63 { 64 64 unsigned long max_state; 65 65 66 66 cdev->ops->get_max_state(cdev, &max_state); 67 67 68 - return (long)(weight * level * max_state) / (100 * tz->trips); 68 + return (long)(percentage * level * max_state) / (100 * tz->trips); 69 69 } 70 70 71 71 /** 72 - * fair_share_throttle - throttles devices asscciated with the given zone 72 + * fair_share_throttle - throttles devices associated with the given zone 73 73 * @tz - thermal_zone_device 74 74 * 75 75 * Throttling Logic: This uses three parameters to calculate the new ··· 77 77 * 78 78 * Parameters used for Throttling: 79 79 * P1. max_state: Maximum throttle state exposed by the cooling device. 80 - * P2. weight[i]/100: 80 + * P2. percentage[i]/100: 81 81 * How 'effective' the 'i'th device is, in cooling the given zone. 82 82 * P3. cur_trip_level/max_no_of_trips: 83 83 * This describes the extent to which the devices should be throttled. 
··· 88 88 */ 89 89 static int fair_share_throttle(struct thermal_zone_device *tz, int trip) 90 90 { 91 - const struct thermal_zone_params *tzp; 92 - struct thermal_cooling_device *cdev; 93 91 struct thermal_instance *instance; 94 - int i; 92 + int total_weight = 0; 93 + int total_instance = 0; 95 94 int cur_trip_level = get_trip_level(tz); 96 95 97 - if (!tz->tzp || !tz->tzp->tbp) 98 - return -EINVAL; 99 - 100 - tzp = tz->tzp; 101 - 102 - for (i = 0; i < tzp->num_tbps; i++) { 103 - if (!tzp->tbp[i].cdev) 96 + list_for_each_entry(instance, &tz->thermal_instances, tz_node) { 97 + if (instance->trip != trip) 104 98 continue; 105 99 106 - cdev = tzp->tbp[i].cdev; 107 - instance = get_thermal_instance(tz, cdev, trip); 108 - if (!instance) 100 + total_weight += instance->weight; 101 + total_instance++; 102 + } 103 + 104 + list_for_each_entry(instance, &tz->thermal_instances, tz_node) { 105 + int percentage; 106 + struct thermal_cooling_device *cdev = instance->cdev; 107 + 108 + if (instance->trip != trip) 109 109 continue; 110 110 111 - instance->target = get_target_state(tz, cdev, 112 - tzp->tbp[i].weight, cur_trip_level); 111 + if (!total_weight) 112 + percentage = 100 / total_instance; 113 + else 114 + percentage = (instance->weight * 100) / total_weight; 115 + 116 + instance->target = get_target_state(tz, cdev, percentage, 117 + cur_trip_level); 113 118 114 119 instance->cdev->updated = false; 115 120 thermal_cdev_update(cdev);
+421
drivers/thermal/hisi_thermal.c
··· 1 + /* 2 + * Hisilicon thermal sensor driver 3 + * 4 + * Copyright (c) 2014-2015 Hisilicon Limited. 5 + * Copyright (c) 2014-2015 Linaro Limited. 6 + * 7 + * Xinwei Kong <kong.kongxinwei@hisilicon.com> 8 + * Leo Yan <leo.yan@linaro.org> 9 + * 10 + * This program is free software; you can redistribute it and/or modify 11 + * it under the terms of the GNU General Public License version 2 as 12 + * published by the Free Software Foundation. 13 + * 14 + * This program is distributed "as is" WITHOUT ANY WARRANTY of any 15 + * kind, whether express or implied; without even the implied warranty 16 + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 + * GNU General Public License for more details. 18 + */ 19 + 20 + #include <linux/cpufreq.h> 21 + #include <linux/delay.h> 22 + #include <linux/interrupt.h> 23 + #include <linux/module.h> 24 + #include <linux/platform_device.h> 25 + #include <linux/io.h> 26 + 27 + #include "thermal_core.h" 28 + 29 + #define TEMP0_TH (0x4) 30 + #define TEMP0_RST_TH (0x8) 31 + #define TEMP0_CFG (0xC) 32 + #define TEMP0_EN (0x10) 33 + #define TEMP0_INT_EN (0x14) 34 + #define TEMP0_INT_CLR (0x18) 35 + #define TEMP0_RST_MSK (0x1C) 36 + #define TEMP0_VALUE (0x28) 37 + 38 + #define HISI_TEMP_BASE (-60) 39 + #define HISI_TEMP_RESET (100000) 40 + 41 + #define HISI_MAX_SENSORS 4 42 + 43 + struct hisi_thermal_sensor { 44 + struct hisi_thermal_data *thermal; 45 + struct thermal_zone_device *tzd; 46 + 47 + long sensor_temp; 48 + uint32_t id; 49 + uint32_t thres_temp; 50 + }; 51 + 52 + struct hisi_thermal_data { 53 + struct mutex thermal_lock; /* protects register data */ 54 + struct platform_device *pdev; 55 + struct clk *clk; 56 + struct hisi_thermal_sensor sensors[HISI_MAX_SENSORS]; 57 + 58 + int irq, irq_bind_sensor; 59 + bool irq_enabled; 60 + 61 + void __iomem *regs; 62 + }; 63 + 64 + /* in millicelsius */ 65 + static inline int _step_to_temp(int step) 66 + { 67 + /* 68 + * Every step equals (1 * 200) / 255 celsius, and finally 
69 + * need convert to millicelsius. 70 + */ 71 + return (HISI_TEMP_BASE + (step * 200 / 255)) * 1000; 72 + } 73 + 74 + static inline long _temp_to_step(long temp) 75 + { 76 + return ((temp / 1000 - HISI_TEMP_BASE) * 255 / 200); 77 + } 78 + 79 + static long hisi_thermal_get_sensor_temp(struct hisi_thermal_data *data, 80 + struct hisi_thermal_sensor *sensor) 81 + { 82 + long val; 83 + 84 + mutex_lock(&data->thermal_lock); 85 + 86 + /* disable interrupt */ 87 + writel(0x0, data->regs + TEMP0_INT_EN); 88 + writel(0x1, data->regs + TEMP0_INT_CLR); 89 + 90 + /* disable module firstly */ 91 + writel(0x0, data->regs + TEMP0_EN); 92 + 93 + /* select sensor id */ 94 + writel((sensor->id << 12), data->regs + TEMP0_CFG); 95 + 96 + /* enable module */ 97 + writel(0x1, data->regs + TEMP0_EN); 98 + 99 + usleep_range(3000, 5000); 100 + 101 + val = readl(data->regs + TEMP0_VALUE); 102 + val = _step_to_temp(val); 103 + 104 + mutex_unlock(&data->thermal_lock); 105 + 106 + return val; 107 + } 108 + 109 + static void hisi_thermal_enable_bind_irq_sensor 110 + (struct hisi_thermal_data *data) 111 + { 112 + struct hisi_thermal_sensor *sensor; 113 + 114 + mutex_lock(&data->thermal_lock); 115 + 116 + sensor = &data->sensors[data->irq_bind_sensor]; 117 + 118 + /* setting the hdak time */ 119 + writel(0x0, data->regs + TEMP0_CFG); 120 + 121 + /* disable module firstly */ 122 + writel(0x0, data->regs + TEMP0_RST_MSK); 123 + writel(0x0, data->regs + TEMP0_EN); 124 + 125 + /* select sensor id */ 126 + writel((sensor->id << 12), data->regs + TEMP0_CFG); 127 + 128 + /* enable for interrupt */ 129 + writel(_temp_to_step(sensor->thres_temp) | 0x0FFFFFF00, 130 + data->regs + TEMP0_TH); 131 + 132 + writel(_temp_to_step(HISI_TEMP_RESET), data->regs + TEMP0_RST_TH); 133 + 134 + /* enable module */ 135 + writel(0x1, data->regs + TEMP0_RST_MSK); 136 + writel(0x1, data->regs + TEMP0_EN); 137 + 138 + writel(0x0, data->regs + TEMP0_INT_CLR); 139 + writel(0x1, data->regs + TEMP0_INT_EN); 140 + 141 + 
usleep_range(3000, 5000); 142 + 143 + mutex_unlock(&data->thermal_lock); 144 + } 145 + 146 + static void hisi_thermal_disable_sensor(struct hisi_thermal_data *data) 147 + { 148 + mutex_lock(&data->thermal_lock); 149 + 150 + /* disable sensor module */ 151 + writel(0x0, data->regs + TEMP0_INT_EN); 152 + writel(0x0, data->regs + TEMP0_RST_MSK); 153 + writel(0x0, data->regs + TEMP0_EN); 154 + 155 + mutex_unlock(&data->thermal_lock); 156 + } 157 + 158 + static int hisi_thermal_get_temp(void *_sensor, long *temp) 159 + { 160 + struct hisi_thermal_sensor *sensor = _sensor; 161 + struct hisi_thermal_data *data = sensor->thermal; 162 + 163 + int sensor_id = 0, i; 164 + long max_temp = 0; 165 + 166 + *temp = hisi_thermal_get_sensor_temp(data, sensor); 167 + 168 + sensor->sensor_temp = *temp; 169 + 170 + for (i = 0; i < HISI_MAX_SENSORS; i++) { 171 + if (data->sensors[i].sensor_temp >= max_temp) { 172 + max_temp = data->sensors[i].sensor_temp; 173 + sensor_id = i; 174 + } 175 + } 176 + 177 + mutex_lock(&data->thermal_lock); 178 + data->irq_bind_sensor = sensor_id; 179 + mutex_unlock(&data->thermal_lock); 180 + 181 + dev_dbg(&data->pdev->dev, "id=%d, irq=%d, temp=%ld, thres=%d\n", 182 + sensor->id, data->irq_enabled, *temp, sensor->thres_temp); 183 + /* 184 + * Bind irq to sensor for two cases: 185 + * Reenable alarm IRQ if temperature below threshold; 186 + * if irq has been enabled, always set it; 187 + */ 188 + if (data->irq_enabled) { 189 + hisi_thermal_enable_bind_irq_sensor(data); 190 + return 0; 191 + } 192 + 193 + if (max_temp < sensor->thres_temp) { 194 + data->irq_enabled = true; 195 + hisi_thermal_enable_bind_irq_sensor(data); 196 + enable_irq(data->irq); 197 + } 198 + 199 + return 0; 200 + } 201 + 202 + static struct thermal_zone_of_device_ops hisi_of_thermal_ops = { 203 + .get_temp = hisi_thermal_get_temp, 204 + }; 205 + 206 + static irqreturn_t hisi_thermal_alarm_irq(int irq, void *dev) 207 + { 208 + struct hisi_thermal_data *data = dev; 209 + 210 + 
disable_irq_nosync(irq); 211 + data->irq_enabled = false; 212 + 213 + return IRQ_WAKE_THREAD; 214 + } 215 + 216 + static irqreturn_t hisi_thermal_alarm_irq_thread(int irq, void *dev) 217 + { 218 + struct hisi_thermal_data *data = dev; 219 + struct hisi_thermal_sensor *sensor; 220 + int i; 221 + 222 + mutex_lock(&data->thermal_lock); 223 + sensor = &data->sensors[data->irq_bind_sensor]; 224 + 225 + dev_crit(&data->pdev->dev, "THERMAL ALARM: T > %d\n", 226 + sensor->thres_temp / 1000); 227 + mutex_unlock(&data->thermal_lock); 228 + 229 + for (i = 0; i < HISI_MAX_SENSORS; i++) 230 + thermal_zone_device_update(data->sensors[i].tzd); 231 + 232 + return IRQ_HANDLED; 233 + } 234 + 235 + static int hisi_thermal_register_sensor(struct platform_device *pdev, 236 + struct hisi_thermal_data *data, 237 + struct hisi_thermal_sensor *sensor, 238 + int index) 239 + { 240 + int ret, i; 241 + const struct thermal_trip *trip; 242 + 243 + sensor->id = index; 244 + sensor->thermal = data; 245 + 246 + sensor->tzd = thermal_zone_of_sensor_register(&pdev->dev, sensor->id, 247 + sensor, &hisi_of_thermal_ops); 248 + if (IS_ERR(sensor->tzd)) { 249 + ret = PTR_ERR(sensor->tzd); 250 + dev_err(&pdev->dev, "failed to register sensor id %d: %d\n", 251 + sensor->id, ret); 252 + return ret; 253 + } 254 + 255 + trip = of_thermal_get_trip_points(sensor->tzd); 256 + 257 + for (i = 0; i < of_thermal_get_ntrips(sensor->tzd); i++) { 258 + if (trip[i].type == THERMAL_TRIP_PASSIVE) { 259 + sensor->thres_temp = trip[i].temperature; 260 + break; 261 + } 262 + } 263 + 264 + return 0; 265 + } 266 + 267 + static const struct of_device_id of_hisi_thermal_match[] = { 268 + { .compatible = "hisilicon,tsensor" }, 269 + { /* end */ } 270 + }; 271 + MODULE_DEVICE_TABLE(of, of_hisi_thermal_match); 272 + 273 + static void hisi_thermal_toggle_sensor(struct hisi_thermal_sensor *sensor, 274 + bool on) 275 + { 276 + struct thermal_zone_device *tzd = sensor->tzd; 277 + 278 + tzd->ops->set_mode(tzd, 279 + on ? 
THERMAL_DEVICE_ENABLED : THERMAL_DEVICE_DISABLED); 280 + } 281 + 282 + static int hisi_thermal_probe(struct platform_device *pdev) 283 + { 284 + struct hisi_thermal_data *data; 285 + struct resource *res; 286 + int i; 287 + int ret; 288 + 289 + data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); 290 + if (!data) 291 + return -ENOMEM; 292 + 293 + mutex_init(&data->thermal_lock); 294 + data->pdev = pdev; 295 + 296 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 297 + data->regs = devm_ioremap_resource(&pdev->dev, res); 298 + if (IS_ERR(data->regs)) { 299 + dev_err(&pdev->dev, "failed to get io address\n"); 300 + return PTR_ERR(data->regs); 301 + } 302 + 303 + data->irq = platform_get_irq(pdev, 0); 304 + if (data->irq < 0) 305 + return data->irq; 306 + 307 + ret = devm_request_threaded_irq(&pdev->dev, data->irq, 308 + hisi_thermal_alarm_irq, 309 + hisi_thermal_alarm_irq_thread, 310 + 0, "hisi_thermal", data); 311 + if (ret < 0) { 312 + dev_err(&pdev->dev, "failed to request alarm irq: %d\n", ret); 313 + return ret; 314 + } 315 + 316 + platform_set_drvdata(pdev, data); 317 + 318 + data->clk = devm_clk_get(&pdev->dev, "thermal_clk"); 319 + if (IS_ERR(data->clk)) { 320 + ret = PTR_ERR(data->clk); 321 + if (ret != -EPROBE_DEFER) 322 + dev_err(&pdev->dev, 323 + "failed to get thermal clk: %d\n", ret); 324 + return ret; 325 + } 326 + 327 + /* enable clock for thermal */ 328 + ret = clk_prepare_enable(data->clk); 329 + if (ret) { 330 + dev_err(&pdev->dev, "failed to enable thermal clk: %d\n", ret); 331 + return ret; 332 + } 333 + 334 + for (i = 0; i < HISI_MAX_SENSORS; ++i) { 335 + ret = hisi_thermal_register_sensor(pdev, data, 336 + &data->sensors[i], i); 337 + if (ret) { 338 + dev_err(&pdev->dev, 339 + "failed to register thermal sensor: %d\n", ret); 340 + goto err_get_sensor_data; 341 + } 342 + } 343 + 344 + hisi_thermal_enable_bind_irq_sensor(data); 345 + data->irq_enabled = true; 346 + 347 + for (i = 0; i < HISI_MAX_SENSORS; i++) 348 + 
hisi_thermal_toggle_sensor(&data->sensors[i], true); 349 + 350 + return 0; 351 + 352 + err_get_sensor_data: 353 + clk_disable_unprepare(data->clk); 354 + 355 + return ret; 356 + } 357 + 358 + static int hisi_thermal_remove(struct platform_device *pdev) 359 + { 360 + struct hisi_thermal_data *data = platform_get_drvdata(pdev); 361 + int i; 362 + 363 + for (i = 0; i < HISI_MAX_SENSORS; i++) { 364 + struct hisi_thermal_sensor *sensor = &data->sensors[i]; 365 + 366 + hisi_thermal_toggle_sensor(sensor, false); 367 + thermal_zone_of_sensor_unregister(&pdev->dev, sensor->tzd); 368 + } 369 + 370 + hisi_thermal_disable_sensor(data); 371 + clk_disable_unprepare(data->clk); 372 + 373 + return 0; 374 + } 375 + 376 + #ifdef CONFIG_PM_SLEEP 377 + static int hisi_thermal_suspend(struct device *dev) 378 + { 379 + struct hisi_thermal_data *data = dev_get_drvdata(dev); 380 + 381 + hisi_thermal_disable_sensor(data); 382 + data->irq_enabled = false; 383 + 384 + clk_disable_unprepare(data->clk); 385 + 386 + return 0; 387 + } 388 + 389 + static int hisi_thermal_resume(struct device *dev) 390 + { 391 + struct hisi_thermal_data *data = dev_get_drvdata(dev); 392 + 393 + clk_prepare_enable(data->clk); 394 + 395 + data->irq_enabled = true; 396 + hisi_thermal_enable_bind_irq_sensor(data); 397 + 398 + return 0; 399 + } 400 + #endif 401 + 402 + static SIMPLE_DEV_PM_OPS(hisi_thermal_pm_ops, 403 + hisi_thermal_suspend, hisi_thermal_resume); 404 + 405 + static struct platform_driver hisi_thermal_driver = { 406 + .driver = { 407 + .name = "hisi_thermal", 408 + .owner = THIS_MODULE, 409 + .pm = &hisi_thermal_pm_ops, 410 + .of_match_table = of_hisi_thermal_match, 411 + }, 412 + .probe = hisi_thermal_probe, 413 + .remove = hisi_thermal_remove, 414 + }; 415 + 416 + module_platform_driver(hisi_thermal_driver); 417 + 418 + MODULE_AUTHOR("Xinwei Kong <kong.kongxinwei@hisilicon.com>"); 419 + MODULE_AUTHOR("Leo Yan <leo.yan@linaro.org>"); 420 + MODULE_DESCRIPTION("Hisilicon thermal driver"); 421 + 
MODULE_LICENSE("GPL v2");
+2 -1
drivers/thermal/imx_thermal.c
··· 306 306 307 307 ret = thermal_zone_bind_cooling_device(tz, IMX_TRIP_PASSIVE, cdev, 308 308 THERMAL_NO_LIMIT, 309 - THERMAL_NO_LIMIT); 309 + THERMAL_NO_LIMIT, 310 + THERMAL_WEIGHT_DEFAULT); 310 311 if (ret) { 311 312 dev_err(&tz->device, 312 313 "binding zone %s with cdev %s failed:%d\n",
+36 -5
drivers/thermal/of-thermal.c
··· 58 58 * @mode: current thermal zone device mode (enabled/disabled) 59 59 * @passive_delay: polling interval while passive cooling is activated 60 60 * @polling_delay: zone polling interval 61 + * @slope: slope of the temperature adjustment curve 62 + * @offset: offset of the temperature adjustment curve 61 63 * @ntrips: number of trip points 62 64 * @trips: an array of trip points (0..ntrips - 1) 63 65 * @num_tbps: number of thermal bind params ··· 72 70 enum thermal_device_mode mode; 73 71 int passive_delay; 74 72 int polling_delay; 73 + int slope; 74 + int offset; 75 75 76 76 /* trip data */ 77 77 int ntrips; ··· 231 227 ret = thermal_zone_bind_cooling_device(thermal, 232 228 tbp->trip_id, cdev, 233 229 tbp->max, 234 - tbp->min); 230 + tbp->min, 231 + tbp->usage); 235 232 if (ret) 236 233 return ret; 237 234 } ··· 586 581 u32 prop; 587 582 588 583 /* Default weight. Usage is optional */ 589 - __tbp->usage = 0; 584 + __tbp->usage = THERMAL_WEIGHT_DEFAULT; 590 585 ret = of_property_read_u32(np, "contribution", &prop); 591 586 if (ret == 0) 592 587 __tbp->usage = prop; ··· 720 715 * @np parameter and fills the read data into a __thermal_zone data structure 721 716 * and return this pointer. 722 717 * 723 - * TODO: Missing properties to parse: thermal-sensor-names and coefficients 718 + * TODO: Missing properties to parse: thermal-sensor-names 724 719 * 725 720 * Return: On success returns a valid struct __thermal_zone, 726 721 * otherwise, it returns a corresponding ERR_PTR(). Caller must ··· 732 727 struct device_node *child = NULL, *gchild; 733 728 struct __thermal_zone *tz; 734 729 int ret, i; 735 - u32 prop; 730 + u32 prop, coef[2]; 736 731 737 732 if (!np) { 738 733 pr_err("no thermal zone np\n"); ··· 756 751 goto free_tz; 757 752 } 758 753 tz->polling_delay = prop; 754 + 755 + /* 756 + * REVIST: for now, the thermal framework supports only 757 + * one sensor per thermal zone. Thus, we are considering 758 + * only the first two values as slope and offset. 
759 + */ 760 + ret = of_property_read_u32_array(np, "coefficients", coef, 2); 761 + if (ret == 0) { 762 + tz->slope = coef[0]; 763 + tz->offset = coef[1]; 764 + } else { 765 + tz->slope = 1; 766 + tz->offset = 0; 767 + } 759 768 760 769 /* trips */ 761 770 child = of_get_child_by_name(np, "trips"); ··· 884 865 for_each_child_of_node(np, child) { 885 866 struct thermal_zone_device *zone; 886 867 struct thermal_zone_params *tzp; 868 + int i, mask = 0; 869 + u32 prop; 887 870 888 871 /* Check whether child is enabled or not */ 889 872 if (!of_device_is_available(child)) ··· 912 891 /* No hwmon because there might be hwmon drivers registering */ 913 892 tzp->no_hwmon = true; 914 893 894 + if (!of_property_read_u32(child, "sustainable-power", &prop)) 895 + tzp->sustainable_power = prop; 896 + 897 + for (i = 0; i < tz->ntrips; i++) 898 + mask |= 1 << i; 899 + 900 + /* these two are left for temperature drivers to use */ 901 + tzp->slope = tz->slope; 902 + tzp->offset = tz->offset; 903 + 915 904 zone = thermal_zone_device_register(child->name, tz->ntrips, 916 - 0, tz, 905 + mask, tz, 917 906 ops, tzp, 918 907 tz->passive_delay, 919 908 tz->polling_delay);
+539
drivers/thermal/power_allocator.c
/*
 * A power allocator to manage temperature
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "Power allocator: " fmt

#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/thermal.h>

#define CREATE_TRACE_POINTS
#include <trace/events/thermal_power_allocator.h>

#include "thermal_core.h"

/* 10-bit fixed-point arithmetic used throughout the PID controller */
#define FRAC_BITS 10
#define int_to_frac(x) ((x) << FRAC_BITS)
#define frac_to_int(x) ((x) >> FRAC_BITS)

/**
 * mul_frac() - multiply two fixed-point numbers
 * @x:	first multiplicand
 * @y:	second multiplicand
 *
 * Return: the result of multiplying two fixed-point numbers.  The
 * result is also a fixed-point number.
 */
static inline s64 mul_frac(s64 x, s64 y)
{
	return (x * y) >> FRAC_BITS;
}

/**
 * div_frac() - divide two fixed-point numbers
 * @x:	the dividend
 * @y:	the divisor
 *
 * Return: the result of dividing two fixed-point numbers.  The
 * result is also a fixed-point number.
 */
static inline s64 div_frac(s64 x, s64 y)
{
	return div_s64(x << FRAC_BITS, y);
}

/**
 * struct power_allocator_params - parameters for the power allocator governor
 * @err_integral:	accumulated error in the PID controller.
 * @prev_err:	error in the previous iteration of the PID controller.
 *		Used to calculate the derivative term.
 * @trip_switch_on:	first passive trip point of the thermal zone.  The
 *			governor switches on when this trip point is crossed.
 * @trip_max_desired_temperature:	last passive trip point of the thermal
 *					zone.  The temperature we are
 *					controlling for.
 */
struct power_allocator_params {
	s64 err_integral;
	s32 prev_err;
	int trip_switch_on;
	int trip_max_desired_temperature;
};

/**
 * pid_controller() - PID controller
 * @tz:	thermal zone we are operating in
 * @current_temp:	the current temperature in millicelsius
 * @control_temp:	the target temperature in millicelsius
 * @max_allocatable_power:	maximum allocatable power for this thermal zone
 *
 * This PID controller increases the available power budget so that the
 * temperature of the thermal zone gets as close as possible to
 * @control_temp and limits the power if it exceeds it.  k_po is the
 * proportional term when we are overshooting, k_pu is the
 * proportional term when we are undershooting.  integral_cutoff is a
 * threshold below which we stop accumulating the error.  The
 * accumulated error is only valid if the requested power will make
 * the system warmer.  If the system is mostly idle, there's no point
 * in accumulating positive error.
 *
 * Return: The power budget for the next period.
 */
static u32 pid_controller(struct thermal_zone_device *tz,
			  unsigned long current_temp,
			  unsigned long control_temp,
			  u32 max_allocatable_power)
{
	s64 p, i, d, power_range;
	s32 err, max_power_frac;
	struct power_allocator_params *params = tz->governor_data;

	max_power_frac = int_to_frac(max_allocatable_power);

	/* positive error means we are below the target temperature */
	err = ((s32)control_temp - (s32)current_temp);
	err = int_to_frac(err);

	/* Calculate the proportional term */
	p = mul_frac(err < 0 ? tz->tzp->k_po : tz->tzp->k_pu, err);

	/*
	 * Calculate the integral term
	 *
	 * if the error is less than cut off allow integration (but
	 * the integral is limited to max power)
	 */
	i = mul_frac(tz->tzp->k_i, params->err_integral);

	if (err < int_to_frac(tz->tzp->integral_cutoff)) {
		s64 i_next = i + mul_frac(tz->tzp->k_i, err);

		/* only accept the new accumulation while it stays bounded */
		if (abs64(i_next) < max_power_frac) {
			i = i_next;
			params->err_integral += err;
		}
	}

	/*
	 * Calculate the derivative term
	 *
	 * We do err - prev_err, so with a positive k_d, a decreasing
	 * error (i.e. driving closer to the line) results in less
	 * power being applied, slowing down the controller)
	 */
	d = mul_frac(tz->tzp->k_d, err - params->prev_err);
	d = div_frac(d, tz->passive_delay);
	params->prev_err = err;

	power_range = p + i + d;

	/* feed-forward the known sustainable dissipatable power */
	power_range = tz->tzp->sustainable_power + frac_to_int(power_range);

	power_range = clamp(power_range, (s64)0, (s64)max_allocatable_power);

	trace_thermal_power_allocator_pid(tz, frac_to_int(err),
					  frac_to_int(params->err_integral),
					  frac_to_int(p), frac_to_int(i),
					  frac_to_int(d), power_range);

	return power_range;
}

/**
 * divvy_up_power() - divvy the allocated power between the actors
 * @req_power:	each actor's requested power
 * @max_power:	each actor's maximum available power
 * @num_actors:	size of the @req_power, @max_power and @granted_power's array
 * @total_req_power: sum of @req_power
 * @power_range:	total allocated power
 * @granted_power:	output array: each actor's granted power
 * @extra_actor_power:	an appropriately sized array to be used in the
 *			function as temporary storage of the extra power given
 *			to the actors
 *
 * This function divides the total allocated power (@power_range)
 * fairly between the actors.  It first tries to give each actor a
 * share of the @power_range according to how much power it requested
 * compared to the rest of the actors.  For example, if only one actor
 * requests power, then it receives all the @power_range.  If
 * three actors each requests 1mW, each receives a third of the
 * @power_range.
 *
 * If any actor received more than their maximum power, then that
 * surplus is re-divvied among the actors based on how far they are
 * from their respective maximums.
 *
 * Granted power for each actor is written to @granted_power, which
 * should've been allocated by the calling function.
 */
static void divvy_up_power(u32 *req_power, u32 *max_power, int num_actors,
			   u32 total_req_power, u32 power_range,
			   u32 *granted_power, u32 *extra_actor_power)
{
	u32 extra_power, capped_extra_power;
	int i;

	/*
	 * Prevent division by 0 if none of the actors request power.
	 */
	if (!total_req_power)
		total_req_power = 1;

	capped_extra_power = 0;
	extra_power = 0;
	for (i = 0; i < num_actors; i++) {
		u64 req_range = req_power[i] * power_range;

		/* proportional share, rounded to nearest */
		granted_power[i] = DIV_ROUND_CLOSEST_ULL(req_range,
							 total_req_power);

		/* cap at the actor's maximum; remember the surplus */
		if (granted_power[i] > max_power[i]) {
			extra_power += granted_power[i] - max_power[i];
			granted_power[i] = max_power[i];
		}

		extra_actor_power[i] = max_power[i] - granted_power[i];
		capped_extra_power += extra_actor_power[i];
	}

	if (!extra_power)
		return;

	/*
	 * Re-divvy the reclaimed extra among actors based on
	 * how far they are from the max
	 */
	extra_power = min(extra_power, capped_extra_power);
	if (capped_extra_power > 0)
		for (i = 0; i < num_actors; i++)
			granted_power[i] += (extra_actor_power[i] *
					extra_power) / capped_extra_power;
}

/**
 * allocate_power() - one control-loop iteration of the governor
 * @tz:		thermal zone being controlled
 * @current_temp:	current zone temperature in millicelsius
 * @control_temp:	target temperature in millicelsius
 *
 * Runs the PID controller to get a power budget, then distributes it
 * among the power-actor cooling devices bound to the max-desired trip.
 * Holds tz->lock for the whole iteration.
 *
 * NOTE(review): the scratch arrays are devm-allocated and freed on
 * every invocation (i.e. every polling period); presumably acceptable
 * churn, but worth confirming against allocation overhead.
 *
 * Return: 0 on success, -ENOMEM if the scratch allocation fails.
 */
static int allocate_power(struct thermal_zone_device *tz,
			  unsigned long current_temp,
			  unsigned long control_temp)
{
	struct thermal_instance *instance;
	struct power_allocator_params *params = tz->governor_data;
	u32 *req_power, *max_power, *granted_power, *extra_actor_power;
	u32 total_req_power, max_allocatable_power;
	u32 total_granted_power, power_range;
	int i, num_actors, total_weight, ret = 0;
	int trip_max_desired_temperature = params->trip_max_desired_temperature;

	mutex_lock(&tz->lock);

	/* count the power actors bound to the controlling trip */
	num_actors = 0;
	total_weight = 0;
	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
		if ((instance->trip == trip_max_desired_temperature) &&
		    cdev_is_power_actor(instance->cdev)) {
			num_actors++;
			total_weight += instance->weight;
		}
	}

	/*
	 * We need to allocate three arrays of the same size:
	 * req_power, max_power and granted_power.  They are going to
	 * be needed until this function returns.  Allocate them all
	 * in one go to simplify the allocation and deallocation
	 * logic.
	 */
	BUILD_BUG_ON(sizeof(*req_power) != sizeof(*max_power));
	BUILD_BUG_ON(sizeof(*req_power) != sizeof(*granted_power));
	BUILD_BUG_ON(sizeof(*req_power) != sizeof(*extra_actor_power));
	req_power = devm_kcalloc(&tz->device, num_actors * 4,
				 sizeof(*req_power), GFP_KERNEL);
	if (!req_power) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* the other three arrays are slices of the single allocation */
	max_power = &req_power[num_actors];
	granted_power = &req_power[2 * num_actors];
	extra_actor_power = &req_power[3 * num_actors];

	i = 0;
	total_req_power = 0;
	max_allocatable_power = 0;

	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
		int weight;
		struct thermal_cooling_device *cdev = instance->cdev;

		if (instance->trip != trip_max_desired_temperature)
			continue;

		if (!cdev_is_power_actor(cdev))
			continue;

		if (cdev->ops->get_requested_power(cdev, tz, &req_power[i]))
			continue;

		/* no weights configured: treat every actor equally */
		if (!total_weight)
			weight = 1 << FRAC_BITS;
		else
			weight = instance->weight;

		req_power[i] = frac_to_int(weight * req_power[i]);

		if (power_actor_get_max_power(cdev, tz, &max_power[i]))
			continue;

		total_req_power += req_power[i];
		max_allocatable_power += max_power[i];

		i++;
	}

	power_range = pid_controller(tz, current_temp, control_temp,
				     max_allocatable_power);

	divvy_up_power(req_power, max_power, num_actors, total_req_power,
		       power_range, granted_power, extra_actor_power);

	/* apply each actor's granted budget, in the same iteration order */
	total_granted_power = 0;
	i = 0;
	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
		if (instance->trip != trip_max_desired_temperature)
			continue;

		if (!cdev_is_power_actor(instance->cdev))
			continue;

		power_actor_set_power(instance->cdev, instance,
				      granted_power[i]);
		total_granted_power += granted_power[i];

		i++;
	}

	trace_thermal_power_allocator(tz, req_power, total_req_power,
				      granted_power, total_granted_power,
				      num_actors, power_range,
				      max_allocatable_power, current_temp,
				      (s32)control_temp - (s32)current_temp);

	devm_kfree(&tz->device, req_power);
unlock:
	mutex_unlock(&tz->lock);

	return ret;
}

/**
 * get_governor_trips() - identify the switch-on and control trips
 * @tz:	thermal zone to inspect
 * @params:	governor parameters to fill in
 *
 * The first passive trip becomes @params->trip_switch_on and the last
 * passive trip in the subsequent run becomes
 * @params->trip_max_desired_temperature.  A zone with fewer than two
 * passive trips is rejected: last_passive is only set once a second
 * passive trip is seen, so a single-passive-trip zone returns -EINVAL.
 *
 * Return: 0 on success, -EINVAL or a get_trip_type() error otherwise.
 */
static int get_governor_trips(struct thermal_zone_device *tz,
			      struct power_allocator_params *params)
{
	int i, ret, last_passive;
	bool found_first_passive;

	found_first_passive = false;
	last_passive = -1;
	ret = -EINVAL;

	for (i = 0; i < tz->trips; i++) {
		enum thermal_trip_type type;

		ret = tz->ops->get_trip_type(tz, i, &type);
		if (ret)
			return ret;

		if (!found_first_passive) {
			if (type == THERMAL_TRIP_PASSIVE) {
				params->trip_switch_on = i;
				found_first_passive = true;
			}
		} else if (type == THERMAL_TRIP_PASSIVE) {
			last_passive = i;
		} else {
			/* the passive run ended; stop scanning */
			break;
		}
	}

	if (last_passive != -1) {
		params->trip_max_desired_temperature = last_passive;
		ret = 0;
	} else {
		ret = -EINVAL;
	}

	return ret;
}

/* Clear the PID accumulator and derivative history. */
static void reset_pid_controller(struct power_allocator_params *params)
{
	params->err_integral = 0;
	params->prev_err = 0;
}

/*
 * Lift all throttling: set every power actor bound to the control trip
 * to its unconstrained state (target 0) and push the update out.
 */
static void allow_maximum_power(struct thermal_zone_device *tz)
{
	struct thermal_instance *instance;
	struct power_allocator_params *params = tz->governor_data;

	list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
		if ((instance->trip != params->trip_max_desired_temperature) ||
		    (!cdev_is_power_actor(instance->cdev)))
			continue;

		instance->target = 0;
		instance->cdev->updated = false;
		thermal_cdev_update(instance->cdev);
	}
}

/**
 * power_allocator_bind() - bind the power_allocator governor to a thermal zone
 * @tz:	thermal zone to bind it to
 *
 * Check that the thermal zone is valid for this governor, that is, it
 * has two thermal trips.  If so, initialize the PID controller
 * parameters and bind it to the thermal zone.
 *
 * NOTE(review): if the switch-on and control trips share the same
 * temperature, temperature_threshold is 0 and the k_po/k_pu defaults
 * below divide by zero — confirm trip configurations rule this out.
 *
 * Return: 0 on success, -EINVAL if the trips were invalid or -ENOMEM
 * if we ran out of memory.
 */
static int power_allocator_bind(struct thermal_zone_device *tz)
{
	int ret;
	struct power_allocator_params *params;
	unsigned long switch_on_temp, control_temp;
	u32 temperature_threshold;

	if (!tz->tzp || !tz->tzp->sustainable_power) {
		dev_err(&tz->device,
			"power_allocator: missing sustainable_power\n");
		return -EINVAL;
	}

	params = devm_kzalloc(&tz->device, sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	ret = get_governor_trips(tz, params);
	if (ret) {
		dev_err(&tz->device,
			"thermal zone %s has wrong trip setup for power allocator\n",
			tz->type);
		goto free;
	}

	ret = tz->ops->get_trip_temp(tz, params->trip_switch_on,
				     &switch_on_temp);
	if (ret)
		goto free;

	ret = tz->ops->get_trip_temp(tz, params->trip_max_desired_temperature,
				     &control_temp);
	if (ret)
		goto free;

	temperature_threshold = control_temp - switch_on_temp;

	/* derive default PID gains from sustainable_power if unset */
	tz->tzp->k_po = tz->tzp->k_po ?:
		int_to_frac(tz->tzp->sustainable_power) / temperature_threshold;
	tz->tzp->k_pu = tz->tzp->k_pu ?:
		int_to_frac(2 * tz->tzp->sustainable_power) /
		temperature_threshold;
	tz->tzp->k_i = tz->tzp->k_i ?: int_to_frac(10) / 1000;
	/*
	 * The default for k_d and integral_cutoff is 0, so we can
	 * leave them as they are.
	 */

	reset_pid_controller(params);

	tz->governor_data = params;

	return 0;

free:
	devm_kfree(&tz->device, params);
	return ret;
}

/* Release governor state when the zone switches governors. */
static void power_allocator_unbind(struct thermal_zone_device *tz)
{
	dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id);
	devm_kfree(&tz->device, tz->governor_data);
	tz->governor_data = NULL;
}

/*
 * Governor .throttle callback.  Runs the control loop once per polling
 * period: below the switch-on trip it idles (maximum power, PID reset);
 * above it, it allocates power towards the control temperature.
 */
static int power_allocator_throttle(struct thermal_zone_device *tz, int trip)
{
	int ret;
	unsigned long switch_on_temp, control_temp, current_temp;
	struct power_allocator_params *params = tz->governor_data;

	/*
	 * We get called for every trip point but we only need to do
	 * our calculations once
	 */
	if (trip != params->trip_max_desired_temperature)
		return 0;

	ret = thermal_zone_get_temp(tz, &current_temp);
	if (ret) {
		dev_warn(&tz->device, "Failed to get temperature: %d\n", ret);
		return ret;
	}

	ret = tz->ops->get_trip_temp(tz, params->trip_switch_on,
				     &switch_on_temp);
	if (ret) {
		dev_warn(&tz->device,
			 "Failed to get switch on temperature: %d\n", ret);
		return ret;
	}

	if (current_temp < switch_on_temp) {
		/* governor is dormant below the switch-on trip */
		tz->passive = 0;
		reset_pid_controller(params);
		allow_maximum_power(tz);
		return 0;
	}

	tz->passive = 1;

	ret = tz->ops->get_trip_temp(tz, params->trip_max_desired_temperature,
				     &control_temp);
	if (ret) {
		dev_warn(&tz->device,
			 "Failed to get the maximum desired temperature: %d\n",
			 ret);
		return ret;
	}

	return allocate_power(tz, current_temp, control_temp);
}

static struct thermal_governor thermal_gov_power_allocator = {
	.name		= "power_allocator",
	.bind_to_tz	= power_allocator_bind,
	.unbind_from_tz	= power_allocator_unbind,
	.throttle	= power_allocator_throttle,
};

/* Register this governor with the thermal core. */
int thermal_gov_power_allocator_register(void)
{
	return thermal_register_governor(&thermal_gov_power_allocator);
}

/* Unregister this governor from the thermal core. */
void thermal_gov_power_allocator_unregister(void)
{
	thermal_unregister_governor(&thermal_gov_power_allocator);
}
+309
drivers/thermal/qcom-spmi-temp-alarm.c
··· 1 + /* 2 + * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 and 6 + * only version 2 as published by the Free Software Foundation. 7 + * 8 + * This program is distributed in the hope that it will be useful, 9 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 + * GNU General Public License for more details. 12 + */ 13 + 14 + #include <linux/delay.h> 15 + #include <linux/err.h> 16 + #include <linux/iio/consumer.h> 17 + #include <linux/interrupt.h> 18 + #include <linux/module.h> 19 + #include <linux/of.h> 20 + #include <linux/of_device.h> 21 + #include <linux/platform_device.h> 22 + #include <linux/regmap.h> 23 + #include <linux/thermal.h> 24 + 25 + #define QPNP_TM_REG_TYPE 0x04 26 + #define QPNP_TM_REG_SUBTYPE 0x05 27 + #define QPNP_TM_REG_STATUS 0x08 28 + #define QPNP_TM_REG_SHUTDOWN_CTRL1 0x40 29 + #define QPNP_TM_REG_ALARM_CTRL 0x46 30 + 31 + #define QPNP_TM_TYPE 0x09 32 + #define QPNP_TM_SUBTYPE 0x08 33 + 34 + #define STATUS_STAGE_MASK 0x03 35 + 36 + #define SHUTDOWN_CTRL1_THRESHOLD_MASK 0x03 37 + 38 + #define ALARM_CTRL_FORCE_ENABLE 0x80 39 + 40 + /* 41 + * Trip point values based on threshold control 42 + * 0 = {105 C, 125 C, 145 C} 43 + * 1 = {110 C, 130 C, 150 C} 44 + * 2 = {115 C, 135 C, 155 C} 45 + * 3 = {120 C, 140 C, 160 C} 46 + */ 47 + #define TEMP_STAGE_STEP 20000 /* Stage step: 20.000 C */ 48 + #define TEMP_STAGE_HYSTERESIS 2000 49 + 50 + #define TEMP_THRESH_MIN 105000 /* Threshold Min: 105 C */ 51 + #define TEMP_THRESH_STEP 5000 /* Threshold step: 5 C */ 52 + 53 + #define THRESH_MIN 0 54 + 55 + /* Temperature in Milli Celsius reported during stage 0 if no ADC is present */ 56 + #define DEFAULT_TEMP 37000 57 + 58 + struct qpnp_tm_chip { 59 + struct regmap *map; 60 + struct thermal_zone_device 
*tz_dev; 61 + long temp; 62 + unsigned int thresh; 63 + unsigned int stage; 64 + unsigned int prev_stage; 65 + unsigned int base; 66 + struct iio_channel *adc; 67 + }; 68 + 69 + static int qpnp_tm_read(struct qpnp_tm_chip *chip, u16 addr, u8 *data) 70 + { 71 + unsigned int val; 72 + int ret; 73 + 74 + ret = regmap_read(chip->map, chip->base + addr, &val); 75 + if (ret < 0) 76 + return ret; 77 + 78 + *data = val; 79 + return 0; 80 + } 81 + 82 + static int qpnp_tm_write(struct qpnp_tm_chip *chip, u16 addr, u8 data) 83 + { 84 + return regmap_write(chip->map, chip->base + addr, data); 85 + } 86 + 87 + /* 88 + * This function updates the internal temp value based on the 89 + * current thermal stage and threshold as well as the previous stage 90 + */ 91 + static int qpnp_tm_update_temp_no_adc(struct qpnp_tm_chip *chip) 92 + { 93 + unsigned int stage; 94 + int ret; 95 + u8 reg = 0; 96 + 97 + ret = qpnp_tm_read(chip, QPNP_TM_REG_STATUS, &reg); 98 + if (ret < 0) 99 + return ret; 100 + 101 + stage = reg & STATUS_STAGE_MASK; 102 + 103 + if (stage > chip->stage) { 104 + /* increasing stage, use lower bound */ 105 + chip->temp = (stage - 1) * TEMP_STAGE_STEP + 106 + chip->thresh * TEMP_THRESH_STEP + 107 + TEMP_STAGE_HYSTERESIS + TEMP_THRESH_MIN; 108 + } else if (stage < chip->stage) { 109 + /* decreasing stage, use upper bound */ 110 + chip->temp = stage * TEMP_STAGE_STEP + 111 + chip->thresh * TEMP_THRESH_STEP - 112 + TEMP_STAGE_HYSTERESIS + TEMP_THRESH_MIN; 113 + } 114 + 115 + chip->stage = stage; 116 + 117 + return 0; 118 + } 119 + 120 + static int qpnp_tm_get_temp(void *data, long *temp) 121 + { 122 + struct qpnp_tm_chip *chip = data; 123 + int ret, mili_celsius; 124 + 125 + if (!temp) 126 + return -EINVAL; 127 + 128 + if (IS_ERR(chip->adc)) { 129 + ret = qpnp_tm_update_temp_no_adc(chip); 130 + if (ret < 0) 131 + return ret; 132 + } else { 133 + ret = iio_read_channel_processed(chip->adc, &mili_celsius); 134 + if (ret < 0) 135 + return ret; 136 + 137 + chip->temp = 
mili_celsius; 138 + } 139 + 140 + *temp = chip->temp < 0 ? 0 : chip->temp; 141 + 142 + return 0; 143 + } 144 + 145 + static const struct thermal_zone_of_device_ops qpnp_tm_sensor_ops = { 146 + .get_temp = qpnp_tm_get_temp, 147 + }; 148 + 149 + static irqreturn_t qpnp_tm_isr(int irq, void *data) 150 + { 151 + struct qpnp_tm_chip *chip = data; 152 + 153 + thermal_zone_device_update(chip->tz_dev); 154 + 155 + return IRQ_HANDLED; 156 + } 157 + 158 + /* 159 + * This function initializes the internal temp value based on only the 160 + * current thermal stage and threshold. Setup threshold control and 161 + * disable shutdown override. 162 + */ 163 + static int qpnp_tm_init(struct qpnp_tm_chip *chip) 164 + { 165 + int ret; 166 + u8 reg; 167 + 168 + chip->thresh = THRESH_MIN; 169 + chip->temp = DEFAULT_TEMP; 170 + 171 + ret = qpnp_tm_read(chip, QPNP_TM_REG_STATUS, &reg); 172 + if (ret < 0) 173 + return ret; 174 + 175 + chip->stage = reg & STATUS_STAGE_MASK; 176 + 177 + if (chip->stage) 178 + chip->temp = chip->thresh * TEMP_THRESH_STEP + 179 + (chip->stage - 1) * TEMP_STAGE_STEP + 180 + TEMP_THRESH_MIN; 181 + 182 + /* 183 + * Set threshold and disable software override of stage 2 and 3 184 + * shutdowns. 185 + */ 186 + reg = chip->thresh & SHUTDOWN_CTRL1_THRESHOLD_MASK; 187 + ret = qpnp_tm_write(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, reg); 188 + if (ret < 0) 189 + return ret; 190 + 191 + /* Enable the thermal alarm PMIC module in always-on mode. 
*/ 192 + reg = ALARM_CTRL_FORCE_ENABLE; 193 + ret = qpnp_tm_write(chip, QPNP_TM_REG_ALARM_CTRL, reg); 194 + 195 + return ret; 196 + } 197 + 198 + static int qpnp_tm_probe(struct platform_device *pdev) 199 + { 200 + struct qpnp_tm_chip *chip; 201 + struct device_node *node; 202 + u8 type, subtype; 203 + u32 res[2]; 204 + int ret, irq; 205 + 206 + node = pdev->dev.of_node; 207 + 208 + chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); 209 + if (!chip) 210 + return -ENOMEM; 211 + 212 + dev_set_drvdata(&pdev->dev, chip); 213 + 214 + chip->map = dev_get_regmap(pdev->dev.parent, NULL); 215 + if (!chip->map) 216 + return -ENXIO; 217 + 218 + ret = of_property_read_u32_array(node, "reg", res, 2); 219 + if (ret < 0) 220 + return ret; 221 + 222 + irq = platform_get_irq(pdev, 0); 223 + if (irq < 0) 224 + return irq; 225 + 226 + /* ADC based measurements are optional */ 227 + chip->adc = iio_channel_get(&pdev->dev, "thermal"); 228 + if (PTR_ERR(chip->adc) == -EPROBE_DEFER) 229 + return PTR_ERR(chip->adc); 230 + 231 + chip->base = res[0]; 232 + 233 + ret = qpnp_tm_read(chip, QPNP_TM_REG_TYPE, &type); 234 + if (ret < 0) { 235 + dev_err(&pdev->dev, "could not read type\n"); 236 + goto fail; 237 + } 238 + 239 + ret = qpnp_tm_read(chip, QPNP_TM_REG_SUBTYPE, &subtype); 240 + if (ret < 0) { 241 + dev_err(&pdev->dev, "could not read subtype\n"); 242 + goto fail; 243 + } 244 + 245 + if (type != QPNP_TM_TYPE || subtype != QPNP_TM_SUBTYPE) { 246 + dev_err(&pdev->dev, "invalid type 0x%02x or subtype 0x%02x\n", 247 + type, subtype); 248 + ret = -ENODEV; 249 + goto fail; 250 + } 251 + 252 + ret = qpnp_tm_init(chip); 253 + if (ret < 0) { 254 + dev_err(&pdev->dev, "init failed\n"); 255 + goto fail; 256 + } 257 + 258 + ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, qpnp_tm_isr, 259 + IRQF_ONESHOT, node->name, chip); 260 + if (ret < 0) 261 + goto fail; 262 + 263 + chip->tz_dev = thermal_zone_of_sensor_register(&pdev->dev, 0, chip, 264 + &qpnp_tm_sensor_ops); 265 + if 
(IS_ERR(chip->tz_dev)) { 266 + dev_err(&pdev->dev, "failed to register sensor\n"); 267 + ret = PTR_ERR(chip->tz_dev); 268 + goto fail; 269 + } 270 + 271 + return 0; 272 + 273 + fail: 274 + if (!IS_ERR(chip->adc)) 275 + iio_channel_release(chip->adc); 276 + 277 + return ret; 278 + } 279 + 280 + static int qpnp_tm_remove(struct platform_device *pdev) 281 + { 282 + struct qpnp_tm_chip *chip = dev_get_drvdata(&pdev->dev); 283 + 284 + thermal_zone_of_sensor_unregister(&pdev->dev, chip->tz_dev); 285 + if (!IS_ERR(chip->adc)) 286 + iio_channel_release(chip->adc); 287 + 288 + return 0; 289 + } 290 + 291 + static const struct of_device_id qpnp_tm_match_table[] = { 292 + { .compatible = "qcom,spmi-temp-alarm" }, 293 + { } 294 + }; 295 + MODULE_DEVICE_TABLE(of, qpnp_tm_match_table); 296 + 297 + static struct platform_driver qpnp_tm_driver = { 298 + .driver = { 299 + .name = "spmi-temp-alarm", 300 + .of_match_table = qpnp_tm_match_table, 301 + }, 302 + .probe = qpnp_tm_probe, 303 + .remove = qpnp_tm_remove, 304 + }; 305 + module_platform_driver(qpnp_tm_driver); 306 + 307 + MODULE_ALIAS("platform:spmi-temp-alarm"); 308 + MODULE_DESCRIPTION("QPNP PMIC Temperature Alarm driver"); 309 + MODULE_LICENSE("GPL v2");
+185 -2
drivers/thermal/samsung/exynos_tmu.c
··· 97 97 #define EXYNOS4412_MUX_ADDR_VALUE 6 98 98 #define EXYNOS4412_MUX_ADDR_SHIFT 20 99 99 100 + /* Exynos5433 specific registers */ 101 + #define EXYNOS5433_TMU_REG_CONTROL1 0x024 102 + #define EXYNOS5433_TMU_SAMPLING_INTERVAL 0x02c 103 + #define EXYNOS5433_TMU_COUNTER_VALUE0 0x030 104 + #define EXYNOS5433_TMU_COUNTER_VALUE1 0x034 105 + #define EXYNOS5433_TMU_REG_CURRENT_TEMP1 0x044 106 + #define EXYNOS5433_THD_TEMP_RISE3_0 0x050 107 + #define EXYNOS5433_THD_TEMP_RISE7_4 0x054 108 + #define EXYNOS5433_THD_TEMP_FALL3_0 0x060 109 + #define EXYNOS5433_THD_TEMP_FALL7_4 0x064 110 + #define EXYNOS5433_TMU_REG_INTEN 0x0c0 111 + #define EXYNOS5433_TMU_REG_INTPEND 0x0c8 112 + #define EXYNOS5433_TMU_EMUL_CON 0x110 113 + #define EXYNOS5433_TMU_PD_DET_EN 0x130 114 + 115 + #define EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT 16 116 + #define EXYNOS5433_TRIMINFO_CALIB_SEL_SHIFT 23 117 + #define EXYNOS5433_TRIMINFO_SENSOR_ID_MASK \ 118 + (0xf << EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT) 119 + #define EXYNOS5433_TRIMINFO_CALIB_SEL_MASK BIT(23) 120 + 121 + #define EXYNOS5433_TRIMINFO_ONE_POINT_TRIMMING 0 122 + #define EXYNOS5433_TRIMINFO_TWO_POINT_TRIMMING 1 123 + 124 + #define EXYNOS5433_PD_DET_EN 1 125 + 100 126 /*exynos5440 specific registers*/ 101 127 #define EXYNOS5440_TMU_S0_7_TRIM 0x000 102 128 #define EXYNOS5440_TMU_S0_7_CTRL 0x020 ··· 510 484 return ret; 511 485 } 512 486 487 + static int exynos5433_tmu_initialize(struct platform_device *pdev) 488 + { 489 + struct exynos_tmu_data *data = platform_get_drvdata(pdev); 490 + struct exynos_tmu_platform_data *pdata = data->pdata; 491 + struct thermal_zone_device *tz = data->tzd; 492 + unsigned int status, trim_info; 493 + unsigned int rising_threshold = 0, falling_threshold = 0; 494 + unsigned long temp, temp_hist; 495 + int ret = 0, threshold_code, i, sensor_id, cal_type; 496 + 497 + status = readb(data->base + EXYNOS_TMU_REG_STATUS); 498 + if (!status) { 499 + ret = -EBUSY; 500 + goto out; 501 + } 502 + 503 + trim_info = 
readl(data->base + EXYNOS_TMU_REG_TRIMINFO); 504 + sanitize_temp_error(data, trim_info); 505 + 506 + /* Read the temperature sensor id */ 507 + sensor_id = (trim_info & EXYNOS5433_TRIMINFO_SENSOR_ID_MASK) 508 + >> EXYNOS5433_TRIMINFO_SENSOR_ID_SHIFT; 509 + dev_info(&pdev->dev, "Temperature sensor ID: 0x%x\n", sensor_id); 510 + 511 + /* Read the calibration mode */ 512 + writel(trim_info, data->base + EXYNOS_TMU_REG_TRIMINFO); 513 + cal_type = (trim_info & EXYNOS5433_TRIMINFO_CALIB_SEL_MASK) 514 + >> EXYNOS5433_TRIMINFO_CALIB_SEL_SHIFT; 515 + 516 + switch (cal_type) { 517 + case EXYNOS5433_TRIMINFO_ONE_POINT_TRIMMING: 518 + pdata->cal_type = TYPE_ONE_POINT_TRIMMING; 519 + break; 520 + case EXYNOS5433_TRIMINFO_TWO_POINT_TRIMMING: 521 + pdata->cal_type = TYPE_TWO_POINT_TRIMMING; 522 + break; 523 + default: 524 + pdata->cal_type = TYPE_ONE_POINT_TRIMMING; 525 + break; 526 + }; 527 + 528 + dev_info(&pdev->dev, "Calibration type is %d-point calibration\n", 529 + cal_type ? 2 : 1); 530 + 531 + /* Write temperature code for rising and falling threshold */ 532 + for (i = 0; i < of_thermal_get_ntrips(tz); i++) { 533 + int rising_reg_offset, falling_reg_offset; 534 + int j = 0; 535 + 536 + switch (i) { 537 + case 0: 538 + case 1: 539 + case 2: 540 + case 3: 541 + rising_reg_offset = EXYNOS5433_THD_TEMP_RISE3_0; 542 + falling_reg_offset = EXYNOS5433_THD_TEMP_FALL3_0; 543 + j = i; 544 + break; 545 + case 4: 546 + case 5: 547 + case 6: 548 + case 7: 549 + rising_reg_offset = EXYNOS5433_THD_TEMP_RISE7_4; 550 + falling_reg_offset = EXYNOS5433_THD_TEMP_FALL7_4; 551 + j = i - 4; 552 + break; 553 + default: 554 + continue; 555 + } 556 + 557 + /* Write temperature code for rising threshold */ 558 + tz->ops->get_trip_temp(tz, i, &temp); 559 + temp /= MCELSIUS; 560 + threshold_code = temp_to_code(data, temp); 561 + 562 + rising_threshold = readl(data->base + rising_reg_offset); 563 + rising_threshold |= (threshold_code << j * 8); 564 + writel(rising_threshold, data->base + 
rising_reg_offset); 565 + 566 + /* Write temperature code for falling threshold */ 567 + tz->ops->get_trip_hyst(tz, i, &temp_hist); 568 + temp_hist = temp - (temp_hist / MCELSIUS); 569 + threshold_code = temp_to_code(data, temp_hist); 570 + 571 + falling_threshold = readl(data->base + falling_reg_offset); 572 + falling_threshold &= ~(0xff << j * 8); 573 + falling_threshold |= (threshold_code << j * 8); 574 + writel(falling_threshold, data->base + falling_reg_offset); 575 + } 576 + 577 + data->tmu_clear_irqs(data); 578 + out: 579 + return ret; 580 + } 581 + 513 582 static int exynos5440_tmu_initialize(struct platform_device *pdev) 514 583 { 515 584 struct exynos_tmu_data *data = platform_get_drvdata(pdev); ··· 764 643 writel(con, data->base + EXYNOS_TMU_REG_CONTROL); 765 644 } 766 645 646 + static void exynos5433_tmu_control(struct platform_device *pdev, bool on) 647 + { 648 + struct exynos_tmu_data *data = platform_get_drvdata(pdev); 649 + struct thermal_zone_device *tz = data->tzd; 650 + unsigned int con, interrupt_en, pd_det_en; 651 + 652 + con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL)); 653 + 654 + if (on) { 655 + con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT); 656 + interrupt_en = 657 + (of_thermal_is_trip_valid(tz, 7) 658 + << EXYNOS7_TMU_INTEN_RISE7_SHIFT) | 659 + (of_thermal_is_trip_valid(tz, 6) 660 + << EXYNOS7_TMU_INTEN_RISE6_SHIFT) | 661 + (of_thermal_is_trip_valid(tz, 5) 662 + << EXYNOS7_TMU_INTEN_RISE5_SHIFT) | 663 + (of_thermal_is_trip_valid(tz, 4) 664 + << EXYNOS7_TMU_INTEN_RISE4_SHIFT) | 665 + (of_thermal_is_trip_valid(tz, 3) 666 + << EXYNOS7_TMU_INTEN_RISE3_SHIFT) | 667 + (of_thermal_is_trip_valid(tz, 2) 668 + << EXYNOS7_TMU_INTEN_RISE2_SHIFT) | 669 + (of_thermal_is_trip_valid(tz, 1) 670 + << EXYNOS7_TMU_INTEN_RISE1_SHIFT) | 671 + (of_thermal_is_trip_valid(tz, 0) 672 + << EXYNOS7_TMU_INTEN_RISE0_SHIFT); 673 + 674 + interrupt_en |= 675 + interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT; 676 + } else { 677 + con &= ~(1 << 
EXYNOS_TMU_CORE_EN_SHIFT); 678 + interrupt_en = 0; /* Disable all interrupts */ 679 + } 680 + 681 + pd_det_en = on ? EXYNOS5433_PD_DET_EN : 0; 682 + 683 + writel(pd_det_en, data->base + EXYNOS5433_TMU_PD_DET_EN); 684 + writel(interrupt_en, data->base + EXYNOS5433_TMU_REG_INTEN); 685 + writel(con, data->base + EXYNOS_TMU_REG_CONTROL); 686 + } 687 + 767 688 static void exynos5440_tmu_control(struct platform_device *pdev, bool on) 768 689 { 769 690 struct exynos_tmu_data *data = platform_get_drvdata(pdev); ··· 933 770 934 771 if (data->soc == SOC_ARCH_EXYNOS5260) 935 772 emul_con = EXYNOS5260_EMUL_CON; 773 + if (data->soc == SOC_ARCH_EXYNOS5433) 774 + emul_con = EXYNOS5433_TMU_EMUL_CON; 936 775 else if (data->soc == SOC_ARCH_EXYNOS7) 937 776 emul_con = EXYNOS7_TMU_REG_EMUL_CON; 938 777 else ··· 1047 882 } else if (data->soc == SOC_ARCH_EXYNOS7) { 1048 883 tmu_intstat = EXYNOS7_TMU_REG_INTPEND; 1049 884 tmu_intclear = EXYNOS7_TMU_REG_INTPEND; 885 + } else if (data->soc == SOC_ARCH_EXYNOS5433) { 886 + tmu_intstat = EXYNOS5433_TMU_REG_INTPEND; 887 + tmu_intclear = EXYNOS5433_TMU_REG_INTPEND; 1050 888 } else { 1051 889 tmu_intstat = EXYNOS_TMU_REG_INTSTAT; 1052 890 tmu_intclear = EXYNOS_TMU_REG_INTCLEAR; ··· 1094 926 { .compatible = "samsung,exynos5260-tmu", }, 1095 927 { .compatible = "samsung,exynos5420-tmu", }, 1096 928 { .compatible = "samsung,exynos5420-tmu-ext-triminfo", }, 929 + { .compatible = "samsung,exynos5433-tmu", }, 1097 930 { .compatible = "samsung,exynos5440-tmu", }, 1098 931 { .compatible = "samsung,exynos7-tmu", }, 1099 932 { /* sentinel */ }, ··· 1118 949 else if (of_device_is_compatible(np, 1119 950 "samsung,exynos5420-tmu-ext-triminfo")) 1120 951 return SOC_ARCH_EXYNOS5420_TRIMINFO; 952 + else if (of_device_is_compatible(np, "samsung,exynos5433-tmu")) 953 + return SOC_ARCH_EXYNOS5433; 1121 954 else if (of_device_is_compatible(np, "samsung,exynos5440-tmu")) 1122 955 return SOC_ARCH_EXYNOS5440; 1123 956 else if (of_device_is_compatible(np, 
"samsung,exynos7-tmu")) ··· 1240 1069 data->tmu_set_emulation = exynos4412_tmu_set_emulation; 1241 1070 data->tmu_clear_irqs = exynos4210_tmu_clear_irqs; 1242 1071 break; 1072 + case SOC_ARCH_EXYNOS5433: 1073 + data->tmu_initialize = exynos5433_tmu_initialize; 1074 + data->tmu_control = exynos5433_tmu_control; 1075 + data->tmu_read = exynos4412_tmu_read; 1076 + data->tmu_set_emulation = exynos4412_tmu_set_emulation; 1077 + data->tmu_clear_irqs = exynos4210_tmu_clear_irqs; 1078 + break; 1243 1079 case SOC_ARCH_EXYNOS5440: 1244 1080 data->tmu_initialize = exynos5440_tmu_initialize; 1245 1081 data->tmu_control = exynos5440_tmu_control; ··· 1350 1172 goto err_clk_sec; 1351 1173 } 1352 1174 1353 - if (data->soc == SOC_ARCH_EXYNOS7) { 1175 + switch (data->soc) { 1176 + case SOC_ARCH_EXYNOS5433: 1177 + case SOC_ARCH_EXYNOS7: 1354 1178 data->sclk = devm_clk_get(&pdev->dev, "tmu_sclk"); 1355 1179 if (IS_ERR(data->sclk)) { 1356 1180 dev_err(&pdev->dev, "Failed to get sclk\n"); ··· 1364 1184 goto err_clk; 1365 1185 } 1366 1186 } 1367 - } 1187 + break; 1188 + default: 1189 + break; 1190 + }; 1368 1191 1369 1192 ret = exynos_tmu_initialize(pdev); 1370 1193 if (ret) {
+1
drivers/thermal/samsung/exynos_tmu.h
··· 33 33 SOC_ARCH_EXYNOS5260, 34 34 SOC_ARCH_EXYNOS5420, 35 35 SOC_ARCH_EXYNOS5420_TRIMINFO, 36 + SOC_ARCH_EXYNOS5433, 36 37 SOC_ARCH_EXYNOS5440, 37 38 SOC_ARCH_EXYNOS7, 38 39 };
+297 -17
drivers/thermal/thermal_core.c
··· 75 75 return NULL; 76 76 } 77 77 78 + /** 79 + * bind_previous_governor() - bind the previous governor of the thermal zone 80 + * @tz: a valid pointer to a struct thermal_zone_device 81 + * @failed_gov_name: the name of the governor that failed to register 82 + * 83 + * Register the previous governor of the thermal zone after a new 84 + * governor has failed to be bound. 85 + */ 86 + static void bind_previous_governor(struct thermal_zone_device *tz, 87 + const char *failed_gov_name) 88 + { 89 + if (tz->governor && tz->governor->bind_to_tz) { 90 + if (tz->governor->bind_to_tz(tz)) { 91 + dev_err(&tz->device, 92 + "governor %s failed to bind and the previous one (%s) failed to bind again, thermal zone %s has no governor\n", 93 + failed_gov_name, tz->governor->name, tz->type); 94 + tz->governor = NULL; 95 + } 96 + } 97 + } 98 + 99 + /** 100 + * thermal_set_governor() - Switch to another governor 101 + * @tz: a valid pointer to a struct thermal_zone_device 102 + * @new_gov: pointer to the new governor 103 + * 104 + * Change the governor of thermal zone @tz. 105 + * 106 + * Return: 0 on success, an error if the new governor's bind_to_tz() failed. 
107 + */ 108 + static int thermal_set_governor(struct thermal_zone_device *tz, 109 + struct thermal_governor *new_gov) 110 + { 111 + int ret = 0; 112 + 113 + if (tz->governor && tz->governor->unbind_from_tz) 114 + tz->governor->unbind_from_tz(tz); 115 + 116 + if (new_gov && new_gov->bind_to_tz) { 117 + ret = new_gov->bind_to_tz(tz); 118 + if (ret) { 119 + bind_previous_governor(tz, new_gov->name); 120 + 121 + return ret; 122 + } 123 + } 124 + 125 + tz->governor = new_gov; 126 + 127 + return ret; 128 + } 129 + 78 130 int thermal_register_governor(struct thermal_governor *governor) 79 131 { 80 132 int err; ··· 159 107 160 108 name = pos->tzp->governor_name; 161 109 162 - if (!strncasecmp(name, governor->name, THERMAL_NAME_LENGTH)) 163 - pos->governor = governor; 110 + if (!strncasecmp(name, governor->name, THERMAL_NAME_LENGTH)) { 111 + int ret; 112 + 113 + ret = thermal_set_governor(pos, governor); 114 + if (ret) 115 + dev_err(&pos->device, 116 + "Failed to set governor %s for thermal zone %s: %d\n", 117 + governor->name, pos->type, ret); 118 + } 164 119 } 165 120 166 121 mutex_unlock(&thermal_list_lock); ··· 193 134 list_for_each_entry(pos, &thermal_tz_list, node) { 194 135 if (!strncasecmp(pos->governor->name, governor->name, 195 136 THERMAL_NAME_LENGTH)) 196 - pos->governor = NULL; 137 + thermal_set_governor(pos, NULL); 197 138 } 198 139 199 140 mutex_unlock(&thermal_list_lock); ··· 277 218 278 219 static void __bind(struct thermal_zone_device *tz, int mask, 279 220 struct thermal_cooling_device *cdev, 280 - unsigned long *limits) 221 + unsigned long *limits, 222 + unsigned int weight) 281 223 { 282 224 int i, ret; 283 225 ··· 293 233 upper = limits[i * 2 + 1]; 294 234 } 295 235 ret = thermal_zone_bind_cooling_device(tz, i, cdev, 296 - upper, lower); 236 + upper, lower, 237 + weight); 297 238 if (ret) 298 239 print_bind_err_msg(tz, cdev, ret); 299 240 } ··· 341 280 continue; 342 281 tzp->tbp[i].cdev = cdev; 343 282 __bind(pos, tzp->tbp[i].trip_mask, cdev, 344 - 
tzp->tbp[i].binding_limits); 283 + tzp->tbp[i].binding_limits, 284 + tzp->tbp[i].weight); 345 285 } 346 286 } 347 287 ··· 381 319 continue; 382 320 tzp->tbp[i].cdev = pos; 383 321 __bind(tz, tzp->tbp[i].trip_mask, pos, 384 - tzp->tbp[i].binding_limits); 322 + tzp->tbp[i].binding_limits, 323 + tzp->tbp[i].weight); 385 324 } 386 325 } 387 326 exit: ··· 776 713 thermal_zone_bind_cooling_device(tz, 777 714 THERMAL_TRIPS_NONE, cdev, 778 715 THERMAL_NO_LIMIT, 779 - THERMAL_NO_LIMIT); 716 + THERMAL_NO_LIMIT, 717 + THERMAL_WEIGHT_DEFAULT); 780 718 } 781 719 mutex_unlock(&thermal_list_lock); 782 720 if (!tz->passive_delay) ··· 829 765 if (!gov) 830 766 goto exit; 831 767 832 - tz->governor = gov; 833 - ret = count; 768 + ret = thermal_set_governor(tz, gov); 769 + if (!ret) 770 + ret = count; 834 771 835 772 exit: 836 773 mutex_unlock(&tz->lock); ··· 874 809 } 875 810 static DEVICE_ATTR(emul_temp, S_IWUSR, NULL, emul_temp_store); 876 811 #endif/*CONFIG_THERMAL_EMULATION*/ 812 + 813 + static ssize_t 814 + sustainable_power_show(struct device *dev, struct device_attribute *devattr, 815 + char *buf) 816 + { 817 + struct thermal_zone_device *tz = to_thermal_zone(dev); 818 + 819 + if (tz->tzp) 820 + return sprintf(buf, "%u\n", tz->tzp->sustainable_power); 821 + else 822 + return -EIO; 823 + } 824 + 825 + static ssize_t 826 + sustainable_power_store(struct device *dev, struct device_attribute *devattr, 827 + const char *buf, size_t count) 828 + { 829 + struct thermal_zone_device *tz = to_thermal_zone(dev); 830 + u32 sustainable_power; 831 + 832 + if (!tz->tzp) 833 + return -EIO; 834 + 835 + if (kstrtou32(buf, 10, &sustainable_power)) 836 + return -EINVAL; 837 + 838 + tz->tzp->sustainable_power = sustainable_power; 839 + 840 + return count; 841 + } 842 + static DEVICE_ATTR(sustainable_power, S_IWUSR | S_IRUGO, sustainable_power_show, 843 + sustainable_power_store); 844 + 845 + #define create_s32_tzp_attr(name) \ 846 + static ssize_t \ 847 + name##_show(struct device *dev, struct 
device_attribute *devattr, \ 848 + char *buf) \ 849 + { \ 850 + struct thermal_zone_device *tz = to_thermal_zone(dev); \ 851 + \ 852 + if (tz->tzp) \ 853 + return sprintf(buf, "%u\n", tz->tzp->name); \ 854 + else \ 855 + return -EIO; \ 856 + } \ 857 + \ 858 + static ssize_t \ 859 + name##_store(struct device *dev, struct device_attribute *devattr, \ 860 + const char *buf, size_t count) \ 861 + { \ 862 + struct thermal_zone_device *tz = to_thermal_zone(dev); \ 863 + s32 value; \ 864 + \ 865 + if (!tz->tzp) \ 866 + return -EIO; \ 867 + \ 868 + if (kstrtos32(buf, 10, &value)) \ 869 + return -EINVAL; \ 870 + \ 871 + tz->tzp->name = value; \ 872 + \ 873 + return count; \ 874 + } \ 875 + static DEVICE_ATTR(name, S_IWUSR | S_IRUGO, name##_show, name##_store) 876 + 877 + create_s32_tzp_attr(k_po); 878 + create_s32_tzp_attr(k_pu); 879 + create_s32_tzp_attr(k_i); 880 + create_s32_tzp_attr(k_d); 881 + create_s32_tzp_attr(integral_cutoff); 882 + create_s32_tzp_attr(slope); 883 + create_s32_tzp_attr(offset); 884 + #undef create_s32_tzp_attr 885 + 886 + static struct device_attribute *dev_tzp_attrs[] = { 887 + &dev_attr_sustainable_power, 888 + &dev_attr_k_po, 889 + &dev_attr_k_pu, 890 + &dev_attr_k_i, 891 + &dev_attr_k_d, 892 + &dev_attr_integral_cutoff, 893 + &dev_attr_slope, 894 + &dev_attr_offset, 895 + }; 896 + 897 + static int create_tzp_attrs(struct device *dev) 898 + { 899 + int i; 900 + 901 + for (i = 0; i < ARRAY_SIZE(dev_tzp_attrs); i++) { 902 + int ret; 903 + struct device_attribute *dev_attr = dev_tzp_attrs[i]; 904 + 905 + ret = device_create_file(dev, dev_attr); 906 + if (ret) 907 + return ret; 908 + } 909 + 910 + return 0; 911 + } 912 + 913 + /** 914 + * power_actor_get_max_power() - get the maximum power that a cdev can consume 915 + * @cdev: pointer to &thermal_cooling_device 916 + * @tz: a valid thermal zone device pointer 917 + * @max_power: pointer in which to store the maximum power 918 + * 919 + * Calculate the maximum power consumption in milliwats that 
the 920 + * cooling device can currently consume and store it in @max_power. 921 + * 922 + * Return: 0 on success, -EINVAL if @cdev doesn't support the 923 + * power_actor API or -E* on other error. 924 + */ 925 + int power_actor_get_max_power(struct thermal_cooling_device *cdev, 926 + struct thermal_zone_device *tz, u32 *max_power) 927 + { 928 + if (!cdev_is_power_actor(cdev)) 929 + return -EINVAL; 930 + 931 + return cdev->ops->state2power(cdev, tz, 0, max_power); 932 + } 933 + 934 + /** 935 + * power_actor_set_power() - limit the maximum power that a cooling device can consume 936 + * @cdev: pointer to &thermal_cooling_device 937 + * @instance: thermal instance to update 938 + * @power: the power in milliwatts 939 + * 940 + * Set the cooling device to consume at most @power milliwatts. 941 + * 942 + * Return: 0 on success, -EINVAL if the cooling device does not 943 + * implement the power actor API or -E* for other failures. 944 + */ 945 + int power_actor_set_power(struct thermal_cooling_device *cdev, 946 + struct thermal_instance *instance, u32 power) 947 + { 948 + unsigned long state; 949 + int ret; 950 + 951 + if (!cdev_is_power_actor(cdev)) 952 + return -EINVAL; 953 + 954 + ret = cdev->ops->power2state(cdev, instance->tz, power, &state); 955 + if (ret) 956 + return ret; 957 + 958 + instance->target = state; 959 + cdev->updated = false; 960 + thermal_cdev_update(cdev); 961 + 962 + return 0; 963 + } 877 964 878 965 static DEVICE_ATTR(type, 0444, type_show, NULL); 879 966 static DEVICE_ATTR(temp, 0444, temp_show, NULL); ··· 1134 917 NULL, 1135 918 }; 1136 919 920 + static ssize_t 921 + thermal_cooling_device_weight_show(struct device *dev, 922 + struct device_attribute *attr, char *buf) 923 + { 924 + struct thermal_instance *instance; 925 + 926 + instance = container_of(attr, struct thermal_instance, weight_attr); 927 + 928 + return sprintf(buf, "%d\n", instance->weight); 929 + } 930 + 931 + static ssize_t 932 + thermal_cooling_device_weight_store(struct device 
*dev, 933 + struct device_attribute *attr, 934 + const char *buf, size_t count) 935 + { 936 + struct thermal_instance *instance; 937 + int ret, weight; 938 + 939 + ret = kstrtoint(buf, 0, &weight); 940 + if (ret) 941 + return ret; 942 + 943 + instance = container_of(attr, struct thermal_instance, weight_attr); 944 + instance->weight = weight; 945 + 946 + return count; 947 + } 1137 948 /* Device management */ 1138 949 1139 950 /** ··· 1176 931 * @lower: the Minimum cooling state can be used for this trip point. 1177 932 * THERMAL_NO_LIMIT means no lower limit, 1178 933 * and the cooling device can be in cooling state 0. 934 + * @weight: The weight of the cooling device to be bound to the 935 + * thermal zone. Use THERMAL_WEIGHT_DEFAULT for the 936 + * default value 1179 937 * 1180 938 * This interface function bind a thermal cooling device to the certain trip 1181 939 * point of a thermal zone device. ··· 1189 941 int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, 1190 942 int trip, 1191 943 struct thermal_cooling_device *cdev, 1192 - unsigned long upper, unsigned long lower) 944 + unsigned long upper, unsigned long lower, 945 + unsigned int weight) 1193 946 { 1194 947 struct thermal_instance *dev; 1195 948 struct thermal_instance *pos; ··· 1235 986 dev->upper = upper; 1236 987 dev->lower = lower; 1237 988 dev->target = THERMAL_NO_TARGET; 989 + dev->weight = weight; 1238 990 1239 991 result = get_idr(&tz->idr, &tz->lock, &dev->id); 1240 992 if (result) ··· 1256 1006 if (result) 1257 1007 goto remove_symbol_link; 1258 1008 1009 + sprintf(dev->weight_attr_name, "cdev%d_weight", dev->id); 1010 + sysfs_attr_init(&dev->weight_attr.attr); 1011 + dev->weight_attr.attr.name = dev->weight_attr_name; 1012 + dev->weight_attr.attr.mode = S_IWUSR | S_IRUGO; 1013 + dev->weight_attr.show = thermal_cooling_device_weight_show; 1014 + dev->weight_attr.store = thermal_cooling_device_weight_store; 1015 + result = device_create_file(&tz->device, &dev->weight_attr); 
1016 + if (result) 1017 + goto remove_trip_file; 1018 + 1259 1019 mutex_lock(&tz->lock); 1260 1020 mutex_lock(&cdev->lock); 1261 1021 list_for_each_entry(pos, &tz->thermal_instances, tz_node) ··· 1283 1023 if (!result) 1284 1024 return 0; 1285 1025 1026 + device_remove_file(&tz->device, &dev->weight_attr); 1027 + remove_trip_file: 1286 1028 device_remove_file(&tz->device, &dev->attr); 1287 1029 remove_symbol_link: 1288 1030 sysfs_remove_link(&tz->device.kobj, dev->name); ··· 1639 1377 tz->trip_temp_attrs[indx].name; 1640 1378 tz->trip_temp_attrs[indx].attr.attr.mode = S_IRUGO; 1641 1379 tz->trip_temp_attrs[indx].attr.show = trip_point_temp_show; 1642 - if (mask & (1 << indx)) { 1380 + if (IS_ENABLED(CONFIG_THERMAL_WRITABLE_TRIPS) && 1381 + mask & (1 << indx)) { 1643 1382 tz->trip_temp_attrs[indx].attr.attr.mode |= S_IWUSR; 1644 1383 tz->trip_temp_attrs[indx].attr.store = 1645 1384 trip_point_temp_store; ··· 1717 1454 struct thermal_zone_device *thermal_zone_device_register(const char *type, 1718 1455 int trips, int mask, void *devdata, 1719 1456 struct thermal_zone_device_ops *ops, 1720 - const struct thermal_zone_params *tzp, 1457 + struct thermal_zone_params *tzp, 1721 1458 int passive_delay, int polling_delay) 1722 1459 { 1723 1460 struct thermal_zone_device *tz; ··· 1725 1462 int result; 1726 1463 int count; 1727 1464 int passive = 0; 1465 + struct thermal_governor *governor; 1728 1466 1729 1467 if (type && strlen(type) >= THERMAL_NAME_LENGTH) 1730 1468 return ERR_PTR(-EINVAL); ··· 1812 1548 if (result) 1813 1549 goto unregister; 1814 1550 1551 + /* Add thermal zone params */ 1552 + result = create_tzp_attrs(&tz->device); 1553 + if (result) 1554 + goto unregister; 1555 + 1815 1556 /* Update 'this' zone's governor information */ 1816 1557 mutex_lock(&thermal_governor_lock); 1817 1558 1818 1559 if (tz->tzp) 1819 - tz->governor = __find_governor(tz->tzp->governor_name); 1560 + governor = __find_governor(tz->tzp->governor_name); 1820 1561 else 1821 - tz->governor = 
def_governor; 1562 + governor = def_governor; 1563 + 1564 + result = thermal_set_governor(tz, governor); 1565 + if (result) { 1566 + mutex_unlock(&thermal_governor_lock); 1567 + goto unregister; 1568 + } 1822 1569 1823 1570 mutex_unlock(&thermal_governor_lock); 1824 1571 ··· 1918 1643 device_remove_file(&tz->device, &dev_attr_mode); 1919 1644 device_remove_file(&tz->device, &dev_attr_policy); 1920 1645 remove_trip_attrs(tz); 1921 - tz->governor = NULL; 1646 + thermal_set_governor(tz, NULL); 1922 1647 1923 1648 thermal_remove_hwmon_sysfs(tz); 1924 1649 release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id); ··· 2074 1799 if (result) 2075 1800 return result; 2076 1801 2077 - return thermal_gov_user_space_register(); 1802 + result = thermal_gov_user_space_register(); 1803 + if (result) 1804 + return result; 1805 + 1806 + return thermal_gov_power_allocator_register(); 2078 1807 } 2079 1808 2080 1809 static void thermal_unregister_governors(void) ··· 2087 1808 thermal_gov_fair_share_unregister(); 2088 1809 thermal_gov_bang_bang_unregister(); 2089 1810 thermal_gov_user_space_unregister(); 1811 + thermal_gov_power_allocator_unregister(); 2090 1812 } 2091 1813 2092 1814 static int __init thermal_init(void)
+11
drivers/thermal/thermal_core.h
··· 46 46 unsigned long target; /* expected cooling state */ 47 47 char attr_name[THERMAL_NAME_LENGTH]; 48 48 struct device_attribute attr; 49 + char weight_attr_name[THERMAL_NAME_LENGTH]; 50 + struct device_attribute weight_attr; 49 51 struct list_head tz_node; /* node in tz->thermal_instances */ 50 52 struct list_head cdev_node; /* node in cdev->thermal_instances */ 53 + unsigned int weight; /* The weight of the cooling device */ 51 54 }; 52 55 53 56 int thermal_register_governor(struct thermal_governor *); ··· 87 84 static inline int thermal_gov_user_space_register(void) { return 0; } 88 85 static inline void thermal_gov_user_space_unregister(void) {} 89 86 #endif /* CONFIG_THERMAL_GOV_USER_SPACE */ 87 + 88 + #ifdef CONFIG_THERMAL_GOV_POWER_ALLOCATOR 89 + int thermal_gov_power_allocator_register(void); 90 + void thermal_gov_power_allocator_unregister(void); 91 + #else 92 + static inline int thermal_gov_power_allocator_register(void) { return 0; } 93 + static inline void thermal_gov_power_allocator_unregister(void) {} 94 + #endif /* CONFIG_THERMAL_GOV_POWER_ALLOCATOR */ 90 95 91 96 /* device tree support */ 92 97 #ifdef CONFIG_THERMAL_OF
+50 -54
drivers/thermal/ti-soc-thermal/ti-bandgap.c
··· 43 43 44 44 #include "ti-bandgap.h" 45 45 46 + static int ti_bandgap_force_single_read(struct ti_bandgap *bgp, int id); 47 + 46 48 /*** Helper functions to access registers and their bitfields ***/ 47 49 48 50 /** ··· 105 103 */ 106 104 static int ti_bandgap_power(struct ti_bandgap *bgp, bool on) 107 105 { 108 - int i, ret = 0; 106 + int i; 109 107 110 - if (!TI_BANDGAP_HAS(bgp, POWER_SWITCH)) { 111 - ret = -ENOTSUPP; 112 - goto exit; 113 - } 108 + if (!TI_BANDGAP_HAS(bgp, POWER_SWITCH)) 109 + return -ENOTSUPP; 114 110 115 111 for (i = 0; i < bgp->conf->sensor_count; i++) 116 112 /* active on 0 */ 117 113 RMW_BITS(bgp, i, temp_sensor_ctrl, bgap_tempsoff_mask, !on); 118 - 119 - exit: 120 - return ret; 114 + return 0; 121 115 } 122 116 123 117 /** ··· 261 263 int ti_bandgap_adc_to_mcelsius(struct ti_bandgap *bgp, int adc_val, int *t) 262 264 { 263 265 const struct ti_bandgap_data *conf = bgp->conf; 264 - int ret = 0; 265 266 266 267 /* look up for temperature in the table and return the temperature */ 267 - if (adc_val < conf->adc_start_val || adc_val > conf->adc_end_val) { 268 - ret = -ERANGE; 269 - goto exit; 270 - } 268 + if (adc_val < conf->adc_start_val || adc_val > conf->adc_end_val) 269 + return -ERANGE; 271 270 272 271 *t = bgp->conf->conv_table[adc_val - conf->adc_start_val]; 273 - 274 - exit: 275 - return ret; 272 + return 0; 276 273 } 277 274 278 275 /** ··· 288 295 { 289 296 const struct ti_bandgap_data *conf = bgp->conf; 290 297 const int *conv_table = bgp->conf->conv_table; 291 - int high, low, mid, ret = 0; 298 + int high, low, mid; 292 299 293 300 low = 0; 294 301 high = conf->adc_end_val - conf->adc_start_val; 295 302 mid = (high + low) / 2; 296 303 297 - if (temp < conv_table[low] || temp > conv_table[high]) { 298 - ret = -ERANGE; 299 - goto exit; 300 - } 304 + if (temp < conv_table[low] || temp > conv_table[high]) 305 + return -ERANGE; 301 306 302 307 while (low < high) { 303 308 if (temp < conv_table[mid]) ··· 306 315 } 307 316 308 317 *adc = 
conf->adc_start_val + low; 309 - 310 - exit: 311 - return ret; 318 + return 0; 312 319 } 313 320 314 321 /** ··· 332 343 */ 333 344 ret = ti_bandgap_adc_to_mcelsius(bgp, adc_val, &temp); 334 345 if (ret < 0) 335 - goto exit; 346 + return ret; 336 347 337 348 temp += hyst_val; 338 349 339 350 ret = ti_bandgap_mcelsius_to_adc(bgp, temp, sum); 340 - 341 - exit: 342 351 return ret; 343 352 } 344 353 ··· 455 468 */ 456 469 static inline int ti_bandgap_validate(struct ti_bandgap *bgp, int id) 457 470 { 458 - int ret = 0; 459 - 460 471 if (!bgp || IS_ERR(bgp)) { 461 472 pr_err("%s: invalid bandgap pointer\n", __func__); 462 - ret = -EINVAL; 463 - goto exit; 473 + return -EINVAL; 464 474 } 465 475 466 476 if ((id < 0) || (id >= bgp->conf->sensor_count)) { 467 477 dev_err(bgp->dev, "%s: sensor id out of range (%d)\n", 468 478 __func__, id); 469 - ret = -ERANGE; 479 + return -ERANGE; 470 480 } 471 481 472 - exit: 473 - return ret; 482 + return 0; 474 483 } 475 484 476 485 /** ··· 494 511 495 512 ret = ti_bandgap_validate(bgp, id); 496 513 if (ret) 497 - goto exit; 514 + return ret; 498 515 499 - if (!TI_BANDGAP_HAS(bgp, TALERT)) { 500 - ret = -ENOTSUPP; 501 - goto exit; 502 - } 516 + if (!TI_BANDGAP_HAS(bgp, TALERT)) 517 + return -ENOTSUPP; 503 518 504 519 ts_data = bgp->conf->sensors[id].ts_data; 505 520 tsr = bgp->conf->sensors[id].registers; ··· 510 529 } 511 530 512 531 if (ret) 513 - goto exit; 532 + return ret; 514 533 515 534 ret = ti_bandgap_mcelsius_to_adc(bgp, val, &adc_val); 516 535 if (ret < 0) 517 - goto exit; 536 + return ret; 518 537 519 538 spin_lock(&bgp->lock); 520 539 ret = ti_bandgap_update_alert_threshold(bgp, id, adc_val, hot); 521 540 spin_unlock(&bgp->lock); 522 - 523 - exit: 524 541 return ret; 525 542 } 526 543 ··· 561 582 562 583 temp = ti_bandgap_readl(bgp, tsr->bgap_threshold); 563 584 temp = (temp & mask) >> __ffs(mask); 564 - ret |= ti_bandgap_adc_to_mcelsius(bgp, temp, &temp); 585 + ret = ti_bandgap_adc_to_mcelsius(bgp, temp, &temp); 565 586 
if (ret) { 566 587 dev_err(bgp->dev, "failed to read thot\n"); 567 588 ret = -EIO; ··· 831 852 if (ret) 832 853 return ret; 833 854 855 + if (!TI_BANDGAP_HAS(bgp, MODE_CONFIG)) { 856 + ret = ti_bandgap_force_single_read(bgp, id); 857 + if (ret) 858 + return ret; 859 + } 860 + 834 861 spin_lock(&bgp->lock); 835 862 temp = ti_bandgap_read_temp(bgp, id); 836 863 spin_unlock(&bgp->lock); 837 864 838 - ret |= ti_bandgap_adc_to_mcelsius(bgp, temp, &temp); 865 + ret = ti_bandgap_adc_to_mcelsius(bgp, temp, &temp); 839 866 if (ret) 840 867 return -EIO; 841 868 ··· 902 917 static int 903 918 ti_bandgap_force_single_read(struct ti_bandgap *bgp, int id) 904 919 { 905 - u32 temp = 0, counter = 1000; 920 + u32 counter = 1000; 921 + struct temp_sensor_registers *tsr; 906 922 907 923 /* Select single conversion mode */ 908 924 if (TI_BANDGAP_HAS(bgp, MODE_CONFIG)) ··· 911 925 912 926 /* Start of Conversion = 1 */ 913 927 RMW_BITS(bgp, id, temp_sensor_ctrl, bgap_soc_mask, 1); 914 - /* Wait until DTEMP is updated */ 915 - temp = ti_bandgap_read_temp(bgp, id); 916 928 917 - while ((temp == 0) && --counter) 918 - temp = ti_bandgap_read_temp(bgp, id); 919 - /* REVISIT: Check correct condition for end of conversion */ 929 + /* Wait for EOCZ going up */ 930 + tsr = bgp->conf->sensors[id].registers; 931 + 932 + while (--counter) { 933 + if (ti_bandgap_readl(bgp, tsr->temp_sensor_ctrl) & 934 + tsr->bgap_eocz_mask) 935 + break; 936 + } 920 937 921 938 /* Start of Conversion = 0 */ 922 939 RMW_BITS(bgp, id, temp_sensor_ctrl, bgap_soc_mask, 0); 940 + 941 + /* Wait for EOCZ going down */ 942 + counter = 1000; 943 + while (--counter) { 944 + if (!(ti_bandgap_readl(bgp, tsr->temp_sensor_ctrl) & 945 + tsr->bgap_eocz_mask)) 946 + break; 947 + } 923 948 924 949 return 0; 925 950 } ··· 1217 1220 goto free_irqs; 1218 1221 } 1219 1222 1220 - bgp->div_clk = clk_get(NULL, bgp->conf->div_ck_name); 1223 + bgp->div_clk = clk_get(NULL, bgp->conf->div_ck_name); 1221 1224 ret = IS_ERR(bgp->div_clk); 1222 1225 
if (ret) { 1223 - dev_err(&pdev->dev, 1224 - "failed to request div_ts_ck clock ref\n"); 1226 + dev_err(&pdev->dev, "failed to request div_ts_ck clock ref\n"); 1225 1227 ret = PTR_ERR(bgp->div_clk); 1226 1228 goto free_irqs; 1227 1229 }
+3 -2
drivers/thermal/ti-soc-thermal/ti-thermal-common.c
··· 75 75 } 76 76 77 77 /* thermal zone ops */ 78 - /* Get temperature callback function for thermal zone*/ 78 + /* Get temperature callback function for thermal zone */ 79 79 static inline int __ti_thermal_get_temp(void *devdata, long *temp) 80 80 { 81 81 struct thermal_zone_device *pcb_tz = NULL; ··· 146 146 return thermal_zone_bind_cooling_device(thermal, 0, cdev, 147 147 /* bind with min and max states defined by cpu_cooling */ 148 148 THERMAL_NO_LIMIT, 149 - THERMAL_NO_LIMIT); 149 + THERMAL_NO_LIMIT, 150 + THERMAL_WEIGHT_DEFAULT); 150 151 } 151 152 152 153 /* Unbind callback functions for thermal zone */
+1 -1
drivers/thermal/x86_pkg_temp_thermal.c
··· 68 68 struct thermal_zone_device *tzone; 69 69 }; 70 70 71 - static const struct thermal_zone_params pkg_temp_tz_params = { 71 + static struct thermal_zone_params pkg_temp_tz_params = { 72 72 .no_hwmon = true, 73 73 }; 74 74
+39
include/linux/cpu_cooling.h
··· 28 28 #include <linux/thermal.h> 29 29 #include <linux/cpumask.h> 30 30 31 + typedef int (*get_static_t)(cpumask_t *cpumask, int interval, 32 + unsigned long voltage, u32 *power); 33 + 31 34 #ifdef CONFIG_CPU_THERMAL 32 35 /** 33 36 * cpufreq_cooling_register - function to create cpufreq cooling device. ··· 38 35 */ 39 36 struct thermal_cooling_device * 40 37 cpufreq_cooling_register(const struct cpumask *clip_cpus); 38 + 39 + struct thermal_cooling_device * 40 + cpufreq_power_cooling_register(const struct cpumask *clip_cpus, 41 + u32 capacitance, get_static_t plat_static_func); 41 42 42 43 /** 43 44 * of_cpufreq_cooling_register - create cpufreq cooling device based on DT. ··· 52 45 struct thermal_cooling_device * 53 46 of_cpufreq_cooling_register(struct device_node *np, 54 47 const struct cpumask *clip_cpus); 48 + 49 + struct thermal_cooling_device * 50 + of_cpufreq_power_cooling_register(struct device_node *np, 51 + const struct cpumask *clip_cpus, 52 + u32 capacitance, 53 + get_static_t plat_static_func); 55 54 #else 56 55 static inline struct thermal_cooling_device * 57 56 of_cpufreq_cooling_register(struct device_node *np, 58 57 const struct cpumask *clip_cpus) 59 58 { 60 59 return ERR_PTR(-ENOSYS); 60 + } 61 + 62 + static inline struct thermal_cooling_device * 63 + of_cpufreq_power_cooling_register(struct device_node *np, 64 + const struct cpumask *clip_cpus, 65 + u32 capacitance, 66 + get_static_t plat_static_func) 67 + { 68 + return NULL; 61 69 } 62 70 #endif 63 71 ··· 90 68 return ERR_PTR(-ENOSYS); 91 69 } 92 70 static inline struct thermal_cooling_device * 71 + cpufreq_power_cooling_register(const struct cpumask *clip_cpus, 72 + u32 capacitance, get_static_t plat_static_func) 73 + { 74 + return NULL; 75 + } 76 + 77 + static inline struct thermal_cooling_device * 93 78 of_cpufreq_cooling_register(struct device_node *np, 94 79 const struct cpumask *clip_cpus) 95 80 { 96 81 return ERR_PTR(-ENOSYS); 97 82 } 83 + 84 + static inline struct 
thermal_cooling_device * 85 + of_cpufreq_power_cooling_register(struct device_node *np, 86 + const struct cpumask *clip_cpus, 87 + u32 capacitance, 88 + get_static_t plat_static_func) 89 + { 90 + return NULL; 91 + } 92 + 98 93 static inline 99 94 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) 100 95 {
+88 -9
include/linux/thermal.h
··· 40 40 /* No upper/lower limit requirement */ 41 41 #define THERMAL_NO_LIMIT ((u32)~0) 42 42 43 + /* Default weight of a bound cooling device */ 44 + #define THERMAL_WEIGHT_DEFAULT 0 45 + 43 46 /* Unit conversion macros */ 44 47 #define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732 >= 0) ? \ 45 48 ((long)t-2732+5)/10 : ((long)t-2732-5)/10) ··· 59 56 #define DEFAULT_THERMAL_GOVERNOR "fair_share" 60 57 #elif defined(CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE) 61 58 #define DEFAULT_THERMAL_GOVERNOR "user_space" 59 + #elif defined(CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR) 60 + #define DEFAULT_THERMAL_GOVERNOR "power_allocator" 62 61 #endif 63 62 64 63 struct thermal_zone_device; 65 64 struct thermal_cooling_device; 65 + struct thermal_instance; 66 66 67 67 enum thermal_device_mode { 68 68 THERMAL_DEVICE_DISABLED = 0, ··· 119 113 int (*get_max_state) (struct thermal_cooling_device *, unsigned long *); 120 114 int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *); 121 115 int (*set_cur_state) (struct thermal_cooling_device *, unsigned long); 116 + int (*get_requested_power)(struct thermal_cooling_device *, 117 + struct thermal_zone_device *, u32 *); 118 + int (*state2power)(struct thermal_cooling_device *, 119 + struct thermal_zone_device *, unsigned long, u32 *); 120 + int (*power2state)(struct thermal_cooling_device *, 121 + struct thermal_zone_device *, u32, unsigned long *); 122 122 }; 123 123 124 124 struct thermal_cooling_device { ··· 156 144 * @devdata: private pointer for device private data 157 145 * @trips: number of trip points the thermal zone supports 158 146 * @passive_delay: number of milliseconds to wait between polls when 159 - * performing passive cooling. Currenty only used by the 160 - * step-wise governor 147 + * performing passive cooling. 
161 148 * @polling_delay: number of milliseconds to wait between polls when 162 149 * checking whether trip points have been crossed (0 for 163 150 * interrupt driven systems) ··· 166 155 * @last_temperature: previous temperature read 167 156 * @emul_temperature: emulated temperature when using CONFIG_THERMAL_EMULATION 168 157 * @passive: 1 if you've crossed a passive trip point, 0 otherwise. 169 - * Currenty only used by the step-wise governor. 170 158 * @forced_passive: If > 0, temperature at which to switch on all ACPI 171 159 * processor cooling devices. Currently only used by the 172 160 * step-wise governor. 173 161 * @ops: operations this &thermal_zone_device supports 174 162 * @tzp: thermal zone parameters 175 163 * @governor: pointer to the governor for this thermal zone 164 + * @governor_data: private pointer for governor data 176 165 * @thermal_instances: list of &struct thermal_instance of this thermal zone 177 166 * @idr: &struct idr to generate unique id for this zone's cooling 178 167 * devices ··· 197 186 int passive; 198 187 unsigned int forced_passive; 199 188 struct thermal_zone_device_ops *ops; 200 - const struct thermal_zone_params *tzp; 189 + struct thermal_zone_params *tzp; 201 190 struct thermal_governor *governor; 191 + void *governor_data; 202 192 struct list_head thermal_instances; 203 193 struct idr idr; 204 194 struct mutex lock; ··· 210 198 /** 211 199 * struct thermal_governor - structure that holds thermal governor information 212 200 * @name: name of the governor 201 + * @bind_to_tz: callback called when binding to a thermal zone. If it 202 + * returns 0, the governor is bound to the thermal zone, 203 + * otherwise it fails. 204 + * @unbind_from_tz: callback called when a governor is unbound from a 205 + * thermal zone. 
213 206 * @throttle: callback called for every trip point even if temperature is 214 207 * below the trip point temperature 215 208 * @governor_list: node in thermal_governor_list (in thermal_core.c) 216 209 */ 217 210 struct thermal_governor { 218 211 char name[THERMAL_NAME_LENGTH]; 212 + int (*bind_to_tz)(struct thermal_zone_device *tz); 213 + void (*unbind_from_tz)(struct thermal_zone_device *tz); 219 214 int (*throttle)(struct thermal_zone_device *tz, int trip); 220 215 struct list_head governor_list; 221 216 }; ··· 233 214 234 215 /* 235 216 * This is a measure of 'how effectively these devices can 236 - * cool 'this' thermal zone. The shall be determined by platform 237 - * characterization. This is on a 'percentage' scale. 238 - * See Documentation/thermal/sysfs-api.txt for more information. 217 + * cool 'this' thermal zone. It shall be determined by 218 + * platform characterization. This value is relative to the 219 + * rest of the weights so a cooling device whose weight is 220 + * double that of another cooling device is twice as 221 + * effective. See Documentation/thermal/sysfs-api.txt for more 222 + * information. 
239 223 */ 240 224 int weight; 241 225 ··· 275 253 276 254 int num_tbps; /* Number of tbp entries */ 277 255 struct thermal_bind_params *tbp; 256 + 257 + /* 258 + * Sustainable power (heat) that this thermal zone can dissipate in 259 + * mW 260 + */ 261 + u32 sustainable_power; 262 + 263 + /* 264 + * Proportional parameter of the PID controller when 265 + * overshooting (i.e., when temperature is below the target) 266 + */ 267 + s32 k_po; 268 + 269 + /* 270 + * Proportional parameter of the PID controller when 271 + * undershooting 272 + */ 273 + s32 k_pu; 274 + 275 + /* Integral parameter of the PID controller */ 276 + s32 k_i; 277 + 278 + /* Derivative parameter of the PID controller */ 279 + s32 k_d; 280 + 281 + /* threshold below which the error is no longer accumulated */ 282 + s32 integral_cutoff; 283 + 284 + /* 285 + * @slope: slope of a linear temperature adjustment curve. 286 + * Used by thermal zone drivers. 287 + */ 288 + int slope; 289 + /* 290 + * @offset: offset of a linear temperature adjustment curve. 291 + * Used by thermal zone drivers (default 0). 
292 + */ 293 + int offset; 278 294 }; 279 295 280 296 struct thermal_genl_event { ··· 376 316 #endif 377 317 378 318 #if IS_ENABLED(CONFIG_THERMAL) 319 + static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev) 320 + { 321 + return cdev->ops->get_requested_power && cdev->ops->state2power && 322 + cdev->ops->power2state; 323 + } 324 + 325 + int power_actor_get_max_power(struct thermal_cooling_device *, 326 + struct thermal_zone_device *tz, u32 *max_power); 327 + int power_actor_set_power(struct thermal_cooling_device *, 328 + struct thermal_instance *, u32); 379 329 struct thermal_zone_device *thermal_zone_device_register(const char *, int, int, 380 330 void *, struct thermal_zone_device_ops *, 381 - const struct thermal_zone_params *, int, int); 331 + struct thermal_zone_params *, int, int); 382 332 void thermal_zone_device_unregister(struct thermal_zone_device *); 383 333 384 334 int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int, 385 335 struct thermal_cooling_device *, 386 - unsigned long, unsigned long); 336 + unsigned long, unsigned long, 337 + unsigned int); 387 338 int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int, 388 339 struct thermal_cooling_device *); 389 340 void thermal_zone_device_update(struct thermal_zone_device *); ··· 414 343 void thermal_cdev_update(struct thermal_cooling_device *); 415 344 void thermal_notify_framework(struct thermal_zone_device *, int); 416 345 #else 346 + static inline bool cdev_is_power_actor(struct thermal_cooling_device *cdev) 347 + { return false; } 348 + static inline int power_actor_get_max_power(struct thermal_cooling_device *cdev, 349 + struct thermal_zone_device *tz, u32 *max_power) 350 + { return 0; } 351 + static inline int power_actor_set_power(struct thermal_cooling_device *cdev, 352 + struct thermal_instance *tz, u32 power) 353 + { return 0; } 417 354 static inline struct thermal_zone_device *thermal_zone_device_register( 418 355 const char *type, int 
trips, int mask, void *devdata, 419 356 struct thermal_zone_device_ops *ops,
+58
include/trace/events/thermal.h
··· 77 77 __entry->trip_type) 78 78 ); 79 79 80 + TRACE_EVENT(thermal_power_cpu_get_power, 81 + TP_PROTO(const struct cpumask *cpus, unsigned long freq, u32 *load, 82 + size_t load_len, u32 dynamic_power, u32 static_power), 83 + 84 + TP_ARGS(cpus, freq, load, load_len, dynamic_power, static_power), 85 + 86 + TP_STRUCT__entry( 87 + __bitmask(cpumask, num_possible_cpus()) 88 + __field(unsigned long, freq ) 89 + __dynamic_array(u32, load, load_len) 90 + __field(size_t, load_len ) 91 + __field(u32, dynamic_power ) 92 + __field(u32, static_power ) 93 + ), 94 + 95 + TP_fast_assign( 96 + __assign_bitmask(cpumask, cpumask_bits(cpus), 97 + num_possible_cpus()); 98 + __entry->freq = freq; 99 + memcpy(__get_dynamic_array(load), load, 100 + load_len * sizeof(*load)); 101 + __entry->load_len = load_len; 102 + __entry->dynamic_power = dynamic_power; 103 + __entry->static_power = static_power; 104 + ), 105 + 106 + TP_printk("cpus=%s freq=%lu load={%s} dynamic_power=%d static_power=%d", 107 + __get_bitmask(cpumask), __entry->freq, 108 + __print_array(__get_dynamic_array(load), __entry->load_len, 4), 109 + __entry->dynamic_power, __entry->static_power) 110 + ); 111 + 112 + TRACE_EVENT(thermal_power_cpu_limit, 113 + TP_PROTO(const struct cpumask *cpus, unsigned int freq, 114 + unsigned long cdev_state, u32 power), 115 + 116 + TP_ARGS(cpus, freq, cdev_state, power), 117 + 118 + TP_STRUCT__entry( 119 + __bitmask(cpumask, num_possible_cpus()) 120 + __field(unsigned int, freq ) 121 + __field(unsigned long, cdev_state) 122 + __field(u32, power ) 123 + ), 124 + 125 + TP_fast_assign( 126 + __assign_bitmask(cpumask, cpumask_bits(cpus), 127 + num_possible_cpus()); 128 + __entry->freq = freq; 129 + __entry->cdev_state = cdev_state; 130 + __entry->power = power; 131 + ), 132 + 133 + TP_printk("cpus=%s freq=%u cdev_state=%lu power=%u", 134 + __get_bitmask(cpumask), __entry->freq, __entry->cdev_state, 135 + __entry->power) 136 + ); 137 + 80 138 #endif /* _TRACE_THERMAL_H */ 81 139 82 140 /* This 
part must be outside protection */
+87
include/trace/events/thermal_power_allocator.h
··· 1 + #undef TRACE_SYSTEM 2 + #define TRACE_SYSTEM thermal_power_allocator 3 + 4 + #if !defined(_TRACE_THERMAL_POWER_ALLOCATOR_H) || defined(TRACE_HEADER_MULTI_READ) 5 + #define _TRACE_THERMAL_POWER_ALLOCATOR_H 6 + 7 + #include <linux/tracepoint.h> 8 + 9 + TRACE_EVENT(thermal_power_allocator, 10 + TP_PROTO(struct thermal_zone_device *tz, u32 *req_power, 11 + u32 total_req_power, u32 *granted_power, 12 + u32 total_granted_power, size_t num_actors, 13 + u32 power_range, u32 max_allocatable_power, 14 + unsigned long current_temp, s32 delta_temp), 15 + TP_ARGS(tz, req_power, total_req_power, granted_power, 16 + total_granted_power, num_actors, power_range, 17 + max_allocatable_power, current_temp, delta_temp), 18 + TP_STRUCT__entry( 19 + __field(int, tz_id ) 20 + __dynamic_array(u32, req_power, num_actors ) 21 + __field(u32, total_req_power ) 22 + __dynamic_array(u32, granted_power, num_actors) 23 + __field(u32, total_granted_power ) 24 + __field(size_t, num_actors ) 25 + __field(u32, power_range ) 26 + __field(u32, max_allocatable_power ) 27 + __field(unsigned long, current_temp ) 28 + __field(s32, delta_temp ) 29 + ), 30 + TP_fast_assign( 31 + __entry->tz_id = tz->id; 32 + memcpy(__get_dynamic_array(req_power), req_power, 33 + num_actors * sizeof(*req_power)); 34 + __entry->total_req_power = total_req_power; 35 + memcpy(__get_dynamic_array(granted_power), granted_power, 36 + num_actors * sizeof(*granted_power)); 37 + __entry->total_granted_power = total_granted_power; 38 + __entry->num_actors = num_actors; 39 + __entry->power_range = power_range; 40 + __entry->max_allocatable_power = max_allocatable_power; 41 + __entry->current_temp = current_temp; 42 + __entry->delta_temp = delta_temp; 43 + ), 44 + 45 + TP_printk("thermal_zone_id=%d req_power={%s} total_req_power=%u granted_power={%s} total_granted_power=%u power_range=%u max_allocatable_power=%u current_temperature=%lu delta_temperature=%d", 46 + __entry->tz_id, 47 + __print_array(__get_dynamic_array(req_power), 
48 + __entry->num_actors, 4), 49 + __entry->total_req_power, 50 + __print_array(__get_dynamic_array(granted_power), 51 + __entry->num_actors, 4), 52 + __entry->total_granted_power, __entry->power_range, 53 + __entry->max_allocatable_power, __entry->current_temp, 54 + __entry->delta_temp) 55 + ); 56 + 57 + TRACE_EVENT(thermal_power_allocator_pid, 58 + TP_PROTO(struct thermal_zone_device *tz, s32 err, s32 err_integral, 59 + s64 p, s64 i, s64 d, s32 output), 60 + TP_ARGS(tz, err, err_integral, p, i, d, output), 61 + TP_STRUCT__entry( 62 + __field(int, tz_id ) 63 + __field(s32, err ) 64 + __field(s32, err_integral) 65 + __field(s64, p ) 66 + __field(s64, i ) 67 + __field(s64, d ) 68 + __field(s32, output ) 69 + ), 70 + TP_fast_assign( 71 + __entry->tz_id = tz->id; 72 + __entry->err = err; 73 + __entry->err_integral = err_integral; 74 + __entry->p = p; 75 + __entry->i = i; 76 + __entry->d = d; 77 + __entry->output = output; 78 + ), 79 + 80 + TP_printk("thermal_zone_id=%d err=%d err_integral=%d p=%lld i=%lld d=%lld output=%d", 81 + __entry->tz_id, __entry->err, __entry->err_integral, 82 + __entry->p, __entry->i, __entry->d, __entry->output) 83 + ); 84 + #endif /* _TRACE_THERMAL_POWER_ALLOCATOR_H */ 85 + 86 + /* This part must be outside protection */ 87 + #include <trace/define_trace.h>