Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branches 'pm-cpufreq' and 'pm-cpuidle'

Merge cpufreq and cpuidle changes for 5.18-rc1:

- Make the schedutil cpufreq governor use to_gov_attr_set() instead
of open coding it (Kevin Hao).

- Replace acpi_bus_get_device() with acpi_fetch_acpi_dev() in the
cpufreq longhaul driver (Rafael Wysocki).

- Unify show() and store() naming in cpufreq and make it use
__ATTR_XX (Lianjie Zhang).

- Make the intel_pstate driver use the EPP value set by the firmware
by default (Srinivas Pandruvada).

- Re-order the init checks in the powernow-k8 cpufreq driver (Mario
Limonciello).

- Make the ACPI processor idle driver check for architectural
support for LPI to avoid using it on x86 by mistake (Mario
Limonciello).

- Add Sapphire Rapids Xeon support to the intel_idle driver (Artem
Bityutskiy).

- Add 'preferred_cstates' module argument to the intel_idle driver
to work around a C1 and C1E handling issue on Sapphire Rapids (Artem
Bityutskiy).

- Add core C6 optimization on Sapphire Rapids to the intel_idle
driver (Artem Bityutskiy).

- Optimize the haltpoll cpuidle driver a bit (Li RongQing).

- Remove leftover text from intel_idle() kerneldoc comment and fix
up white space in intel_idle (Rafael Wysocki).

* pm-cpufreq:
cpufreq: powernow-k8: Re-order the init checks
cpufreq: intel_pstate: Use firmware default EPP
cpufreq: unify show() and store() naming and use __ATTR_XX
cpufreq: longhaul: Replace acpi_bus_get_device()
cpufreq: schedutil: Use to_gov_attr_set() to get the gov_attr_set
cpufreq: Move to_gov_attr_set() to cpufreq.h

* pm-cpuidle:
cpuidle: intel_idle: Drop redundant backslash at line end
cpuidle: intel_idle: Update intel_idle() kerneldoc comment
cpuidle: haltpoll: Call cpuidle_poll_state_init() later
intel_idle: add core C6 optimization for SPR
intel_idle: add 'preferred_cstates' module argument
intel_idle: add SPR support
ACPI: processor idle: Check for architectural support for LPI
cpuidle: PSCI: Move the `has_lpi` check to the beginning of the function

+184 -50
+3 -3
arch/arm64/kernel/cpuidle.c
··· 54 54 struct acpi_lpi_state *lpi; 55 55 struct acpi_processor *pr = per_cpu(processors, cpu); 56 56 57 + if (unlikely(!pr || !pr->flags.has_lpi)) 58 + return -EINVAL; 59 + 57 60 /* 58 61 * If the PSCI cpu_suspend function hook has not been initialized 59 62 * idle states must not be enabled, so bail out 60 63 */ 61 64 if (!psci_ops.cpu_suspend) 62 65 return -EOPNOTSUPP; 63 - 64 - if (unlikely(!pr || !pr->flags.has_lpi)) 65 - return -EINVAL; 66 66 67 67 count = pr->power.count - 1; 68 68 if (count <= 0)
+10 -5
drivers/acpi/processor_idle.c
··· 1080 1080 return 0; 1081 1081 } 1082 1082 1083 + int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu) 1084 + { 1085 + return -EOPNOTSUPP; 1086 + } 1087 + 1083 1088 static int acpi_processor_get_lpi_info(struct acpi_processor *pr) 1084 1089 { 1085 1090 int ret, i; ··· 1092 1087 acpi_handle handle = pr->handle, pr_ahandle; 1093 1088 struct acpi_device *d = NULL; 1094 1089 struct acpi_lpi_states_array info[2], *tmp, *prev, *curr; 1090 + 1091 + /* make sure our architecture has support */ 1092 + ret = acpi_processor_ffh_lpi_probe(pr->id); 1093 + if (ret == -EOPNOTSUPP) 1094 + return ret; 1095 1095 1096 1096 if (!osc_pc_lpi_support_confirmed) 1097 1097 return -EOPNOTSUPP; ··· 1147 1137 pr->flags.power = 1; 1148 1138 1149 1139 return 0; 1150 - } 1151 - 1152 - int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu) 1153 - { 1154 - return -ENODEV; 1155 1140 } 1156 1141 1157 1142 int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
+5 -5
drivers/cpufreq/cpufreq_conservative.c
··· 146 146 147 147 /************************** sysfs interface ************************/ 148 148 149 - static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set, 149 + static ssize_t sampling_down_factor_store(struct gov_attr_set *attr_set, 150 150 const char *buf, size_t count) 151 151 { 152 152 struct dbs_data *dbs_data = to_dbs_data(attr_set); ··· 161 161 return count; 162 162 } 163 163 164 - static ssize_t store_up_threshold(struct gov_attr_set *attr_set, 164 + static ssize_t up_threshold_store(struct gov_attr_set *attr_set, 165 165 const char *buf, size_t count) 166 166 { 167 167 struct dbs_data *dbs_data = to_dbs_data(attr_set); ··· 177 177 return count; 178 178 } 179 179 180 - static ssize_t store_down_threshold(struct gov_attr_set *attr_set, 180 + static ssize_t down_threshold_store(struct gov_attr_set *attr_set, 181 181 const char *buf, size_t count) 182 182 { 183 183 struct dbs_data *dbs_data = to_dbs_data(attr_set); ··· 195 195 return count; 196 196 } 197 197 198 - static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set, 198 + static ssize_t ignore_nice_load_store(struct gov_attr_set *attr_set, 199 199 const char *buf, size_t count) 200 200 { 201 201 struct dbs_data *dbs_data = to_dbs_data(attr_set); ··· 220 220 return count; 221 221 } 222 222 223 - static ssize_t store_freq_step(struct gov_attr_set *attr_set, const char *buf, 223 + static ssize_t freq_step_store(struct gov_attr_set *attr_set, const char *buf, 224 224 size_t count) 225 225 { 226 226 struct dbs_data *dbs_data = to_dbs_data(attr_set);
+3 -3
drivers/cpufreq/cpufreq_governor.c
··· 27 27 28 28 /* Common sysfs tunables */ 29 29 /* 30 - * store_sampling_rate - update sampling rate effective immediately if needed. 30 + * sampling_rate_store - update sampling rate effective immediately if needed. 31 31 * 32 32 * If new rate is smaller than the old, simply updating 33 33 * dbs.sampling_rate might not be appropriate. For example, if the ··· 41 41 * This must be called with dbs_data->mutex held, otherwise traversing 42 42 * policy_dbs_list isn't safe. 43 43 */ 44 - ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf, 44 + ssize_t sampling_rate_store(struct gov_attr_set *attr_set, const char *buf, 45 45 size_t count) 46 46 { 47 47 struct dbs_data *dbs_data = to_dbs_data(attr_set); ··· 80 80 81 81 return count; 82 82 } 83 - EXPORT_SYMBOL_GPL(store_sampling_rate); 83 + EXPORT_SYMBOL_GPL(sampling_rate_store); 84 84 85 85 /** 86 86 * gov_update_cpu_data - Update CPU load data.
+5 -7
drivers/cpufreq/cpufreq_governor.h
··· 51 51 } 52 52 53 53 #define gov_show_one(_gov, file_name) \ 54 - static ssize_t show_##file_name \ 54 + static ssize_t file_name##_show \ 55 55 (struct gov_attr_set *attr_set, char *buf) \ 56 56 { \ 57 57 struct dbs_data *dbs_data = to_dbs_data(attr_set); \ ··· 60 60 } 61 61 62 62 #define gov_show_one_common(file_name) \ 63 - static ssize_t show_##file_name \ 63 + static ssize_t file_name##_show \ 64 64 (struct gov_attr_set *attr_set, char *buf) \ 65 65 { \ 66 66 struct dbs_data *dbs_data = to_dbs_data(attr_set); \ ··· 68 68 } 69 69 70 70 #define gov_attr_ro(_name) \ 71 - static struct governor_attr _name = \ 72 - __ATTR(_name, 0444, show_##_name, NULL) 71 + static struct governor_attr _name = __ATTR_RO(_name) 73 72 74 73 #define gov_attr_rw(_name) \ 75 - static struct governor_attr _name = \ 76 - __ATTR(_name, 0644, show_##_name, store_##_name) 74 + static struct governor_attr _name = __ATTR_RW(_name) 77 75 78 76 /* Common to all CPUs of a policy */ 79 77 struct policy_dbs_info { ··· 174 176 (struct cpufreq_policy *, unsigned int, unsigned int), 175 177 unsigned int powersave_bias); 176 178 void od_unregister_powersave_bias_handler(void); 177 - ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf, 179 + ssize_t sampling_rate_store(struct gov_attr_set *attr_set, const char *buf, 178 180 size_t count); 179 181 void gov_update_cpu_data(struct dbs_data *dbs_data); 180 182 #endif /* _CPUFREQ_GOVERNOR_H */
-5
drivers/cpufreq/cpufreq_governor_attr_set.c
··· 8 8 9 9 #include "cpufreq_governor.h" 10 10 11 - static inline struct gov_attr_set *to_gov_attr_set(struct kobject *kobj) 12 - { 13 - return container_of(kobj, struct gov_attr_set, kobj); 14 - } 15 - 16 11 static inline struct governor_attr *to_gov_attr(struct attribute *attr) 17 12 { 18 13 return container_of(attr, struct governor_attr, attr);
+5 -5
drivers/cpufreq/cpufreq_ondemand.c
··· 202 202 /************************** sysfs interface ************************/ 203 203 static struct dbs_governor od_dbs_gov; 204 204 205 - static ssize_t store_io_is_busy(struct gov_attr_set *attr_set, const char *buf, 205 + static ssize_t io_is_busy_store(struct gov_attr_set *attr_set, const char *buf, 206 206 size_t count) 207 207 { 208 208 struct dbs_data *dbs_data = to_dbs_data(attr_set); ··· 220 220 return count; 221 221 } 222 222 223 - static ssize_t store_up_threshold(struct gov_attr_set *attr_set, 223 + static ssize_t up_threshold_store(struct gov_attr_set *attr_set, 224 224 const char *buf, size_t count) 225 225 { 226 226 struct dbs_data *dbs_data = to_dbs_data(attr_set); ··· 237 237 return count; 238 238 } 239 239 240 - static ssize_t store_sampling_down_factor(struct gov_attr_set *attr_set, 240 + static ssize_t sampling_down_factor_store(struct gov_attr_set *attr_set, 241 241 const char *buf, size_t count) 242 242 { 243 243 struct dbs_data *dbs_data = to_dbs_data(attr_set); ··· 265 265 return count; 266 266 } 267 267 268 - static ssize_t store_ignore_nice_load(struct gov_attr_set *attr_set, 268 + static ssize_t ignore_nice_load_store(struct gov_attr_set *attr_set, 269 269 const char *buf, size_t count) 270 270 { 271 271 struct dbs_data *dbs_data = to_dbs_data(attr_set); ··· 290 290 return count; 291 291 } 292 292 293 - static ssize_t store_powersave_bias(struct gov_attr_set *attr_set, 293 + static ssize_t powersave_bias_store(struct gov_attr_set *attr_set, 294 294 const char *buf, size_t count) 295 295 { 296 296 struct dbs_data *dbs_data = to_dbs_data(attr_set);
+32 -6
drivers/cpufreq/intel_pstate.c
··· 1692 1692 } 1693 1693 } 1694 1694 1695 + static void intel_pstate_update_epp_defaults(struct cpudata *cpudata) 1696 + { 1697 + cpudata->epp_default = intel_pstate_get_epp(cpudata, 0); 1698 + 1699 + /* 1700 + * If this CPU gen doesn't call for change in balance_perf 1701 + * EPP return. 1702 + */ 1703 + if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE) 1704 + return; 1705 + 1706 + /* 1707 + * If powerup EPP is something other than chipset default 0x80 and 1708 + * - is more performance oriented than 0x80 (default balance_perf EPP) 1709 + * - But less performance oriented than performance EPP 1710 + * then use this as new balance_perf EPP. 1711 + */ 1712 + if (cpudata->epp_default < HWP_EPP_BALANCE_PERFORMANCE && 1713 + cpudata->epp_default > HWP_EPP_PERFORMANCE) { 1714 + epp_values[EPP_INDEX_BALANCE_PERFORMANCE] = cpudata->epp_default; 1715 + return; 1716 + } 1717 + 1718 + /* 1719 + * Use hard coded value per gen to update the balance_perf 1720 + * and default EPP. 1721 + */ 1722 + cpudata->epp_default = epp_values[EPP_INDEX_BALANCE_PERFORMANCE]; 1723 + intel_pstate_set_epp(cpudata, cpudata->epp_default); 1724 + } 1725 + 1695 1726 static void intel_pstate_hwp_enable(struct cpudata *cpudata) 1696 1727 { 1697 1728 /* First disable HWP notification interrupt till we activate again */ ··· 1736 1705 if (cpudata->epp_default >= 0) 1737 1706 return; 1738 1707 1739 - if (epp_values[EPP_INDEX_BALANCE_PERFORMANCE] == HWP_EPP_BALANCE_PERFORMANCE) { 1740 - cpudata->epp_default = intel_pstate_get_epp(cpudata, 0); 1741 - } else { 1742 - cpudata->epp_default = epp_values[EPP_INDEX_BALANCE_PERFORMANCE]; 1743 - intel_pstate_set_epp(cpudata, cpudata->epp_default); 1744 - } 1708 + intel_pstate_update_epp_defaults(cpudata); 1745 1709 } 1746 1710 1747 1711 static int atom_get_min_pstate(void)
+2 -2
drivers/cpufreq/longhaul.c
··· 668 668 u32 nesting_level, 669 669 void *context, void **return_value) 670 670 { 671 - struct acpi_device *d; 671 + struct acpi_device *d = acpi_fetch_acpi_dev(obj_handle); 672 672 673 - if (acpi_bus_get_device(obj_handle, &d)) 673 + if (!d) 674 674 return 0; 675 675 676 676 *return_value = acpi_driver_data(d);
+3 -3
drivers/cpufreq/powernow-k8.c
··· 1172 1172 unsigned int i, supported_cpus = 0; 1173 1173 int ret; 1174 1174 1175 + if (!x86_match_cpu(powernow_k8_ids)) 1176 + return -ENODEV; 1177 + 1175 1178 if (boot_cpu_has(X86_FEATURE_HW_PSTATE)) { 1176 1179 __request_acpi_cpufreq(); 1177 1180 return -ENODEV; 1178 1181 } 1179 - 1180 - if (!x86_match_cpu(powernow_k8_ids)) 1181 - return -ENODEV; 1182 1182 1183 1183 cpus_read_lock(); 1184 1184 for_each_online_cpu(i) {
+2 -2
drivers/cpuidle/cpuidle-haltpoll.c
··· 108 108 if (boot_option_idle_override != IDLE_NO_OVERRIDE) 109 109 return -ENODEV; 110 110 111 - cpuidle_poll_state_init(drv); 112 - 113 111 if (!kvm_para_available() || !haltpoll_want()) 114 112 return -ENODEV; 113 + 114 + cpuidle_poll_state_init(drv); 115 115 116 116 ret = cpuidle_register_driver(drv); 117 117 if (ret < 0)
+108 -3
drivers/idle/intel_idle.c
··· 64 64 /* intel_idle.max_cstate=0 disables driver */ 65 65 static int max_cstate = CPUIDLE_STATE_MAX - 1; 66 66 static unsigned int disabled_states_mask; 67 + static unsigned int preferred_states_mask; 67 68 68 69 static struct cpuidle_device __percpu *intel_idle_cpuidle_devices; 69 70 ··· 121 120 * 122 121 * If the local APIC timer is not known to be reliable in the target idle state, 123 122 * enable one-shot tick broadcasting for the target CPU before executing MWAIT. 124 - * 125 - * Optionally call leave_mm() for the target CPU upfront to avoid wakeups due to 126 - * flushing user TLBs. 127 123 * 128 124 * Must be called under local_irq_disable(). 129 125 */ ··· 759 761 .enter = NULL } 760 762 }; 761 763 764 + /* 765 + * On Sapphire Rapids Xeon C1 has to be disabled if C1E is enabled, and vice 766 + * versa. On SPR C1E is enabled only if "C1E promotion" bit is set in 767 + * MSR_IA32_POWER_CTL. But in this case there effectively no C1, because C1 768 + * requests are promoted to C1E. If the "C1E promotion" bit is cleared, then 769 + * both C1 and C1E requests end up with C1, so there is effectively no C1E. 770 + * 771 + * By default we enable C1 and disable C1E by marking it with 772 + * 'CPUIDLE_FLAG_UNUSABLE'. 
773 + */ 774 + static struct cpuidle_state spr_cstates[] __initdata = { 775 + { 776 + .name = "C1", 777 + .desc = "MWAIT 0x00", 778 + .flags = MWAIT2flg(0x00), 779 + .exit_latency = 1, 780 + .target_residency = 1, 781 + .enter = &intel_idle, 782 + .enter_s2idle = intel_idle_s2idle, }, 783 + { 784 + .name = "C1E", 785 + .desc = "MWAIT 0x01", 786 + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE | 787 + CPUIDLE_FLAG_UNUSABLE, 788 + .exit_latency = 2, 789 + .target_residency = 4, 790 + .enter = &intel_idle, 791 + .enter_s2idle = intel_idle_s2idle, }, 792 + { 793 + .name = "C6", 794 + .desc = "MWAIT 0x20", 795 + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, 796 + .exit_latency = 290, 797 + .target_residency = 800, 798 + .enter = &intel_idle, 799 + .enter_s2idle = intel_idle_s2idle, }, 800 + { 801 + .enter = NULL } 802 + }; 803 + 762 804 static struct cpuidle_state atom_cstates[] __initdata = { 763 805 { 764 806 .name = "C1E", ··· 1142 1104 .use_acpi = true, 1143 1105 }; 1144 1106 1107 + static const struct idle_cpu idle_cpu_spr __initconst = { 1108 + .state_table = spr_cstates, 1109 + .disable_promotion_to_c1e = true, 1110 + .use_acpi = true, 1111 + }; 1112 + 1145 1113 static const struct idle_cpu idle_cpu_avn __initconst = { 1146 1114 .state_table = avn_cstates, 1147 1115 .disable_promotion_to_c1e = true, ··· 1210 1166 X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &idle_cpu_skx), 1211 1167 X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &idle_cpu_icx), 1212 1168 X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &idle_cpu_icx), 1169 + X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &idle_cpu_spr), 1213 1170 X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &idle_cpu_knl), 1214 1171 X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &idle_cpu_knl), 1215 1172 X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &idle_cpu_bxt), ··· 1398 1353 static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; } 1399 1354 #endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */ 1400 1355 1356 + static void 
c1e_promotion_enable(void); 1357 + 1401 1358 /** 1402 1359 * ivt_idle_state_table_update - Tune the idle states table for Ivy Town. 1403 1360 * ··· 1570 1523 } 1571 1524 } 1572 1525 1526 + /** 1527 + * spr_idle_state_table_update - Adjust Sapphire Rapids idle states table. 1528 + */ 1529 + static void __init spr_idle_state_table_update(void) 1530 + { 1531 + unsigned long long msr; 1532 + 1533 + /* Check if user prefers C1E over C1. */ 1534 + if (preferred_states_mask & BIT(2)) { 1535 + if (preferred_states_mask & BIT(1)) 1536 + /* Both can't be enabled, stick to the defaults. */ 1537 + return; 1538 + 1539 + spr_cstates[0].flags |= CPUIDLE_FLAG_UNUSABLE; 1540 + spr_cstates[1].flags &= ~CPUIDLE_FLAG_UNUSABLE; 1541 + 1542 + /* Enable C1E using the "C1E promotion" bit. */ 1543 + c1e_promotion_enable(); 1544 + disable_promotion_to_c1e = false; 1545 + } 1546 + 1547 + /* 1548 + * By default, the C6 state assumes the worst-case scenario of package 1549 + * C6. However, if PC6 is disabled, we update the numbers to match 1550 + * core C6. 1551 + */ 1552 + rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr); 1553 + 1554 + /* Limit value 2 and above allow for PC6. 
*/ 1555 + if ((msr & 0x7) < 2) { 1556 + spr_cstates[2].exit_latency = 190; 1557 + spr_cstates[2].target_residency = 600; 1558 + } 1559 + } 1560 + 1573 1561 static bool __init intel_idle_verify_cstate(unsigned int mwait_hint) 1574 1562 { 1575 1563 unsigned int mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint) + 1; ··· 1638 1556 break; 1639 1557 case INTEL_FAM6_SKYLAKE_X: 1640 1558 skx_idle_state_table_update(); 1559 + break; 1560 + case INTEL_FAM6_SAPPHIRERAPIDS_X: 1561 + spr_idle_state_table_update(); 1641 1562 break; 1642 1563 } 1643 1564 ··· 1712 1627 rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); 1713 1628 msr_bits &= ~auto_demotion_disable_flags; 1714 1629 wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits); 1630 + } 1631 + 1632 + static void c1e_promotion_enable(void) 1633 + { 1634 + unsigned long long msr_bits; 1635 + 1636 + rdmsrl(MSR_IA32_POWER_CTL, msr_bits); 1637 + msr_bits |= 0x2; 1638 + wrmsrl(MSR_IA32_POWER_CTL, msr_bits); 1715 1639 } 1716 1640 1717 1641 static void c1e_promotion_disable(void) ··· 1892 1798 */ 1893 1799 module_param_named(states_off, disabled_states_mask, uint, 0444); 1894 1800 MODULE_PARM_DESC(states_off, "Mask of disabled idle states"); 1801 + /* 1802 + * Some platforms come with mutually exclusive C-states, so that if one is 1803 + * enabled, the other C-states must not be used. Example: C1 and C1E on 1804 + * Sapphire Rapids platform. This parameter allows for selecting the 1805 + * preferred C-states among the groups of mutually exclusive C-states - the 1806 + * selected C-states will be registered, the other C-states from the mutually 1807 + * exclusive group won't be registered. If the platform has no mutually 1808 + * exclusive C-states, this parameter has no effect. 1809 + */ 1810 + module_param_named(preferred_cstates, preferred_states_mask, uint, 0444); 1811 + MODULE_PARM_DESC(preferred_cstates, "Mask of preferred idle states");
+5
include/linux/cpufreq.h
··· 661 661 /* sysfs ops for cpufreq governors */ 662 662 extern const struct sysfs_ops governor_sysfs_ops; 663 663 664 + static inline struct gov_attr_set *to_gov_attr_set(struct kobject *kobj) 665 + { 666 + return container_of(kobj, struct gov_attr_set, kobj); 667 + } 668 + 664 669 void gov_attr_set_init(struct gov_attr_set *attr_set, struct list_head *list_node); 665 670 void gov_attr_set_get(struct gov_attr_set *attr_set, struct list_head *list_node); 666 671 unsigned int gov_attr_set_put(struct gov_attr_set *attr_set, struct list_head *list_node);
+1 -1
kernel/sched/cpufreq_schedutil.c
··· 539 539 540 540 static void sugov_tunables_free(struct kobject *kobj) 541 541 { 542 - struct gov_attr_set *attr_set = container_of(kobj, struct gov_attr_set, kobj); 542 + struct gov_attr_set *attr_set = to_gov_attr_set(kobj); 543 543 544 544 kfree(to_sugov_tunables(attr_set)); 545 545 }