/*
 * Generic OPP helper interface for CPU device
 *
 * Copyright (C) 2009-2014 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/slab.h>

#include "opp.h"

#ifdef CONFIG_CPU_FREQ

/**
 * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device
 * @dev:	device for which we do this operation
 * @table:	Cpufreq table returned back to caller
 *
 * Generate a cpufreq table for a provided device - this assumes that the
 * opp table is already initialized and ready for usage.
 *
 * This function allocates required memory for the cpufreq table. It is
 * expected that the caller does the required maintenance such as freeing
 * the table as required.
 *
 * Returns -EINVAL for bad pointers, -ENODEV if the device is not found,
 * -ENOMEM if no memory is available for the operation (table is not
 * populated), and 0 if successful and the table is populated.
 *
 * WARNING: It is important for the callers to ensure refreshing their copy of
 * the table if any of the mentioned functions have been invoked in the interim.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Since we just use the regular accessor functions to access the internal data
 * structures, we use RCU read lock inside this function. As a result, users of
 * this function do NOT need to use explicit locks for invoking.
 */
int dev_pm_opp_init_cpufreq_table(struct device *dev,
				  struct cpufreq_frequency_table **table)
{
	struct dev_pm_opp *opp;
	struct cpufreq_frequency_table *freq_table = NULL;
	int i, max_opps, ret = 0;
	unsigned long rate;

	rcu_read_lock();

	max_opps = dev_pm_opp_get_opp_count(dev);
	if (max_opps <= 0) {
		ret = max_opps ? max_opps : -ENODATA;
		goto out;
	}

	freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_ATOMIC);
	if (!freq_table) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0, rate = 0; i < max_opps; i++, rate++) {
		/* find next rate */
		opp = dev_pm_opp_find_freq_ceil(dev, &rate);
		if (IS_ERR(opp)) {
			ret = PTR_ERR(opp);
			goto out;
		}
		freq_table[i].driver_data = i;
		freq_table[i].frequency = rate / 1000;

		/* Is this a boost/turbo OPP? */
		if (dev_pm_opp_is_turbo(opp))
			freq_table[i].flags = CPUFREQ_BOOST_FREQ;
	}

	freq_table[i].driver_data = i;
	freq_table[i].frequency = CPUFREQ_TABLE_END;

	*table = &freq_table[0];

out:
	rcu_read_unlock();
	if (ret)
		kfree(freq_table);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
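
/*
 * Example usage (illustrative sketch): a cpufreq driver's ->init() callback
 * can build its frequency table from the OPPs registered for the policy CPU
 * and hand it to the cpufreq core. foo_cpufreq_init() is a hypothetical
 * driver callback; dev_pm_opp_init_cpufreq_table(),
 * dev_pm_opp_free_cpufreq_table() and cpufreq_table_validate_and_show() are
 * the interfaces available at this kernel version.
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		struct device *cpu_dev = get_cpu_device(policy->cpu);
 *		struct cpufreq_frequency_table *freq_table;
 *		int ret;
 *
 *		if (!cpu_dev)
 *			return -ENODEV;
 *
 *		ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
 *		if (ret)
 *			return ret;
 *
 *		ret = cpufreq_table_validate_and_show(policy, freq_table);
 *		if (ret)
 *			dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
 *
 *		return ret;
 *	}
 *
 * The matching ->exit() callback would call dev_pm_opp_free_cpufreq_table()
 * once the table is no longer needed.
 */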

/**
 * dev_pm_opp_free_cpufreq_table() - free the cpufreq table
 * @dev:	device for which we do this operation
 * @table:	table to free
 *
 * Free up the table allocated by dev_pm_opp_init_cpufreq_table().
 */
void dev_pm_opp_free_cpufreq_table(struct device *dev,
				   struct cpufreq_frequency_table **table)
{
	if (!table)
		return;

	kfree(*table);
	*table = NULL;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
#endif	/* CONFIG_CPU_FREQ */

void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of)
{
	struct device *cpu_dev;
	int cpu;

	WARN_ON(cpumask_empty(cpumask));

	for_each_cpu(cpu, cpumask) {
		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			pr_err("%s: failed to get cpu%d device\n", __func__,
			       cpu);
			continue;
		}

		if (of)
			dev_pm_opp_of_remove_table(cpu_dev);
		else
			dev_pm_opp_remove_table(cpu_dev);
	}
}

/**
 * dev_pm_opp_cpumask_remove_table() - Removes OPP table for @cpumask
 * @cpumask:	cpumask for which OPP table needs to be removed
 *
 * This removes the OPP tables for CPUs present in the @cpumask.
 * This should be used to remove all the OPP entries associated with
 * the CPUs in @cpumask.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * a mutex cannot be locked.
 */
void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask)
{
	_dev_pm_opp_cpumask_remove_table(cpumask, false);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_cpumask_remove_table);

/**
 * dev_pm_opp_set_sharing_cpus() - Mark OPP table as shared by a few CPUs
 * @cpu_dev:	CPU device for which we do this operation
 * @cpumask:	cpumask of the CPUs which share the OPP table with @cpu_dev
 *
 * This marks the OPP table of @cpu_dev as shared by the CPUs present in
 * @cpumask.
 *
 * Returns -ENODEV if the OPP table isn't already present.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * a mutex cannot be locked.
 */
int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev,
				const struct cpumask *cpumask)
{
	struct opp_device *opp_dev;
	struct opp_table *opp_table;
	struct device *dev;
	int cpu, ret = 0;

	mutex_lock(&opp_table_lock);

	opp_table = _find_opp_table(cpu_dev);
	if (IS_ERR(opp_table)) {
		ret = PTR_ERR(opp_table);
		goto unlock;
	}

	for_each_cpu(cpu, cpumask) {
		if (cpu == cpu_dev->id)
			continue;

		dev = get_cpu_device(cpu);
		if (!dev) {
			dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
				__func__, cpu);
			continue;
		}

		opp_dev = _add_opp_dev(dev, opp_table);
		if (!opp_dev) {
			dev_err(dev, "%s: failed to add opp-dev for cpu%d device\n",
				__func__, cpu);
			continue;
		}

		/* Mark opp-table as shared by multiple CPUs now */
		opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
	}
unlock:
	mutex_unlock(&opp_table_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus);
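
/*
 * Example usage (illustrative sketch): a platform driver that registers OPPs
 * dynamically for one CPU and then marks the other CPUs of the same cluster
 * as sharing that table. foo_register_opps(), the frequency/voltage pair and
 * the caller-provided cluster_cpus mask are hypothetical; dev_pm_opp_add()
 * and dev_pm_opp_set_sharing_cpus() are the real interfaces.
 *
 *	static int foo_register_opps(struct device *cpu_dev,
 *				     const struct cpumask *cluster_cpus)
 *	{
 *		int ret;
 *
 *		ret = dev_pm_opp_add(cpu_dev, 1000000000, 1100000);
 *		if (ret)
 *			return ret;
 *
 *		return dev_pm_opp_set_sharing_cpus(cpu_dev, cluster_cpus);
 *	}
 */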

/**
 * dev_pm_opp_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with @cpu_dev
 * @cpu_dev:	CPU device for which we do this operation
 * @cpumask:	cpumask to update with information of sharing CPUs
 *
 * This updates the @cpumask with the CPUs that share OPPs with @cpu_dev.
 *
 * Returns -ENODEV if the OPP table isn't already present and -EINVAL if the
 * OPP table's status is access-unknown.
 *
 * Locking: The internal opp_table and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * a mutex cannot be locked.
 */
int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
{
	struct opp_device *opp_dev;
	struct opp_table *opp_table;
	int ret = 0;

	mutex_lock(&opp_table_lock);

	opp_table = _find_opp_table(cpu_dev);
	if (IS_ERR(opp_table)) {
		ret = PTR_ERR(opp_table);
		goto unlock;
	}

	if (opp_table->shared_opp == OPP_TABLE_ACCESS_UNKNOWN) {
		ret = -EINVAL;
		goto unlock;
	}

	cpumask_clear(cpumask);

	if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) {
		list_for_each_entry(opp_dev, &opp_table->dev_list, node)
			cpumask_set_cpu(opp_dev->dev->id, cpumask);
	} else {
		cpumask_set_cpu(cpu_dev->id, cpumask);
	}

unlock:
	mutex_unlock(&opp_table_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_sharing_cpus);
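
/*
 * Example usage (illustrative sketch): a cpufreq driver can ask the OPP core
 * which CPUs share the table previously marked with
 * dev_pm_opp_set_sharing_cpus() and use the answer to populate policy->cpus.
 * foo_init_policy_cpus() is a hypothetical driver helper; note that a table
 * marked OPP_TABLE_ACCESS_EXCLUSIVE yields only the policy CPU in the mask.
 *
 *	static int foo_init_policy_cpus(struct cpufreq_policy *policy)
 *	{
 *		struct device *cpu_dev = get_cpu_device(policy->cpu);
 *
 *		if (!cpu_dev)
 *			return -ENODEV;
 *
 *		return dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus);
 *	}
 */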