Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'cpufreq-macros' into pm-cpufreq

+208 -216
+19
Documentation/cpu-freq/cpu-drivers.txt
··· 228 228 stage. Just pass the values to this function, and the unsigned int 229 229 index returns the number of the frequency table entry which contains 230 230 the frequency the CPU shall be set to. 231 + 232 + The following macros can be used as iterators over cpufreq_frequency_table: 233 + 234 + cpufreq_for_each_entry(pos, table) - iterates over all entries of frequency 235 + table. 236 + 237 + cpufreq_for_each_valid_entry(pos, table) - iterates over all entries, 238 + excluding CPUFREQ_ENTRY_INVALID frequencies. 239 + Use arguments "pos" - a cpufreq_frequency_table * as a loop cursor and 240 + "table" - the cpufreq_frequency_table * you want to iterate over. 241 + 242 + For example: 243 + 244 + struct cpufreq_frequency_table *pos, *driver_freq_table; 245 + 246 + cpufreq_for_each_entry(pos, driver_freq_table) { 247 + /* Do something with pos */ 248 + pos->frequency = ... 249 + }
+5 -4
arch/arm/mach-davinci/da850.c
··· 1092 1092 1093 1093 static int da850_round_armrate(struct clk *clk, unsigned long rate) 1094 1094 { 1095 - int i, ret = 0, diff; 1095 + int ret = 0, diff; 1096 1096 unsigned int best = (unsigned int) -1; 1097 1097 struct cpufreq_frequency_table *table = cpufreq_info.freq_table; 1098 + struct cpufreq_frequency_table *pos; 1098 1099 1099 1100 rate /= 1000; /* convert to kHz */ 1100 1101 1101 - for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { 1102 - diff = table[i].frequency - rate; 1102 + cpufreq_for_each_entry(pos, table) { 1103 + diff = pos->frequency - rate; 1103 1104 if (diff < 0) 1104 1105 diff = -diff; 1105 1106 1106 1107 if (diff < best) { 1107 1108 best = diff; 1108 - ret = table[i].frequency; 1109 + ret = pos->frequency; 1109 1110 } 1110 1111 } 1111 1112
+5 -11
arch/mips/loongson/lemote-2f/clock.c
··· 91 91 92 92 int clk_set_rate(struct clk *clk, unsigned long rate) 93 93 { 94 + struct cpufreq_frequency_table *pos; 94 95 int ret = 0; 95 96 int regval; 96 - int i; 97 97 98 98 if (likely(clk->ops && clk->ops->set_rate)) { 99 99 unsigned long flags; ··· 106 106 if (unlikely(clk->flags & CLK_RATE_PROPAGATES)) 107 107 propagate_rate(clk); 108 108 109 - for (i = 0; loongson2_clockmod_table[i].frequency != CPUFREQ_TABLE_END; 110 - i++) { 111 - if (loongson2_clockmod_table[i].frequency == 112 - CPUFREQ_ENTRY_INVALID) 113 - continue; 114 - if (rate == loongson2_clockmod_table[i].frequency) 109 + cpufreq_for_each_valid_entry(pos, loongson2_clockmod_table) 110 + if (rate == pos->frequency) 115 111 break; 116 - } 117 - if (rate != loongson2_clockmod_table[i].frequency) 112 + if (rate != pos->frequency) 118 113 return -ENOTSUPP; 119 114 120 115 clk->rate = rate; 121 116 122 117 regval = LOONGSON_CHIPCFG0; 123 - regval = (regval & ~0x7) | 124 - (loongson2_clockmod_table[i].driver_data - 1); 118 + regval = (regval & ~0x7) | (pos->driver_data - 1); 125 119 LOONGSON_CHIPCFG0 = regval; 126 120 127 121 return ret;
+4 -5
drivers/cpufreq/acpi-cpufreq.c
··· 213 213 214 214 static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data) 215 215 { 216 - int i; 216 + struct cpufreq_frequency_table *pos; 217 217 struct acpi_processor_performance *perf; 218 218 219 219 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) ··· 223 223 224 224 perf = data->acpi_data; 225 225 226 - for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { 227 - if (msr == perf->states[data->freq_table[i].driver_data].status) 228 - return data->freq_table[i].frequency; 229 - } 226 + cpufreq_for_each_entry(pos, data->freq_table) 227 + if (msr == perf->states[pos->driver_data].status) 228 + return pos->frequency; 230 229 return data->freq_table[0].frequency; 231 230 } 232 231
+8 -8
drivers/cpufreq/arm_big_little.c
··· 226 226 /* get the minimum frequency in the cpufreq_frequency_table */ 227 227 static inline u32 get_table_min(struct cpufreq_frequency_table *table) 228 228 { 229 - int i; 229 + struct cpufreq_frequency_table *pos; 230 230 uint32_t min_freq = ~0; 231 - for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) 232 - if (table[i].frequency < min_freq) 233 - min_freq = table[i].frequency; 231 + cpufreq_for_each_entry(pos, table) 232 + if (pos->frequency < min_freq) 233 + min_freq = pos->frequency; 234 234 return min_freq; 235 235 } 236 236 237 237 /* get the maximum frequency in the cpufreq_frequency_table */ 238 238 static inline u32 get_table_max(struct cpufreq_frequency_table *table) 239 239 { 240 - int i; 240 + struct cpufreq_frequency_table *pos; 241 241 uint32_t max_freq = 0; 242 - for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) 243 - if (table[i].frequency > max_freq) 244 - max_freq = table[i].frequency; 242 + cpufreq_for_each_entry(pos, table) 243 + if (pos->frequency > max_freq) 244 + max_freq = pos->frequency; 245 245 return max_freq; 246 246 } 247 247
+11
drivers/cpufreq/cpufreq.c
··· 237 237 } 238 238 EXPORT_SYMBOL_GPL(cpufreq_cpu_put); 239 239 240 + bool cpufreq_next_valid(struct cpufreq_frequency_table **pos) 241 + { 242 + while ((*pos)->frequency != CPUFREQ_TABLE_END) 243 + if ((*pos)->frequency != CPUFREQ_ENTRY_INVALID) 244 + return true; 245 + else 246 + (*pos)++; 247 + return false; 248 + } 249 + EXPORT_SYMBOL_GPL(cpufreq_next_valid); 250 + 240 251 /********************************************************************* 241 252 * EXTERNALLY AFFECTING FREQUENCY CHANGES * 242 253 *********************************************************************/
+8 -16
drivers/cpufreq/cpufreq_stats.c
··· 182 182 183 183 static int __cpufreq_stats_create_table(struct cpufreq_policy *policy) 184 184 { 185 - unsigned int i, j, count = 0, ret = 0; 185 + unsigned int i, count = 0, ret = 0; 186 186 struct cpufreq_stats *stat; 187 187 unsigned int alloc_size; 188 188 unsigned int cpu = policy->cpu; 189 - struct cpufreq_frequency_table *table; 189 + struct cpufreq_frequency_table *pos, *table; 190 190 191 191 table = cpufreq_frequency_get_table(cpu); 192 192 if (unlikely(!table)) ··· 205 205 stat->cpu = cpu; 206 206 per_cpu(cpufreq_stats_table, cpu) = stat; 207 207 208 - for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { 209 - unsigned int freq = table[i].frequency; 210 - if (freq == CPUFREQ_ENTRY_INVALID) 211 - continue; 208 + cpufreq_for_each_valid_entry(pos, table) 212 209 count++; 213 - } 214 210 215 211 alloc_size = count * sizeof(int) + count * sizeof(u64); 216 212 ··· 224 228 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS 225 229 stat->trans_table = stat->freq_table + count; 226 230 #endif 227 - j = 0; 228 - for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { 229 - unsigned int freq = table[i].frequency; 230 - if (freq == CPUFREQ_ENTRY_INVALID) 231 - continue; 232 - if (freq_table_get_index(stat, freq) == -1) 233 - stat->freq_table[j++] = freq; 234 - } 235 - stat->state_num = j; 231 + i = 0; 232 + cpufreq_for_each_valid_entry(pos, table) 233 + if (freq_table_get_index(stat, pos->frequency) == -1) 234 + stat->freq_table[i++] = pos->frequency; 235 + stat->state_num = i; 236 236 spin_lock(&cpufreq_stats_lock); 237 237 stat->last_time = get_jiffies_64(); 238 238 stat->last_index = freq_table_get_index(stat, policy->cur);
+3 -5
drivers/cpufreq/dbx500-cpufreq.c
··· 45 45 46 46 static int dbx500_cpufreq_probe(struct platform_device *pdev) 47 47 { 48 - int i = 0; 48 + struct cpufreq_frequency_table *pos; 49 49 50 50 freq_table = dev_get_platdata(&pdev->dev); 51 51 if (!freq_table) { ··· 60 60 } 61 61 62 62 pr_info("dbx500-cpufreq: Available frequencies:\n"); 63 - while (freq_table[i].frequency != CPUFREQ_TABLE_END) { 64 - pr_info(" %d Mhz\n", freq_table[i].frequency/1000); 65 - i++; 66 - } 63 + cpufreq_for_each_entry(pos, freq_table) 64 + pr_info(" %d Mhz\n", pos->frequency / 1000); 67 65 68 66 return cpufreq_register_driver(&dbx500_cpufreq_driver); 69 67 }
+4 -5
drivers/cpufreq/elanfreq.c
··· 147 147 static int elanfreq_cpu_init(struct cpufreq_policy *policy) 148 148 { 149 149 struct cpuinfo_x86 *c = &cpu_data(0); 150 - unsigned int i; 150 + struct cpufreq_frequency_table *pos; 151 151 152 152 /* capability check */ 153 153 if ((c->x86_vendor != X86_VENDOR_AMD) || ··· 159 159 max_freq = elanfreq_get_cpu_frequency(0); 160 160 161 161 /* table init */ 162 - for (i = 0; (elanfreq_table[i].frequency != CPUFREQ_TABLE_END); i++) { 163 - if (elanfreq_table[i].frequency > max_freq) 164 - elanfreq_table[i].frequency = CPUFREQ_ENTRY_INVALID; 165 - } 162 + cpufreq_for_each_entry(pos, elanfreq_table) 163 + if (pos->frequency > max_freq) 164 + pos->frequency = CPUFREQ_ENTRY_INVALID; 166 165 167 166 /* cpuinfo and default policy values */ 168 167 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+5 -6
drivers/cpufreq/exynos-cpufreq.c
··· 29 29 static int exynos_cpufreq_get_index(unsigned int freq) 30 30 { 31 31 struct cpufreq_frequency_table *freq_table = exynos_info->freq_table; 32 - int index; 32 + struct cpufreq_frequency_table *pos; 33 33 34 - for (index = 0; 35 - freq_table[index].frequency != CPUFREQ_TABLE_END; index++) 36 - if (freq_table[index].frequency == freq) 34 + cpufreq_for_each_entry(pos, freq_table) 35 + if (pos->frequency == freq) 37 36 break; 38 37 39 - if (freq_table[index].frequency == CPUFREQ_TABLE_END) 38 + if (pos->frequency == CPUFREQ_TABLE_END) 40 39 return -EINVAL; 41 40 42 - return index; 41 + return pos - freq_table; 43 42 } 44 43 45 44 static int exynos_cpufreq_scale(unsigned int target_freq)
+15 -15
drivers/cpufreq/exynos5440-cpufreq.c
··· 114 114 115 115 static int init_div_table(void) 116 116 { 117 - struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table; 117 + struct cpufreq_frequency_table *pos, *freq_tbl = dvfs_info->freq_table; 118 118 unsigned int tmp, clk_div, ema_div, freq, volt_id; 119 - int i = 0; 120 119 struct dev_pm_opp *opp; 121 120 122 121 rcu_read_lock(); 123 - for (i = 0; freq_tbl[i].frequency != CPUFREQ_TABLE_END; i++) { 124 - 122 + cpufreq_for_each_entry(pos, freq_tbl) { 125 123 opp = dev_pm_opp_find_freq_exact(dvfs_info->dev, 126 - freq_tbl[i].frequency * 1000, true); 124 + pos->frequency * 1000, true); 127 125 if (IS_ERR(opp)) { 128 126 rcu_read_unlock(); 129 127 dev_err(dvfs_info->dev, 130 128 "failed to find valid OPP for %u KHZ\n", 131 - freq_tbl[i].frequency); 129 + pos->frequency); 132 130 return PTR_ERR(opp); 133 131 } 134 132 135 - freq = freq_tbl[i].frequency / 1000; /* In MHZ */ 133 + freq = pos->frequency / 1000; /* In MHZ */ 136 134 clk_div = ((freq / CPU_DIV_FREQ_MAX) & P0_7_CPUCLKDEV_MASK) 137 135 << P0_7_CPUCLKDEV_SHIFT; 138 136 clk_div |= ((freq / CPU_ATB_FREQ_MAX) & P0_7_ATBCLKDEV_MASK) ··· 155 157 tmp = (clk_div | ema_div | (volt_id << P0_7_VDD_SHIFT) 156 158 | ((freq / FREQ_UNIT) << P0_7_FREQ_SHIFT)); 157 159 158 - __raw_writel(tmp, dvfs_info->base + XMU_PMU_P0_7 + 4 * i); 160 + __raw_writel(tmp, dvfs_info->base + XMU_PMU_P0_7 + 4 * 161 + (pos - freq_tbl)); 159 162 } 160 163 161 164 rcu_read_unlock(); ··· 165 166 166 167 static void exynos_enable_dvfs(unsigned int cur_frequency) 167 168 { 168 - unsigned int tmp, i, cpu; 169 + unsigned int tmp, cpu; 169 170 struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table; 171 + struct cpufreq_frequency_table *pos; 170 172 /* Disable DVFS */ 171 173 __raw_writel(0, dvfs_info->base + XMU_DVFS_CTRL); 172 174 ··· 182 182 __raw_writel(tmp, dvfs_info->base + XMU_PMUIRQEN); 183 183 184 184 /* Set initial performance index */ 185 - for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) 186 - if 
(freq_table[i].frequency == cur_frequency) 185 + cpufreq_for_each_entry(pos, freq_table) 186 + if (pos->frequency == cur_frequency) 187 187 break; 188 188 189 - if (freq_table[i].frequency == CPUFREQ_TABLE_END) { 189 + if (pos->frequency == CPUFREQ_TABLE_END) { 190 190 dev_crit(dvfs_info->dev, "Boot up frequency not supported\n"); 191 191 /* Assign the highest frequency */ 192 - i = 0; 193 - cur_frequency = freq_table[i].frequency; 192 + pos = freq_table; 193 + cur_frequency = pos->frequency; 194 194 } 195 195 196 196 dev_info(dvfs_info->dev, "Setting dvfs initial frequency = %uKHZ", ··· 199 199 for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++) { 200 200 tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4); 201 201 tmp &= ~(P_VALUE_MASK << C0_3_PSTATE_NEW_SHIFT); 202 - tmp |= (i << C0_3_PSTATE_NEW_SHIFT); 202 + tmp |= ((pos - freq_table) << C0_3_PSTATE_NEW_SHIFT); 203 203 __raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4); 204 204 } 205 205
+25 -31
drivers/cpufreq/freq_table.c
··· 21 21 int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, 22 22 struct cpufreq_frequency_table *table) 23 23 { 24 + struct cpufreq_frequency_table *pos; 24 25 unsigned int min_freq = ~0; 25 26 unsigned int max_freq = 0; 26 - unsigned int i; 27 + unsigned int freq; 27 28 28 - for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { 29 - unsigned int freq = table[i].frequency; 30 - if (freq == CPUFREQ_ENTRY_INVALID) { 31 - pr_debug("table entry %u is invalid, skipping\n", i); 29 + cpufreq_for_each_valid_entry(pos, table) { 30 + freq = pos->frequency; 32 31 33 - continue; 34 - } 35 32 if (!cpufreq_boost_enabled() 36 - && (table[i].flags & CPUFREQ_BOOST_FREQ)) 33 + && (pos->flags & CPUFREQ_BOOST_FREQ)) 37 34 continue; 38 35 39 - pr_debug("table entry %u: %u kHz\n", i, freq); 36 + pr_debug("table entry %u: %u kHz\n", (int)(pos - table), freq); 40 37 if (freq < min_freq) 41 38 min_freq = freq; 42 39 if (freq > max_freq) ··· 54 57 int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, 55 58 struct cpufreq_frequency_table *table) 56 59 { 57 - unsigned int next_larger = ~0, freq, i = 0; 60 + struct cpufreq_frequency_table *pos; 61 + unsigned int freq, next_larger = ~0; 58 62 bool found = false; 59 63 60 64 pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n", ··· 63 65 64 66 cpufreq_verify_within_cpu_limits(policy); 65 67 66 - for (; freq = table[i].frequency, freq != CPUFREQ_TABLE_END; i++) { 67 - if (freq == CPUFREQ_ENTRY_INVALID) 68 - continue; 68 + cpufreq_for_each_valid_entry(pos, table) { 69 + freq = pos->frequency; 70 + 69 71 if ((freq >= policy->min) && (freq <= policy->max)) { 70 72 found = true; 71 73 break; ··· 116 118 .driver_data = ~0, 117 119 .frequency = 0, 118 120 }; 119 - unsigned int i; 121 + struct cpufreq_frequency_table *pos; 122 + unsigned int freq, i = 0; 120 123 121 124 pr_debug("request for target %u kHz (relation: %u) for cpu %u\n", 122 125 target_freq, relation, policy->cpu); ··· 131 132 
break; 132 133 } 133 134 134 - for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { 135 - unsigned int freq = table[i].frequency; 136 - if (freq == CPUFREQ_ENTRY_INVALID) 137 - continue; 135 + cpufreq_for_each_valid_entry(pos, table) { 136 + freq = pos->frequency; 137 + 138 + i = pos - table; 138 139 if ((freq < policy->min) || (freq > policy->max)) 139 140 continue; 140 141 switch (relation) { ··· 183 184 int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy, 184 185 unsigned int freq) 185 186 { 186 - struct cpufreq_frequency_table *table; 187 - int i; 187 + struct cpufreq_frequency_table *pos, *table; 188 188 189 189 table = cpufreq_frequency_get_table(policy->cpu); 190 190 if (unlikely(!table)) { ··· 191 193 return -ENOENT; 192 194 } 193 195 194 - for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { 195 - if (table[i].frequency == freq) 196 - return i; 197 - } 196 + cpufreq_for_each_valid_entry(pos, table) 197 + if (pos->frequency == freq) 198 + return pos - table; 198 199 199 200 return -EINVAL; 200 201 } ··· 205 208 static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf, 206 209 bool show_boost) 207 210 { 208 - unsigned int i = 0; 209 211 ssize_t count = 0; 210 - struct cpufreq_frequency_table *table = policy->freq_table; 212 + struct cpufreq_frequency_table *pos, *table = policy->freq_table; 211 213 212 214 if (!table) 213 215 return -ENODEV; 214 216 215 - for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { 216 - if (table[i].frequency == CPUFREQ_ENTRY_INVALID) 217 - continue; 217 + cpufreq_for_each_valid_entry(pos, table) { 218 218 /* 219 219 * show_boost = true and driver_data = BOOST freq 220 220 * display BOOST freqs ··· 223 229 * show_boost = false and driver_data != BOOST freq 224 230 * display NON BOOST freqs 225 231 */ 226 - if (show_boost ^ (table[i].flags & CPUFREQ_BOOST_FREQ)) 232 + if (show_boost ^ (pos->flags & CPUFREQ_BOOST_FREQ)) 227 233 continue; 228 234 229 - count += 
sprintf(&buf[count], "%d ", table[i].frequency); 235 + count += sprintf(&buf[count], "%d ", pos->frequency); 230 236 } 231 237 count += sprintf(&buf[count], "\n"); 232 238
+5 -6
drivers/cpufreq/longhaul.c
··· 530 530 531 531 static void longhaul_setup_voltagescaling(void) 532 532 { 533 + struct cpufreq_frequency_table *freq_pos; 533 534 union msr_longhaul longhaul; 534 535 struct mV_pos minvid, maxvid, vid; 535 536 unsigned int j, speed, pos, kHz_step, numvscales; ··· 609 608 /* Calculate kHz for one voltage step */ 610 609 kHz_step = (highest_speed - min_vid_speed) / numvscales; 611 610 612 - j = 0; 613 - while (longhaul_table[j].frequency != CPUFREQ_TABLE_END) { 614 - speed = longhaul_table[j].frequency; 611 + cpufreq_for_each_entry(freq_pos, longhaul_table) { 612 + speed = freq_pos->frequency; 615 613 if (speed > min_vid_speed) 616 614 pos = (speed - min_vid_speed) / kHz_step + minvid.pos; 617 615 else 618 616 pos = minvid.pos; 619 - longhaul_table[j].driver_data |= mV_vrm_table[pos] << 8; 617 + freq_pos->driver_data |= mV_vrm_table[pos] << 8; 620 618 vid = vrm_mV_table[mV_vrm_table[pos]]; 621 619 printk(KERN_INFO PFX "f: %d kHz, index: %d, vid: %d mV\n", 622 - speed, j, vid.mV); 623 - j++; 620 + speed, (int)(freq_pos - longhaul_table), vid.mV); 624 621 } 625 622 626 623 can_scale_voltage = 1;
+5 -5
drivers/cpufreq/pasemi-cpufreq.c
··· 136 136 137 137 static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy) 138 138 { 139 + struct cpufreq_frequency_table *pos; 139 140 const u32 *max_freqp; 140 141 u32 max_freq; 141 - int i, cur_astate; 142 + int cur_astate; 142 143 struct resource res; 143 144 struct device_node *cpu, *dn; 144 145 int err = -ENODEV; ··· 198 197 pr_debug("initializing frequency table\n"); 199 198 200 199 /* initialize frequency table */ 201 - for (i=0; pas_freqs[i].frequency!=CPUFREQ_TABLE_END; i++) { 202 - pas_freqs[i].frequency = 203 - get_astate_freq(pas_freqs[i].driver_data) * 100000; 204 - pr_debug("%d: %d\n", i, pas_freqs[i].frequency); 200 + cpufreq_for_each_entry(pos, pas_freqs) { 201 + pos->frequency = get_astate_freq(pos->driver_data) * 100000; 202 + pr_debug("%d: %d\n", (int)(pos - pas_freqs), pos->frequency); 205 203 } 206 204 207 205 cur_astate = get_cur_astate(policy->cpu);
+7 -7
drivers/cpufreq/powernow-k6.c
··· 151 151 152 152 static int powernow_k6_cpu_init(struct cpufreq_policy *policy) 153 153 { 154 + struct cpufreq_frequency_table *pos; 154 155 unsigned int i, f; 155 156 unsigned khz; 156 157 ··· 169 168 } 170 169 } 171 170 if (param_max_multiplier) { 172 - for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) { 173 - if (clock_ratio[i].driver_data == param_max_multiplier) { 171 + cpufreq_for_each_entry(pos, clock_ratio) 172 + if (pos->driver_data == param_max_multiplier) { 174 173 max_multiplier = param_max_multiplier; 175 174 goto have_max_multiplier; 176 175 } 177 - } 178 176 printk(KERN_ERR "powernow-k6: invalid max_multiplier parameter, valid parameters 20, 30, 35, 40, 45, 50, 55, 60\n"); 179 177 return -EINVAL; 180 178 } ··· 201 201 param_busfreq = busfreq * 10; 202 202 203 203 /* table init */ 204 - for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) { 205 - f = clock_ratio[i].driver_data; 204 + cpufreq_for_each_entry(pos, clock_ratio) { 205 + f = pos->driver_data; 206 206 if (f > max_multiplier) 207 - clock_ratio[i].frequency = CPUFREQ_ENTRY_INVALID; 207 + pos->frequency = CPUFREQ_ENTRY_INVALID; 208 208 else 209 - clock_ratio[i].frequency = busfreq * f; 209 + pos->frequency = busfreq * f; 210 210 } 211 211 212 212 /* cpuinfo and default policy values */
+5 -4
drivers/cpufreq/ppc_cbe_cpufreq.c
··· 67 67 68 68 static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy) 69 69 { 70 + struct cpufreq_frequency_table *pos; 70 71 const u32 *max_freqp; 71 72 u32 max_freq; 72 - int i, cur_pmode; 73 + int cur_pmode; 73 74 struct device_node *cpu; 74 75 75 76 cpu = of_get_cpu_node(policy->cpu, NULL); ··· 103 102 pr_debug("initializing frequency table\n"); 104 103 105 104 /* initialize frequency table */ 106 - for (i=0; cbe_freqs[i].frequency!=CPUFREQ_TABLE_END; i++) { 107 - cbe_freqs[i].frequency = max_freq / cbe_freqs[i].driver_data; 108 - pr_debug("%d: %d\n", i, cbe_freqs[i].frequency); 105 + cpufreq_for_each_entry(pos, cbe_freqs) { 106 + pos->frequency = max_freq / pos->driver_data; 107 + pr_debug("%d: %d\n", (int)(pos - cbe_freqs), pos->frequency); 109 108 } 110 109 111 110 /* if DEBUG is enabled set_pmode() measures the latency
+17 -23
drivers/cpufreq/s3c2416-cpufreq.c
··· 266 266 static void __init s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq) 267 267 { 268 268 int count, v, i, found; 269 - struct cpufreq_frequency_table *freq; 269 + struct cpufreq_frequency_table *pos; 270 270 struct s3c2416_dvfs *dvfs; 271 271 272 272 count = regulator_count_voltages(s3c_freq->vddarm); ··· 275 275 return; 276 276 } 277 277 278 - freq = s3c_freq->freq_table; 279 - while (count > 0 && freq->frequency != CPUFREQ_TABLE_END) { 280 - if (freq->frequency == CPUFREQ_ENTRY_INVALID) 281 - continue; 278 + if (!count) 279 + goto out; 282 280 283 - dvfs = &s3c2416_dvfs_table[freq->driver_data]; 281 + cpufreq_for_each_valid_entry(pos, s3c_freq->freq_table) { 282 + dvfs = &s3c2416_dvfs_table[pos->driver_data]; 284 283 found = 0; 285 284 286 285 /* Check only the min-voltage, more is always ok on S3C2416 */ ··· 291 292 292 293 if (!found) { 293 294 pr_debug("cpufreq: %dkHz unsupported by regulator\n", 294 - freq->frequency); 295 - freq->frequency = CPUFREQ_ENTRY_INVALID; 295 + pos->frequency); 296 + pos->frequency = CPUFREQ_ENTRY_INVALID; 296 297 } 297 - 298 - freq++; 299 298 } 300 299 300 + out: 301 301 /* Guessed */ 302 302 s3c_freq->regulator_latency = 1 * 1000 * 1000; 303 303 } ··· 336 338 static int __init s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy) 337 339 { 338 340 struct s3c2416_data *s3c_freq = &s3c2416_cpufreq; 339 - struct cpufreq_frequency_table *freq; 341 + struct cpufreq_frequency_table *pos; 340 342 struct clk *msysclk; 341 343 unsigned long rate; 342 344 int ret; ··· 425 427 s3c_freq->regulator_latency = 0; 426 428 #endif 427 429 428 - freq = s3c_freq->freq_table; 429 - while (freq->frequency != CPUFREQ_TABLE_END) { 430 + cpufreq_for_each_entry(pos, s3c_freq->freq_table) { 430 431 /* special handling for dvs mode */ 431 - if (freq->driver_data == 0) { 432 + if (pos->driver_data == 0) { 432 433 if (!s3c_freq->hclk) { 433 434 pr_debug("cpufreq: %dkHz unsupported as it would need unavailable dvs mode\n", 434 - 
freq->frequency); 435 - freq->frequency = CPUFREQ_ENTRY_INVALID; 435 + pos->frequency); 436 + pos->frequency = CPUFREQ_ENTRY_INVALID; 436 437 } else { 437 - freq++; 438 438 continue; 439 439 } 440 440 } 441 441 442 442 /* Check for frequencies we can generate */ 443 443 rate = clk_round_rate(s3c_freq->armdiv, 444 - freq->frequency * 1000); 444 + pos->frequency * 1000); 445 445 rate /= 1000; 446 - if (rate != freq->frequency) { 446 + if (rate != pos->frequency) { 447 447 pr_debug("cpufreq: %dkHz unsupported by clock (clk_round_rate return %lu)\n", 448 - freq->frequency, rate); 449 - freq->frequency = CPUFREQ_ENTRY_INVALID; 448 + pos->frequency, rate); 449 + pos->frequency = CPUFREQ_ENTRY_INVALID; 450 450 } 451 - 452 - freq++; 453 451 } 454 452 455 453 /* Datasheet says PLL stabalisation time must be at least 300us,
+5 -10
drivers/cpufreq/s3c64xx-cpufreq.c
··· 118 118 pr_err("Unable to check supported voltages\n"); 119 119 } 120 120 121 - freq = s3c64xx_freq_table; 122 - while (count > 0 && freq->frequency != CPUFREQ_TABLE_END) { 123 - if (freq->frequency == CPUFREQ_ENTRY_INVALID) 124 - continue; 121 + if (!count) 122 + goto out; 125 123 124 + cpufreq_for_each_valid_entry(freq, s3c64xx_freq_table) { 126 125 dvfs = &s3c64xx_dvfs_table[freq->driver_data]; 127 126 found = 0; 128 127 ··· 136 137 freq->frequency); 137 138 freq->frequency = CPUFREQ_ENTRY_INVALID; 138 139 } 139 - 140 - freq++; 141 140 } 142 141 142 + out: 143 143 /* Guess based on having to do an I2C/SPI write; in future we 144 144 * will be able to query the regulator performance here. */ 145 145 regulator_latency = 1 * 1000 * 1000; ··· 177 179 } 178 180 #endif 179 181 180 - freq = s3c64xx_freq_table; 181 - while (freq->frequency != CPUFREQ_TABLE_END) { 182 + cpufreq_for_each_entry(freq, s3c64xx_freq_table) { 182 183 unsigned long r; 183 184 184 185 /* Check for frequencies we can generate */ ··· 193 196 * frequency is the maximum we can support. */ 194 197 if (!vddarm && freq->frequency > clk_get_rate(policy->clk) / 1000) 195 198 freq->frequency = CPUFREQ_ENTRY_INVALID; 196 - 197 - freq++; 198 199 } 199 200 200 201 /* Datasheet says PLL stabalisation time (if we were to use
+8 -11
drivers/mfd/db8500-prcmu.c
··· 1734 1734 1735 1735 static long round_armss_rate(unsigned long rate) 1736 1736 { 1737 + struct cpufreq_frequency_table *pos; 1737 1738 long freq = 0; 1738 - int i = 0; 1739 1739 1740 1740 /* cpufreq table frequencies is in KHz. */ 1741 1741 rate = rate / 1000; 1742 1742 1743 1743 /* Find the corresponding arm opp from the cpufreq table. */ 1744 - while (db8500_cpufreq_table[i].frequency != CPUFREQ_TABLE_END) { 1745 - freq = db8500_cpufreq_table[i].frequency; 1744 + cpufreq_for_each_entry(pos, db8500_cpufreq_table) { 1745 + freq = pos->frequency; 1746 1746 if (freq == rate) 1747 1747 break; 1748 - i++; 1749 1748 } 1750 1749 1751 1750 /* Return the last valid value, even if a match was not found. */ ··· 1885 1886 1886 1887 static int set_armss_rate(unsigned long rate) 1887 1888 { 1888 - int i = 0; 1889 + struct cpufreq_frequency_table *pos; 1889 1890 1890 1891 /* cpufreq table frequencies is in KHz. */ 1891 1892 rate = rate / 1000; 1892 1893 1893 1894 /* Find the corresponding arm opp from the cpufreq table. */ 1894 - while (db8500_cpufreq_table[i].frequency != CPUFREQ_TABLE_END) { 1895 - if (db8500_cpufreq_table[i].frequency == rate) 1895 + cpufreq_for_each_entry(pos, db8500_cpufreq_table) 1896 + if (pos->frequency == rate) 1896 1897 break; 1897 - i++; 1898 - } 1899 1898 1900 - if (db8500_cpufreq_table[i].frequency != rate) 1899 + if (pos->frequency != rate) 1901 1900 return -EINVAL; 1902 1901 1903 1902 /* Set the new arm opp. */ 1904 - return db8500_prcmu_set_arm_opp(db8500_cpufreq_table[i].driver_data); 1903 + return db8500_prcmu_set_arm_opp(pos->driver_data); 1905 1904 } 1906 1905 1907 1906 static int set_plldsi_rate(unsigned long rate)
+5 -9
drivers/net/irda/sh_sir.c
··· 217 217 static u32 sh_sir_find_sclk(struct clk *irda_clk) 218 218 { 219 219 struct cpufreq_frequency_table *freq_table = irda_clk->freq_table; 220 + struct cpufreq_frequency_table *pos; 220 221 struct clk *pclk = clk_get(NULL, "peripheral_clk"); 221 222 u32 limit, min = 0xffffffff, tmp; 222 - int i, index = 0; 223 + int index = 0; 223 224 224 225 limit = clk_get_rate(pclk); 225 226 clk_put(pclk); 226 227 227 228 /* IrDA can not set over peripheral_clk */ 228 - for (i = 0; 229 - freq_table[i].frequency != CPUFREQ_TABLE_END; 230 - i++) { 231 - u32 freq = freq_table[i].frequency; 232 - 233 - if (freq == CPUFREQ_ENTRY_INVALID) 234 - continue; 229 + cpufreq_for_each_valid_entry(pos, freq_table) { 230 + u32 freq = pos->frequency; 235 231 236 232 /* IrDA should not over peripheral_clk */ 237 233 if (freq > limit) ··· 236 240 tmp = freq % SCLK_BASE; 237 241 if (tmp < min) { 238 242 min = tmp; 239 - index = i; 243 + index = pos - freq_table; 240 244 } 241 245 } 242 246
+5 -15
drivers/sh/clk/core.c
··· 196 196 struct cpufreq_frequency_table *freq_table, 197 197 unsigned long rate) 198 198 { 199 - int i; 199 + struct cpufreq_frequency_table *pos; 200 200 201 - for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { 202 - unsigned long freq = freq_table[i].frequency; 203 - 204 - if (freq == CPUFREQ_ENTRY_INVALID) 205 - continue; 206 - 207 - if (freq == rate) 208 - return i; 209 - } 201 + cpufreq_for_each_valid_entry(pos, freq_table) 202 + if (pos->frequency == rate) 203 + return pos - freq_table; 210 204 211 205 return -ENOENT; 212 206 } ··· 569 575 return abs(target - *best_freq); 570 576 } 571 577 572 - for (freq = parent->freq_table; freq->frequency != CPUFREQ_TABLE_END; 573 - freq++) { 574 - if (freq->frequency == CPUFREQ_ENTRY_INVALID) 575 - continue; 576 - 578 + cpufreq_for_each_valid_entry(freq, parent->freq_table) { 577 579 if (unlikely(freq->frequency / target <= div_min - 1)) { 578 580 unsigned long freq_max; 579 581
+13 -20
drivers/thermal/cpu_cooling.c
··· 144 144 unsigned int *output, 145 145 enum cpufreq_cooling_property property) 146 146 { 147 - int i, j; 147 + int i; 148 148 unsigned long max_level = 0, level = 0; 149 149 unsigned int freq = CPUFREQ_ENTRY_INVALID; 150 150 int descend = -1; 151 - struct cpufreq_frequency_table *table = 151 + struct cpufreq_frequency_table *pos, *table = 152 152 cpufreq_frequency_get_table(cpu); 153 153 154 154 if (!output) ··· 157 157 if (!table) 158 158 return -EINVAL; 159 159 160 - for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { 161 - /* ignore invalid entries */ 162 - if (table[i].frequency == CPUFREQ_ENTRY_INVALID) 163 - continue; 164 - 160 + cpufreq_for_each_valid_entry(pos, table) { 165 161 /* ignore duplicate entry */ 166 - if (freq == table[i].frequency) 162 + if (freq == pos->frequency) 167 163 continue; 168 164 169 165 /* get the frequency order */ 170 166 if (freq != CPUFREQ_ENTRY_INVALID && descend == -1) 171 - descend = !!(freq > table[i].frequency); 167 + descend = freq > pos->frequency; 172 168 173 - freq = table[i].frequency; 169 + freq = pos->frequency; 174 170 max_level++; 175 171 } 176 172 ··· 186 190 if (property == GET_FREQ) 187 191 level = descend ? input : (max_level - input); 188 192 189 - for (i = 0, j = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { 190 - /* ignore invalid entry */ 191 - if (table[i].frequency == CPUFREQ_ENTRY_INVALID) 192 - continue; 193 - 193 + i = 0; 194 + cpufreq_for_each_valid_entry(pos, table) { 194 195 /* ignore duplicate entry */ 195 - if (freq == table[i].frequency) 196 + if (freq == pos->frequency) 196 197 continue; 197 198 198 199 /* now we have a valid frequency entry */ 199 - freq = table[i].frequency; 200 + freq = pos->frequency; 200 201 201 202 if (property == GET_LEVEL && (unsigned int)input == freq) { 202 203 /* get level by frequency */ 203 - *output = descend ? j : (max_level - j); 204 + *output = descend ? 
i : (max_level - i); 204 205 return 0; 205 206 } 206 - if (property == GET_FREQ && level == j) { 207 + if (property == GET_FREQ && level == i) { 207 208 /* get frequency by level */ 208 209 *output = freq; 209 210 return 0; 210 211 } 211 - j++; 212 + i++; 212 213 } 213 214 214 215 return -EINVAL;
+21
include/linux/cpufreq.h
··· 468 468 * order */ 469 469 }; 470 470 471 + bool cpufreq_next_valid(struct cpufreq_frequency_table **pos); 472 + 473 + /* 474 + * cpufreq_for_each_entry - iterate over a cpufreq_frequency_table 475 + * @pos: the cpufreq_frequency_table * to use as a loop cursor. 476 + * @table: the cpufreq_frequency_table * to iterate over. 477 + */ 478 + 479 + #define cpufreq_for_each_entry(pos, table) \ 480 + for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++) 481 + 482 + /* 483 + * cpufreq_for_each_valid_entry - iterate over a cpufreq_frequency_table 484 + * excluding CPUFREQ_ENTRY_INVALID frequencies. 485 + * @pos: the cpufreq_frequency_table * to use as a loop cursor. 486 + * @table: the cpufreq_frequency_table * to iterate over. 487 + */ 488 + 489 + #define cpufreq_for_each_valid_entry(pos, table) \ 490 + for (pos = table; cpufreq_next_valid(&pos); pos++) 491 + 471 492 int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, 472 493 struct cpufreq_frequency_table *table); 473 494