Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

governors: unify last_state_idx

Since this field is shared by all governors, move it to the
cpuidle_device structure.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>

Authored by Marcelo Tosatti and committed by Rafael J. Wysocki
7d4daeed 259231a0

+19 -20
+10 -11
drivers/cpuidle/governors/ladder.c
··· 38 38 39 39 struct ladder_device { 40 40 struct ladder_device_state states[CPUIDLE_STATE_MAX]; 41 - int last_state_idx; 42 41 }; 43 42 44 43 static DEFINE_PER_CPU(struct ladder_device, ladder_devices); ··· 48 49 * @old_idx: the current state index 49 50 * @new_idx: the new target state index 50 51 */ 51 - static inline void ladder_do_selection(struct ladder_device *ldev, 52 + static inline void ladder_do_selection(struct cpuidle_device *dev, 53 + struct ladder_device *ldev, 52 54 int old_idx, int new_idx) 53 55 { 54 56 ldev->states[old_idx].stats.promotion_count = 0; 55 57 ldev->states[old_idx].stats.demotion_count = 0; 56 - ldev->last_state_idx = new_idx; 58 + dev->last_state_idx = new_idx; 57 59 } 58 60 59 61 /** ··· 68 68 { 69 69 struct ladder_device *ldev = this_cpu_ptr(&ladder_devices); 70 70 struct ladder_device_state *last_state; 71 - int last_residency, last_idx = ldev->last_state_idx; 71 + int last_residency, last_idx = dev->last_state_idx; 72 72 int first_idx = drv->states[0].flags & CPUIDLE_FLAG_POLLING ? 
1 : 0; 73 73 int latency_req = cpuidle_governor_latency_req(dev->cpu); 74 74 75 75 /* Special case when user has set very strict latency requirement */ 76 76 if (unlikely(latency_req == 0)) { 77 - ladder_do_selection(ldev, last_idx, 0); 77 + ladder_do_selection(dev, ldev, last_idx, 0); 78 78 return 0; 79 79 } 80 80 ··· 91 91 last_state->stats.promotion_count++; 92 92 last_state->stats.demotion_count = 0; 93 93 if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) { 94 - ladder_do_selection(ldev, last_idx, last_idx + 1); 94 + ladder_do_selection(dev, ldev, last_idx, last_idx + 1); 95 95 return last_idx + 1; 96 96 } 97 97 } ··· 107 107 if (drv->states[i].exit_latency <= latency_req) 108 108 break; 109 109 } 110 - ladder_do_selection(ldev, last_idx, i); 110 + ladder_do_selection(dev, ldev, last_idx, i); 111 111 return i; 112 112 } 113 113 ··· 116 116 last_state->stats.demotion_count++; 117 117 last_state->stats.promotion_count = 0; 118 118 if (last_state->stats.demotion_count >= last_state->threshold.demotion_count) { 119 - ladder_do_selection(ldev, last_idx, last_idx - 1); 119 + ladder_do_selection(dev, ldev, last_idx, last_idx - 1); 120 120 return last_idx - 1; 121 121 } 122 122 } ··· 139 139 struct ladder_device_state *lstate; 140 140 struct cpuidle_state *state; 141 141 142 - ldev->last_state_idx = first_idx; 142 + dev->last_state_idx = first_idx; 143 143 144 144 for (i = first_idx; i < drv->state_count; i++) { 145 145 state = &drv->states[i]; ··· 167 167 */ 168 168 static void ladder_reflect(struct cpuidle_device *dev, int index) 169 169 { 170 - struct ladder_device *ldev = this_cpu_ptr(&ladder_devices); 171 170 if (index > 0) 172 - ldev->last_state_idx = index; 171 + dev->last_state_idx = index; 173 172 } 174 173 175 174 static struct cpuidle_governor ladder_governor = {
+2 -3
drivers/cpuidle/governors/menu.c
··· 117 117 */ 118 118 119 119 struct menu_device { 120 - int last_state_idx; 121 120 int needs_update; 122 121 int tick_wakeup; 123 122 ··· 454 455 { 455 456 struct menu_device *data = this_cpu_ptr(&menu_devices); 456 457 457 - data->last_state_idx = index; 458 + dev->last_state_idx = index; 458 459 data->needs_update = 1; 459 460 data->tick_wakeup = tick_nohz_idle_got_tick(); 460 461 } ··· 467 468 static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev) 468 469 { 469 470 struct menu_device *data = this_cpu_ptr(&menu_devices); 470 - int last_idx = data->last_state_idx; 471 + int last_idx = dev->last_state_idx; 471 472 struct cpuidle_state *target = &drv->states[last_idx]; 472 473 unsigned int measured_us; 473 474 unsigned int new_factor;
+6 -6
drivers/cpuidle/governors/teo.c
··· 96 96 * @time_span_ns: Time between idle state selection and post-wakeup update. 97 97 * @sleep_length_ns: Time till the closest timer event (at the selection time). 98 98 * @states: Idle states data corresponding to this CPU. 99 - * @last_state: Idle state entered by the CPU last time. 100 99 * @interval_idx: Index of the most recent saved idle interval. 101 100 * @intervals: Saved idle duration values. 102 101 */ ··· 103 104 u64 time_span_ns; 104 105 u64 sleep_length_ns; 105 106 struct teo_idle_state states[CPUIDLE_STATE_MAX]; 106 - int last_state; 107 107 int interval_idx; 108 108 unsigned int intervals[INTERVALS]; 109 109 }; ··· 128 130 */ 129 131 measured_us = sleep_length_us; 130 132 } else { 131 - unsigned int lat = drv->states[cpu_data->last_state].exit_latency; 133 + unsigned int lat; 134 + 135 + lat = drv->states[dev->last_state_idx].exit_latency; 132 136 133 137 measured_us = ktime_to_us(cpu_data->time_span_ns); 134 138 /* ··· 245 245 int max_early_idx, idx, i; 246 246 ktime_t delta_tick; 247 247 248 - if (cpu_data->last_state >= 0) { 248 + if (dev->last_state_idx >= 0) { 249 249 teo_update(drv, dev); 250 - cpu_data->last_state = -1; 250 + dev->last_state_idx = -1; 251 251 } 252 252 253 253 cpu_data->time_span_ns = local_clock(); ··· 394 394 { 395 395 struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu); 396 396 397 - cpu_data->last_state = state; 397 + dev->last_state_idx = state; 398 398 /* 399 399 * If the wakeup was not "natural", but triggered by one of the safety 400 400 * nets, assume that the CPU might have been idle for the entire sleep
+1
include/linux/cpuidle.h
··· 85 85 unsigned int cpu; 86 86 ktime_t next_hrtimer; 87 87 88 + int last_state_idx; 88 89 int last_residency; 89 90 u64 poll_limit_ns; 90 91 struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX];