Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sched/headers: sched/clock: Mark all functions 'notrace', remove CC_FLAGS_FTRACE build asymmetry

Mark all non-init functions in kernel/sched/clock.c as 'notrace', instead of
turning them all off via CC_FLAGS_FTRACE.

This is going to allow the treatment of this file as any other scheduler
file, and it can be #include-ed in compound compilation units as well.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Peter Zijlstra <peterz@infradead.org>

+21 -24
-3
kernel/sched/Makefile
···
 # SPDX-License-Identifier: GPL-2.0
-ifdef CONFIG_FUNCTION_TRACER
-CFLAGS_REMOVE_clock.o = $(CC_FLAGS_FTRACE)
-endif
 
 # The compilers are complaining about unused variables inside an if(0) scope
 # block. This is daft, shut them up.
+21 -21
kernel/sched/clock.c
···
  * This is default implementation.
  * Architectures and sub-architectures can override this.
  */
-unsigned long long __weak sched_clock(void)
+notrace unsigned long long __weak sched_clock(void)
 {
 	return (unsigned long long)(jiffies - INITIAL_JIFFIES)
 					* (NSEC_PER_SEC / HZ);
···
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);
 
-static inline struct sched_clock_data *this_scd(void)
+notrace static inline struct sched_clock_data *this_scd(void)
 {
 	return this_cpu_ptr(&sched_clock_data);
 }
 
-static inline struct sched_clock_data *cpu_sdc(int cpu)
+notrace static inline struct sched_clock_data *cpu_sdc(int cpu)
 {
 	return &per_cpu(sched_clock_data, cpu);
 }
 
-int sched_clock_stable(void)
+notrace int sched_clock_stable(void)
 {
 	return static_branch_likely(&__sched_clock_stable);
 }
 
-static void __scd_stamp(struct sched_clock_data *scd)
+notrace static void __scd_stamp(struct sched_clock_data *scd)
 {
 	scd->tick_gtod = ktime_get_ns();
 	scd->tick_raw = sched_clock();
 }
 
-static void __set_sched_clock_stable(void)
+notrace static void __set_sched_clock_stable(void)
 {
 	struct sched_clock_data *scd;
 
···
  * The only way to fully avoid random clock jumps is to boot with:
  * "tsc=unstable".
  */
-static void __sched_clock_work(struct work_struct *work)
+notrace static void __sched_clock_work(struct work_struct *work)
 {
 	struct sched_clock_data *scd;
 	int cpu;
···
 
 static DECLARE_WORK(sched_clock_work, __sched_clock_work);
 
-static void __clear_sched_clock_stable(void)
+notrace static void __clear_sched_clock_stable(void)
 {
 	if (!sched_clock_stable())
 		return;
···
 	schedule_work(&sched_clock_work);
 }
 
-void clear_sched_clock_stable(void)
+notrace void clear_sched_clock_stable(void)
 {
 	__sched_clock_stable_early = 0;
 
···
 	__clear_sched_clock_stable();
 }
 
-static void __sched_clock_gtod_offset(void)
+notrace static void __sched_clock_gtod_offset(void)
 {
 	struct sched_clock_data *scd = this_scd();
 
···
  * min, max except they take wrapping into account
  */
 
-static inline u64 wrap_min(u64 x, u64 y)
+notrace static inline u64 wrap_min(u64 x, u64 y)
 {
 	return (s64)(x - y) < 0 ? x : y;
 }
 
-static inline u64 wrap_max(u64 x, u64 y)
+notrace static inline u64 wrap_max(u64 x, u64 y)
 {
 	return (s64)(x - y) > 0 ? x : y;
 }
···
  * - filter out backward motion
  * - use the GTOD tick value to create a window to filter crazy TSC values
  */
-static u64 sched_clock_local(struct sched_clock_data *scd)
+notrace static u64 sched_clock_local(struct sched_clock_data *scd)
 {
 	u64 now, clock, old_clock, min_clock, max_clock, gtod;
 	s64 delta;
···
 	return clock;
 }
 
-static u64 sched_clock_remote(struct sched_clock_data *scd)
+notrace static u64 sched_clock_remote(struct sched_clock_data *scd)
 {
 	struct sched_clock_data *my_scd = this_scd();
 	u64 this_clock, remote_clock;
···
  *
  * See cpu_clock().
  */
-u64 sched_clock_cpu(int cpu)
+notrace u64 sched_clock_cpu(int cpu)
 {
 	struct sched_clock_data *scd;
 	u64 clock;
···
 }
 EXPORT_SYMBOL_GPL(sched_clock_cpu);
 
-void sched_clock_tick(void)
+notrace void sched_clock_tick(void)
 {
 	struct sched_clock_data *scd;
 
···
 	sched_clock_local(scd);
 }
 
-void sched_clock_tick_stable(void)
+notrace void sched_clock_tick_stable(void)
 {
 	if (!sched_clock_stable())
 		return;
···
 /*
  * We are going deep-idle (irqs are disabled):
  */
-void sched_clock_idle_sleep_event(void)
+notrace void sched_clock_idle_sleep_event(void)
 {
 	sched_clock_cpu(smp_processor_id());
 }
···
 /*
  * We just idled; resync with ktime.
  */
-void sched_clock_idle_wakeup_event(void)
+notrace void sched_clock_idle_wakeup_event(void)
 {
 	unsigned long flags;
 
···
 	local_irq_enable();
 }
 
-u64 sched_clock_cpu(int cpu)
+notrace u64 sched_clock_cpu(int cpu)
 {
 	if (!static_branch_likely(&sched_clock_running))
 		return 0;
···
  * On bare metal this function should return the same as local_clock.
  * Architectures and sub-architectures can override this.
  */
-u64 __weak running_clock(void)
+notrace u64 __weak running_clock(void)
 {
 	return local_clock();
 }