Merge tag 'trace-v6.3-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace

Pull tracing fixes from Steven Rostedt:

- Fix setting affinity of hwlat threads in containers

Using sched_setaffinity() has unwanted side effects when being
called within a container. Use set_cpus_allowed_ptr() instead

- Fix per cpu thread management of the hwlat tracer:
- Do not start per_cpu threads if one is already running for the CPU
- When starting per_cpu threads, do not clear the kthread variable
as it may already point to a running per-cpu thread

- Fix return value for test_gen_kprobe_cmd()

On error the return value was overwritten by being set to the result
of the call to kprobe_event_delete(), which would likely succeed,
and thus have the function return success

- Fix splice() reads from the trace file that was broken by commit
36e2c7421f02 ("fs: don't allow splice read/write without explicit
ops")

- Remove obsolete and confusing comment in ring_buffer.c

The original design of the ring buffer used struct page flags for
tricks to optimize, which was shortly removed due to them being
tricks. But a comment for those tricks remained

- Set local functions and variables to static

* tag 'trace-v6.3-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace:
tracing/hwlat: Replace sched_setaffinity with set_cpus_allowed_ptr
ring-buffer: remove obsolete comment for free_buffer_page()
tracing: Make splice_read available again
ftrace: Set direct_ops storage-class-specifier to static
trace/hwlat: Do not start per-cpu thread if it is already running
trace/hwlat: Do not wipe the contents of per-cpu thread data
tracing/osnoise: set several trace_osnoise.c variables storage-class-specifier to static
tracing: Fix wrong return in kprobe_event_gen_test.c

+1 -1
kernel/trace/ftrace.c
··· 2592 2592 arch_ftrace_set_direct_caller(fregs, addr); 2593 2593 } 2594 2594 2595 - struct ftrace_ops direct_ops = { 2595 + static struct ftrace_ops direct_ops = { 2596 2596 .func = call_direct_funcs, 2597 2597 .flags = FTRACE_OPS_FL_DIRECT | FTRACE_OPS_FL_SAVE_REGS 2598 2598 | FTRACE_OPS_FL_PERMANENT,
+2 -2
kernel/trace/kprobe_event_gen_test.c
··· 146 146 if (trace_event_file_is_valid(gen_kprobe_test)) 147 147 gen_kprobe_test = NULL; 148 148 /* We got an error after creating the event, delete it */ 149 - ret = kprobe_event_delete("gen_kprobe_test"); 149 + kprobe_event_delete("gen_kprobe_test"); 150 150 goto out; 151 151 } 152 152 ··· 211 211 if (trace_event_file_is_valid(gen_kretprobe_test)) 212 212 gen_kretprobe_test = NULL; 213 213 /* We got an error after creating the event, delete it */ 214 - ret = kprobe_event_delete("gen_kretprobe_test"); 214 + kprobe_event_delete("gen_kretprobe_test"); 215 215 goto out; 216 216 } 217 217
-4
kernel/trace/ring_buffer.c
··· 354 354 local_set(&bpage->commit, 0); 355 355 } 356 356 357 - /* 358 - * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing 359 - * this issue out. 360 - */ 361 357 static void free_buffer_page(struct buffer_page *bpage) 362 358 { 363 359 free_page((unsigned long)bpage->page);
+2
kernel/trace/trace.c
··· 5167 5167 static const struct file_operations tracing_fops = { 5168 5168 .open = tracing_open, 5169 5169 .read = seq_read, 5170 + .read_iter = seq_read_iter, 5171 + .splice_read = generic_file_splice_read, 5170 5172 .write = tracing_write_stub, 5171 5173 .llseek = tracing_lseek, 5172 5174 .release = tracing_release,
+6 -5
kernel/trace/trace_hwlat.c
··· 339 339 cpumask_clear(current_mask); 340 340 cpumask_set_cpu(next_cpu, current_mask); 341 341 342 - sched_setaffinity(0, current_mask); 342 + set_cpus_allowed_ptr(current, current_mask); 343 343 return; 344 344 345 345 change_mode: ··· 446 446 447 447 } 448 448 449 - sched_setaffinity(kthread->pid, current_mask); 449 + set_cpus_allowed_ptr(kthread, current_mask); 450 450 451 451 kdata->kthread = kthread; 452 452 wake_up_process(kthread); ··· 491 491 static int start_cpu_kthread(unsigned int cpu) 492 492 { 493 493 struct task_struct *kthread; 494 + 495 + /* Do not start a new hwlatd thread if it is already running */ 496 + if (per_cpu(hwlat_per_cpu_data, cpu).kthread) 497 + return 0; 494 498 495 499 kthread = kthread_run_on_cpu(kthread_fn, NULL, cpu, "hwlatd/%u"); 496 500 if (IS_ERR(kthread)) { ··· 587 583 * Run only on CPUs in which hwlat is allowed to run. 588 584 */ 589 585 cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask); 590 - 591 - for_each_online_cpu(cpu) 592 - per_cpu(hwlat_per_cpu_data, cpu).kthread = NULL; 593 586 594 587 for_each_cpu(cpu, current_mask) { 595 588 retval = start_cpu_kthread(cpu);
+5 -5
kernel/trace/trace_osnoise.c
··· 217 217 /* 218 218 * Per-cpu runtime information. 219 219 */ 220 - DEFINE_PER_CPU(struct osnoise_variables, per_cpu_osnoise_var); 220 + static DEFINE_PER_CPU(struct osnoise_variables, per_cpu_osnoise_var); 221 221 222 222 /* 223 223 * this_cpu_osn_var - Return the per-cpu osnoise_variables on its relative CPU ··· 240 240 u64 count; 241 241 }; 242 242 243 - DEFINE_PER_CPU(struct timerlat_variables, per_cpu_timerlat_var); 243 + static DEFINE_PER_CPU(struct timerlat_variables, per_cpu_timerlat_var); 244 244 245 245 /* 246 246 * this_cpu_tmr_var - Return the per-cpu timerlat_variables on its relative CPU ··· 332 332 /* 333 333 * Protect the interface. 334 334 */ 335 - struct mutex interface_lock; 335 + static struct mutex interface_lock; 336 336 337 337 /* 338 338 * Tracer data. ··· 2239 2239 /* 2240 2240 * osnoise/timerlat_period: min 100 us, max 1 s 2241 2241 */ 2242 - u64 timerlat_min_period = 100; 2243 - u64 timerlat_max_period = 1000000; 2242 + static u64 timerlat_min_period = 100; 2243 + static u64 timerlat_max_period = 1000000; 2244 2244 static struct trace_min_max_param timerlat_period = { 2245 2245 .lock = &interface_lock, 2246 2246 .val = &osnoise_data.timerlat_period,