···11+/*22+ * X86 trace clocks33+ */44+#include <asm/trace_clock.h>55+#include <asm/barrier.h>66+#include <asm/msr.h>77+88+/*99+ * trace_clock_x86_tsc(): A clock that is just the cycle counter.1010+ *1111+ * Unlike the other clocks, this is not in nanoseconds.1212+ */1313+u64 notrace trace_clock_x86_tsc(void)1414+{1515+ u64 ret;1616+1717+ rdtsc_barrier();1818+ rdtscll(ret);1919+2020+ return ret;2121+}
#ifndef _ASM_GENERIC_TRACE_CLOCK_H
#define _ASM_GENERIC_TRACE_CLOCK_H
/*
 * Arch-specific trace clocks.
 */

/*
 * Additional trace clocks added to the trace_clocks
 * array in kernel/trace/trace.c.
 * Expands to nothing if the architecture has not defined any
 * extra clocks of its own, so the array is unchanged by default.
 */
#ifndef ARCH_TRACE_CLOCKS
# define ARCH_TRACE_CLOCKS
#endif

#endif /* _ASM_GENERIC_TRACE_CLOCK_H */
···619619620620#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)621621622622-/*623623- * Define the insertion callback to perf events624624- *625625- * The job is very similar to ftrace_raw_event_<call> except that we don't626626- * insert in the ring buffer but in a perf counter.627627- *628628- * static void ftrace_perf_<call>(proto)629629- * {630630- * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;631631- * struct ftrace_event_call *event_call = &event_<call>;632632- * extern void perf_tp_event(int, u64, u64, void *, int);633633- * struct ftrace_raw_##call *entry;634634- * struct perf_trace_buf *trace_buf;635635- * u64 __addr = 0, __count = 1;636636- * unsigned long irq_flags;637637- * struct trace_entry *ent;638638- * int __entry_size;639639- * int __data_size;640640- * int __cpu641641- * int pc;642642- *643643- * pc = preempt_count();644644- *645645- * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);646646- *647647- * // Below we want to get the aligned size by taking into account648648- * // the u32 field that will later store the buffer size649649- * __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),650650- * sizeof(u64));651651- * __entry_size -= sizeof(u32);652652- *653653- * // Protect the non nmi buffer654654- * // This also protects the rcu read side655655- * local_irq_save(irq_flags);656656- * __cpu = smp_processor_id();657657- *658658- * if (in_nmi())659659- * trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);660660- * else661661- * trace_buf = rcu_dereference_sched(perf_trace_buf);662662- *663663- * if (!trace_buf)664664- * goto end;665665- *666666- * trace_buf = per_cpu_ptr(trace_buf, __cpu);667667- *668668- * // Avoid recursion from perf that could mess up the buffer669669- * if (trace_buf->recursion++)670670- * goto end_recursion;671671- *672672- * raw_data = trace_buf->buf;673673- *674674- * // Make recursion update visible before entering perf_tp_event675675- * // so that we protect from perf 
recursions.676676- *677677- * barrier();678678- *679679- * //zero dead bytes from alignment to avoid stack leak to userspace:680680- * *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;681681- * entry = (struct ftrace_raw_<call> *)raw_data;682682- * ent = &entry->ent;683683- * tracing_generic_entry_update(ent, irq_flags, pc);684684- * ent->type = event_call->id;685685- *686686- * <tstruct> <- do some jobs with dynamic arrays687687- *688688- * <assign> <- affect our values689689- *690690- * perf_tp_event(event_call->id, __addr, __count, entry,691691- * __entry_size); <- submit them to perf counter692692- *693693- * }694694- */695622696623#ifdef CONFIG_PERF_EVENTS697624
+30-9
kernel/trace/trace.c
···484484static struct {485485 u64 (*func)(void);486486 const char *name;487487+ int in_ns; /* is this clock in nanoseconds? */487488} trace_clocks[] = {488488- { trace_clock_local, "local" },489489- { trace_clock_global, "global" },490490- { trace_clock_counter, "counter" },489489+ { trace_clock_local, "local", 1 },490490+ { trace_clock_global, "global", 1 },491491+ { trace_clock_counter, "counter", 0 },492492+ ARCH_TRACE_CLOCKS491493};492494493495int trace_clock_id;···24792477 if (ring_buffer_overruns(iter->tr->buffer))24802478 iter->iter_flags |= TRACE_FILE_ANNOTATE;2481247924802480+ /* Output in nanoseconds only if we are using a clock in nanoseconds. */24812481+ if (trace_clocks[trace_clock_id].in_ns)24822482+ iter->iter_flags |= TRACE_FILE_TIME_IN_NS;24832483+24822484 /* stop the trace while dumping */24832485 tracing_stop();24842486···3343333733443338 if (trace_flags & TRACE_ITER_LATENCY_FMT)33453339 iter->iter_flags |= TRACE_FILE_LAT_FMT;33403340+33413341+ /* Output in nanoseconds only if we are using a clock in nanoseconds. 
*/33423342+ if (trace_clocks[trace_clock_id].in_ns)33433343+ iter->iter_flags |= TRACE_FILE_TIME_IN_NS;3346334433473345 iter->cpu_file = cpu_file;33483346 iter->tr = &global_trace;···43884378 cnt = ring_buffer_bytes_cpu(tr->buffer, cpu);43894379 trace_seq_printf(s, "bytes: %ld\n", cnt);4390438043914391- t = ns2usecs(ring_buffer_oldest_event_ts(tr->buffer, cpu));43924392- usec_rem = do_div(t, USEC_PER_SEC);43934393- trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", t, usec_rem);43814381+ if (trace_clocks[trace_clock_id].in_ns) {43824382+ /* local or global for trace_clock */43834383+ t = ns2usecs(ring_buffer_oldest_event_ts(tr->buffer, cpu));43844384+ usec_rem = do_div(t, USEC_PER_SEC);43854385+ trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",43864386+ t, usec_rem);4394438743954395- t = ns2usecs(ring_buffer_time_stamp(tr->buffer, cpu));43964396- usec_rem = do_div(t, USEC_PER_SEC);43974397- trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);43884388+ t = ns2usecs(ring_buffer_time_stamp(tr->buffer, cpu));43894389+ usec_rem = do_div(t, USEC_PER_SEC);43904390+ trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);43914391+ } else {43924392+ /* counter or tsc mode for trace_clock */43934393+ trace_seq_printf(s, "oldest event ts: %llu\n",43944394+ ring_buffer_oldest_event_ts(tr->buffer, cpu));43954395+43964396+ trace_seq_printf(s, "now ts: %llu\n",43974397+ ring_buffer_time_stamp(tr->buffer, cpu));43984398+ }4398439943994400 cnt = ring_buffer_dropped_events_cpu(tr->buffer, cpu);44004401 trace_seq_printf(s, "dropped events: %ld\n", cnt);