Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
kernel/profile.c: fix section mismatch warning
function tracing: fix wrong pos computing when read buffer has been fulfilled
tracing: fix mmiotrace resizing crash
ring-buffer: no preempt for sched_clock()
ring-buffer: buffer record on/off switch

+135 -19
+3
include/linux/ring_buffer.h
··· 120 120 u64 ring_buffer_time_stamp(int cpu); 121 121 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts); 122 122 123 + void tracing_on(void); 124 + void tracing_off(void); 125 + 123 126 enum ring_buffer_flags { 124 127 RB_FL_OVERWRITE = 1 << 0, 125 128 };
+1 -1
kernel/profile.c
··· 544 544 }; 545 545 546 546 #ifdef CONFIG_SMP 547 - static void __init profile_nop(void *unused) 547 + static inline void profile_nop(void *unused) 548 548 { 549 549 } 550 550
+17 -17
kernel/trace/ftrace.c
··· 185 185 }; 186 186 187 187 static int ftrace_filtered; 188 - static int tracing_on; 189 188 190 189 static LIST_HEAD(ftrace_new_addrs); 191 190 ··· 505 506 { 506 507 int *command = data; 507 508 508 - if (*command & FTRACE_ENABLE_CALLS) { 509 + if (*command & FTRACE_ENABLE_CALLS) 509 510 ftrace_replace_code(1); 510 - tracing_on = 1; 511 - } else if (*command & FTRACE_DISABLE_CALLS) { 511 + else if (*command & FTRACE_DISABLE_CALLS) 512 512 ftrace_replace_code(0); 513 - tracing_on = 0; 514 - } 515 513 516 514 if (*command & FTRACE_UPDATE_TRACE_FUNC) 517 515 ftrace_update_ftrace_func(ftrace_trace_function); ··· 673 677 674 678 cnt = num_to_init / ENTRIES_PER_PAGE; 675 679 pr_info("ftrace: allocating %ld entries in %d pages\n", 676 - num_to_init, cnt); 680 + num_to_init, cnt + 1); 677 681 678 682 for (i = 0; i < cnt; i++) { 679 683 pg->next = (void *)get_zeroed_page(GFP_KERNEL); ··· 753 757 void *p = NULL; 754 758 loff_t l = -1; 755 759 756 - if (*pos != iter->pos) { 757 - for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l)) 758 - ; 759 - } else { 760 - l = *pos; 761 - p = t_next(m, p, &l); 762 - } 760 + if (*pos > iter->pos) 761 + *pos = iter->pos; 762 + 763 + l = *pos; 764 + p = t_next(m, p, &l); 763 765 764 766 return p; 765 767 } ··· 768 774 769 775 static int t_show(struct seq_file *m, void *v) 770 776 { 777 + struct ftrace_iterator *iter = m->private; 771 778 struct dyn_ftrace *rec = v; 772 779 char str[KSYM_SYMBOL_LEN]; 780 + int ret = 0; 773 781 774 782 if (!rec) 775 783 return 0; 776 784 777 785 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); 778 786 779 - seq_printf(m, "%s\n", str); 787 + ret = seq_printf(m, "%s\n", str); 788 + if (ret < 0) { 789 + iter->pos--; 790 + iter->idx--; 791 + } 780 792 781 793 return 0; 782 794 } ··· 808 808 return -ENOMEM; 809 809 810 810 iter->pg = ftrace_pages_start; 811 - iter->pos = -1; 811 + iter->pos = 0; 812 812 813 813 ret = seq_open(file, &show_ftrace_seq_ops); 814 814 if (!ret) { ··· 895 895 896 896 if (file->f_mode & FMODE_READ) { 897 897 iter->pg = ftrace_pages_start; 898 - iter->pos = -1; 898 + iter->pos = 0; 899 899 iter->flags = enable ? FTRACE_ITER_FILTER : 900 900 FTRACE_ITER_NOTRACE; 901 901
+114 -1
kernel/trace/ring_buffer.c
··· 16 16 #include <linux/list.h> 17 17 #include <linux/fs.h> 18 18 19 + #include "trace.h" 20 + 21 + /* Global flag to disable all recording to ring buffers */ 22 + static int ring_buffers_off __read_mostly; 23 + 24 + /** 25 + * tracing_on - enable all tracing buffers 26 + * 27 + * This function enables all tracing buffers that may have been 28 + * disabled with tracing_off. 29 + */ 30 + void tracing_on(void) 31 + { 32 + ring_buffers_off = 0; 33 + } 34 + 35 + /** 36 + * tracing_off - turn off all tracing buffers 37 + * 38 + * This function stops all tracing buffers from recording data. 39 + * It does not disable any overhead the tracers themselves may 40 + * be causing. This function simply causes all recording to 41 + * the ring buffers to fail. 42 + */ 43 + void tracing_off(void) 44 + { 45 + ring_buffers_off = 1; 46 + } 47 + 19 48 /* Up this if you want to test the TIME_EXTENTS and normalization */ 20 49 #define DEBUG_SHIFT 0 21 50 22 51 /* FIXME!!! */ 23 52 u64 ring_buffer_time_stamp(int cpu) 24 53 { 54 + u64 time; 55 + 56 + preempt_disable_notrace(); 25 57 /* shift to debug/test normalization and TIME_EXTENTS */ 26 - return sched_clock() << DEBUG_SHIFT; 58 + time = sched_clock() << DEBUG_SHIFT; 59 + preempt_enable_notrace(); 60 + 61 + return time; 27 62 } 28 63 29 64 void ring_buffer_normalize_time_stamp(int cpu, u64 *ts) ··· 537 502 unsigned long addr; 538 503 LIST_HEAD(pages); 539 504 int i, cpu; 505 + 506 + /* 507 + * Always succeed at resizing a non-existent buffer: 508 + */ 509 + if (!buffer) 510 + return size; 540 511 541 512 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE); 542 513 size *= BUF_PAGE_SIZE; ··· 1174 1133 struct ring_buffer_event *event; 1175 1134 int cpu, resched; 1176 1135 1136 + if (ring_buffers_off) 1137 + return NULL; 1138 + 1177 1139 if (atomic_read(&buffer->record_disabled)) 1178 1140 return NULL; 1179 1141 ··· 1292 1248 void *body; 1293 1249 int ret = -EBUSY; 1294 1250 int cpu, resched; 1251 + 1252 + if (ring_buffers_off) 1253 + return -EBUSY; 1295 1254 1296 1255 if (atomic_read(&buffer->record_disabled)) 1297 1256 return -EBUSY; ··· 2117 2070 return 0; 2118 2071 } 2119 2072 2073 + static ssize_t 2074 + rb_simple_read(struct file *filp, char __user *ubuf, 2075 + size_t cnt, loff_t *ppos) 2076 + { 2077 + int *p = filp->private_data; 2078 + char buf[64]; 2079 + int r; 2080 + 2081 + /* !ring_buffers_off == tracing_on */ 2082 + r = sprintf(buf, "%d\n", !*p); 2083 + 2084 + return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); 2085 + } 2086 + 2087 + static ssize_t 2088 + rb_simple_write(struct file *filp, const char __user *ubuf, 2089 + size_t cnt, loff_t *ppos) 2090 + { 2091 + int *p = filp->private_data; 2092 + char buf[64]; 2093 + long val; 2094 + int ret; 2095 + 2096 + if (cnt >= sizeof(buf)) 2097 + return -EINVAL; 2098 + 2099 + if (copy_from_user(&buf, ubuf, cnt)) 2100 + return -EFAULT; 2101 + 2102 + buf[cnt] = 0; 2103 + 2104 + ret = strict_strtoul(buf, 10, &val); 2105 + if (ret < 0) 2106 + return ret; 2107 + 2108 + /* !ring_buffers_off == tracing_on */ 2109 + *p = !val; 2110 + 2111 + (*ppos)++; 2112 + 2113 + return cnt; 2114 + } 2115 + 2116 + static struct file_operations rb_simple_fops = { 2117 + .open = tracing_open_generic, 2118 + .read = rb_simple_read, 2119 + .write = rb_simple_write, 2120 + }; 2121 + 2122 + 2123 + static __init int rb_init_debugfs(void) 2124 + { 2125 + struct dentry *d_tracer; 2126 + struct dentry *entry; 2127 + 2128 + d_tracer = tracing_init_dentry(); 2129 + 2130 + entry = debugfs_create_file("tracing_on", 0644, d_tracer, 2131 + &ring_buffers_off, &rb_simple_fops); 2132 + if (!entry) 2133 + pr_warning("Could not create debugfs 'tracing_on' entry\n"); 2134 + 2135 + return 0; 2136 + } 2137 + 2138 + fs_initcall(rb_init_debugfs);