Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'trace-3.12' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing updates from Steven Rostedt:
"Not many changes for the 3.12 merge window. The major tracing changes
are still in flux, and will have to wait for 3.13.

The changes for 3.12 are mostly clean ups and minor fixes.

H. Peter Anvin added a check to x86_32 static function tracing that
helps a small segment of the kernel community.

Oleg Nesterov had a few changes from 3.11, but they were mostly clean ups
and not worth pushing in the -rc time frame.

Li Zefan had a small clean up, annotating a raw_init function with __init.

I fixed a slight race in updating function callbacks, but the race is
so small and the bug that happens when it occurs is so minor it's not
even worth pushing to stable.

The only real enhancement is from Alexander Z Lam that made the
tracing_cpumask work for trace buffer instances, instead of them all
sharing a global cpumask"

* tag 'trace-3.12' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
ftrace/rcu: Do not trace debug_lockdep_rcu_enabled()
x86-32, ftrace: Fix static ftrace when early microcode is enabled
ftrace: Fix a slight race in modifying what function callback gets traced
tracing: Make tracing_cpumask available for all instances
tracing: Kill the !CONFIG_MODULES code in trace_events.c
tracing: Don't pass file_operations array to event_create_dir()
tracing: Kill trace_create_file_ops() and friends
tracing/syscalls: Annotate raw_init function with __init

+68 -209
+3
arch/x86/kernel/entry_32.S
··· 1176 1176 #else /* ! CONFIG_DYNAMIC_FTRACE */ 1177 1177 1178 1178 ENTRY(mcount) 1179 + cmpl $__PAGE_OFFSET, %esp 1180 + jb ftrace_stub /* Paging not enabled yet? */ 1181 + 1179 1182 cmpl $0, function_trace_stop 1180 1183 jne ftrace_stub 1181 1184
+1 -1
kernel/rcupdate.c
··· 122 122 STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key); 123 123 EXPORT_SYMBOL_GPL(rcu_sched_lock_map); 124 124 125 - int debug_lockdep_rcu_enabled(void) 125 + int notrace debug_lockdep_rcu_enabled(void) 126 126 { 127 127 return rcu_scheduler_active && debug_locks && 128 128 current->lockdep_recursion == 0;
+16 -1
kernel/trace/ftrace.c
··· 1978 1978 1979 1979 void ftrace_modify_all_code(int command) 1980 1980 { 1981 + int update = command & FTRACE_UPDATE_TRACE_FUNC; 1982 + 1983 + /* 1984 + * If the ftrace_caller calls a ftrace_ops func directly, 1985 + * we need to make sure that it only traces functions it 1986 + * expects to trace. When doing the switch of functions, 1987 + * we need to update to the ftrace_ops_list_func first 1988 + * before the transition between old and new calls are set, 1989 + * as the ftrace_ops_list_func will check the ops hashes 1990 + * to make sure the ops are having the right functions 1991 + * traced. 1992 + */ 1993 + if (update) 1994 + ftrace_update_ftrace_func(ftrace_ops_list_func); 1995 + 1981 1996 if (command & FTRACE_UPDATE_CALLS) 1982 1997 ftrace_replace_code(1); 1983 1998 else if (command & FTRACE_DISABLE_CALLS) 1984 1999 ftrace_replace_code(0); 1985 2000 1986 - if (command & FTRACE_UPDATE_TRACE_FUNC) 2001 + if (update && ftrace_trace_function != ftrace_ops_list_func) 1987 2002 ftrace_update_ftrace_func(ftrace_trace_function); 1988 2003 1989 2004 if (command & FTRACE_START_FUNC_RET)
+20 -17
kernel/trace/trace.c
··· 3166 3166 }; 3167 3167 3168 3168 /* 3169 - * Only trace on a CPU if the bitmask is set: 3170 - */ 3171 - static cpumask_var_t tracing_cpumask; 3172 - 3173 - /* 3174 3169 * The tracer itself will not take this lock, but still we want 3175 3170 * to provide a consistent cpumask to user-space: 3176 3171 */ ··· 3181 3186 tracing_cpumask_read(struct file *filp, char __user *ubuf, 3182 3187 size_t count, loff_t *ppos) 3183 3188 { 3189 + struct trace_array *tr = file_inode(filp)->i_private; 3184 3190 int len; 3185 3191 3186 3192 mutex_lock(&tracing_cpumask_update_lock); 3187 3193 3188 - len = cpumask_scnprintf(mask_str, count, tracing_cpumask); 3194 + len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask); 3189 3195 if (count - len < 2) { 3190 3196 count = -EINVAL; 3191 3197 goto out_err; ··· 3204 3208 tracing_cpumask_write(struct file *filp, const char __user *ubuf, 3205 3209 size_t count, loff_t *ppos) 3206 3210 { 3207 - struct trace_array *tr = filp->private_data; 3211 + struct trace_array *tr = file_inode(filp)->i_private; 3208 3212 cpumask_var_t tracing_cpumask_new; 3209 3213 int err, cpu; 3210 3214 ··· 3224 3228 * Increase/decrease the disabled counter if we are 3225 3229 * about to flip a bit in the cpumask: 3226 3230 */ 3227 - if (cpumask_test_cpu(cpu, tracing_cpumask) && 3231 + if (cpumask_test_cpu(cpu, tr->tracing_cpumask) && 3228 3232 !cpumask_test_cpu(cpu, tracing_cpumask_new)) { 3229 3233 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled); 3230 3234 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu); 3231 3235 } 3232 - if (!cpumask_test_cpu(cpu, tracing_cpumask) && 3236 + if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) && 3233 3237 cpumask_test_cpu(cpu, tracing_cpumask_new)) { 3234 3238 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled); 3235 3239 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu); ··· 3238 3242 arch_spin_unlock(&ftrace_max_lock); 3239 3243 local_irq_enable(); 3240 3244 3241 - 
cpumask_copy(tracing_cpumask, tracing_cpumask_new); 3245 + cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new); 3242 3246 3243 3247 mutex_unlock(&tracing_cpumask_update_lock); 3244 3248 free_cpumask_var(tracing_cpumask_new); ··· 3252 3256 } 3253 3257 3254 3258 static const struct file_operations tracing_cpumask_fops = { 3255 - .open = tracing_open_generic, 3259 + .open = tracing_open_generic_tr, 3256 3260 .read = tracing_cpumask_read, 3257 3261 .write = tracing_cpumask_write, 3262 + .release = tracing_release_generic_tr, 3258 3263 .llseek = generic_file_llseek, 3259 3264 }; 3260 3265 ··· 5935 5938 if (!tr->name) 5936 5939 goto out_free_tr; 5937 5940 5941 + if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL)) 5942 + goto out_free_tr; 5943 + 5944 + cpumask_copy(tr->tracing_cpumask, cpu_all_mask); 5945 + 5938 5946 raw_spin_lock_init(&tr->start_lock); 5939 5947 5940 5948 tr->current_trace = &nop_trace; ··· 5971 5969 out_free_tr: 5972 5970 if (tr->trace_buffer.buffer) 5973 5971 ring_buffer_free(tr->trace_buffer.buffer); 5972 + free_cpumask_var(tr->tracing_cpumask); 5974 5973 kfree(tr->name); 5975 5974 kfree(tr); 5976 5975 ··· 6101 6098 { 6102 6099 int cpu; 6103 6100 6101 + trace_create_file("tracing_cpumask", 0644, d_tracer, 6102 + tr, &tracing_cpumask_fops); 6103 + 6104 6104 trace_create_file("trace_options", 0644, d_tracer, 6105 6105 tr, &tracing_iter_fops); 6106 6106 ··· 6152 6146 return 0; 6153 6147 6154 6148 init_tracer_debugfs(&global_trace, d_tracer); 6155 - 6156 - trace_create_file("tracing_cpumask", 0644, d_tracer, 6157 - &global_trace, &tracing_cpumask_fops); 6158 6149 6159 6150 trace_create_file("available_tracers", 0444, d_tracer, 6160 6151 &global_trace, &show_traces_fops); ··· 6374 6371 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL)) 6375 6372 goto out; 6376 6373 6377 - if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL)) 6374 + if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL)) 6378 6375 goto out_free_buffer_mask; 6379 
6376 6380 6377 /* Only allocate trace_printk buffers if a trace_printk exists */ ··· 6389 6386 ring_buf_size = 1; 6390 6387 6391 6388 cpumask_copy(tracing_buffer_mask, cpu_possible_mask); 6392 - cpumask_copy(tracing_cpumask, cpu_all_mask); 6389 + cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask); 6393 6390 6394 6391 raw_spin_lock_init(&global_trace.start_lock); 6395 6392 ··· 6444 6441 #ifdef CONFIG_TRACER_MAX_TRACE 6445 6442 free_percpu(global_trace.max_buffer.data); 6446 6443 #endif 6447 - free_cpumask_var(tracing_cpumask); 6444 + free_cpumask_var(global_trace.tracing_cpumask); 6448 6445 out_free_buffer_mask: 6449 6446 free_cpumask_var(tracing_buffer_mask); 6450 6447 out:
+1
kernel/trace/trace.h
··· 206 206 struct dentry *event_dir; 207 207 struct list_head systems; 208 208 struct list_head events; 209 + cpumask_var_t tracing_cpumask; /* only trace on set CPUs */ 209 210 int ref; 210 211 }; 211 212
+22 -185
kernel/trace/trace_events.c
··· 1489 1489 } 1490 1490 1491 1491 static int 1492 - event_create_dir(struct dentry *parent, 1493 - struct ftrace_event_file *file, 1494 - const struct file_operations *id, 1495 - const struct file_operations *enable, 1496 - const struct file_operations *filter, 1497 - const struct file_operations *format) 1492 + event_create_dir(struct dentry *parent, struct ftrace_event_file *file) 1498 1493 { 1499 1494 struct ftrace_event_call *call = file->event_call; 1500 1495 struct trace_array *tr = file->tr; ··· 1517 1522 1518 1523 if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) 1519 1524 trace_create_file("enable", 0644, file->dir, file, 1520 - enable); 1525 + &ftrace_enable_fops); 1521 1526 1522 1527 #ifdef CONFIG_PERF_EVENTS 1523 1528 if (call->event.type && call->class->reg) 1524 1529 trace_create_file("id", 0444, file->dir, 1525 - (void *)(long)call->event.type, id); 1530 + (void *)(long)call->event.type, 1531 + &ftrace_event_id_fops); 1526 1532 #endif 1527 1533 1528 1534 /* ··· 1540 1544 } 1541 1545 } 1542 1546 trace_create_file("filter", 0644, file->dir, call, 1543 - filter); 1547 + &ftrace_event_filter_fops); 1544 1548 1545 1549 trace_create_file("format", 0444, file->dir, call, 1546 - format); 1550 + &ftrace_event_format_fops); 1547 1551 1548 1552 return 0; 1549 1553 } ··· 1644 1648 1645 1649 /* Add an event to a trace directory */ 1646 1650 static int 1647 - __trace_add_new_event(struct ftrace_event_call *call, 1648 - struct trace_array *tr, 1649 - const struct file_operations *id, 1650 - const struct file_operations *enable, 1651 - const struct file_operations *filter, 1652 - const struct file_operations *format) 1651 + __trace_add_new_event(struct ftrace_event_call *call, struct trace_array *tr) 1653 1652 { 1654 1653 struct ftrace_event_file *file; 1655 1654 ··· 1652 1661 if (!file) 1653 1662 return -ENOMEM; 1654 1663 1655 - return event_create_dir(tr->event_dir, file, id, enable, filter, format); 1664 + return 
event_create_dir(tr->event_dir, file); 1656 1665 } 1657 1666 1658 1667 /* ··· 1674 1683 } 1675 1684 1676 1685 struct ftrace_module_file_ops; 1677 - static void __add_event_to_tracers(struct ftrace_event_call *call, 1678 - struct ftrace_module_file_ops *file_ops); 1686 + static void __add_event_to_tracers(struct ftrace_event_call *call); 1679 1687 1680 1688 /* Add an additional event_call dynamically */ 1681 1689 int trace_add_event_call(struct ftrace_event_call *call) ··· 1685 1695 1686 1696 ret = __register_event(call, NULL); 1687 1697 if (ret >= 0) 1688 - __add_event_to_tracers(call, NULL); 1698 + __add_event_to_tracers(call); 1689 1699 1690 1700 mutex_unlock(&event_mutex); 1691 1701 mutex_unlock(&trace_types_lock); ··· 1759 1769 1760 1770 #ifdef CONFIG_MODULES 1761 1771 1762 - static LIST_HEAD(ftrace_module_file_list); 1763 - 1764 - /* 1765 - * Modules must own their file_operations to keep up with 1766 - * reference counting. 1767 - */ 1768 - struct ftrace_module_file_ops { 1769 - struct list_head list; 1770 - struct module *mod; 1771 - struct file_operations id; 1772 - struct file_operations enable; 1773 - struct file_operations format; 1774 - struct file_operations filter; 1775 - }; 1776 - 1777 - static struct ftrace_module_file_ops * 1778 - find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod) 1779 - { 1780 - /* 1781 - * As event_calls are added in groups by module, 1782 - * when we find one file_ops, we don't need to search for 1783 - * each call in that module, as the rest should be the 1784 - * same. Only search for a new one if the last one did 1785 - * not match. 
1786 - */ 1787 - if (file_ops && mod == file_ops->mod) 1788 - return file_ops; 1789 - 1790 - list_for_each_entry(file_ops, &ftrace_module_file_list, list) { 1791 - if (file_ops->mod == mod) 1792 - return file_ops; 1793 - } 1794 - return NULL; 1795 - } 1796 - 1797 - static struct ftrace_module_file_ops * 1798 - trace_create_file_ops(struct module *mod) 1799 - { 1800 - struct ftrace_module_file_ops *file_ops; 1801 - 1802 - /* 1803 - * This is a bit of a PITA. To allow for correct reference 1804 - * counting, modules must "own" their file_operations. 1805 - * To do this, we allocate the file operations that will be 1806 - * used in the event directory. 1807 - */ 1808 - 1809 - file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL); 1810 - if (!file_ops) 1811 - return NULL; 1812 - 1813 - file_ops->mod = mod; 1814 - 1815 - file_ops->id = ftrace_event_id_fops; 1816 - file_ops->id.owner = mod; 1817 - 1818 - file_ops->enable = ftrace_enable_fops; 1819 - file_ops->enable.owner = mod; 1820 - 1821 - file_ops->filter = ftrace_event_filter_fops; 1822 - file_ops->filter.owner = mod; 1823 - 1824 - file_ops->format = ftrace_event_format_fops; 1825 - file_ops->format.owner = mod; 1826 - 1827 - list_add(&file_ops->list, &ftrace_module_file_list); 1828 - 1829 - return file_ops; 1830 - } 1831 - 1832 1772 static void trace_module_add_events(struct module *mod) 1833 1773 { 1834 - struct ftrace_module_file_ops *file_ops = NULL; 1835 1774 struct ftrace_event_call **call, **start, **end; 1836 1775 1837 1776 start = mod->trace_events; 1838 1777 end = mod->trace_events + mod->num_trace_events; 1839 1778 1840 - if (start == end) 1841 - return; 1842 - 1843 - file_ops = trace_create_file_ops(mod); 1844 - if (!file_ops) 1845 - return; 1846 - 1847 1779 for_each_event(call, start, end) { 1848 1780 __register_event(*call, mod); 1849 - __add_event_to_tracers(*call, file_ops); 1781 + __add_event_to_tracers(*call); 1850 1782 } 1851 1783 } 1852 1784 1853 1785 static void trace_module_remove_events(struct 
module *mod) 1854 1786 { 1855 - struct ftrace_module_file_ops *file_ops; 1856 1787 struct ftrace_event_call *call, *p; 1857 1788 bool clear_trace = false; 1858 1789 ··· 1784 1873 clear_trace = true; 1785 1874 __trace_remove_event_call(call); 1786 1875 } 1787 - } 1788 - 1789 - /* Now free the file_operations */ 1790 - list_for_each_entry(file_ops, &ftrace_module_file_list, list) { 1791 - if (file_ops->mod == mod) 1792 - break; 1793 - } 1794 - if (&file_ops->list != &ftrace_module_file_list) { 1795 - list_del(&file_ops->list); 1796 - kfree(file_ops); 1797 1876 } 1798 1877 up_write(&trace_event_sem); 1799 1878 ··· 1820 1919 return 0; 1821 1920 } 1822 1921 1823 - static int 1824 - __trace_add_new_mod_event(struct ftrace_event_call *call, 1825 - struct trace_array *tr, 1826 - struct ftrace_module_file_ops *file_ops) 1827 - { 1828 - return __trace_add_new_event(call, tr, 1829 - &file_ops->id, &file_ops->enable, 1830 - &file_ops->filter, &file_ops->format); 1831 - } 1832 - 1833 - #else 1834 - static inline struct ftrace_module_file_ops * 1835 - find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod) 1836 - { 1837 - return NULL; 1838 - } 1839 - static inline int trace_module_notify(struct notifier_block *self, 1840 - unsigned long val, void *data) 1841 - { 1842 - return 0; 1843 - } 1844 - static inline int 1845 - __trace_add_new_mod_event(struct ftrace_event_call *call, 1846 - struct trace_array *tr, 1847 - struct ftrace_module_file_ops *file_ops) 1848 - { 1849 - return -ENODEV; 1850 - } 1922 + static struct notifier_block trace_module_nb = { 1923 + .notifier_call = trace_module_notify, 1924 + .priority = 0, 1925 + }; 1851 1926 #endif /* CONFIG_MODULES */ 1852 1927 1853 1928 /* Create a new event directory structure for a trace directory. 
*/ 1854 1929 static void 1855 1930 __trace_add_event_dirs(struct trace_array *tr) 1856 1931 { 1857 - struct ftrace_module_file_ops *file_ops = NULL; 1858 1932 struct ftrace_event_call *call; 1859 1933 int ret; 1860 1934 1861 1935 list_for_each_entry(call, &ftrace_events, list) { 1862 - if (call->mod) { 1863 - /* 1864 - * Directories for events by modules need to 1865 - * keep module ref counts when opened (as we don't 1866 - * want the module to disappear when reading one 1867 - * of these files). The file_ops keep account of 1868 - * the module ref count. 1869 - */ 1870 - file_ops = find_ftrace_file_ops(file_ops, call->mod); 1871 - if (!file_ops) 1872 - continue; /* Warn? */ 1873 - ret = __trace_add_new_mod_event(call, tr, file_ops); 1874 - if (ret < 0) 1875 - pr_warning("Could not create directory for event %s\n", 1876 - call->name); 1877 - continue; 1878 - } 1879 - ret = __trace_add_new_event(call, tr, 1880 - &ftrace_event_id_fops, 1881 - &ftrace_enable_fops, 1882 - &ftrace_event_filter_fops, 1883 - &ftrace_event_format_fops); 1936 + ret = __trace_add_new_event(call, tr); 1884 1937 if (ret < 0) 1885 1938 pr_warning("Could not create directory for event %s\n", 1886 1939 call->name); ··· 2142 2287 2143 2288 2144 2289 list_for_each_entry(file, &tr->events, list) { 2145 - ret = event_create_dir(tr->event_dir, file, 2146 - &ftrace_event_id_fops, 2147 - &ftrace_enable_fops, 2148 - &ftrace_event_filter_fops, 2149 - &ftrace_event_format_fops); 2290 + ret = event_create_dir(tr->event_dir, file); 2150 2291 if (ret < 0) 2151 2292 pr_warning("Could not create directory for event %s\n", 2152 2293 file->event_call->name); ··· 2183 2332 remove_event_file_dir(file); 2184 2333 } 2185 2334 2186 - static void 2187 - __add_event_to_tracers(struct ftrace_event_call *call, 2188 - struct ftrace_module_file_ops *file_ops) 2335 + static void __add_event_to_tracers(struct ftrace_event_call *call) 2189 2336 { 2190 2337 struct trace_array *tr; 2191 2338 2192 - list_for_each_entry(tr, 
&ftrace_trace_arrays, list) { 2193 - if (file_ops) 2194 - __trace_add_new_mod_event(call, tr, file_ops); 2195 - else 2196 - __trace_add_new_event(call, tr, 2197 - &ftrace_event_id_fops, 2198 - &ftrace_enable_fops, 2199 - &ftrace_event_filter_fops, 2200 - &ftrace_event_format_fops); 2201 - } 2339 + list_for_each_entry(tr, &ftrace_trace_arrays, list) 2340 + __trace_add_new_event(call, tr); 2202 2341 } 2203 - 2204 - static struct notifier_block trace_module_nb = { 2205 - .notifier_call = trace_module_notify, 2206 - .priority = 0, 2207 - }; 2208 2342 2209 2343 extern struct ftrace_event_call *__start_ftrace_events[]; 2210 2344 extern struct ftrace_event_call *__stop_ftrace_events[]; ··· 2395 2559 if (ret) 2396 2560 return ret; 2397 2561 2562 + #ifdef CONFIG_MODULES 2398 2563 ret = register_module_notifier(&trace_module_nb); 2399 2564 if (ret) 2400 2565 pr_warning("Failed to register trace events module notifier\n"); 2401 - 2566 + #endif 2402 2567 return 0; 2403 2568 } 2404 2569 early_initcall(event_trace_memsetup);
+5 -5
kernel/trace/trace_syscalls.c
··· 200 200 #type, #name, offsetof(typeof(trace), name), \ 201 201 sizeof(trace.name), is_signed_type(type) 202 202 203 - static 204 - int __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len) 203 + static int __init 204 + __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len) 205 205 { 206 206 int i; 207 207 int pos = 0; ··· 228 228 return pos; 229 229 } 230 230 231 - static int set_syscall_print_fmt(struct ftrace_event_call *call) 231 + static int __init set_syscall_print_fmt(struct ftrace_event_call *call) 232 232 { 233 233 char *print_fmt; 234 234 int len; ··· 253 253 return 0; 254 254 } 255 255 256 - static void free_syscall_print_fmt(struct ftrace_event_call *call) 256 + static void __init free_syscall_print_fmt(struct ftrace_event_call *call) 257 257 { 258 258 struct syscall_metadata *entry = call->data; 259 259 ··· 459 459 mutex_unlock(&syscall_trace_lock); 460 460 } 461 461 462 - static int init_syscall_trace(struct ftrace_event_call *call) 462 + static int __init init_syscall_trace(struct ftrace_event_call *call) 463 463 { 464 464 int id; 465 465 int num;