Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  tracing, ring-buffer: add paranoid checks for loops
  ftrace: use kretprobe trampoline name to test in output
  tracing, alpha: undefined reference to `save_stack_trace'

3 files changed, 80 insertions(+), 19 deletions(-)
kernel/trace/Kconfig | +1 -1
···
 	bool
 	select DEBUG_FS
 	select RING_BUFFER
-	select STACKTRACE
+	select STACKTRACE if STACKTRACE_SUPPORT
 	select TRACEPOINTS
 	select NOP_TRACER
···
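This is the fix for the third commit in the pull: TRACING selected STACKTRACE unconditionally, but architectures such as alpha do not provide save_stack_trace(), so the build failed to link. Making the select conditional on STACKTRACE_SUPPORT, together with the CONFIG_STACKTRACE guard added in kernel/trace/trace.c further below, lets the stack-tracing path compile away cleanly. A minimal user-space sketch of the same compile-out idiom (my illustration, not kernel code; HAVE_STACKTRACE and record_stack() are hypothetical stand-ins for CONFIG_STACKTRACE and the guarded tracer path):

#include <stdio.h>

#ifdef HAVE_STACKTRACE			/* stands in for CONFIG_STACKTRACE */
static void record_stack(void)
{
	puts("stack trace captured");	/* a real build would walk the stack */
}
#else
static void record_stack(void)
{
	/* compiled out: no reference to any helper the linker can't resolve */
}
#endif

int main(void)
{
	record_stack();			/* call sites stay valid either way */
	return 0;
}

Built with or without -DHAVE_STACKTRACE, the program links; the point is that callers never reference a symbol the architecture does not supply.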
kernel/trace/ring_buffer.c | +56
···
 	struct ring_buffer_event *event;
 	u64 ts, delta;
 	int commit = 0;
+	int nr_loops = 0;
 
  again:
+	/*
+	 * We allow for interrupts to reenter here and do a trace.
+	 * If one does, it will cause this original code to loop
+	 * back here. Even with heavy interrupts happening, this
+	 * should only happen a few times in a row. If this happens
+	 * 1000 times in a row, there must be either an interrupt
+	 * storm or we have something buggy.
+	 * Bail!
+	 */
+	if (unlikely(++nr_loops > 1000)) {
+		RB_WARN_ON(cpu_buffer, 1);
+		return NULL;
+	}
+
 	ts = ring_buffer_time_stamp(cpu_buffer->cpu);
 
 	/*
···
 {
 	struct buffer_page *reader = NULL;
 	unsigned long flags;
+	int nr_loops = 0;
 
 	spin_lock_irqsave(&cpu_buffer->lock, flags);
 
  again:
+	/*
+	 * This should normally only loop twice. But because the
+	 * start of the reader inserts an empty page, it causes
+	 * a case where we will loop three times. There should be no
+	 * reason to loop four times (that I know of).
+	 */
+	if (unlikely(++nr_loops > 3)) {
+		RB_WARN_ON(cpu_buffer, 1);
+		reader = NULL;
+		goto out;
+	}
+
 	reader = cpu_buffer->reader_page;
 
 	/* If there's more to read, return this page */
···
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event;
 	struct buffer_page *reader;
+	int nr_loops = 0;
 
 	if (!cpu_isset(cpu, buffer->cpumask))
 		return NULL;
···
 	cpu_buffer = buffer->buffers[cpu];
 
  again:
+	/*
+	 * We repeat when a timestamp is encountered. It is possible
+	 * to get multiple timestamps from an interrupt entering just
+	 * as one timestamp is about to be written. The max times
+	 * that this can happen is the number of nested interrupts we
+	 * can have. Nesting 10 deep of interrupts is clearly
+	 * an anomaly.
+	 */
+	if (unlikely(++nr_loops > 10)) {
+		RB_WARN_ON(cpu_buffer, 1);
+		return NULL;
+	}
+
 	reader = rb_get_reader_page(cpu_buffer);
 	if (!reader)
 		return NULL;
···
 	struct ring_buffer *buffer;
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event;
+	int nr_loops = 0;
 
 	if (ring_buffer_iter_empty(iter))
 		return NULL;
···
 	buffer = cpu_buffer->buffer;
 
  again:
+	/*
+	 * We repeat when a timestamp is encountered. It is possible
+	 * to get multiple timestamps from an interrupt entering just
+	 * as one timestamp is about to be written. The max times
+	 * that this can happen is the number of nested interrupts we
+	 * can have. Nesting 10 deep of interrupts is clearly
+	 * an anomaly.
+	 */
+	if (unlikely(++nr_loops > 10)) {
+		RB_WARN_ON(cpu_buffer, 1);
+		return NULL;
+	}
+
 	if (rb_per_cpu_empty(cpu_buffer))
 		return NULL;
 
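All four hunks apply the same pattern: a retry loop that should settle within a known small number of iterations gets an explicit counter, and exceeding the bound turns a potential livelock into a loud warning plus a bail-out. A compilable user-space sketch of that pattern (my illustration with hypothetical names, not kernel code; the kernel uses unlikely() and RB_WARN_ON() where this uses plain C and fprintf()):

#include <stdio.h>
#include <stdbool.h>

#define MAX_RETRIES 10

static bool try_operation(void)
{
	return false;		/* simulate an operation that keeps failing */
}

static int do_with_guard(void)
{
	int nr_loops = 0;

 again:
	/*
	 * Paranoid check: this loop is expected to converge in a few
	 * iterations. Blowing past the bound means something is buggy,
	 * so warn and bail instead of spinning forever.
	 */
	if (++nr_loops > MAX_RETRIES) {
		fprintf(stderr, "loop guard tripped after %d iterations\n",
			nr_loops - 1);
		return -1;
	}

	if (!try_operation())
		goto again;
	return 0;
}

int main(void)
{
	return do_with_guard() ? 1 : 0;
}

The bounds in the real patch (1000, 3, 10) come from how many times each loop can legitimately repeat: nested interrupts for the writer paths, the reader's empty-page insertion for rb_get_reader_page().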
kernel/trace/trace.c | +23 -18
···
 		unsigned long flags,
 		int skip, int pc)
 {
+#ifdef CONFIG_STACKTRACE
 	struct ring_buffer_event *event;
 	struct stack_entry *entry;
 	struct stack_trace trace;
···
 
 	save_stack_trace(&trace);
 	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+#endif
 }
 
 void __trace_stack(struct trace_array *tr,
···
 	mutex_unlock(&trace_types_lock);
 }
 
-#define KRETPROBE_MSG "[unknown/kretprobe'd]"
-
 #ifdef CONFIG_KRETPROBES
-static inline int kretprobed(unsigned long addr)
+static inline const char *kretprobed(const char *name)
 {
-	return addr == (unsigned long)kretprobe_trampoline;
+	static const char tramp_name[] = "kretprobe_trampoline";
+	int size = sizeof(tramp_name);
+
+	if (strncmp(tramp_name, name, size) == 0)
+		return "[unknown/kretprobe'd]";
+	return name;
 }
 #else
-static inline int kretprobed(unsigned long addr)
+static inline const char *kretprobed(const char *name)
 {
-	return 0;
+	return name;
 }
 #endif /* CONFIG_KRETPROBES */
···
 {
 #ifdef CONFIG_KALLSYMS
 	char str[KSYM_SYMBOL_LEN];
+	const char *name;
 
 	kallsyms_lookup(address, NULL, NULL, NULL, str);
 
-	return trace_seq_printf(s, fmt, str);
+	name = kretprobed(str);
+
+	return trace_seq_printf(s, fmt, name);
 #endif
 	return 1;
 }
···
 {
 #ifdef CONFIG_KALLSYMS
 	char str[KSYM_SYMBOL_LEN];
+	const char *name;
 
 	sprint_symbol(str, address);
-	return trace_seq_printf(s, fmt, str);
+	name = kretprobed(str);
+
+	return trace_seq_printf(s, fmt, name);
 #endif
 	return 1;
 }
···
 
 		seq_print_ip_sym(s, field->ip, sym_flags);
 		trace_seq_puts(s, " (");
-		if (kretprobed(field->parent_ip))
-			trace_seq_puts(s, KRETPROBE_MSG);
-		else
-			seq_print_ip_sym(s, field->parent_ip, sym_flags);
+		seq_print_ip_sym(s, field->parent_ip, sym_flags);
 		trace_seq_puts(s, ")\n");
 		break;
 	}
···
 		ret = trace_seq_printf(s, " <-");
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
-		if (kretprobed(field->parent_ip))
-			ret = trace_seq_puts(s, KRETPROBE_MSG);
-		else
-			ret = seq_print_ip_sym(s,
-					       field->parent_ip,
-					       sym_flags);
+		ret = seq_print_ip_sym(s,
+				       field->parent_ip,
+				       sym_flags);
 		if (!ret)
 			return TRACE_TYPE_PARTIAL_LINE;
 	}
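The kretprobe change moves the "is this a kretprobe trampoline?" test from a raw address comparison against the kretprobe_trampoline symbol to a comparison against the symbol name that kallsyms resolves, applied at the point the name is printed. The callers then no longer need to special-case the parent IP at all. A compilable user-space sketch of the name test (my illustration; kretprobed() closely mirrors the patch, but main() and its inputs are hypothetical):

#include <stdio.h>
#include <string.h>

static const char *kretprobed(const char *name)
{
	static const char tramp_name[] = "kretprobe_trampoline";

	/*
	 * sizeof() includes the trailing NUL, so only an exact
	 * symbol-name match gets substituted.
	 */
	if (strncmp(tramp_name, name, sizeof(tramp_name)) == 0)
		return "[unknown/kretprobe'd]";
	return name;
}

int main(void)
{
	printf("%s\n", kretprobed("kretprobe_trampoline"));	/* substituted */
	printf("%s\n", kretprobed("schedule"));			/* unchanged */
	return 0;
}

Filtering on the resolved name keeps the output readable ("[unknown/kretprobe'd]" instead of a meaningless trampoline symbol) without the tracer having to take the trampoline's address itself.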