Merge branch 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'tracing-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
tracing, ring-buffer: add paranoid checks for loops
ftrace: use kretprobe trampoline name to test in output
tracing, alpha: undefined reference to `save_stack_trace'

+80 -19
+1 -1
kernel/trace/Kconfig
··· 25 25 bool 26 26 select DEBUG_FS 27 27 select RING_BUFFER 28 - select STACKTRACE 28 + select STACKTRACE if STACKTRACE_SUPPORT 29 29 select TRACEPOINTS 30 30 select NOP_TRACER 31 31
+56
kernel/trace/ring_buffer.c
··· 1022 1022 struct ring_buffer_event *event; 1023 1023 u64 ts, delta; 1024 1024 int commit = 0; 1025 + int nr_loops = 0; 1025 1026 1026 1027 again: 1028 + /* 1029 + * We allow for interrupts to reenter here and do a trace. 1030 + * If one does, it will cause this original code to loop 1031 + * back here. Even with heavy interrupts happening, this 1032 + * should only happen a few times in a row. If this happens 1033 + * 1000 times in a row, there must be either an interrupt 1034 + * storm or we have something buggy. 1035 + * Bail! 1036 + */ 1037 + if (unlikely(++nr_loops > 1000)) { 1038 + RB_WARN_ON(cpu_buffer, 1); 1039 + return NULL; 1040 + } 1041 + 1027 1042 ts = ring_buffer_time_stamp(cpu_buffer->cpu); 1028 1043 1029 1044 /* ··· 1547 1532 { 1548 1533 struct buffer_page *reader = NULL; 1549 1534 unsigned long flags; 1535 + int nr_loops = 0; 1550 1536 1551 1537 spin_lock_irqsave(&cpu_buffer->lock, flags); 1552 1538 1553 1539 again: 1540 + /* 1541 + * This should normally only loop twice. But because the 1542 + * start of the reader inserts an empty page, it causes 1543 + * a case where we will loop three times. There should be no 1544 + * reason to loop four times (that I know of). 1545 + */ 1546 + if (unlikely(++nr_loops > 3)) { 1547 + RB_WARN_ON(cpu_buffer, 1); 1548 + reader = NULL; 1549 + goto out; 1550 + } 1551 + 1554 1552 reader = cpu_buffer->reader_page; 1555 1553 1556 1554 /* If there's more to read, return this page */ ··· 1693 1665 struct ring_buffer_per_cpu *cpu_buffer; 1694 1666 struct ring_buffer_event *event; 1695 1667 struct buffer_page *reader; 1668 + int nr_loops = 0; 1696 1669 1697 1670 if (!cpu_isset(cpu, buffer->cpumask)) 1698 1671 return NULL; ··· 1701 1672 cpu_buffer = buffer->buffers[cpu]; 1702 1673 1703 1674 again: 1675 + /* 1676 + * We repeat when a timestamp is encountered. It is possible 1677 + * to get multiple timestamps from an interrupt entering just 1678 + * as one timestamp is about to be written. The max times 1679 + * that this can happen is the number of nested interrupts we 1680 + * can have. Nesting 10 deep of interrupts is clearly 1681 + * an anomaly. 1682 + */ 1683 + if (unlikely(++nr_loops > 10)) { 1684 + RB_WARN_ON(cpu_buffer, 1); 1685 + return NULL; 1686 + } 1687 + 1704 1688 reader = rb_get_reader_page(cpu_buffer); 1705 1689 if (!reader) 1706 1690 return NULL; ··· 1764 1722 struct ring_buffer *buffer; 1765 1723 struct ring_buffer_per_cpu *cpu_buffer; 1766 1724 struct ring_buffer_event *event; 1725 + int nr_loops = 0; 1767 1726 1768 1727 if (ring_buffer_iter_empty(iter)) 1769 1728 return NULL; ··· 1773 1730 buffer = cpu_buffer->buffer; 1774 1731 1775 1732 again: 1733 + /* 1734 + * We repeat when a timestamp is encountered. It is possible 1735 + * to get multiple timestamps from an interrupt entering just 1736 + * as one timestamp is about to be written. The max times 1737 + * that this can happen is the number of nested interrupts we 1738 + * can have. Nesting 10 deep of interrupts is clearly 1739 + * an anomaly. 1740 + */ 1741 + if (unlikely(++nr_loops > 10)) { 1742 + RB_WARN_ON(cpu_buffer, 1); 1743 + return NULL; 1744 + } 1745 + 1776 1746 if (rb_per_cpu_empty(cpu_buffer)) 1777 1747 return NULL; 1778 1748
+23 -18
kernel/trace/trace.c
··· 705 705 unsigned long flags, 706 706 int skip, int pc) 707 707 { 708 + #ifdef CONFIG_STACKTRACE 708 709 struct ring_buffer_event *event; 709 710 struct stack_entry *entry; 710 711 struct stack_trace trace; ··· 731 730 732 731 save_stack_trace(&trace); 733 732 ring_buffer_unlock_commit(tr->buffer, event, irq_flags); 733 + #endif 734 734 } 735 735 736 736 void __trace_stack(struct trace_array *tr, ··· 1088 1086 mutex_unlock(&trace_types_lock); 1089 1087 } 1090 1088 1091 - #define KRETPROBE_MSG "[unknown/kretprobe'd]" 1092 - 1093 1089 #ifdef CONFIG_KRETPROBES 1094 - static inline int kretprobed(unsigned long addr) 1090 + static inline const char *kretprobed(const char *name) 1095 1091 { 1096 - return addr == (unsigned long)kretprobe_trampoline; 1092 + static const char tramp_name[] = "kretprobe_trampoline"; 1093 + int size = sizeof(tramp_name); 1094 + 1095 + if (strncmp(tramp_name, name, size) == 0) 1096 + return "[unknown/kretprobe'd]"; 1097 + return name; 1097 1098 } 1098 1099 #else 1099 - static inline int kretprobed(unsigned long addr) 1100 + static inline const char *kretprobed(const char *name) 1100 1101 { 1101 - return 0; 1102 + return name; 1102 1103 } 1103 1104 #endif /* CONFIG_KRETPROBES */ 1104 1105 ··· 1110 1105 { 1111 1106 #ifdef CONFIG_KALLSYMS 1112 1107 char str[KSYM_SYMBOL_LEN]; 1108 + const char *name; 1113 1109 1114 1110 kallsyms_lookup(address, NULL, NULL, NULL, str); 1115 1111 1116 - return trace_seq_printf(s, fmt, str); 1112 + name = kretprobed(str); 1113 + 1114 + return trace_seq_printf(s, fmt, name); 1117 1115 #endif 1118 1116 return 1; 1119 1117 } ··· 1127 1119 { 1128 1120 #ifdef CONFIG_KALLSYMS 1129 1121 char str[KSYM_SYMBOL_LEN]; 1122 + const char *name; 1130 1123 1131 1124 sprint_symbol(str, address); 1132 - return trace_seq_printf(s, fmt, str); 1125 + name = kretprobed(str); 1126 + 1127 + return trace_seq_printf(s, fmt, name); 1133 1128 #endif 1134 1129 return 1; 1135 1130 } ··· 1386 1375 1387 1376 seq_print_ip_sym(s, field->ip, sym_flags); 1388 1377 trace_seq_puts(s, " ("); 1389 - if (kretprobed(field->parent_ip)) 1390 - trace_seq_puts(s, KRETPROBE_MSG); 1391 - else 1392 - seq_print_ip_sym(s, field->parent_ip, sym_flags); 1378 + seq_print_ip_sym(s, field->parent_ip, sym_flags); 1393 1379 trace_seq_puts(s, ")\n"); 1394 1380 break; 1395 1381 } ··· 1502 1494 ret = trace_seq_printf(s, " <-"); 1503 1495 if (!ret) 1504 1496 return TRACE_TYPE_PARTIAL_LINE; 1505 - if (kretprobed(field->parent_ip)) 1506 - ret = trace_seq_puts(s, KRETPROBE_MSG); 1507 - else 1508 - ret = seq_print_ip_sym(s, 1509 - field->parent_ip, 1510 - sym_flags); 1497 + ret = seq_print_ip_sym(s, 1498 + field->parent_ip, 1499 + sym_flags); 1511 1500 if (!ret) 1512 1501 return TRACE_TYPE_PARTIAL_LINE; 1513 1502 }