Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branches 'core-urgent-for-linus', 'perf-urgent-for-linus', 'sched-urgent-for-linus' and 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

* 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
bugs, x86: Fix printk levels for panic, softlockups and stack dumps

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
perf top: Fix number of samples displayed
perf tools: Fix strlen() bug in perf_event__synthesize_event_type()
perf tools: Fix broken build by defining _GNU_SOURCE in Makefile
x86/dumpstack: Remove unneeded check in dump_trace()
perf: Fix broken interrupt rate throttling

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched/rt: Fix task stack corruption under __ARCH_WANT_INTERRUPTS_ON_CTXSW
sched: Fix ancient race in do_exit()
sched/nohz: Fix nohz cpu idle load balancing state with cpu hotplug
sched/s390: Fix compile error in sched/core.c
sched: Fix rq->nr_uninterruptible update race

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/reboot: Remove VersaLogic Menlow reboot quirk
x86/reboot: Skip DMI checks if reboot set by user
x86: Properly parenthesize cmpxchg() macro arguments

+177 -97
+3 -3
arch/x86/include/asm/cmpxchg.h
··· 145 145 146 146 #ifdef __HAVE_ARCH_CMPXCHG 147 147 #define cmpxchg(ptr, old, new) \ 148 - __cmpxchg((ptr), (old), (new), sizeof(*ptr)) 148 + __cmpxchg(ptr, old, new, sizeof(*(ptr))) 149 149 150 150 #define sync_cmpxchg(ptr, old, new) \ 151 - __sync_cmpxchg((ptr), (old), (new), sizeof(*ptr)) 151 + __sync_cmpxchg(ptr, old, new, sizeof(*(ptr))) 152 152 153 153 #define cmpxchg_local(ptr, old, new) \ 154 - __cmpxchg_local((ptr), (old), (new), sizeof(*ptr)) 154 + __cmpxchg_local(ptr, old, new, sizeof(*(ptr))) 155 155 #endif 156 156 157 157 /*
+2 -1
arch/x86/kernel/dumpstack.c
··· 252 252 unsigned short ss; 253 253 unsigned long sp; 254 254 #endif 255 - printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter); 255 + printk(KERN_DEFAULT 256 + "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter); 256 257 #ifdef CONFIG_PREEMPT 257 258 printk("PREEMPT "); 258 259 #endif
+4 -4
arch/x86/kernel/dumpstack_64.c
··· 129 129 if (!stack) { 130 130 if (regs) 131 131 stack = (unsigned long *)regs->sp; 132 - else if (task && task != current) 132 + else if (task != current) 133 133 stack = (unsigned long *)task->thread.sp; 134 134 else 135 135 stack = &dummy; ··· 269 269 unsigned char c; 270 270 u8 *ip; 271 271 272 - printk(KERN_EMERG "Stack:\n"); 272 + printk(KERN_DEFAULT "Stack:\n"); 273 273 show_stack_log_lvl(NULL, regs, (unsigned long *)sp, 274 - 0, KERN_EMERG); 274 + 0, KERN_DEFAULT); 275 275 276 - printk(KERN_EMERG "Code: "); 276 + printk(KERN_DEFAULT "Code: "); 277 277 278 278 ip = (u8 *)regs->ip - code_prologue; 279 279 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
+26 -10
arch/x86/kernel/reboot.c
··· 39 39 enum reboot_type reboot_type = BOOT_ACPI; 40 40 int reboot_force; 41 41 42 + /* This variable is used privately to keep track of whether or not 43 + * reboot_type is still set to its default value (i.e., reboot= hasn't 44 + * been set on the command line). This is needed so that we can 45 + * suppress DMI scanning for reboot quirks. Without it, it's 46 + * impossible to override a faulty reboot quirk without recompiling. 47 + */ 48 + static int reboot_default = 1; 49 + 42 50 #if defined(CONFIG_X86_32) && defined(CONFIG_SMP) 43 51 static int reboot_cpu = -1; 44 52 #endif ··· 75 67 static int __init reboot_setup(char *str) 76 68 { 77 69 for (;;) { 70 + /* Having anything passed on the command line via 71 + * reboot= will cause us to disable DMI checking 72 + * below. 73 + */ 74 + reboot_default = 0; 75 + 78 76 switch (*str) { 79 77 case 'w': 80 78 reboot_mode = 0x1234; ··· 309 295 DMI_MATCH(DMI_BOARD_NAME, "P4S800"), 310 296 }, 311 297 }, 312 - { /* Handle problems with rebooting on VersaLogic Menlow boards */ 313 - .callback = set_bios_reboot, 314 - .ident = "VersaLogic Menlow based board", 315 - .matches = { 316 - DMI_MATCH(DMI_BOARD_VENDOR, "VersaLogic Corporation"), 317 - DMI_MATCH(DMI_BOARD_NAME, "VersaLogic Menlow board"), 318 - }, 319 - }, 320 298 { /* Handle reboot issue on Acer Aspire one */ 321 299 .callback = set_kbd_reboot, 322 300 .ident = "Acer Aspire One A110", ··· 322 316 323 317 static int __init reboot_init(void) 324 318 { 325 - dmi_check_system(reboot_dmi_table); 319 + /* Only do the DMI check if reboot_type hasn't been overridden 320 + * on the command line 321 + */ 322 + if (reboot_default) { 323 + dmi_check_system(reboot_dmi_table); 324 + } 326 325 return 0; 327 326 } 328 327 core_initcall(reboot_init); ··· 476 465 477 466 static int __init pci_reboot_init(void) 478 467 { 479 - dmi_check_system(pci_reboot_dmi_table); 468 + /* Only do the DMI check if reboot_type hasn't been overridden 469 + * on the command line 470 + */ 471 + if (reboot_default) { 472 + dmi_check_system(pci_reboot_dmi_table); 473 + } 480 474 return 0; 481 475 } 482 476 core_initcall(pci_reboot_init);
+2 -2
arch/x86/mm/fault.c
··· 673 673 674 674 stackend = end_of_stack(tsk); 675 675 if (tsk != &init_task && *stackend != STACK_END_MAGIC) 676 - printk(KERN_ALERT "Thread overran stack, or stack corrupted\n"); 676 + printk(KERN_EMERG "Thread overran stack, or stack corrupted\n"); 677 677 678 678 tsk->thread.cr2 = address; 679 679 tsk->thread.trap_no = 14; ··· 684 684 sig = 0; 685 685 686 686 /* Executive summary in case the body of the oops scrolled away */ 687 - printk(KERN_EMERG "CR2: %016lx\n", address); 687 + printk(KERN_DEFAULT "CR2: %016lx\n", address); 688 688 689 689 oops_end(flags, regs, sig); 690 690 }
+1
include/linux/perf_event.h
··· 587 587 u64 sample_period; 588 588 u64 last_period; 589 589 local64_t period_left; 590 + u64 interrupts_seq; 590 591 u64 interrupts; 591 592 592 593 u64 freq_time_stamp;
+66 -38
kernel/events/core.c
··· 2300 2300 return div64_u64(dividend, divisor); 2301 2301 } 2302 2302 2303 + static DEFINE_PER_CPU(int, perf_throttled_count); 2304 + static DEFINE_PER_CPU(u64, perf_throttled_seq); 2305 + 2303 2306 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count) 2304 2307 { 2305 2308 struct hw_perf_event *hwc = &event->hw; ··· 2328 2325 } 2329 2326 } 2330 2327 2331 - static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period) 2328 + /* 2329 + * combine freq adjustment with unthrottling to avoid two passes over the 2330 + * events. At the same time, make sure, having freq events does not change 2331 + * the rate of unthrottling as that would introduce bias. 2332 + */ 2333 + static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx, 2334 + int needs_unthr) 2332 2335 { 2333 2336 struct perf_event *event; 2334 2337 struct hw_perf_event *hwc; 2335 - u64 interrupts, now; 2338 + u64 now, period = TICK_NSEC; 2336 2339 s64 delta; 2337 2340 2338 - if (!ctx->nr_freq) 2341 + /* 2342 + * only need to iterate over all events iff: 2343 + * - context have events in frequency mode (needs freq adjust) 2344 + * - there are events to unthrottle on this cpu 2345 + */ 2346 + if (!(ctx->nr_freq || needs_unthr)) 2339 2347 return; 2348 + 2349 + raw_spin_lock(&ctx->lock); 2340 2350 2341 2351 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 2342 2352 if (event->state != PERF_EVENT_STATE_ACTIVE) ··· 2360 2344 2361 2345 hwc = &event->hw; 2362 2346 2363 - interrupts = hwc->interrupts; 2364 - hwc->interrupts = 0; 2365 - 2366 - /* 2367 - * unthrottle events on the tick 2368 - */ 2369 - if (interrupts == MAX_INTERRUPTS) { 2347 + if (needs_unthr && hwc->interrupts == MAX_INTERRUPTS) { 2348 + hwc->interrupts = 0; 2370 2349 perf_log_throttle(event, 1); 2371 2350 event->pmu->start(event, 0); 2372 2351 } ··· 2369 2358 if (!event->attr.freq || !event->attr.sample_freq) 2370 2359 continue; 2371 2360 2372 - event->pmu->read(event); 2361 + /* 2362 + * stop the event and update event->count 2363 + */ 2364 + event->pmu->stop(event, PERF_EF_UPDATE); 2365 + 2373 2366 now = local64_read(&event->count); 2374 2367 delta = now - hwc->freq_count_stamp; 2375 2368 hwc->freq_count_stamp = now; 2376 2369 2370 + /* 2371 + * restart the event 2372 + * reload only if value has changed 2373 + */ 2377 2374 if (delta > 0) 2378 2375 perf_adjust_period(event, period, delta); 2376 + 2377 + event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); 2379 2378 } 2379 + 2380 + raw_spin_unlock(&ctx->lock); 2380 2381 } 2381 2382 2382 2383 /* ··· 2411 2388 */ 2412 2389 static void perf_rotate_context(struct perf_cpu_context *cpuctx) 2413 2390 { 2414 - u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC; 2415 2391 struct perf_event_context *ctx = NULL; 2416 - int rotate = 0, remove = 1, freq = 0; 2392 + int rotate = 0, remove = 1; 2417 2393 2418 2394 if (cpuctx->ctx.nr_events) { 2419 2395 remove = 0; 2420 2396 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active) 2421 2397 rotate = 1; 2422 - if (cpuctx->ctx.nr_freq) 2423 - freq = 1; 2424 2398 } 2425 2399 2426 2400 ctx = cpuctx->task_ctx; ··· 2425 2405 remove = 0; 2426 2406 if (ctx->nr_events != ctx->nr_active) 2427 2407 rotate = 1; 2428 - if (ctx->nr_freq) 2429 - freq = 1; 2430 2408 } 2431 2409 2432 - if (!rotate && !freq) 2410 + if (!rotate) 2433 2411 goto done; 2434 2412 2435 2413 perf_ctx_lock(cpuctx, cpuctx->task_ctx); 2436 2414 perf_pmu_disable(cpuctx->ctx.pmu); 2437 2415 2438 - if (freq) { 2439 - perf_ctx_adjust_freq(&cpuctx->ctx, interval); 2440 - if (ctx) 2441 - perf_ctx_adjust_freq(ctx, interval); 2442 - } 2416 + cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); 2417 + if (ctx) 2418 + ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE); 2443 2419 2444 - if (rotate) { 2445 - cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); 2446 - if (ctx) 2447 - ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE); 2420 + rotate_ctx(&cpuctx->ctx); 2421 + if (ctx) 2422 + rotate_ctx(ctx); 2448 2423 2449 - rotate_ctx(&cpuctx->ctx); 2450 - if (ctx) 2451 - rotate_ctx(ctx); 2452 - 2453 - perf_event_sched_in(cpuctx, ctx, current); 2454 - } 2424 + perf_event_sched_in(cpuctx, ctx, current); 2455 2425 2456 2426 perf_pmu_enable(cpuctx->ctx.pmu); 2457 2427 perf_ctx_unlock(cpuctx, cpuctx->task_ctx); 2458 - 2459 2428 done: 2460 2429 if (remove) 2461 2430 list_del_init(&cpuctx->rotation_list); ··· 2454 2445 { 2455 2446 struct list_head *head = &__get_cpu_var(rotation_list); 2456 2447 struct perf_cpu_context *cpuctx, *tmp; 2448 + struct perf_event_context *ctx; 2449 + int throttled; 2457 2450 2458 2451 WARN_ON(!irqs_disabled()); 2459 2452 2453 + __this_cpu_inc(perf_throttled_seq); 2454 + throttled = __this_cpu_xchg(perf_throttled_count, 0); 2455 + 2460 2456 list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) { 2457 + ctx = &cpuctx->ctx; 2458 + perf_adjust_freq_unthr_context(ctx, throttled); 2459 + 2460 + ctx = cpuctx->task_ctx; 2461 + if (ctx) 2462 + perf_adjust_freq_unthr_context(ctx, throttled); 2463 + 2461 2464 if (cpuctx->jiffies_interval == 1 || 2462 2465 !(jiffies % cpuctx->jiffies_interval)) 2463 2466 perf_rotate_context(cpuctx); ··· 4530 4509 { 4531 4510 int events = atomic_read(&event->event_limit); 4532 4511 struct hw_perf_event *hwc = &event->hw; 4512 + u64 seq; 4533 4513 int ret = 0; 4534 4514 4535 4515 /* ··· 4540 4518 if (unlikely(!is_sampling_event(event))) 4541 4519 return 0; 4542 4520 4543 - if (unlikely(hwc->interrupts >= max_samples_per_tick)) { 4544 - if (throttle) { 4521 + seq = __this_cpu_read(perf_throttled_seq); 4522 + if (seq != hwc->interrupts_seq) { 4523 + hwc->interrupts_seq = seq; 4524 + hwc->interrupts = 1; 4525 + } else { 4526 + hwc->interrupts++; 4527 + if (unlikely(throttle 4528 + && hwc->interrupts >= max_samples_per_tick)) { 4529 + __this_cpu_inc(perf_throttled_count); 4545 4530 hwc->interrupts = MAX_INTERRUPTS; 4546 4531 perf_log_throttle(event, 0); 4547 4532 ret = 1; 4548 4533 } 4549 - } else 4550 - hwc->interrupts++; 4534 + } 4551 4535 4552 4536 if (event->attr.freq) { 4553 4537 u64 now = perf_clock();
+16
kernel/exit.c
··· 1038 1038 if (tsk->nr_dirtied) 1039 1039 __this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied); 1040 1040 exit_rcu(); 1041 + 1042 + /* 1043 + * The setting of TASK_RUNNING by try_to_wake_up() may be delayed 1044 + * when the following two conditions become true. 1045 + * - There is race condition of mmap_sem (It is acquired by 1046 + * exit_mm()), and 1047 + * - SMI occurs before setting TASK_RUNINNG. 1048 + * (or hypervisor of virtual machine switches to other guest) 1049 + * As a result, we may become TASK_RUNNING after becoming TASK_DEAD 1050 + * 1051 + * To avoid it, we have to wait for releasing tsk->pi_lock which 1052 + * is held by try_to_wake_up() 1053 + */ 1054 + smp_mb(); 1055 + raw_spin_unlock_wait(&tsk->pi_lock); 1056 + 1041 1057 /* causes final put_task_struct in finish_task_switch(). */ 1042 1058 tsk->state = TASK_DEAD; 1043 1059 tsk->flags |= PF_NOFREEZE; /* tell freezer to ignore us */
+7 -12
kernel/sched/core.c
··· 74 74 75 75 #include <asm/tlb.h> 76 76 #include <asm/irq_regs.h> 77 + #include <asm/mutex.h> 77 78 #ifdef CONFIG_PARAVIRT 78 79 #include <asm/paravirt.h> 79 80 #endif ··· 724 723 p->sched_class->dequeue_task(rq, p, flags); 725 724 } 726 725 727 - /* 728 - * activate_task - move a task to the runqueue. 729 - */ 730 726 void activate_task(struct rq *rq, struct task_struct *p, int flags) 731 727 { 732 728 if (task_contributes_to_load(p)) ··· 732 734 enqueue_task(rq, p, flags); 733 735 } 734 736 735 - /* 736 - * deactivate_task - remove a task from the runqueue. 737 - */ 738 737 void deactivate_task(struct rq *rq, struct task_struct *p, int flags) 739 738 { 740 739 if (task_contributes_to_load(p)) ··· 4129 4134 on_rq = p->on_rq; 4130 4135 running = task_current(rq, p); 4131 4136 if (on_rq) 4132 - deactivate_task(rq, p, 0); 4137 + dequeue_task(rq, p, 0); 4133 4138 if (running) 4134 4139 p->sched_class->put_prev_task(rq, p); 4135 4140 ··· 4142 4147 if (running) 4143 4148 p->sched_class->set_curr_task(rq); 4144 4149 if (on_rq) 4145 - activate_task(rq, p, 0); 4150 + enqueue_task(rq, p, 0); 4146 4151 4147 4152 check_class_changed(rq, p, prev_class, oldprio); 4148 4153 task_rq_unlock(rq, p, &flags); ··· 4993 4998 * placed properly. 4994 4999 */ 4995 5000 if (p->on_rq) { 4996 - deactivate_task(rq_src, p, 0); 5001 + dequeue_task(rq_src, p, 0); 4997 5002 set_task_cpu(p, dest_cpu); 4998 - activate_task(rq_dest, p, 0); 5003 + enqueue_task(rq_dest, p, 0); 4999 5004 check_preempt_curr(rq_dest, p, 0); 5000 5005 } 5001 5006 done: ··· 7027 7032 7028 7033 on_rq = p->on_rq; 7029 7034 if (on_rq) 7030 - deactivate_task(rq, p, 0); 7035 + dequeue_task(rq, p, 0); 7031 7036 __setscheduler(rq, p, SCHED_NORMAL, 0); 7032 7037 if (on_rq) { 7033 - activate_task(rq, p, 0); 7038 + enqueue_task(rq, p, 0); 7034 7039 resched_task(rq->curr); 7035 7040 } 7036 7041
+29 -5
kernel/sched/fair.c
··· 4866 4866 return; 4867 4867 } 4868 4868 4869 + static inline void clear_nohz_tick_stopped(int cpu) 4870 + { 4871 + if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) { 4872 + cpumask_clear_cpu(cpu, nohz.idle_cpus_mask); 4873 + atomic_dec(&nohz.nr_cpus); 4874 + clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); 4875 + } 4876 + } 4877 + 4869 4878 static inline void set_cpu_sd_state_busy(void) 4870 4879 { 4871 4880 struct sched_domain *sd; ··· 4913 4904 { 4914 4905 int cpu = smp_processor_id(); 4915 4906 4907 + /* 4908 + * If this cpu is going down, then nothing needs to be done. 4909 + */ 4910 + if (!cpu_active(cpu)) 4911 + return; 4912 + 4916 4913 if (stop_tick) { 4917 4914 if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu))) 4918 4915 return; ··· 4928 4913 set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); 4929 4914 } 4930 4915 return; 4916 + } 4917 + 4918 + static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb, 4919 + unsigned long action, void *hcpu) 4920 + { 4921 + switch (action & ~CPU_TASKS_FROZEN) { 4922 + case CPU_DYING: 4923 + clear_nohz_tick_stopped(smp_processor_id()); 4924 + return NOTIFY_OK; 4925 + default: 4926 + return NOTIFY_DONE; 4927 + } 4931 4928 } 4932 4929 #endif 4933 4930 ··· 5097 5070 * busy tick after returning from idle, we will update the busy stats. 5098 5071 */ 5099 5072 set_cpu_sd_state_busy(); 5100 - if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) { 5101 - clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); 5102 - cpumask_clear_cpu(cpu, nohz.idle_cpus_mask); 5103 - atomic_dec(&nohz.nr_cpus); 5104 - } 5073 + clear_nohz_tick_stopped(cpu); 5105 5074 5106 5075 /* 5107 5076 * None are in tickless mode and hence no need for NOHZ idle load ··· 5613 5590 5614 5591 #ifdef CONFIG_NO_HZ 5615 5592 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT); 5593 + cpu_notifier(sched_ilb_notifier, 0); 5616 5594 #endif 5617 5595 #endif /* SMP */ 5618 5596
+5
kernel/sched/rt.c
··· 1587 1587 if (!next_task) 1588 1588 return 0; 1589 1589 1590 + #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW 1591 + if (unlikely(task_running(rq, next_task))) 1592 + return 0; 1593 + #endif 1594 + 1590 1595 retry: 1591 1596 if (unlikely(next_task == rq->curr)) { 1592 1597 WARN_ON(1);
+1 -1
kernel/watchdog.c
··· 296 296 if (__this_cpu_read(soft_watchdog_warn) == true) 297 297 return HRTIMER_RESTART; 298 298 299 - printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n", 299 + printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n", 300 300 smp_processor_id(), duration, 301 301 current->comm, task_pid_nr(current)); 302 302 print_modules();
+1 -1
lib/bug.c
··· 169 169 return BUG_TRAP_TYPE_WARN; 170 170 } 171 171 172 - printk(KERN_EMERG "------------[ cut here ]------------\n"); 172 + printk(KERN_DEFAULT "------------[ cut here ]------------\n"); 173 173 174 174 if (file) 175 175 printk(KERN_CRIT "kernel BUG at %s:%u!\n",
+2 -5
tools/perf/Makefile
··· 104 104 105 105 CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 $(CFLAGS_WERROR) $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) 106 106 EXTLIBS = -lpthread -lrt -lelf -lm 107 - ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 107 + ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE 108 108 ALL_LDFLAGS = $(LDFLAGS) 109 109 STRIP ?= strip 110 110 ··· 168 168 169 169 ### --- END CONFIGURATION SECTION --- 170 170 171 - # Those must not be GNU-specific; they are shared with perl/ which may 172 - # be built by a different compiler. (Note that this is an artifact now 173 - # but it still might be nice to keep that distinction.) 174 - BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include 171 + BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE 175 172 BASIC_LDFLAGS = 176 173 177 174 # Guard against environment variables
-2
tools/perf/builtin-probe.c
··· 20 20 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 21 21 * 22 22 */ 23 - #define _GNU_SOURCE 24 23 #include <sys/utsname.h> 25 24 #include <sys/types.h> 26 25 #include <sys/stat.h> ··· 30 31 #include <stdlib.h> 31 32 #include <string.h> 32 33 33 - #undef _GNU_SOURCE 34 34 #include "perf.h" 35 35 #include "builtin.h" 36 36 #include "util/util.h"
+10 -3
tools/perf/builtin-top.c
··· 89 89 90 90 static void perf_top__update_print_entries(struct perf_top *top) 91 91 { 92 - top->print_entries = top->winsize.ws_row; 93 - 94 92 if (top->print_entries > 9) 95 93 top->print_entries -= 9; 96 94 } ··· 98 100 struct perf_top *top = arg; 99 101 100 102 get_term_dimensions(&top->winsize); 103 + if (!top->print_entries 104 + || (top->print_entries+4) > top->winsize.ws_row) { 105 + top->print_entries = top->winsize.ws_row; 106 + } else { 107 + top->print_entries += 4; 108 + top->winsize.ws_row = top->print_entries; 109 + } 101 110 perf_top__update_print_entries(top); 102 111 } 103 112 ··· 458 453 }; 459 454 perf_top__sig_winch(SIGWINCH, NULL, top); 460 455 sigaction(SIGWINCH, &act, NULL); 461 - } else 456 + } else { 457 + perf_top__sig_winch(SIGWINCH, NULL, top); 462 458 signal(SIGWINCH, SIG_DFL); 459 + } 463 460 break; 464 461 case 'E': 465 462 if (top->evlist->nr_entries > 1) {
+1 -1
tools/perf/util/header.c
··· 2105 2105 strncpy(ev.event_type.event_type.name, name, MAX_EVENT_NAME - 1); 2106 2106 2107 2107 ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE; 2108 - size = strlen(name); 2108 + size = strlen(ev.event_type.event_type.name); 2109 2109 size = ALIGN(size, sizeof(u64)); 2110 2110 ev.event_type.header.size = sizeof(ev.event_type) - 2111 2111 (sizeof(ev.event_type.event_type.name) - size);
-2
tools/perf/util/probe-event.c
··· 19 19 * 20 20 */ 21 21 22 - #define _GNU_SOURCE 23 22 #include <sys/utsname.h> 24 23 #include <sys/types.h> 25 24 #include <sys/stat.h> ··· 32 33 #include <limits.h> 33 34 #include <elf.h> 34 35 35 - #undef _GNU_SOURCE 36 36 #include "util.h" 37 37 #include "event.h" 38 38 #include "string.h"
-1
tools/perf/util/symbol.c
··· 1 - #define _GNU_SOURCE 2 1 #include <ctype.h> 3 2 #include <dirent.h> 4 3 #include <errno.h>
+1 -2
tools/perf/util/trace-event-parse.c
··· 21 21 * The parts for function graph printing was taken and modified from the 22 22 * Linux Kernel that were written by Frederic Weisbecker. 23 23 */ 24 - #define _GNU_SOURCE 24 + 25 25 #include <stdio.h> 26 26 #include <stdlib.h> 27 27 #include <string.h> 28 28 #include <ctype.h> 29 29 #include <errno.h> 30 30 31 - #undef _GNU_SOURCE 32 31 #include "../perf.h" 33 32 #include "util.h" 34 33 #include "trace-event.h"
-2
tools/perf/util/ui/browsers/hists.c
··· 1 - #define _GNU_SOURCE 2 1 #include <stdio.h> 3 - #undef _GNU_SOURCE 4 2 #include "../libslang.h" 5 3 #include <stdlib.h> 6 4 #include <string.h>
-1
tools/perf/util/ui/helpline.c
··· 1 - #define _GNU_SOURCE 2 1 #include <stdio.h> 3 2 #include <stdlib.h> 4 3 #include <string.h>
-1
tools/perf/util/util.h
··· 40 40 #define decimal_length(x) ((int)(sizeof(x) * 2.56 + 0.5) + 1) 41 41 42 42 #define _ALL_SOURCE 1 43 - #define _GNU_SOURCE 1 44 43 #define _BSD_SOURCE 1 45 44 #define HAS_BOOL 46 45