Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

perf: Simplify the ring-buffer logic: make perf_buffer_alloc() do everything needed

Currently there are perf_buffer_alloc() + perf_buffer_init() + some
separate bits; fold it all into a single perf_buffer_alloc() and leave
only the attachment to the event separate.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
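
In effect, the watermark clamping, the half-buffer default, and the writable flag now live in one place that perf_buffer_alloc() invokes itself. Below is a minimal userspace sketch of that consolidated initialization logic, for illustration only: struct buffer, buffer_init() and data_size() are simplified stand-in names, not the kernel's types or API.

#include <assert.h>
#include <stdio.h>

#define BUFFER_WRITABLE	0x01	/* models PERF_BUFFER_WRITABLE */

/* Simplified stand-in for struct perf_buffer. */
struct buffer {
	long nr_pages;
	long page_size;
	long watermark;
	int  writable;
};

static long data_size(struct buffer *b)
{
	return b->nr_pages * b->page_size;
}

/*
 * Mirrors the logic of the new perf_buffer_init(): clamp an explicit
 * watermark to the buffer size, default to half the buffer, and honour
 * the writable flag.
 */
static void buffer_init(struct buffer *b, long watermark, int flags)
{
	long max_size = data_size(b);

	if (watermark)
		b->watermark = max_size < watermark ? max_size : watermark;

	if (!b->watermark)
		b->watermark = max_size / 2;

	if (flags & BUFFER_WRITABLE)
		b->writable = 1;
}

int main(void)
{
	/* No explicit watermark: defaults to half the data size. */
	struct buffer b = { .nr_pages = 4, .page_size = 4096 };
	buffer_init(&b, 0, 0);
	assert(b.watermark == 4 * 4096 / 2);

	/* Oversized watermark is clamped to the data size. */
	struct buffer c = { .nr_pages = 2, .page_size = 4096 };
	buffer_init(&c, 1 << 20, BUFFER_WRITABLE);
	assert(c.watermark == 2 * 4096);
	assert(c.writable == 1);

	printf("watermarks: %ld %ld\n", b.watermark, c.watermark);
	return 0;
}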

Authored by Peter Zijlstra and committed by Ingo Molnar
commit d57e34fd, parent ca5135e6

+36 -27

include/linux/perf_event.h (+2)

···
 
 struct file;
 
+#define PERF_BUFFER_WRITABLE		0x01
+
 struct perf_buffer {
 	atomic_t			refcount;
 	struct rcu_head			rcu_head;
kernel/perf_event.c (+34 -27)

···
 	rcu_read_unlock();
 }
 
+static unsigned long perf_data_size(struct perf_buffer *buffer);
+
+static void
+perf_buffer_init(struct perf_buffer *buffer, long watermark, int flags)
+{
+	long max_size = perf_data_size(buffer);
+
+	if (watermark)
+		buffer->watermark = min(max_size, watermark);
+
+	if (!buffer->watermark)
+		buffer->watermark = max_size / 2;
+
+	if (flags & PERF_BUFFER_WRITABLE)
+		buffer->writable = 1;
+
+	atomic_set(&buffer->refcount, 1);
+}
+
 #ifndef CONFIG_PERF_USE_VMALLOC
 
 /*
···
 }
 
 static struct perf_buffer *
-perf_buffer_alloc(struct perf_event *event, int nr_pages)
+perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
 {
 	struct perf_buffer *buffer;
 	unsigned long size;
···
 	if (!buffer)
 		goto fail;
 
-	buffer->user_page = perf_mmap_alloc_page(event->cpu);
+	buffer->user_page = perf_mmap_alloc_page(cpu);
 	if (!buffer->user_page)
 		goto fail_user_page;
 
 	for (i = 0; i < nr_pages; i++) {
-		buffer->data_pages[i] = perf_mmap_alloc_page(event->cpu);
+		buffer->data_pages[i] = perf_mmap_alloc_page(cpu);
 		if (!buffer->data_pages[i])
 			goto fail_data_pages;
 	}
 
 	buffer->nr_pages = nr_pages;
+
+	perf_buffer_init(buffer, watermark, flags);
 
 	return buffer;
 
···
 }
 
 static struct perf_buffer *
-perf_buffer_alloc(struct perf_event *event, int nr_pages)
+perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
 {
 	struct perf_buffer *buffer;
 	unsigned long size;
···
 	buffer->data_pages[0] = all_buf + PAGE_SIZE;
 	buffer->page_order = ilog2(nr_pages);
 	buffer->nr_pages = 1;
+
+	perf_buffer_init(buffer, watermark, flags);
 
 	return buffer;
 
···
 	rcu_read_unlock();
 
 	return ret;
-}
-
-static void
-perf_buffer_init(struct perf_event *event, struct perf_buffer *buffer)
-{
-	long max_size = perf_data_size(buffer);
-
-	if (event->attr.watermark) {
-		buffer->watermark = min_t(long, max_size,
-					  event->attr.wakeup_watermark);
-	}
-
-	if (!buffer->watermark)
-		buffer->watermark = max_size / 2;
-
-	atomic_set(&buffer->refcount, 1);
-	rcu_assign_pointer(event->buffer, buffer);
 }
 
 static void perf_buffer_free_rcu(struct rcu_head *rcu_head)
···
 	unsigned long vma_size;
 	unsigned long nr_pages;
 	long user_extra, extra;
-	int ret = 0;
+	int ret = 0, flags = 0;
 
 	/*
 	 * Don't allow mmap() of inherited per-task counters. This would
···
 
 	WARN_ON(event->buffer);
 
-	buffer = perf_buffer_alloc(event, nr_pages);
+	if (vma->vm_flags & VM_WRITE)
+		flags |= PERF_BUFFER_WRITABLE;
+
+	buffer = perf_buffer_alloc(nr_pages, event->attr.wakeup_watermark,
+				   event->cpu, flags);
 	if (!buffer) {
 		ret = -ENOMEM;
 		goto unlock;
 	}
-
-	perf_buffer_init(event, buffer);
-	if (vma->vm_flags & VM_WRITE)
-		event->buffer->writable = 1;
+	rcu_assign_pointer(event->buffer, buffer);
 
 	atomic_long_add(user_extra, &user->locked_vm);
 	event->mmap_locked = extra;
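
Read together, the mmap-path hunks above leave the call site in perf_mmap() as a single allocation call followed by the RCU attach. Pieced together from the added lines (surrounding context elided), the resulting code reads roughly:

	if (vma->vm_flags & VM_WRITE)
		flags |= PERF_BUFFER_WRITABLE;

	buffer = perf_buffer_alloc(nr_pages, event->attr.wakeup_watermark,
				   event->cpu, flags);
	if (!buffer) {
		ret = -ENOMEM;
		goto unlock;
	}
	rcu_assign_pointer(event->buffer, buffer);

Only the rcu_assign_pointer() attach remains a per-event step, which matches the split the commit message describes: allocation and initialization happen in one call, attachment to the event stays separate.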