Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm-tracepoint: rename page-free events

Rename mm_page_free_direct into mm_page_free and mm_pagevec_free into
mm_page_free_batched

Since v2.6.33-5426-gc475dab the kernel triggers mm_page_free_direct for
all freed pages, not only for directly freed. So, let's name it properly.
For pages freed via page-list we also trigger mm_page_free_batched event.

Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Konstantin Khlebnikov and committed by Linus Torvalds
b413d48a da066ad3

+20 -20
+6 -6
Documentation/trace/events-kmem.txt
··· 40 40 ================== 41 41 mm_page_alloc page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s 42 42 mm_page_alloc_zone_locked page=%p pfn=%lu order=%u migratetype=%d cpu=%d percpu_refill=%d 43 - mm_page_free_direct page=%p pfn=%lu order=%d 44 - mm_pagevec_free page=%p pfn=%lu order=%d cold=%d 43 + mm_page_free page=%p pfn=%lu order=%d 44 + mm_page_free_batched page=%p pfn=%lu order=%d cold=%d 45 45 46 46 These four events deal with page allocation and freeing. mm_page_alloc is 47 47 a simple indicator of page allocator activity. Pages may be allocated from ··· 53 53 impairs performance by disabling interrupts, dirtying cache lines between 54 54 CPUs and serialising many CPUs. 55 55 56 - When a page is freed directly by the caller, the mm_page_free_direct event 56 + When a page is freed directly by the caller, the only mm_page_free event 57 57 is triggered. Significant amounts of activity here could indicate that the 58 58 callers should be batching their activities. 59 59 60 - When pages are freed using a pagevec, the mm_pagevec_free is 61 - triggered. Broadly speaking, pages are taken off the LRU lock in bulk and 62 - freed in batch with a pagevec. Significant amounts of activity here could 60 + When pages are freed in batch, the also mm_page_free_batched is triggered. 61 + Broadly speaking, pages are taken off the LRU lock in bulk and 62 + freed in batch with a page list. Significant amounts of activity here could 63 63 indicate that the system is under memory pressure and can also indicate 64 64 contention on the zone->lru_lock. 65 65
+10 -10
Documentation/trace/postprocess/trace-pagealloc-postprocess.pl
··· 17 17 18 18 # Tracepoint events 19 19 use constant MM_PAGE_ALLOC => 1; 20 - use constant MM_PAGE_FREE_DIRECT => 2; 21 - use constant MM_PAGEVEC_FREE => 3; 20 + use constant MM_PAGE_FREE => 2; 21 + use constant MM_PAGE_FREE_BATCHED => 3; 22 22 use constant MM_PAGE_PCPU_DRAIN => 4; 23 23 use constant MM_PAGE_ALLOC_ZONE_LOCKED => 5; 24 24 use constant MM_PAGE_ALLOC_EXTFRAG => 6; ··· 223 223 # Perl Switch() sucks majorly 224 224 if ($tracepoint eq "mm_page_alloc") { 225 225 $perprocesspid{$process_pid}->{MM_PAGE_ALLOC}++; 226 - } elsif ($tracepoint eq "mm_page_free_direct") { 227 - $perprocesspid{$process_pid}->{MM_PAGE_FREE_DIRECT}++; 228 - } elsif ($tracepoint eq "mm_pagevec_free") { 229 - $perprocesspid{$process_pid}->{MM_PAGEVEC_FREE}++; 226 + } elsif ($tracepoint eq "mm_page_free") { 227 + $perprocesspid{$process_pid}->{MM_PAGE_FREE}++; 228 + } elsif ($tracepoint eq "mm_page_free_batched") { 229 + $perprocesspid{$process_pid}->{MM_PAGE_FREE_BATCHED}++; 230 230 } elsif ($tracepoint eq "mm_page_pcpu_drain") { 231 231 $perprocesspid{$process_pid}->{MM_PAGE_PCPU_DRAIN}++; 232 232 $perprocesspid{$process_pid}->{STATE_PCPU_PAGES_DRAINED}++; ··· 336 336 $process_pid, 337 337 $stats{$process_pid}->{MM_PAGE_ALLOC}, 338 338 $stats{$process_pid}->{MM_PAGE_ALLOC_ZONE_LOCKED}, 339 - $stats{$process_pid}->{MM_PAGE_FREE_DIRECT}, 340 - $stats{$process_pid}->{MM_PAGEVEC_FREE}, 339 + $stats{$process_pid}->{MM_PAGE_FREE}, 340 + $stats{$process_pid}->{MM_PAGE_FREE_BATCHED}, 341 341 $stats{$process_pid}->{MM_PAGE_PCPU_DRAIN}, 342 342 $stats{$process_pid}->{HIGH_PCPU_DRAINS}, 343 343 $stats{$process_pid}->{HIGH_PCPU_REFILLS}, ··· 364 364 365 365 $perprocess{$process}->{MM_PAGE_ALLOC} += $perprocesspid{$process_pid}->{MM_PAGE_ALLOC}; 366 366 $perprocess{$process}->{MM_PAGE_ALLOC_ZONE_LOCKED} += $perprocesspid{$process_pid}->{MM_PAGE_ALLOC_ZONE_LOCKED}; 367 - $perprocess{$process}->{MM_PAGE_FREE_DIRECT} += $perprocesspid{$process_pid}->{MM_PAGE_FREE_DIRECT}; 368 - $perprocess{$process}->{MM_PAGEVEC_FREE} += $perprocesspid{$process_pid}->{MM_PAGEVEC_FREE}; 367 + $perprocess{$process}->{MM_PAGE_FREE} += $perprocesspid{$process_pid}->{MM_PAGE_FREE}; 368 + $perprocess{$process}->{MM_PAGE_FREE_BATCHED} += $perprocesspid{$process_pid}->{MM_PAGE_FREE_BATCHED}; 369 369 $perprocess{$process}->{MM_PAGE_PCPU_DRAIN} += $perprocesspid{$process_pid}->{MM_PAGE_PCPU_DRAIN}; 370 370 $perprocess{$process}->{HIGH_PCPU_DRAINS} += $perprocesspid{$process_pid}->{HIGH_PCPU_DRAINS}; 371 371 $perprocess{$process}->{HIGH_PCPU_REFILLS} += $perprocesspid{$process_pid}->{HIGH_PCPU_REFILLS};
+2 -2
include/trace/events/kmem.h
··· 147 147 TP_ARGS(call_site, ptr) 148 148 ); 149 149 150 - TRACE_EVENT(mm_page_free_direct, 150 + TRACE_EVENT(mm_page_free, 151 151 152 152 TP_PROTO(struct page *page, unsigned int order), 153 153 ··· 169 169 __entry->order) 170 170 ); 171 171 172 - TRACE_EVENT(mm_pagevec_free, 172 + TRACE_EVENT(mm_page_free_batched, 173 173 174 174 TP_PROTO(struct page *page, int cold), 175 175
+2 -2
mm/page_alloc.c
··· 632 632 int i; 633 633 int bad = 0; 634 634 635 - trace_mm_page_free_direct(page, order); 635 + trace_mm_page_free(page, order); 636 636 kmemcheck_free_shadow(page, order); 637 637 638 638 if (PageAnon(page)) ··· 1196 1196 struct page *page, *next; 1197 1197 1198 1198 list_for_each_entry_safe(page, next, list, lru) { 1199 - trace_mm_pagevec_free(page, cold); 1199 + trace_mm_page_free_batched(page, cold); 1200 1200 free_hot_cold_page(page, cold); 1201 1201 } 1202 1202 }