Commit 4b4f278c authored by Mel Gorman, committed by Linus Torvalds

tracing, page-allocator: add trace events for page allocation and page freeing

This patch adds trace events for the allocation and freeing of pages,
including the freeing of pagevecs.  Using the events, it is possible to
see which struct pages and pfns are being allocated and freed and, in
many cases, what the call site was.
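
As context for how these events are consumed (not part of the patch itself),
they can be switched on at runtime through the tracing debugfs interface.
Below is a minimal userspace sketch, assuming debugfs is mounted at
/sys/kernel/debug and that the events appear under the kmem group defined
by this header:

	/*
	 * Illustrative sketch only: enable the three new tracepoints via
	 * the tracing debugfs interface.  Assumes debugfs is mounted at
	 * /sys/kernel/debug and the caller has root privileges.
	 */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	static int enable_event(const char *name)
	{
		char path[256];
		int fd;

		snprintf(path, sizeof(path),
			 "/sys/kernel/debug/tracing/events/kmem/%s/enable", name);
		fd = open(path, O_WRONLY);
		if (fd < 0)
			return -1;
		if (write(fd, "1", 1) != 1) {
			close(fd);
			return -1;
		}
		return close(fd);
	}

	int main(void)
	{
		const char *events[] = {
			"mm_page_alloc", "mm_page_free_direct", "mm_pagevec_free",
		};
		unsigned int i;

		for (i = 0; i < sizeof(events) / sizeof(events[0]); i++)
			if (enable_event(events[i]))
				perror(events[i]);
		return 0;
	}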

The page alloc tracepoints can be used as an indicator of whether the
workload was heavily dependent on the page allocator.  You can make a
guess based on vmstat, but you can't get a per-process breakdown.
Depending on the call path, the call_site for page allocation may be
__get_free_pages() instead of a useful call site.  Instead of passing down
a return address as slab debugging does, the user should enable the
stacktrace and sym-addr options to get a proper stack trace.
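
As a rough illustration of that advice (again, not part of the patch):
both knobs are plain boolean files under the tracing options directory.
This sketch assumes the usual debugfs mount point and that the option
names match the running kernel:

	/*
	 * Illustrative sketch only: turn on the "stacktrace" and
	 * "sym-addr" trace options so each event is followed by a
	 * resolved stack trace.  Paths assume debugfs is mounted at
	 * /sys/kernel/debug.
	 */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	static int set_option(const char *option)
	{
		char path[256];
		int fd;

		snprintf(path, sizeof(path),
			 "/sys/kernel/debug/tracing/options/%s", option);
		fd = open(path, O_WRONLY);
		if (fd < 0)
			return -1;
		if (write(fd, "1", 1) != 1) {
			close(fd);
			return -1;
		}
		return close(fd);
	}

	int main(void)
	{
		if (set_option("stacktrace") || set_option("sym-addr"))
			perror("set_option");
		return 0;
	}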

The pagevec free tracepoint has a different use case.  It can be used to
get an idea of how many pages are being dumped off the LRU and whether it
is kswapd doing the work or a process doing direct reclaim.
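
A hedged sketch of that use case: scan trace_pipe and keep a running tally
of mm_pagevec_free events attributed to kswapd versus everything else.
The exact trace_pipe line layout varies between kernels, so the string
matching here is approximate:

	/*
	 * Illustrative sketch only: tally mm_pagevec_free events per
	 * reclaimer by scanning trace_pipe.  Lines normally begin with
	 * "comm-pid"; the parsing is approximate and the line format can
	 * vary across kernels.
	 */
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		FILE *fp = fopen("/sys/kernel/debug/tracing/trace_pipe", "r");
		char line[512];
		unsigned long kswapd = 0, other = 0;

		if (!fp) {
			perror("trace_pipe");
			return 1;
		}
		while (fgets(line, sizeof(line), fp)) {
			if (!strstr(line, "mm_pagevec_free:"))
				continue;
			if (strstr(line, "kswapd"))
				kswapd++;	/* background reclaim */
			else
				other++;	/* likely direct reclaim */
			fprintf(stderr, "kswapd=%lu other=%lu\n", kswapd, other);
		}
		fclose(fp);
		return 0;
	}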

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Ingo Molnar <mingo@elte.hu>
Cc: Larry Woodman <lwoodman@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Li Ming Chun <macli@brc.ubc.ca>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 38a39857
include/trace/events/kmem.h
@@ -225,6 +225,80 @@ TRACE_EVENT(kmem_cache_free,
 	TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
 );
 
+TRACE_EVENT(mm_page_free_direct,
+
+	TP_PROTO(struct page *page, unsigned int order),
+
+	TP_ARGS(page, order),
+
+	TP_STRUCT__entry(
+		__field(	struct page *,	page	)
+		__field(	unsigned int,	order	)
+	),
+
+	TP_fast_assign(
+		__entry->page	= page;
+		__entry->order	= order;
+	),
+
+	TP_printk("page=%p pfn=%lu order=%d",
+			__entry->page,
+			page_to_pfn(__entry->page),
+			__entry->order)
+);
+
+TRACE_EVENT(mm_pagevec_free,
+
+	TP_PROTO(struct page *page, int cold),
+
+	TP_ARGS(page, cold),
+
+	TP_STRUCT__entry(
+		__field(	struct page *,	page	)
+		__field(	int,		cold	)
+	),
+
+	TP_fast_assign(
+		__entry->page	= page;
+		__entry->cold	= cold;
+	),
+
+	TP_printk("page=%p pfn=%lu order=0 cold=%d",
+			__entry->page,
+			page_to_pfn(__entry->page),
+			__entry->cold)
+);
+
+TRACE_EVENT(mm_page_alloc,
+
+	TP_PROTO(struct page *page, unsigned int order,
+			gfp_t gfp_flags, int migratetype),
+
+	TP_ARGS(page, order, gfp_flags, migratetype),
+
+	TP_STRUCT__entry(
+		__field(	struct page *,	page		)
+		__field(	unsigned int,	order		)
+		__field(	gfp_t,		gfp_flags	)
+		__field(	int,		migratetype	)
+	),
+
+	TP_fast_assign(
+		__entry->page		= page;
+		__entry->order		= order;
+		__entry->gfp_flags	= gfp_flags;
+		__entry->migratetype	= migratetype;
+	),
+
+	TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
+		__entry->page,
+		page_to_pfn(__entry->page),
+		__entry->order,
+		__entry->migratetype,
+		show_gfp_flags(__entry->gfp_flags))
+);
+
 #endif /* _TRACE_KMEM_H */
 
 /* This part must be outside protection */

mm/page_alloc.c
@@ -1076,6 +1076,7 @@ static void free_hot_cold_page(struct page *page, int cold)
 
 void free_hot_page(struct page *page)
 {
+	trace_mm_page_free_direct(page, 0);
 	free_hot_cold_page(page, 0);
 }
 
@@ -1920,6 +1921,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 			zonelist, high_zoneidx, nodemask,
 			preferred_zone, migratetype);
 
+	trace_mm_page_alloc(page, order, gfp_mask, migratetype);
 	return page;
 }
 EXPORT_SYMBOL(__alloc_pages_nodemask);
@@ -1954,13 +1956,16 @@ void __pagevec_free(struct pagevec *pvec)
 {
 	int i = pagevec_count(pvec);
 
-	while (--i >= 0)
+	while (--i >= 0) {
+		trace_mm_pagevec_free(pvec->pages[i], pvec->cold);
 		free_hot_cold_page(pvec->pages[i], pvec->cold);
+	}
 }
 
 void __free_pages(struct page *page, unsigned int order)
 {
 	if (put_page_testzero(page)) {
+		trace_mm_page_free_direct(page, order);
 		if (order == 0)
 			free_hot_page(page);
 		else
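
For reference, the TP_printk formats above yield trace lines shaped roughly
like the following; the task, page, pfn, and flag values are invented for
illustration:

	           <...>-3049  [001]   712.520754: mm_page_alloc: page=ffffea0000f5c380 pfn=62926 order=0 migratetype=0 gfp_flags=GFP_KERNEL
	         kswapd0-271   [000]   712.520931: mm_pagevec_free: page=ffffea0000f5c380 pfn=62926 order=0 cold=0
	           <...>-3049  [001]   712.521108: mm_page_free_direct: page=ffffea0000d08a40 pfn=53418 order=2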