Commit 9617d95e authored by Nick Piggin, committed by Linus Torvalds

[PATCH] mm: rmap optimisation

Optimise rmap functions by minimising atomic operations when we know there
will be no concurrent modifications.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 224abf92
...@@ -324,7 +324,7 @@ void install_arg_page(struct vm_area_struct *vma, ...@@ -324,7 +324,7 @@ void install_arg_page(struct vm_area_struct *vma,
lru_cache_add_active(page); lru_cache_add_active(page);
set_pte_at(mm, address, pte, pte_mkdirty(pte_mkwrite(mk_pte( set_pte_at(mm, address, pte, pte_mkdirty(pte_mkwrite(mk_pte(
page, vma->vm_page_prot)))); page, vma->vm_page_prot))));
page_add_anon_rmap(page, vma, address); page_add_new_anon_rmap(page, vma, address);
pte_unmap_unlock(pte, ptl); pte_unmap_unlock(pte, ptl);
/* no need for flush_tlb */ /* no need for flush_tlb */
......
...@@ -71,6 +71,7 @@ void __anon_vma_link(struct vm_area_struct *); ...@@ -71,6 +71,7 @@ void __anon_vma_link(struct vm_area_struct *);
* rmap interfaces called when adding or removing pte of page * rmap interfaces called when adding or removing pte of page
*/ */
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long); void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *); void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *); void page_remove_rmap(struct page *);
......
...@@ -1498,7 +1498,7 @@ gotten: ...@@ -1498,7 +1498,7 @@ gotten:
update_mmu_cache(vma, address, entry); update_mmu_cache(vma, address, entry);
lazy_mmu_prot_update(entry); lazy_mmu_prot_update(entry);
lru_cache_add_active(new_page); lru_cache_add_active(new_page);
page_add_anon_rmap(new_page, vma, address); page_add_new_anon_rmap(new_page, vma, address);
/* Free the old page.. */ /* Free the old page.. */
new_page = old_page; new_page = old_page;
...@@ -1978,7 +1978,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, ...@@ -1978,7 +1978,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
inc_mm_counter(mm, anon_rss); inc_mm_counter(mm, anon_rss);
lru_cache_add_active(page); lru_cache_add_active(page);
SetPageReferenced(page); SetPageReferenced(page);
page_add_anon_rmap(page, vma, address); page_add_new_anon_rmap(page, vma, address);
} else { } else {
/* Map the ZERO_PAGE - vm_page_prot is readonly */ /* Map the ZERO_PAGE - vm_page_prot is readonly */
page = ZERO_PAGE(address); page = ZERO_PAGE(address);
...@@ -2109,7 +2109,7 @@ retry: ...@@ -2109,7 +2109,7 @@ retry:
if (anon) { if (anon) {
inc_mm_counter(mm, anon_rss); inc_mm_counter(mm, anon_rss);
lru_cache_add_active(new_page); lru_cache_add_active(new_page);
page_add_anon_rmap(new_page, vma, address); page_add_new_anon_rmap(new_page, vma, address);
} else { } else {
inc_mm_counter(mm, file_rss); inc_mm_counter(mm, file_rss);
page_add_file_rmap(new_page); page_add_file_rmap(new_page);
......
...@@ -435,17 +435,14 @@ int page_referenced(struct page *page, int is_locked) ...@@ -435,17 +435,14 @@ int page_referenced(struct page *page, int is_locked)
} }
/** /**
* page_add_anon_rmap - add pte mapping to an anonymous page * page_set_anon_rmap - setup new anonymous rmap
* @page: the page to add the mapping to * @page: the page to add the mapping to
* @vma: the vm area in which the mapping is added * @vma: the vm area in which the mapping is added
* @address: the user virtual address mapped * @address: the user virtual address mapped
*
* The caller needs to hold the pte lock.
*/ */
void page_add_anon_rmap(struct page *page, static void __page_set_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address) struct vm_area_struct *vma, unsigned long address)
{ {
if (atomic_inc_and_test(&page->_mapcount)) {
struct anon_vma *anon_vma = vma->anon_vma; struct anon_vma *anon_vma = vma->anon_vma;
BUG_ON(!anon_vma); BUG_ON(!anon_vma);
...@@ -455,10 +452,40 @@ void page_add_anon_rmap(struct page *page, ...@@ -455,10 +452,40 @@ void page_add_anon_rmap(struct page *page,
page->index = linear_page_index(vma, address); page->index = linear_page_index(vma, address);
inc_page_state(nr_mapped); inc_page_state(nr_mapped);
} }
/**
* page_add_anon_rmap - add pte mapping to an anonymous page
* @page: the page to add the mapping to
* @vma: the vm area in which the mapping is added
* @address: the user virtual address mapped
*
* The caller needs to hold the pte lock.
*/
void page_add_anon_rmap(struct page *page,
struct vm_area_struct *vma, unsigned long address)
{
if (atomic_inc_and_test(&page->_mapcount))
__page_set_anon_rmap(page, vma, address);
/* else checking page index and mapping is racy */ /* else checking page index and mapping is racy */
} }
/*
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 *
 * Because the page is new, no other thread can hold a reference that
 * maps it, so a plain atomic_set avoids the more expensive locked
 * inc-and-test used by page_add_anon_rmap.  _mapcount starts at -1
 * (meaning "no mappings"), so setting it to 0 records the first
 * mapping.  Caller must hold the pte lock, as for page_add_anon_rmap.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
	__page_set_anon_rmap(page, vma, address);
}
/** /**
* page_add_file_rmap - add pte mapping to a file page * page_add_file_rmap - add pte mapping to a file page
* @page: the page to add the mapping to * @page: the page to add the mapping to
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment