Commit b20a3503 authored by Christoph Lameter, committed by Linus Torvalds

[PATCH] page migration reorg

Centralize the page migration functions in anticipation of additional
tinkering.  Creates a new file mm/migrate.c

1. Extract buffer_migrate_page() from fs/buffer.c

2. Extract central migration code from vmscan.c

3. Extract some components from mempolicy.c

4. Export pageout() and remove_from_swap() from vmscan.c

5. Make it possible to configure NUMA systems without page migration
   and non-NUMA systems with page migration.

I had to do some #ifdeffing in mempolicy.c that may need a cleanup.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 442295c9
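Point 5 works because the new include/linux/migrate.h (added below) supplies stub definitions when CONFIG_MIGRATION is disabled, so call sites compile unconditionally and need no #ifdefs of their own. A minimal sketch of the resulting calling pattern; the function here is hypothetical, only migrate_prep() and its -ENOSYS stub come from this patch:

#include <linux/migrate.h>

/* Hypothetical caller: builds the same way with or without CONFIG_MIGRATION. */
static int prepare_for_migration(void)
{
	int err;

	err = migrate_prep();	/* the !CONFIG_MIGRATION stub returns -ENOSYS */
	if (err)
		return err;	/* simply propagate when migration is unavailable */
	return 0;
}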
fs/buffer.c
@@ -3050,68 +3050,6 @@ asmlinkage long sys_bdflush(int func, long data)
 	return 0;
 }
 
-/*
- * Migration function for pages with buffers. This function can only be used
- * if the underlying filesystem guarantees that no other references to "page"
- * exist.
- */
-#ifdef CONFIG_MIGRATION
-int buffer_migrate_page(struct page *newpage, struct page *page)
-{
-	struct address_space *mapping = page->mapping;
-	struct buffer_head *bh, *head;
-	int rc;
-
-	if (!mapping)
-		return -EAGAIN;
-
-	if (!page_has_buffers(page))
-		return migrate_page(newpage, page);
-
-	head = page_buffers(page);
-
-	rc = migrate_page_remove_references(newpage, page, 3);
-	if (rc)
-		return rc;
-
-	bh = head;
-	do {
-		get_bh(bh);
-		lock_buffer(bh);
-		bh = bh->b_this_page;
-	} while (bh != head);
-
-	ClearPagePrivate(page);
-	set_page_private(newpage, page_private(page));
-	set_page_private(page, 0);
-	put_page(page);
-	get_page(newpage);
-
-	bh = head;
-	do {
-		set_bh_page(bh, newpage, bh_offset(bh));
-		bh = bh->b_this_page;
-	} while (bh != head);
-
-	SetPagePrivate(newpage);
-
-	migrate_page_copy(newpage, page);
-
-	bh = head;
-	do {
-		unlock_buffer(bh);
-		put_bh(bh);
-		bh = bh->b_this_page;
-	} while (bh != head);
-
-	return 0;
-}
-EXPORT_SYMBOL(buffer_migrate_page);
-#endif
-
 /*
  * Buffer-head allocation
  */
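buffer_migrate_page() moves as-is into mm/migrate.c; the 3 passed to migrate_page_remove_references() is the page reference count the migration core expects the page to hold at this point. Filesystems whose pages carry buffer_heads opt in through the page-migration method of their address_space_operations; a sketch, assuming the fs.h method of this era is named migratepage (the aops instance itself is illustrative, not part of this patch):

static struct address_space_operations example_aops = {
	/* ... readpage, writepage, etc. for the filesystem ... */
	.migratepage	= buffer_migrate_page,
};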
fs/xfs/linux-2.6/xfs_buf.c
@@ -29,6 +29,7 @@
 #include <linux/blkdev.h>
 #include <linux/hash.h>
 #include <linux/kthread.h>
+#include <linux/migrate.h>
 #include "xfs_linux.h"
 
 STATIC kmem_zone_t *xfs_buf_zone;
include/linux/migrate.h (new file)
+#ifndef _LINUX_MIGRATE_H
+#define _LINUX_MIGRATE_H
+
+#include <linux/config.h>
+#include <linux/mm.h>
+
+#ifdef CONFIG_MIGRATION
+extern int isolate_lru_page(struct page *p, struct list_head *pagelist);
+extern int putback_lru_pages(struct list_head *l);
+extern int migrate_page(struct page *, struct page *);
+extern void migrate_page_copy(struct page *, struct page *);
+extern int migrate_page_remove_references(struct page *, struct page *, int);
+extern int migrate_pages(struct list_head *l, struct list_head *t,
+		struct list_head *moved, struct list_head *failed);
+int migrate_pages_to(struct list_head *pagelist,
+		struct vm_area_struct *vma, int dest);
+extern int fail_migrate_page(struct page *, struct page *);
+
+extern int migrate_prep(void);
+
+#else
+
+static inline int isolate_lru_page(struct page *p, struct list_head *list)
+					{ return -ENOSYS; }
+static inline int putback_lru_pages(struct list_head *l) { return 0; }
+static inline int migrate_pages(struct list_head *l, struct list_head *t,
+	struct list_head *moved, struct list_head *failed) { return -ENOSYS; }
+
+static inline int migrate_prep(void) { return -ENOSYS; }
+
+/* Possible settings for the migrate_page() method in address_operations */
+#define migrate_page NULL
+#define fail_migrate_page NULL
+
+#endif /* CONFIG_MIGRATION */
+#endif /* _LINUX_MIGRATE_H */
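The trailing NULL defines let address_space_operations initializers ("address_operations" in the comment refers to address_space_operations) compile unchanged when migration is configured out. A hypothetical helper showing the intended calling sequence of the exported API; the function name and error handling are illustrative only:

#include <linux/migrate.h>

/* Hypothetical: try to move one page to node 'nid'.
 * Assumes the caller already ran migrate_prep(). */
static int move_page_to_node(struct page *page, int nid)
{
	LIST_HEAD(pagelist);
	int err;

	isolate_lru_page(page, &pagelist);	/* queues the page on success */
	if (list_empty(&pagelist))
		return -EBUSY;			/* page was not on the LRU */

	/* NULL vma: the destination is given as a node number. */
	err = migrate_pages_to(&pagelist, NULL, nid);
	if (!list_empty(&pagelist))
		putback_lru_pages(&pagelist);	/* return stragglers to the LRU */
	return err;
}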
include/linux/swap.h
@@ -175,6 +175,21 @@ extern void swap_setup(void);
 extern unsigned long try_to_free_pages(struct zone **, gfp_t);
 extern unsigned long shrink_all_memory(unsigned long nr_pages);
 extern int vm_swappiness;
+extern int remove_mapping(struct address_space *mapping, struct page *page);
+
+/* possible outcome of pageout() */
+typedef enum {
+	/* failed to write page out, page is locked */
+	PAGE_KEEP,
+	/* move page to the active list, page is locked */
+	PAGE_ACTIVATE,
+	/* page has been sent to the disk successfully, page is unlocked */
+	PAGE_SUCCESS,
+	/* page is clean and locked */
+	PAGE_CLEAN,
+} pageout_t;
+
+extern pageout_t pageout(struct page *page, struct address_space *mapping);
+
 #ifdef CONFIG_NUMA
 extern int zone_reclaim_mode;
@@ -188,25 +203,6 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
 }
 #endif
 
-#ifdef CONFIG_MIGRATION
-extern int isolate_lru_page(struct page *p);
-extern unsigned long putback_lru_pages(struct list_head *l);
-extern int migrate_page(struct page *, struct page *);
-extern void migrate_page_copy(struct page *, struct page *);
-extern int migrate_page_remove_references(struct page *, struct page *, int);
-extern unsigned long migrate_pages(struct list_head *l, struct list_head *t,
-	struct list_head *moved, struct list_head *failed);
-extern int fail_migrate_page(struct page *, struct page *);
-#else
-static inline int isolate_lru_page(struct page *p) { return -ENOSYS; }
-static inline int putback_lru_pages(struct list_head *l) { return 0; }
-static inline int migrate_pages(struct list_head *l, struct list_head *t,
-	struct list_head *moved, struct list_head *failed) { return -ENOSYS; }
-/* Possible settings for the migrate_page() method in address_operations */
-#define migrate_page NULL
-#define fail_migrate_page NULL
-#endif
-
 #ifdef CONFIG_MMU
 /* linux/mm/shmem.c */
 extern int shmem_unuse(swp_entry_t entry, struct page *page);
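pageout() and remove_mapping() become visible outside mm/vmscan.c so that the migration code can push dirty pages to disk before moving them. A sketch of how a caller dispatches on pageout_t; the function is hypothetical, modeled only on the enum's own comments:

static int example_writeback(struct page *page, struct address_space *mapping)
{
	switch (pageout(page, mapping)) {
	case PAGE_KEEP:		/* write failed, page is still locked */
	case PAGE_ACTIVATE:	/* caller should move it to the active list */
		return -EIO;
	case PAGE_SUCCESS:	/* I/O submitted, pageout() unlocked the page */
		return -EAGAIN;	/* retry once writeback completes */
	case PAGE_CLEAN:	/* nothing to write, page is clean and locked */
		return 0;
	}
	return 0;
}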
mm/Kconfig
@@ -137,5 +137,11 @@ config SPLIT_PTLOCK_CPUS
 # support for page migration
 #
 config MIGRATION
+	bool "Page migration"
 	def_bool y if NUMA || SPARSEMEM || DISCONTIGMEM
 	depends on SWAP
+	help
+	  Allows the migration of the physical location of pages of processes
+	  while the virtual addresses are not changed. This is useful for
+	  example on NUMA systems to put pages nearer to the processors accessing
+	  the page.
mm/Makefile
@@ -22,3 +22,5 @@ obj-$(CONFIG_SLOB) += slob.o
 obj-$(CONFIG_SLAB) += slab.o
 obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
 obj-$(CONFIG_FS_XIP) += filemap_xip.o
+obj-$(CONFIG_MIGRATION) += migrate.o
mm/mempolicy.c
@@ -86,6 +86,7 @@
 #include <linux/swap.h>
 #include <linux/seq_file.h>
 #include <linux/proc_fs.h>
+#include <linux/migrate.h>
 
 #include <asm/tlbflush.h>
 #include <asm/uaccess.h>
@@ -95,9 +96,6 @@
 #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)	/* Invert check for nodemask */
 #define MPOL_MF_STATS (MPOL_MF_INTERNAL << 2)	/* Gather statistics */
 
-/* The number of pages to migrate per call to migrate_pages() */
-#define MIGRATE_CHUNK_SIZE 256
-
 static struct kmem_cache *policy_cache;
 static struct kmem_cache *sn_cache;
@@ -331,17 +329,10 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 	struct vm_area_struct *first, *vma, *prev;
 
 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
-		/* Must have swap device for migration */
-		if (nr_swap_pages <= 0)
-			return ERR_PTR(-ENODEV);
-
-		/*
-		 * Clear the LRU lists so pages can be isolated.
-		 * Note that pages may be moved off the LRU after we have
-		 * drained them. Those pages will fail to migrate like other
-		 * pages that may be busy.
-		 */
-		lru_add_drain_all();
+
+		err = migrate_prep();
+		if (err)
+			return ERR_PTR(err);
 	}
 
 	first = find_vma(mm, start);
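The swap-space check and the LRU drain removed here do not disappear; they move into the new migrate_prep() in mm/migrate.c (whose diff is collapsed below). A sketch of that helper, reconstructed from the lines removed above:

int migrate_prep(void)
{
	/* Must have swap device for migration */
	if (nr_swap_pages <= 0)
		return -ENODEV;

	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}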
@@ -550,92 +541,18 @@ long do_get_mempolicy(int *policy, nodemask_t *nmask,
 	return err;
 }
 
+#ifdef CONFIG_MIGRATION
 /*
  * page migration
  */
 static void migrate_page_add(struct page *page, struct list_head *pagelist,
 				unsigned long flags)
 {
 	/*
 	 * Avoid migrating a page that is shared with others.
 	 */
-	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
-		if (isolate_lru_page(page))
-			list_add_tail(&page->lru, pagelist);
-	}
-}
-
-/*
- * Migrate the list 'pagelist' of pages to a certain destination.
- *
- * Specify destination with either non-NULL vma or dest_node >= 0
- * Return the number of pages not migrated or error code
- */
-static int migrate_pages_to(struct list_head *pagelist,
-			struct vm_area_struct *vma, int dest)
-{
-	LIST_HEAD(newlist);
-	LIST_HEAD(moved);
-	LIST_HEAD(failed);
-	int err = 0;
-	unsigned long offset = 0;
-	int nr_pages;
-	struct page *page;
-	struct list_head *p;
-
-redo:
-	nr_pages = 0;
-	list_for_each(p, pagelist) {
-		if (vma) {
-			/*
-			 * The address passed to alloc_page_vma is used to
-			 * generate the proper interleave behavior. We fake
-			 * the address here by an increasing offset in order
-			 * to get the proper distribution of pages.
-			 *
-			 * No decision has been made as to which page
-			 * a certain old page is moved to so we cannot
-			 * specify the correct address.
-			 */
-			page = alloc_page_vma(GFP_HIGHUSER, vma,
-					offset + vma->vm_start);
-			offset += PAGE_SIZE;
-		}
-		else
-			page = alloc_pages_node(dest, GFP_HIGHUSER, 0);
-
-		if (!page) {
-			err = -ENOMEM;
-			goto out;
-		}
-		list_add_tail(&page->lru, &newlist);
-		nr_pages++;
-		if (nr_pages > MIGRATE_CHUNK_SIZE)
-			break;
-	}
-	err = migrate_pages(pagelist, &newlist, &moved, &failed);
-
-	putback_lru_pages(&moved);	/* Call release pages instead ?? */
-
-	if (err >= 0 && list_empty(&newlist) && !list_empty(pagelist))
-		goto redo;
-out:
-	/* Return leftover allocated pages */
-	while (!list_empty(&newlist)) {
-		page = list_entry(newlist.next, struct page, lru);
-		list_del(&page->lru);
-		__free_page(page);
-	}
-	list_splice(&failed, pagelist);
-	if (err < 0)
-		return err;
-
-	/* Calculate number of leftover pages */
-	nr_pages = 0;
-	list_for_each(p, pagelist)
-		nr_pages++;
-	return nr_pages;
+	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1)
+		isolate_lru_page(page, pagelist);
 }
 
 /*
@@ -742,7 +659,22 @@ int do_migrate_pages(struct mm_struct *mm,
 	if (err < 0)
 		return err;
 	return busy;
+
+}
+
+#else
+
+static void migrate_page_add(struct page *page, struct list_head *pagelist,
+				unsigned long flags)
+{
+}
+
+int do_migrate_pages(struct mm_struct *mm,
+	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags)
+{
+	return -ENOSYS;
 }
+#endif
 
 long do_mbind(unsigned long start, unsigned long len,
 	unsigned long mode, nodemask_t *nmask, unsigned long flags)
@@ -808,6 +740,7 @@ long do_mbind(unsigned long start, unsigned long len,
 		if (!err && nr_failed && (flags & MPOL_MF_STRICT))
 			err = -EIO;
 	}
+
 	if (!list_empty(&pagelist))
 		putback_lru_pages(&pagelist);
mm/migrate.c (new file): this diff is collapsed in this view.
mm/swap_state.c
@@ -15,6 +15,7 @@
 #include <linux/buffer_head.h>
 #include <linux/backing-dev.h>
 #include <linux/pagevec.h>
+#include <linux/migrate.h>
 
 #include <asm/pgtable.h>
mm/vmscan.c: this diff is collapsed in this view.