Commit 351619ba authored by Rafael J. Wysocki, committed by Linus Torvalds

[PATCH] swsusp: rework image freeing

The following patch makes swsusp use PG_nosave and PG_nosave_free flags to
mark pages that should be freed after the state of the system has been
restored from the image (or in case of an error during suspend).

This allows us to avoid storing metadata in swap twice and to reduce the
amount of memory needed by swsusp. Additionally, it allows us to simplify
the code by removing a couple of functions that are no longer necessary.
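
For readers who do not work on swsusp, a minimal userspace sketch of the idea follows. It is not kernel code: the frame array, the flag bits, and the helper names below are illustrative stand-ins for struct page and the PG_nosave/PG_nosave_free handling in the patch, and only show how pages that are flagged when they are allocated for the image can later be found and freed by a single sweep.

/*
 * Illustrative userspace model of the approach (not kernel code).
 * "Frames" stand in for struct page; the two bits stand in for
 * PG_nosave and PG_nosave_free.
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_FRAMES      16
#define FL_NOSAVE      0x1	/* frame holds image data, skip when saving */
#define FL_NOSAVE_FREE 0x2	/* frame must be freed once restore is done */

struct frame {
	void *data;		/* would be the page contents */
	unsigned flags;
};

static struct frame frames[NR_FRAMES];

/* Allocate a frame for the image and mark it for later freeing,
 * the way alloc_image_page() in the patch sets PG_nosave and
 * PG_nosave_free on freshly allocated image pages. */
static struct frame *alloc_image_frame(void)
{
	for (int i = 0; i < NR_FRAMES; i++) {
		if (!frames[i].data) {
			frames[i].data = calloc(1, 4096);
			if (!frames[i].data)
				return NULL;
			frames[i].flags = FL_NOSAVE | FL_NOSAVE_FREE;
			return &frames[i];
		}
	}
	return NULL;
}

/* Sweep every frame and release the ones marked for freeing,
 * mirroring the new swsusp_free() walk over all valid pfns. */
static void free_image_frames(void)
{
	for (int i = 0; i < NR_FRAMES; i++) {
		if ((frames[i].flags & (FL_NOSAVE | FL_NOSAVE_FREE)) ==
		    (FL_NOSAVE | FL_NOSAVE_FREE)) {
			free(frames[i].data);
			frames[i].data = NULL;
			frames[i].flags = 0;
		}
	}
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		alloc_image_frame();
	free_image_frames();	/* no separate bookkeeping needed to clean up */
	printf("all image frames released\n");
	return 0;
}

The design point is that cleanup no longer depends on separate metadata being intact: any page flagged when it was allocated for the image can be rediscovered by scanning the page frames, both after a successful restore and on the error paths during suspend.
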
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Signed-off-by: Pavel Machek <pavel@suse.cz>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent c32b6b8e
@@ -708,24 +708,28 @@ static void count_data_pages(void)
 	}
 }
 static void copy_data_pages(void)
 {
 	struct zone *zone;
 	unsigned long zone_pfn;
-	struct pbe * pbe = pagedir_nosave;
+	struct pbe *pbe = pagedir_nosave, *p;
 	pr_debug("copy_data_pages(): pages to copy: %d\n", nr_copy_pages);
 	for_each_zone (zone) {
 		if (is_highmem(zone))
 			continue;
 		mark_free_pages(zone);
+		/* This is necessary for swsusp_free() */
+		for_each_pb_page (p, pagedir_nosave)
+			SetPageNosaveFree(virt_to_page(p));
+		for_each_pbe(p, pagedir_nosave)
+			SetPageNosaveFree(virt_to_page(p->address));
 		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn) {
 			if (saveable(zone, &zone_pfn)) {
 				struct page * page;
 				page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
 				BUG_ON(!pbe);
-				pbe->orig_address = (long) page_address(page);
+				pbe->orig_address = (unsigned long)page_address(page);
 				/* copy_page is not usable for copying task structs. */
 				memcpy((void *)pbe->address, (void *)pbe->orig_address, PAGE_SIZE);
 				pbe = pbe->next;
@@ -736,15 +740,6 @@ static void copy_data_pages(void)
 	}
 }
-/**
- * calc_nr - Determine the number of pages needed for a pbe list.
- */
-static int calc_nr(int nr_copy)
-{
-	return nr_copy + (nr_copy+PBES_PER_PAGE-2)/(PBES_PER_PAGE-1);
-}
 /**
  * free_pagedir - free pages allocated with alloc_pagedir()
  */
@@ -755,6 +750,8 @@ static inline void free_pagedir(struct pbe *pblist)
 	while (pblist) {
 		pbe = (pblist + PB_PAGE_SKIP)->next;
+		ClearPageNosave(virt_to_page(pblist));
+		ClearPageNosaveFree(virt_to_page(pblist));
 		free_page((unsigned long)pblist);
 		pblist = pbe;
 	}
@@ -800,6 +797,16 @@ static void create_pbe_list(struct pbe *pblist, unsigned nr_pages)
 	pr_debug("create_pbe_list(): initialized %d PBEs\n", num);
 }
+static void *alloc_image_page(void)
+{
+	void *res = (void *)get_zeroed_page(GFP_ATOMIC | __GFP_COLD);
+	if (res) {
+		SetPageNosave(virt_to_page(res));
+		SetPageNosaveFree(virt_to_page(res));
+	}
+	return res;
+}
 /**
  * alloc_pagedir - Allocate the page directory.
  *
@@ -822,11 +829,11 @@ static struct pbe * alloc_pagedir(unsigned nr_pages)
 		return NULL;
 	pr_debug("alloc_pagedir(): nr_pages = %d\n", nr_pages);
-	pblist = (struct pbe *)get_zeroed_page(GFP_ATOMIC | __GFP_COLD);
+	pblist = (struct pbe *)alloc_image_page();
 	for (pbe = pblist, num = PBES_PER_PAGE; pbe && num < nr_pages;
 			pbe = pbe->next, num += PBES_PER_PAGE) {
 		pbe += PB_PAGE_SKIP;
-		pbe->next = (struct pbe *)get_zeroed_page(GFP_ATOMIC | __GFP_COLD);
+		pbe->next = (struct pbe *)alloc_image_page();
 	}
 	if (!pbe) { /* get_zeroed_page() failed */
 		free_pagedir(pblist);
@@ -836,51 +843,29 @@ static struct pbe * alloc_pagedir(unsigned nr_pages)
 }
 /**
- * free_image_pages - Free pages allocated for snapshot
+ * Free pages we allocated for suspend. Suspend pages are alocated
+ * before atomic copy, so we need to free them after resume.
  */
-static void free_image_pages(void)
+void swsusp_free(void)
 {
-	struct pbe * p;
-	for_each_pbe (p, pagedir_save) {
-		if (p->address) {
-			ClearPageNosave(virt_to_page(p->address));
-			free_page(p->address);
-			p->address = 0;
-		}
+	struct zone *zone;
+	unsigned long zone_pfn;
+	for_each_zone(zone) {
+		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
+			if (pfn_valid(zone_pfn + zone->zone_start_pfn)) {
+				struct page * page;
+				page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
+				if (PageNosave(page) && PageNosaveFree(page)) {
+					ClearPageNosave(page);
+					ClearPageNosaveFree(page);
+					free_page((long) page_address(page));
+				}
+			}
 	}
 }
-/**
- * alloc_image_pages - Allocate pages for the snapshot.
- */
-static int alloc_image_pages(void)
-{
-	struct pbe * p;
-	for_each_pbe (p, pagedir_save) {
-		p->address = get_zeroed_page(GFP_ATOMIC | __GFP_COLD);
-		if (!p->address)
-			return -ENOMEM;
-		SetPageNosave(virt_to_page(p->address));
-	}
-	return 0;
-}
-/* Free pages we allocated for suspend. Suspend pages are alocated
- * before atomic copy, so we need to free them after resume.
- */
-void swsusp_free(void)
-{
-	BUG_ON(PageNosave(virt_to_page(pagedir_save)));
-	BUG_ON(PageNosaveFree(virt_to_page(pagedir_save)));
-	free_image_pages();
-	free_pagedir(pagedir_save);
-}
 /**
  * enough_free_mem - Make sure we enough free memory to snapshot.
  *
@@ -890,12 +875,9 @@ void swsusp_free(void)
 static int enough_free_mem(void)
 {
-	if (nr_free_pages() < (nr_copy_pages + PAGES_FOR_IO)) {
-		pr_debug("swsusp: Not enough free pages: Have %d\n",
-			nr_free_pages());
-		return 0;
-	}
-	return 1;
+	pr_debug("swsusp: available memory: %u pages\n", nr_free_pages());
+	return nr_free_pages() > (nr_copy_pages + PAGES_FOR_IO +
+		nr_copy_pages/PBES_PER_PAGE + !!(nr_copy_pages%PBES_PER_PAGE));
 }
@@ -914,33 +896,16 @@ static int enough_swap(void)
 	struct sysinfo i;
 	si_swapinfo(&i);
-	if (i.freeswap < (nr_copy_pages + PAGES_FOR_IO)) {
-		pr_debug("swsusp: Not enough swap. Need %ld\n",i.freeswap);
-		return 0;
-	}
-	return 1;
+	pr_debug("swsusp: available swap: %lu pages\n", i.freeswap);
+	return i.freeswap > (nr_copy_pages + PAGES_FOR_IO +
+		nr_copy_pages/PBES_PER_PAGE + !!(nr_copy_pages%PBES_PER_PAGE));
 }
 static int swsusp_alloc(void)
 {
-	int error;
+	struct pbe * p;
 	pagedir_nosave = NULL;
-	nr_copy_pages = calc_nr(nr_copy_pages);
-	nr_copy_pages_check = nr_copy_pages;
-	pr_debug("suspend: (pages needed: %d + %d free: %d)\n",
-		nr_copy_pages, PAGES_FOR_IO, nr_free_pages());
-	if (!enough_free_mem())
-		return -ENOMEM;
-	if (!enough_swap())
-		return -ENOSPC;
-	if (MAX_PBES < nr_copy_pages / PBES_PER_PAGE +
-			!!(nr_copy_pages % PBES_PER_PAGE))
-		return -ENOSPC;
 	if (!(pagedir_save = alloc_pagedir(nr_copy_pages))) {
 		printk(KERN_ERR "suspend: Allocating pagedir failed.\n");
@@ -948,10 +913,14 @@ static int swsusp_alloc(void)
 	}
 	create_pbe_list(pagedir_save, nr_copy_pages);
 	pagedir_nosave = pagedir_save;
-	if ((error = alloc_image_pages())) {
-		printk(KERN_ERR "suspend: Allocating image pages failed.\n");
-		swsusp_free();
-		return error;
+	for_each_pbe (p, pagedir_save) {
+		p->address = (unsigned long)alloc_image_page();
+		if (!p->address) {
+			printk(KERN_ERR "suspend: Allocating image pages failed.\n");
+			swsusp_free();
+			return -ENOMEM;
+		}
 	}
 	return 0;
@@ -963,7 +932,7 @@ static int suspend_prepare_image(void)
 	pr_debug("swsusp: critical section: \n");
 	if (save_highmem()) {
-		printk(KERN_CRIT "Suspend machine: Not enough free pages for highmem\n");
+		printk(KERN_CRIT "swsusp: Not enough free pages for highmem\n");
 		restore_highmem();
 		return -ENOMEM;
 	}
@@ -971,6 +940,28 @@ static int suspend_prepare_image(void)
 	drain_local_pages();
 	count_data_pages();
 	printk("swsusp: Need to copy %u pages\n", nr_copy_pages);
+	nr_copy_pages_check = nr_copy_pages;
+	pr_debug("swsusp: pages needed: %u + %lu + %u, free: %u\n",
+		nr_copy_pages,
+		nr_copy_pages/PBES_PER_PAGE + !!(nr_copy_pages%PBES_PER_PAGE),
+		PAGES_FOR_IO, nr_free_pages());
+	if (!enough_free_mem()) {
+		printk(KERN_ERR "swsusp: Not enough free memory\n");
+		return -ENOMEM;
+	}
+	if (MAX_PBES < nr_copy_pages / PBES_PER_PAGE +
+			!!(nr_copy_pages % PBES_PER_PAGE)) {
+		printk(KERN_ERR "swsusp: Too many image pages\n");
+		return -ENOSPC;
+	}
+	if (!enough_swap()) {
+		printk(KERN_ERR "swsusp: Not enough free swap\n");
+		return -ENOSPC;
+	}
 	error = swsusp_alloc();
 	if (error)
...
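
A note on the new enough_free_mem()/enough_swap() checks above: the extra term nr_copy_pages/PBES_PER_PAGE + !!(nr_copy_pages%PBES_PER_PAGE) is a ceiling division, reserving room for the pagedir pages that hold the pbe entries (one page per PBES_PER_PAGE entries, as allocated by alloc_pagedir()) in addition to the data pages and PAGES_FOR_IO. A tiny standalone check of that idiom; the PBES_PER_PAGE value below is illustrative, not the kernel's:

#include <assert.h>

#define PBES_PER_PAGE 128	/* illustrative value for the demo only */

/* ceil(nr_copy_pages / PBES_PER_PAGE), written as in the patch */
static unsigned pagedir_pages(unsigned nr_copy_pages)
{
	return nr_copy_pages / PBES_PER_PAGE +
	       !!(nr_copy_pages % PBES_PER_PAGE);
}

int main(void)
{
	assert(pagedir_pages(0) == 0);
	assert(pagedir_pages(1) == 1);		/* one partial page  */
	assert(pagedir_pages(128) == 1);	/* exactly one page  */
	assert(pagedir_pages(129) == 2);	/* spills into a 2nd */
	return 0;
}
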