Commit e5c86131 authored by Stephen Rothwell

Merge commit 'suspend/linux-next'

parents e15e3b5e 33f82d14
......@@ -50,7 +50,7 @@ static DEFINE_MUTEX(dpm_list_mtx);
static bool transition_started;
/**
* device_pm_init - Initialize the PM-related part of a device object
* device_pm_init - Initialize the PM-related part of a device object.
* @dev: Device object being initialized.
*/
void device_pm_init(struct device *dev)
......@@ -60,7 +60,7 @@ void device_pm_init(struct device *dev)
}
/**
* device_pm_lock - lock the list of active devices used by the PM core
* device_pm_lock - Lock the list of active devices used by the PM core.
*/
void device_pm_lock(void)
{
......@@ -68,7 +68,7 @@ void device_pm_lock(void)
}
/**
* device_pm_unlock - unlock the list of active devices used by the PM core
* device_pm_unlock - Unlock the list of active devices used by the PM core.
*/
void device_pm_unlock(void)
{
......@@ -76,8 +76,8 @@ void device_pm_unlock(void)
}
/**
* device_pm_add - add a device to the list of active devices
* @dev: Device to be added to the list
* device_pm_add - Add a device to the PM core's list of active devices.
* @dev: Device to add to the list.
*/
void device_pm_add(struct device *dev)
{
......@@ -103,10 +103,8 @@ void device_pm_add(struct device *dev)
}
/**
* device_pm_remove - remove a device from the list of active devices
* @dev: Device to be removed from the list
*
* This function also removes the device's PM-related sysfs attributes.
* device_pm_remove - Remove a device from the PM core's list of active devices.
* @dev: Device to be removed from the list.
*/
void device_pm_remove(struct device *dev)
{
......@@ -120,9 +118,9 @@ void device_pm_remove(struct device *dev)
}
/**
* device_pm_move_before - move device in dpm_list
* @deva: Device to move in dpm_list
* @devb: Device @deva should come before
* device_pm_move_before - Move device in the PM core's list of active devices.
* @deva: Device to move in dpm_list.
* @devb: Device @deva should come before.
*/
void device_pm_move_before(struct device *deva, struct device *devb)
{
......@@ -136,9 +134,9 @@ void device_pm_move_before(struct device *deva, struct device *devb)
}
/**
* device_pm_move_after - move device in dpm_list
* @deva: Device to move in dpm_list
* @devb: Device @deva should come after
* device_pm_move_after - Move device in the PM core's list of active devices.
* @deva: Device to move in dpm_list.
* @devb: Device @deva should come after.
*/
void device_pm_move_after(struct device *deva, struct device *devb)
{
......@@ -152,8 +150,8 @@ void device_pm_move_after(struct device *deva, struct device *devb)
}
/**
* device_pm_move_last - move device to end of dpm_list
* @dev: Device to move in dpm_list
* device_pm_move_last - Move device to end of the PM core's list of devices.
* @dev: Device to move in dpm_list.
*/
void device_pm_move_last(struct device *dev)
{
......@@ -164,8 +162,8 @@ void device_pm_move_last(struct device *dev)
}
/**
* pm_op - execute the PM operation appropiate for given PM event
* @dev: Device.
* pm_op - Execute the PM operation appropriate for given PM event.
* @dev: Device to handle.
* @ops: PM operations to choose from.
* @state: PM transition of the system being carried out.
*/
......@@ -225,13 +223,13 @@ static int pm_op(struct device *dev,
}
/**
* pm_noirq_op - execute the PM operation appropiate for given PM event
* @dev: Device.
* pm_noirq_op - Execute the PM operation appropriate for given PM event.
* @dev: Device to handle.
* @ops: PM operations to choose from.
* @state: PM transition of the system being carried out.
*
* The operation is executed with interrupts disabled by the only remaining
* functional CPU in the system.
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
static int pm_noirq_op(struct device *dev,
const struct dev_pm_ops *ops,
......@@ -329,11 +327,12 @@ static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
/*------------------------- Resume routines -------------------------*/
/**
* device_resume_noirq - Power on one device (early resume).
* @dev: Device.
* device_resume_noirq - Execute an "early resume" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
*
* Must be called with interrupts disabled.
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
......@@ -355,20 +354,18 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
}
/**
* dpm_resume_noirq - Power on all regular (non-sysdev) devices.
* dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices.
* @state: PM transition of the system being carried out.
*
* Call the "noirq" resume handlers for all devices marked as
* DPM_OFF_IRQ and enable device drivers to receive interrupts.
*
* Must be called under dpm_list_mtx. Device drivers should not receive
* interrupts while it's being executed.
* Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and
* enable device drivers to receive interrupts.
*/
void dpm_resume_noirq(pm_message_t state)
{
struct device *dev;
mutex_lock(&dpm_list_mtx);
transition_started = false;
list_for_each_entry(dev, &dpm_list, power.entry)
if (dev->power.status > DPM_OFF) {
int error;
......@@ -384,8 +381,8 @@ void dpm_resume_noirq(pm_message_t state)
EXPORT_SYMBOL_GPL(dpm_resume_noirq);
/**
* device_resume - Restore state for one device.
* @dev: Device.
* device_resume - Execute "resume" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
*/
static int device_resume(struct device *dev, pm_message_t state)
......@@ -435,11 +432,11 @@ static int device_resume(struct device *dev, pm_message_t state)
}
/**
* dpm_resume - Resume every device.
* dpm_resume - Execute "resume" callbacks for non-sysdev devices.
* @state: PM transition of the system being carried out.
*
* Execute the appropriate "resume" callback for all devices the status of
* which indicates that they are inactive.
* Execute the appropriate "resume" callback for all devices whose status
* indicates that they are suspended.
*/
static void dpm_resume(pm_message_t state)
{
......@@ -447,7 +444,6 @@ static void dpm_resume(pm_message_t state)
INIT_LIST_HEAD(&list);
mutex_lock(&dpm_list_mtx);
transition_started = false;
while (!list_empty(&dpm_list)) {
struct device *dev = to_device(dpm_list.next);
......@@ -476,8 +472,8 @@ static void dpm_resume(pm_message_t state)
}
/**
* device_complete - Complete a PM transition for given device
* @dev: Device.
* device_complete - Complete a PM transition for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
*/
static void device_complete(struct device *dev, pm_message_t state)
......@@ -503,11 +499,11 @@ static void device_complete(struct device *dev, pm_message_t state)
}
/**
* dpm_complete - Complete a PM transition for all devices.
* dpm_complete - Complete a PM transition for all non-sysdev devices.
* @state: PM transition of the system being carried out.
*
* Execute the ->complete() callbacks for all devices that are not marked
* as DPM_ON.
* Execute the ->complete() callbacks for all devices whose PM status is not
* DPM_ON (this allows new devices to be registered).
*/
static void dpm_complete(pm_message_t state)
{
......@@ -537,11 +533,11 @@ static void dpm_complete(pm_message_t state)
}
/**
* dpm_resume_end - Restore state of each device in system.
* dpm_resume_end - Execute "resume" callbacks and complete system transition.
* @state: PM transition of the system being carried out.
*
* Resume all the devices, unlock them all, and allow new
* devices to be registered once again.
* Execute "resume" callbacks for all devices and complete the PM transition of
* the system.
*/
void dpm_resume_end(pm_message_t state)
{
......@@ -555,9 +551,11 @@ EXPORT_SYMBOL_GPL(dpm_resume_end);
/*------------------------- Suspend routines -------------------------*/
/**
* resume_event - return a PM message representing the resume event
* corresponding to given sleep state.
* resume_event - Return a "resume" message for given "suspend" sleep state.
* @sleep_state: PM message representing a sleep state.
*
* Return a PM message representing the resume event corresponding to given
* sleep state.
*/
static pm_message_t resume_event(pm_message_t sleep_state)
{
......@@ -574,11 +572,12 @@ static pm_message_t resume_event(pm_message_t sleep_state)
}
/**
* device_suspend_noirq - Shut down one device (late suspend).
* @dev: Device.
* device_suspend_noirq - Execute a "late suspend" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
*
* This is called with interrupts off and only a single CPU running.
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
......@@ -595,13 +594,11 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
}
/**
* dpm_suspend_noirq - Power down all regular (non-sysdev) devices.
* dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices.
* @state: PM transition of the system being carried out.
*
* Prevent device drivers from receiving interrupts and call the "noirq"
* suspend handlers.
*
* Must be called under dpm_list_mtx.
* Prevent device drivers from receiving interrupts and call the "noirq" suspend
* handlers for all non-sysdev devices.
*/
int dpm_suspend_noirq(pm_message_t state)
{
......@@ -626,8 +623,8 @@ int dpm_suspend_noirq(pm_message_t state)
EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
/**
* device_suspend - Save state of one device.
* @dev: Device.
* device_suspend - Execute "suspend" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
*/
static int device_suspend(struct device *dev, pm_message_t state)
......@@ -675,10 +672,8 @@ static int device_suspend(struct device *dev, pm_message_t state)
}
/**
* dpm_suspend - Suspend every device.
* dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
* @state: PM transition of the system being carried out.
*
* Execute the appropriate "suspend" callbacks for all devices.
*/
static int dpm_suspend(pm_message_t state)
{
......@@ -712,9 +707,12 @@ static int dpm_suspend(pm_message_t state)
}
/**
* device_prepare - Execute the ->prepare() callback(s) for given device.
* @dev: Device.
* device_prepare - Prepare a device for system power transition.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
*
* Execute the ->prepare() callback(s) for given device. No new children of the
* device may be registered after this function has returned.
*/
static int device_prepare(struct device *dev, pm_message_t state)
{
......@@ -750,10 +748,10 @@ static int device_prepare(struct device *dev, pm_message_t state)
}
/**
* dpm_prepare - Prepare all devices for a PM transition.
* dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
* @state: PM transition of the system being carried out.
*
* Execute the ->prepare() callback for all devices.
* Execute the ->prepare() callback(s) for all devices.
*/
static int dpm_prepare(pm_message_t state)
{
......@@ -804,10 +802,11 @@ static int dpm_prepare(pm_message_t state)
}
/**
* dpm_suspend_start - Save state and stop all devices in system.
* dpm_suspend_start - Prepare devices for PM transition and suspend them.
* @state: PM transition of the system being carried out.
*
* Prepare and suspend all devices.
* Prepare all non-sysdev devices for system PM transition and execute "suspend"
* callbacks for them.
*/
int dpm_suspend_start(pm_message_t state)
{
......
......@@ -4151,7 +4151,7 @@ static void floppy_device_release(struct device *dev)
{
}
static int floppy_resume(struct platform_device *dev)
static int floppy_resume(struct device *dev)
{
int fdc;
......@@ -4162,10 +4162,15 @@ static int floppy_resume(struct platform_device *dev)
return 0;
}
static struct platform_driver floppy_driver = {
static struct dev_pm_ops floppy_pm_ops = {
.resume = floppy_resume,
.restore = floppy_resume,
};
static struct platform_driver floppy_driver = {
.driver = {
.name = "floppy",
.pm = &floppy_pm_ops,
},
};
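The pattern here recurs in every driver converted below: the legacy platform_device suspend/resume methods become dev_pm_ops handlers that take a plain struct device *, and the same function is wired to both .resume (wakeup from suspend to RAM) and .restore (return from a hibernation image) to keep the old behavior on every wakeup path. A minimal sketch of the full wiring, using hypothetical "foo" names that are not part of this patch:

#include <linux/platform_device.h>
#include <linux/pm.h>

/* Hypothetical handler: reprogram whatever state the hardware loses
 * while the system sleeps. */
static int foo_resume(struct device *dev)
{
	return 0;
}

static struct dev_pm_ops foo_pm_ops = {
	.resume  = foo_resume,	/* wakeup from suspend to RAM */
	.restore = foo_resume,	/* return from a hibernation image */
};

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		.pm   = &foo_pm_ops,
	},
};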
......
......@@ -1168,8 +1168,9 @@ static void at_dma_shutdown(struct platform_device *pdev)
clk_disable(atdma->clk);
}
static int at_dma_suspend_late(struct platform_device *pdev, pm_message_t mesg)
static int at_dma_suspend_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct at_dma *atdma = platform_get_drvdata(pdev);
at_dma_off(platform_get_drvdata(pdev));
......@@ -1177,23 +1178,27 @@ static int at_dma_suspend_late(struct platform_device *pdev, pm_message_t mesg)
return 0;
}
static int at_dma_resume_early(struct platform_device *pdev)
static int at_dma_resume_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct at_dma *atdma = platform_get_drvdata(pdev);
clk_enable(atdma->clk);
dma_writel(atdma, EN, AT_DMA_ENABLE);
return 0;
}
static struct dev_pm_ops at_dma_dev_pm_ops = {
.suspend_noirq = at_dma_suspend_noirq,
.resume_noirq = at_dma_resume_noirq,
};
static struct platform_driver at_dma_driver = {
.remove = __exit_p(at_dma_remove),
.shutdown = at_dma_shutdown,
.suspend_late = at_dma_suspend_late,
.resume_early = at_dma_resume_early,
.driver = {
.name = "at_hdmac",
.pm = &at_dma_dev_pm_ops,
},
};
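One piece of boilerplate the conversion adds: the new callbacks receive a struct device *, so the driver recovers its platform_device with to_platform_device() before calling platform_get_drvdata(). Because platform_get_drvdata(to_platform_device(dev)) is equivalent to dev_get_drvdata(dev), such a callback can also be written more compactly; a sketch with a hypothetical "bar" driver and helper:

static int bar_suspend_noirq(struct device *dev)
{
	/* dev_get_drvdata() returns what probe stored with
	 * platform_set_drvdata(); bar_chip and bar_chip_disable()
	 * are illustrative names, not part of this patch. */
	struct bar_chip *chip = dev_get_drvdata(dev);

	bar_chip_disable(chip);
	return 0;
}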
......
......@@ -53,7 +53,7 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
static int __init hp_wmi_bios_setup(struct platform_device *device);
static int __exit hp_wmi_bios_remove(struct platform_device *device);
static int hp_wmi_resume_handler(struct platform_device *device);
static int hp_wmi_resume_handler(struct device *device);
struct bios_args {
u32 signature;
......@@ -94,14 +94,19 @@ static struct rfkill *wifi_rfkill;
static struct rfkill *bluetooth_rfkill;
static struct rfkill *wwan_rfkill;
static struct dev_pm_ops hp_wmi_pm_ops = {
.resume = hp_wmi_resume_handler,
.restore = hp_wmi_resume_handler,
};
static struct platform_driver hp_wmi_driver = {
.driver = {
.name = "hp-wmi",
.owner = THIS_MODULE,
.pm = &hp_wmi_pm_ops,
},
.probe = hp_wmi_bios_setup,
.remove = hp_wmi_bios_remove,
.resume = hp_wmi_resume_handler,
};
static int hp_wmi_perform_query(int query, int write, int value)
......@@ -512,7 +517,7 @@ static int __exit hp_wmi_bios_remove(struct platform_device *device)
return 0;
}
static int hp_wmi_resume_handler(struct platform_device *device)
static int hp_wmi_resume_handler(struct device *device)
{
/*
* Hardware state may have changed while suspended, so trigger
......
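As in the floppy conversion above, hp_wmi_resume_handler() is installed as both .resume and .restore; the legacy platform .resume method ran on every wakeup path, so pointing both dev_pm_ops fields at the one handler preserves the refresh-after-wakeup behavior for suspend to RAM and hibernation alike.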
......@@ -213,6 +213,20 @@ struct dev_pm_ops {
int (*runtime_idle)(struct device *dev);
};
/*
* Use this if you want to use the same suspend and resume callbacks for suspend
* to RAM and hibernation.
*/
#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
struct dev_pm_ops name = { \
.suspend = suspend_fn, \
.resume = resume_fn, \
.freeze = suspend_fn, \
.thaw = resume_fn, \
.poweroff = suspend_fn, \
.restore = resume_fn, \
}
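The macro intentionally omits a storage class, so the driver supplies one at the definition site. A hypothetical usage sketch, with illustrative "baz" names:

static int baz_suspend(struct device *dev)
{
	return 0;	/* quiesce the device */
}

static int baz_resume(struct device *dev)
{
	return 0;	/* bring the device back up */
}

static SIMPLE_DEV_PM_OPS(baz_pm_ops, baz_suspend, baz_resume);

static struct platform_driver baz_driver = {
	.driver = {
		.name = "baz",
		.pm   = &baz_pm_ops,
	},
};

With this single definition, suspend, freeze and poweroff all invoke baz_suspend(), while resume, thaw and restore all invoke baz_resume().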
/**
* PM_EVENT_ messages
*
......
......@@ -298,8 +298,8 @@ int hibernation_snapshot(int platform_mode)
if (error)
return error;
/* Free memory before shutting down devices. */
error = swsusp_shrink_memory();
/* Preallocate image memory before shutting down devices. */
error = hibernate_preallocate_memory();
if (error)
goto Close;
......@@ -315,6 +315,10 @@ int hibernation_snapshot(int platform_mode)
/* Control returns here after successful restore */
Resume_devices:
/* We may need to release the preallocated image pages here. */
if (error || !in_suspend)
swsusp_free();
dpm_resume_end(in_suspend ?
(error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
resume_console();
......@@ -460,11 +464,11 @@ int hibernation_platform_enter(void)
error = hibernation_ops->prepare();
if (error)
goto Platofrm_finish;
goto Platform_finish;
error = disable_nonboot_cpus();
if (error)
goto Platofrm_finish;
goto Platform_finish;
local_irq_disable();
sysdev_suspend(PMSG_HIBERNATE);
......@@ -476,7 +480,7 @@ int hibernation_platform_enter(void)
* We don't need to reenable the nonboot CPUs or resume consoles, since
* the system is going to be halted anyway.
*/
Platofrm_finish:
Platform_finish:
hibernation_ops->finish();
dpm_suspend_noirq(PMSG_RESTORE);
......@@ -578,7 +582,10 @@ int hibernate(void)
goto Thaw;
error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM);
if (in_suspend && !error) {
if (error)
goto Thaw;
if (in_suspend) {
unsigned int flags = 0;
if (hibernation_mode == HIBERNATION_PLATFORM)
......@@ -590,8 +597,8 @@ int hibernate(void)
power_down();
} else {
pr_debug("PM: Image restored successfully.\n");
swsusp_free();
}
Thaw:
thaw_processes();
Finish:
......
......@@ -74,7 +74,7 @@ extern asmlinkage int swsusp_arch_resume(void);
extern int create_basic_memory_bitmaps(void);
extern void free_basic_memory_bitmaps(void);
extern int swsusp_shrink_memory(void);
extern int hibernate_preallocate_memory(void);
/**
* Auxiliary structure used for reading the snapshot image data and
......
......@@ -233,7 +233,7 @@ static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
#define BM_END_OF_MAP (~0UL)
#define BM_BITS_PER_BLOCK (PAGE_SIZE << 3)
#define BM_BITS_PER_BLOCK (PAGE_SIZE * BITS_PER_BYTE)
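The replacement is purely cosmetic: PAGE_SIZE << 3 and PAGE_SIZE * BITS_PER_BYTE both evaluate to the number of bits in a page-sized block (BITS_PER_BYTE is 8), but the multiplication spells out why the shift was by 3.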
struct bm_block {
struct list_head hook; /* hook into a list of bitmap blocks */
......@@ -275,7 +275,7 @@ static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
/**
* create_bm_block_list - create a list of block bitmap objects
* @nr_blocks - number of blocks to allocate
* @pages - number of pages to track
* @list - list to put the allocated blocks into
* @ca - chain allocator to be used for allocating memory
*/
......@@ -853,7 +853,7 @@ static unsigned int count_highmem_pages(void)
struct zone *zone;
unsigned int n = 0;
for_each_zone(zone) {
for_each_populated_zone(zone) {
unsigned long pfn, max_zone_pfn;
if (!is_highmem(zone))
......@@ -916,7 +916,7 @@ static unsigned int count_data_pages(void)
unsigned long pfn, max_zone_pfn;
unsigned int n = 0;
for_each_zone(zone) {
for_each_populated_zone(zone) {
if (is_highmem(zone))
continue;
......@@ -1010,7 +1010,7 @@ copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
struct zone *zone;
unsigned long pfn;
for_each_zone(zone) {
for_each_populated_zone(zone) {
unsigned long max_zone_pfn;
mark_free_pages(zone);
......@@ -1033,6 +1033,25 @@ copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
/*
* Numbers of normal and highmem page frames allocated for hibernation image
* before suspending devices.
*/
unsigned int alloc_normal, alloc_highmem;
/*
* Memory bitmap used for marking saveable pages (during hibernation) or
* hibernation image pages (during restore)
*/
static struct memory_bitmap orig_bm;
/*
* Memory bitmap used during hibernation for marking allocated page frames that
* will contain copies of saveable pages. During restore it is initially used
* for marking hibernation image pages, but then the set bits from it are
* duplicated in @orig_bm and it is released. On highmem systems it is next
* used for marking "safe" highmem pages, but it has to be reinitialized for
* this purpose.
*/
static struct memory_bitmap copy_bm;
/**
* swsusp_free - free pages allocated for the suspend.
......@@ -1046,7 +1065,7 @@ void swsusp_free(void)
struct zone *zone;
unsigned long pfn, max_zone_pfn;
for_each_zone(zone) {
for_each_populated_zone(zone) {
max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
if (pfn_valid(pfn)) {
......@@ -1064,74 +1083,286 @@ void swsusp_free(void)
nr_meta_pages = 0;
restore_pblist = NULL;
buffer = NULL;
alloc_normal = 0;
alloc_highmem = 0;
}
/* Helper functions used for the shrinking of memory. */
#define GFP_IMAGE (GFP_KERNEL | __GFP_NOWARN)
/**
* swsusp_shrink_memory - Try to free as much memory as needed
*
* ... but do not OOM-kill anyone
* preallocate_image_pages - Allocate a number of pages for hibernation image
* @nr_pages: Number of page frames to allocate.
* @mask: GFP flags to use for the allocation.
*
* Notice: all userland should be stopped before it is called, or
* livelock is possible.
* Return value: Number of page frames actually allocated
*/
static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
{
unsigned long nr_alloc = 0;
while (nr_pages > 0) {
struct page *page;
page = alloc_image_page(mask);
if (!page)
break;
memory_bm_set_bit(&copy_bm, page_to_pfn(page));
if (PageHighMem(page))
alloc_highmem++;
else
alloc_normal++;
nr_pages--;
nr_alloc++;
}
return nr_alloc;
}
static unsigned long preallocate_image_memory(unsigned long nr_pages)
{
return preallocate_image_pages(nr_pages, GFP_IMAGE);
}
#ifdef CONFIG_HIGHMEM
static unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
}
/**
* __fraction - Compute (an approximation of) x * (multiplier / base)
*/
static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
{
x *= multiplier;
do_div(x, base);
return (unsigned long)x;
}
static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
unsigned long highmem,
unsigned long total)
{
unsigned long alloc = __fraction(nr_pages, highmem, total);
return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
}
#else /* CONFIG_HIGHMEM */
static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
return 0;
}
static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
unsigned long highmem,
unsigned long total)
{
return 0;
}
#endif /* CONFIG_HIGHMEM */
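To make the fixed-point helper concrete (numbers invented for illustration): preallocate_highmem_fraction() directs roughly nr_pages * highmem / total of a request at the highmem zones. With nr_pages = 40000, highmem = 262144 and total = 1048576, __fraction() computes 40000 * 262144 / 1048576 = 10000, so a quarter of the request goes to highmem, matching highmem's quarter share of total memory.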
/**
* free_unnecessary_pages - Release preallocated pages not needed for the image
*/
static void free_unnecessary_pages(void)
{
unsigned long save_highmem, to_free_normal, to_free_highmem;
to_free_normal = alloc_normal - count_data_pages();
save_highmem = count_highmem_pages();
if (alloc_highmem > save_highmem) {
to_free_highmem = alloc_highmem - save_highmem;
} else {
to_free_highmem = 0;
to_free_normal -= save_highmem - alloc_highmem;
}
memory_bm_position_reset(&copy_bm);
while (to_free_normal > 0 || to_free_highmem > 0) {
unsigned long pfn = memory_bm_next_pfn(&copy_bm);
struct page *page = pfn_to_page(pfn);
if (PageHighMem(page)) {
if (!to_free_highmem)
continue;
to_free_highmem--;
alloc_highmem--;
} else {
if (!to_free_normal)
continue;
to_free_normal--;
alloc_normal--;
}
memory_bm_clear_bit(&copy_bm, pfn);
swsusp_unset_page_forbidden(page);
swsusp_unset_page_free(page);
__free_page(page);
}
}
#define SHRINK_BITE 10000
static inline unsigned long __shrink_memory(long tmp)
/**
* minimum_image_size - Estimate the minimum acceptable size of an image
* @saveable: Number of saveable pages in the system.
*
* We want to avoid attempting to free too much memory too hard, so estimate the
* minimum acceptable size of a hibernation image to use as the lower limit for
* preallocating memory.
*
* We assume that the minimum image size should be proportional to
*
* [number of saveable pages] - [number of pages that can be freed in theory]
*
* where the second term is the sum of (1) reclaimable slab pages, (2) active
* and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
* minus mapped file pages.
*/
static unsigned long minimum_image_size(unsigned long saveable)
{
if (tmp > SHRINK_BITE)
tmp = SHRINK_BITE;
return shrink_all_memory(tmp);
unsigned long size;
size = global_page_state(NR_SLAB_RECLAIMABLE)
+ global_page_state(NR_ACTIVE_ANON)
+ global_page_state(NR_INACTIVE_ANON)
+ global_page_state(NR_ACTIVE_FILE)
+ global_page_state(NR_INACTIVE_FILE)
- global_page_state(NR_FILE_MAPPED);
return saveable <= size ? 0 : saveable - size;
}
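For instance (figures invented for illustration): with 200000 saveable pages and a reclaimable sum of 150000 pages, the estimated floor is 50000 pages. If the reclaimable sum meets or exceeds the number of saveable pages, the function returns 0 and only image_size bounds the preallocation.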
int swsusp_shrink_memory(void)
/**
* hibernate_preallocate_memory - Preallocate memory for hibernation image
*
* To create a hibernation image it is necessary to make a copy of every page
* frame in use. We also need a number of page frames to be free during
* hibernation for allocations made while saving the image and for device
* drivers, in case they need to allocate memory from their hibernation
* callbacks (these two numbers are given by PAGES_FOR_IO and SPARE_PAGES,
* respectively, both of which are rough estimates). To make this happen, we
* compute the total number of available page frames and allocate at least
*
* ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2 + 2 * SPARE_PAGES
*
* of them, which corresponds to the maximum size of a hibernation image.
*
* If image_size is set below the number following from the above formula,
* the preallocation of memory is continued until the total number of saveable
* pages in the system is below the requested image size or the minimum
* acceptable image size returned by minimum_image_size(), whichever is greater.
*/
int hibernate_preallocate_memory(void)
{
long tmp;
struct zone *zone;
unsigned long pages = 0;
unsigned int i = 0;
char *p = "-\\|/";
unsigned long saveable, size, max_size, count, highmem, pages = 0;
unsigned long alloc, save_highmem, pages_highmem;
struct timeval start, stop;
int error;
printk(KERN_INFO "PM: Shrinking memory... ");
printk(KERN_INFO "PM: Preallocating image memory... ");
do_gettimeofday(&start);
do {
long size, highmem_size;
highmem_size = count_highmem_pages();
size = count_data_pages() + PAGES_FOR_IO + SPARE_PAGES;
tmp = size;
size += highmem_size;
error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
if (error)
goto err_out;
error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
if (error)
goto err_out;
alloc_normal = 0;
alloc_highmem = 0;
/* Count the number of saveable data pages. */
save_highmem = count_highmem_pages();
saveable = count_data_pages();
/*
* Compute the total number of page frames we can use (count) and the
* number of pages needed for image metadata (size).
*/
count = saveable;
saveable += save_highmem;
highmem = save_highmem;
size = 0;
for_each_populated_zone(zone) {
tmp += snapshot_additional_pages(zone);
if (is_highmem(zone)) {
highmem_size -=
zone_page_state(zone, NR_FREE_PAGES);
} else {
tmp -= zone_page_state(zone, NR_FREE_PAGES);
tmp += zone->lowmem_reserve[ZONE_NORMAL];
size += snapshot_additional_pages(zone);
if (is_highmem(zone))
highmem += zone_page_state(zone, NR_FREE_PAGES);
else
count += zone_page_state(zone, NR_FREE_PAGES);
}
count += highmem;
count -= totalreserve_pages;
/* Compute the maximum number of saveable pages to leave in memory. */
max_size = (count - (size + PAGES_FOR_IO)) / 2 - 2 * SPARE_PAGES;
size = DIV_ROUND_UP(image_size, PAGE_SIZE);
if (size > max_size)
size = max_size;
/*
* If the maximum is not less than the current number of saveable pages
* in memory, allocate page frames for the image and we're done.
*/
if (size >= saveable) {
pages = preallocate_image_highmem(save_highmem);
pages += preallocate_image_memory(saveable - pages);
goto out;
}
if (highmem_size < 0)
highmem_size = 0;
/* Estimate the minimum size of the image. */
pages = minimum_image_size(saveable);
if (size < pages)
size = min_t(unsigned long, pages, max_size);
tmp += highmem_size;
if (tmp > 0) {
tmp = __shrink_memory(tmp);
if (!tmp)
return -ENOMEM;
pages += tmp;
} else if (size > image_size / PAGE_SIZE) {
tmp = __shrink_memory(size - (image_size / PAGE_SIZE));
pages += tmp;
}
printk("\b%c", p[i++%4]);
} while (tmp > 0);
/*
* Let the memory management subsystem know that we're going to need a
* large number of page frames to allocate and make it free some memory.
* NOTE: If this is not done, performance will be hurt badly in some
* test cases.
*/
shrink_all_memory(saveable - size);
/*
* The number of saveable pages in memory was too high, so apply some
* pressure to decrease it. First, make room for the largest possible
* image and fail if that doesn't work. Next, try to decrease the size
* of the image as much as indicated by 'size' using allocations from
* highmem and non-highmem zones separately.
*/
pages_highmem = preallocate_image_highmem(highmem / 2);
alloc = (count - max_size) - pages_highmem;
pages = preallocate_image_memory(alloc);
if (pages < alloc)
goto err_out;
size = max_size - size;
alloc = size;
size = preallocate_highmem_fraction(size, highmem, count);
pages_highmem += size;
alloc -= size;
pages += preallocate_image_memory(alloc);
pages += pages_highmem;
/*
* We only need as many page frames for the image as there are saveable
* pages in memory, but we have allocated more. Release the excessive
* ones now.
*/
free_unnecessary_pages();
out:
do_gettimeofday(&stop);
printk("\bdone (%lu pages freed)\n", pages);
swsusp_show_speed(&start, &stop, pages, "Freed");
printk(KERN_CONT "done (allocated %lu pages)\n", pages);
swsusp_show_speed(&start, &stop, pages, "Allocated");
return 0;
err_out:
printk(KERN_CONT "\n");
swsusp_free();
return -ENOMEM;
}
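A worked pass through the sizing arithmetic (all figures invented for illustration, assuming 4 KiB pages and the conventional PAGES_FOR_IO = 1024 and SPARE_PAGES = 256 estimates): with count = 250000 usable page frames and size = 500 metadata pages, max_size = (250000 - (500 + 1024)) / 2 - 2 * 256 = 123726 frames, so a little under half of memory may remain occupied by saveable pages. Preallocation then proceeds until the saveable page count falls below the image_size target or minimum_image_size(), whichever is greater, exactly as the comment above describes.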
#ifdef CONFIG_HIGHMEM
......@@ -1142,7 +1373,7 @@ int swsusp_shrink_memory(void)
static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
{
unsigned int free_highmem = count_free_highmem_pages();
unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
if (free_highmem >= nr_highmem)
nr_highmem = 0;
......@@ -1164,19 +1395,17 @@ count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
{
struct zone *zone;
unsigned int free = 0, meta = 0;
unsigned int free = alloc_normal;
for_each_zone(zone) {
meta += snapshot_additional_pages(zone);
for_each_populated_zone(zone)
if (!is_highmem(zone))
free += zone_page_state(zone, NR_FREE_PAGES);
}
nr_pages += count_pages_for_highmem(nr_highmem);
pr_debug("PM: Normal pages needed: %u + %u + %u, available pages: %u\n",
nr_pages, PAGES_FOR_IO, meta, free);
pr_debug("PM: Normal pages needed: %u + %u, available pages: %u\n",
nr_pages, PAGES_FOR_IO, free);
return free > nr_pages + PAGES_FOR_IO + meta;
return free > nr_pages + PAGES_FOR_IO;
}
#ifdef CONFIG_HIGHMEM
......@@ -1198,7 +1427,7 @@ static inline int get_highmem_buffer(int safe_needed)
*/
static inline unsigned int
alloc_highmem_image_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
alloc_highmem_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
{
unsigned int to_alloc = count_free_highmem_pages();
......@@ -1218,7 +1447,7 @@ alloc_highmem_image_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
static inline int get_highmem_buffer(int safe_needed) { return 0; }
static inline unsigned int
alloc_highmem_image_pages(struct memory_bitmap *bm, unsigned int n) { return 0; }
alloc_highmem_pages(struct memory_bitmap *bm, unsigned int n) { return 0; }
#endif /* CONFIG_HIGHMEM */
/**
......@@ -1237,51 +1466,36 @@ static int
swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
unsigned int nr_pages, unsigned int nr_highmem)
{
int error;
error = memory_bm_create(orig_bm, GFP_ATOMIC | __GFP_COLD, PG_ANY);
if (error)
goto Free;
error = memory_bm_create(copy_bm, GFP_ATOMIC | __GFP_COLD, PG_ANY);
if (error)
goto Free;
int error = 0;
if (nr_highmem > 0) {
error = get_highmem_buffer(PG_ANY);
if (error)
goto Free;
nr_pages += alloc_highmem_image_pages(copy_bm, nr_highmem);
goto err_out;
if (nr_highmem > alloc_highmem) {
nr_highmem -= alloc_highmem;
nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
}
}
if (nr_pages > alloc_normal) {
nr_pages -= alloc_normal;
while (nr_pages-- > 0) {
struct page *page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
struct page *page;
page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
if (!page)
goto Free;
goto err_out;
memory_bm_set_bit(copy_bm, page_to_pfn(page));
}
}
return 0;
Free:
err_out:
swsusp_free();
return -ENOMEM;
return error;
}
/* Memory bitmap used for marking saveable pages (during suspend) or the
* suspend image pages (during resume)
*/
static struct memory_bitmap orig_bm;
/* Memory bitmap used on suspend for marking allocated pages that will contain
* the copies of saveable pages. During resume it is initially used for
* marking the suspend image pages, but then its set bits are duplicated in
* @orig_bm and it is released. Next, on systems with high memory, it may be
* used for marking "safe" highmem pages, but it has to be reinitialized for
* this purpose.
*/
static struct memory_bitmap copy_bm;
asmlinkage int swsusp_save(void)
{
unsigned int nr_pages, nr_highmem;
......@@ -1474,7 +1688,7 @@ static int mark_unsafe_pages(struct memory_bitmap *bm)
unsigned long pfn, max_zone_pfn;
/* Clear page flags */
for_each_zone(zone) {
for_each_populated_zone(zone) {
max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
if (pfn_valid(pfn))
......