Commit fc9a07e7 authored by Andrew Morton, committed by Linus Torvalds

invalidate_mapping_pages(): add cond_resched

invalidate_mapping_pages() can sometimes take a long time (millions of pages
to free).  Long enough for the softlockup detector to trigger.

We used to have a cond_resched() in there but I took it out because the
drop_caches code calls invalidate_mapping_pages() under inode_lock.

The patch adds a nasty flag and puts the cond_resched() back.
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 45426812
...@@ -20,7 +20,7 @@ static void drop_pagecache_sb(struct super_block *sb) ...@@ -20,7 +20,7 @@ static void drop_pagecache_sb(struct super_block *sb)
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
if (inode->i_state & (I_FREEING|I_WILL_FREE)) if (inode->i_state & (I_FREEING|I_WILL_FREE))
continue; continue;
invalidate_mapping_pages(inode->i_mapping, 0, -1); __invalidate_mapping_pages(inode->i_mapping, 0, -1, true);
} }
spin_unlock(&inode_lock); spin_unlock(&inode_lock);
} }
......
...@@ -1610,6 +1610,9 @@ extern int __invalidate_device(struct block_device *); ...@@ -1610,6 +1610,9 @@ extern int __invalidate_device(struct block_device *);
extern int invalidate_partition(struct gendisk *, int); extern int invalidate_partition(struct gendisk *, int);
#endif #endif
extern int invalidate_inodes(struct super_block *); extern int invalidate_inodes(struct super_block *);
unsigned long __invalidate_mapping_pages(struct address_space *mapping,
pgoff_t start, pgoff_t end,
bool be_atomic);
unsigned long invalidate_mapping_pages(struct address_space *mapping, unsigned long invalidate_mapping_pages(struct address_space *mapping,
pgoff_t start, pgoff_t end); pgoff_t start, pgoff_t end);
......
...@@ -253,21 +253,8 @@ void truncate_inode_pages(struct address_space *mapping, loff_t lstart) ...@@ -253,21 +253,8 @@ void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
} }
EXPORT_SYMBOL(truncate_inode_pages); EXPORT_SYMBOL(truncate_inode_pages);
/** unsigned long __invalidate_mapping_pages(struct address_space *mapping,
* invalidate_mapping_pages - Invalidate all the unlocked pages of one inode pgoff_t start, pgoff_t end, bool be_atomic)
* @mapping: the address_space which holds the pages to invalidate
* @start: the offset 'from' which to invalidate
* @end: the offset 'to' which to invalidate (inclusive)
*
* This function only removes the unlocked pages, if you want to
* remove all the pages of one inode, you must call truncate_inode_pages.
*
* invalidate_mapping_pages() will not block on IO activity. It will not
* invalidate pages which are dirty, locked, under writeback or mapped into
* pagetables.
*/
unsigned long invalidate_mapping_pages(struct address_space *mapping,
pgoff_t start, pgoff_t end)
{ {
struct pagevec pvec; struct pagevec pvec;
pgoff_t next = start; pgoff_t next = start;
...@@ -308,9 +295,30 @@ unlock: ...@@ -308,9 +295,30 @@ unlock:
break; break;
} }
pagevec_release(&pvec); pagevec_release(&pvec);
if (likely(!be_atomic))
cond_resched();
} }
return ret; return ret;
} }
/**
* invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
* @mapping: the address_space which holds the pages to invalidate
* @start: the offset 'from' which to invalidate
* @end: the offset 'to' which to invalidate (inclusive)
*
* This function only removes the unlocked pages, if you want to
* remove all the pages of one inode, you must call truncate_inode_pages.
*
* invalidate_mapping_pages() will not block on IO activity. It will not
* invalidate pages which are dirty, locked, under writeback or mapped into
* pagetables.
*/
unsigned long invalidate_mapping_pages(struct address_space *mapping,
pgoff_t start, pgoff_t end)
{
return __invalidate_mapping_pages(mapping, start, end, false);
}
EXPORT_SYMBOL(invalidate_mapping_pages); EXPORT_SYMBOL(invalidate_mapping_pages);
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment