Commit 045a2529 authored by Wu Fengguang, committed by Linus Torvalds

readahead: move the random read case to bottom

Split all readahead cases, and move the random read case to the bottom.

No behavior changes.

This prepares for the introduction of context readahead, and makes it
easy to insert accounting/tracing points for each case.
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Cc: Vladislav Bolkhovitin <vst@vlnb.net>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Ying Han <yinghan@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent dc566127
@@ -339,14 +339,18 @@ ondemand_readahead(struct address_space *mapping,
 		   unsigned long req_size)
 {
 	unsigned long max = max_sane_readahead(ra->ra_pages);
-	pgoff_t prev_offset;
-	int sequential;
+
+	/*
+	 * start of file
+	 */
+	if (!offset)
+		goto initial_readahead;
 
 	/*
 	 * It's the expected callback offset, assume sequential access.
 	 * Ramp up sizes, and push forward the readahead window.
 	 */
-	if (offset && (offset == (ra->start + ra->size - ra->async_size) ||
-			offset == (ra->start + ra->size))) {
+	if ((offset == (ra->start + ra->size - ra->async_size) ||
+	     offset == (ra->start + ra->size))) {
 		ra->start += ra->size;
 		ra->size = get_next_ra_size(ra, max);
@@ -354,18 +358,6 @@ ondemand_readahead(struct address_space *mapping,
 		goto readit;
 	}
 
-	prev_offset = ra->prev_pos >> PAGE_CACHE_SHIFT;
-	sequential = offset - prev_offset <= 1UL || req_size > max;
-
-	/*
-	 * Standalone, small read.
-	 * Read as is, and do not pollute the readahead state.
-	 */
-	if (!hit_readahead_marker && !sequential) {
-		return __do_page_cache_readahead(mapping, filp,
-						offset, req_size, 0);
-	}
-
 	/*
 	 * Hit a marked page without valid readahead state.
 	 * E.g. interleaved reads.
@@ -391,12 +383,24 @@ ondemand_readahead(struct address_space *mapping,
 	}
 
 	/*
-	 * It may be one of
-	 *	- first read on start of file
-	 *	- sequential cache miss
-	 *	- oversize random read
-	 * Start readahead for it.
+	 * oversize read
 	 */
+	if (req_size > max)
+		goto initial_readahead;
+
+	/*
+	 * sequential cache miss
+	 */
+	if (offset - (ra->prev_pos >> PAGE_CACHE_SHIFT) <= 1UL)
+		goto initial_readahead;
+
+	/*
+	 * standalone, small random read
+	 * Read as is, and do not pollute the readahead state.
+	 */
+	return __do_page_cache_readahead(mapping, filp, offset, req_size, 0);
+
+initial_readahead:
 	ra->start = offset;
 	ra->size = get_init_ra_size(req_size, max);
 	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;
...
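
The commit message motivates the split by making it easy to insert accounting/tracing points per case. As a rough illustration of that idea, here is a small user-space model of the restructured decision ladder with one counter per case. The ra_case enum, the classify() and account() helpers, and the simplified ra_state are hypothetical stand-ins for illustration only; they are not code from this commit or from the kernel.

/*
 * User-space model of the decision ladder this commit creates in
 * ondemand_readahead().  All names here are illustrative.
 */
#include <stdio.h>

/* Simplified stand-in for struct file_ra_state. */
struct ra_state {
	unsigned long start;      /* start of current readahead window */
	unsigned long size;       /* window size, in pages */
	unsigned long async_size; /* pages left when the async marker fires */
	unsigned long prev_pos;   /* previous read position; treated here as
				   * a page index (the kernel stores bytes) */
};

enum ra_case {
	RA_START_OF_FILE,
	RA_SEQUENTIAL,
	RA_MARKER_HIT,
	RA_OVERSIZE,
	RA_CACHE_MISS,
	RA_RANDOM,
	RA_NR_CASES
};

static unsigned long ra_case_count[RA_NR_CASES];

/* One accounting point per case, as the commit message envisions. */
static void account(enum ra_case c)
{
	ra_case_count[c]++;
}

/*
 * Mirrors the restructured control flow: each case is a separate test,
 * with the small random read falling through to the bottom.
 */
static enum ra_case classify(const struct ra_state *ra, int hit_marker,
			     unsigned long offset, unsigned long req_size,
			     unsigned long max)
{
	if (!offset)
		return RA_START_OF_FILE;
	if (offset == ra->start + ra->size - ra->async_size ||
	    offset == ra->start + ra->size)
		return RA_SEQUENTIAL;
	if (hit_marker)
		return RA_MARKER_HIT;
	if (req_size > max)
		return RA_OVERSIZE;
	if (offset - ra->prev_pos <= 1UL)
		return RA_CACHE_MISS;
	return RA_RANDOM;
}

int main(void)
{
	struct ra_state ra = { .start = 0, .size = 8, .async_size = 4,
			       .prev_pos = 0 };

	account(classify(&ra, 0, 0, 4, 32));    /* start of file */
	account(classify(&ra, 0, 4, 4, 32));    /* async marker offset */
	account(classify(&ra, 0, 1000, 4, 32)); /* small random read */

	printf("start=%lu seq=%lu random=%lu\n",
	       ra_case_count[RA_START_OF_FILE],
	       ra_case_count[RA_SEQUENTIAL],
	       ra_case_count[RA_RANDOM]);
	return 0;
}

The point of the model is structural: because each case is now a distinct branch rather than part of a combined test, a counter or tracepoint can be dropped into any one of them without disentangling the others, which is what the original !hit_readahead_marker && !sequential check made awkward.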