author		Fengguang Wu <wfg@mail.ustc.edu.cn>		2007-10-16 01:24:35 -0700
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-16 09:42:52 -0700
commit		7ff81078d8b9f3d05a27b7bd3786ffb1ef1b0d1f (patch)
tree		ac73cf0c8325783a28c4d16c783f6fd96d17be7c /mm/filemap.c
parent		6b10c6c9fbfe754e8482efb8c8b84f8e40c0f2eb (diff)
readahead: remove the local copy of ra in do_generic_mapping_read()
The local copy of ra in do_generic_mapping_read() can now go away. It predates readahead(req_size), from a time when the readahead code was called on *every* single page; a local copy had to be made then to reduce the chance of the readahead state being overwritten by a concurrent reader. More details in: Linux: Random File I/O Regressions In 2.6 <http://kerneltrap.org/node/3039>

Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Fengguang Wu <wfg@mail.ustc.edu.cn>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
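For illustration, a minimal standalone C sketch of the before/after pattern this patch removes: the old code snapshotted the caller's readahead state into a local, worked on the copy, and wrote it back on exit; the new code updates the caller's state in place. struct ra_state and the read_pages_* functions below are hypothetical stand-ins, not the kernel's types.

#include <stdio.h>

/* Hypothetical stand-in for the kernel's struct file_ra_state;
 * only the one field this patch touches is modelled. */
struct ra_state {
	long prev_pos;
};

/* Old pattern (what this patch removes): snapshot the caller's
 * state, work on the local copy, write it back on exit.  This
 * limited the damage a concurrent reader could do to *_ra back
 * when readahead ran on every single page. */
static void read_pages_old(struct ra_state *_ra)
{
	struct ra_state ra = *_ra;	/* local copy */

	ra.prev_pos += 1;		/* mutate the copy */
	*_ra = ra;			/* write back, as the old out: label did */
}

/* New pattern: the readahead entry points run far less often,
 * so the state can be updated through the pointer directly. */
static void read_pages_new(struct ra_state *ra)
{
	ra->prev_pos += 1;
}

int main(void)
{
	struct ra_state ra = { .prev_pos = 0 };

	read_pages_old(&ra);
	read_pages_new(&ra);
	printf("prev_pos = %ld\n", ra.prev_pos);	/* prints: prev_pos = 2 */
	return 0;
}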
Diffstat (limited to 'mm/filemap.c')
-rw-r--r--	mm/filemap.c	20
1 file changed, 9 insertions, 11 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index bbcca456d8a..3c97bdc74a8 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -859,7 +859,7 @@ static void shrink_readahead_size_eio(struct file *filp,
  * It may be NULL.
  */
 void do_generic_mapping_read(struct address_space *mapping,
-			     struct file_ra_state *_ra,
+			     struct file_ra_state *ra,
 			     struct file *filp,
 			     loff_t *ppos,
 			     read_descriptor_t *desc,
@@ -874,13 +874,12 @@ void do_generic_mapping_read(struct address_space *mapping,
 	unsigned int prev_offset;
 	struct page *cached_page;
 	int error;
-	struct file_ra_state ra = *_ra;
 
 	cached_page = NULL;
 	index = *ppos >> PAGE_CACHE_SHIFT;
 	next_index = index;
-	prev_index = ra.prev_pos >> PAGE_CACHE_SHIFT;
-	prev_offset = ra.prev_pos & (PAGE_CACHE_SIZE-1);
+	prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
+	prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
 	last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
 	offset = *ppos & ~PAGE_CACHE_MASK;
 
@@ -895,7 +894,7 @@ find_page:
 		page = find_get_page(mapping, index);
 		if (!page) {
 			page_cache_sync_readahead(mapping,
-					&ra, filp,
+					ra, filp,
 					index, last_index - index);
 			page = find_get_page(mapping, index);
 			if (unlikely(page == NULL))
@@ -903,7 +902,7 @@ find_page:
 		}
 		if (PageReadahead(page)) {
 			page_cache_async_readahead(mapping,
-					&ra, filp, page,
+					ra, filp, page,
 					index, last_index - index);
 		}
 		if (!PageUptodate(page))
@@ -1014,7 +1013,7 @@ readpage:
 			}
 			unlock_page(page);
 			error = -EIO;
-			shrink_readahead_size_eio(filp, &ra);
+			shrink_readahead_size_eio(filp, ra);
 			goto readpage_error;
 		}
 		unlock_page(page);
@@ -1054,10 +1053,9 @@ no_cached_page:
 	}
 
 out:
-	*_ra = ra;
-	_ra->prev_pos = prev_index;
-	_ra->prev_pos <<= PAGE_CACHE_SHIFT;
-	_ra->prev_pos |= prev_offset;
+	ra->prev_pos = prev_index;
+	ra->prev_pos <<= PAGE_CACHE_SHIFT;
+	ra->prev_pos |= prev_offset;
 
 	*ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
 	if (cached_page)
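The out: hunk packs the last-read page index and the offset within that page into the single prev_pos field, which the top of the function later splits apart again. A minimal sketch of that round trip, assuming PAGE_CACHE_SHIFT is 12 (4 KiB pages):

#include <stdio.h>

#define PAGE_CACHE_SHIFT 12			/* assume 4 KiB pages */
#define PAGE_CACHE_SIZE  (1ULL << PAGE_CACHE_SHIFT)

int main(void)
{
	unsigned long long prev_index = 3;	/* last-read page index */
	unsigned long long prev_offset = 0x123;	/* offset within that page */
	unsigned long long prev_pos;

	/* Packing, as in the out: hunk above. */
	prev_pos = prev_index;
	prev_pos <<= PAGE_CACHE_SHIFT;
	prev_pos |= prev_offset;

	/* Unpacking, as near the top of do_generic_mapping_read(). */
	printf("index=%llu offset=%llu\n",
	       prev_pos >> PAGE_CACHE_SHIFT,
	       prev_pos & (PAGE_CACHE_SIZE - 1));
	return 0;
}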