| author | Nick Piggin <npiggin@suse.de> | 2007-05-06 14:49:04 -0700 |
| --- | --- | --- |
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-05-07 12:12:51 -0700 |
| commit | 6fe6900e1e5b6fa9e5c59aa5061f244fe3f467e2 (patch) | |
| tree | 8bbfe5072279227cc50a941ad4813908082426a1 /fs/ntfs/super.c | |
| parent | 714b8171af9c930a59a0da8f6fe50518e70ab035 (diff) | |
mm: make read_cache_page synchronous
Ensure pages are uptodate after returning from read_cache_page, which allows
us to cut out most of the filesystem-internal PageUptodate calls.
I did not look closely down the call chains, but this appears to fix 7
possible use-before-uptodate bugs in hfs, 2 in hfsplus, 1 in jfs, a few in
ecryptfs, 1 in jffs2, and a possible case of cleared data being overwritten by
readpage in block2mtd, all depending on whether the filler is async and/or
can return with a !uptodate page.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
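
On the caller side, the change the message describes boils down to the pattern below. This is a minimal sketch, not code from the patch: example_read_first_word() and its surrounding logic are invented for illustration, and only the read_mapping_page()/IS_ERR() shape mirrors what the NTFS hunks further down switch to.

```c
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/types.h>

/*
 * Hypothetical caller (not part of the patch): with read_cache_page()
 * made synchronous, read_mapping_page() returns either an uptodate page
 * or an ERR_PTR(), so the caller-side wait_on_page_locked() +
 * PageUptodate() + page_cache_release() error path disappears.
 */
static int example_read_first_word(struct address_space *mapping,
                                   pgoff_t index, u32 *out)
{
        struct page *page = read_mapping_page(mapping, index, NULL);
        u32 *kaddr;

        if (IS_ERR(page))
                return PTR_ERR(page);           /* read error, sync or async */

        kaddr = kmap_atomic(page, KM_USER0);    /* page is uptodate here */
        *out = *kaddr;
        kunmap_atomic(kaddr, KM_USER0);
        page_cache_release(page);               /* drop the page cache reference */
        return 0;
}
```

The two NTFS hunks below are essentially this transformation applied to get_nr_free_clusters() and __get_nr_free_mft_records().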
Diffstat (limited to 'fs/ntfs/super.c')
-rw-r--r-- | fs/ntfs/super.c | 30 |
1 file changed, 4 insertions, 26 deletions
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index 1594c90b716..2ddde534db0 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -2471,7 +2471,6 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
         s64 nr_free = vol->nr_clusters;
         u32 *kaddr;
         struct address_space *mapping = vol->lcnbmp_ino->i_mapping;
-        filler_t *readpage = (filler_t*)mapping->a_ops->readpage;
         struct page *page;
         pgoff_t index, max_index;
 
@@ -2494,24 +2493,14 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
                  * Read the page from page cache, getting it from backing store
                  * if necessary, and increment the use count.
                  */
-                page = read_cache_page(mapping, index, (filler_t*)readpage,
-                                NULL);
+                page = read_mapping_page(mapping, index, NULL);
                 /* Ignore pages which errored synchronously. */
                 if (IS_ERR(page)) {
-                        ntfs_debug("Sync read_cache_page() error. Skipping "
+                        ntfs_debug("read_mapping_page() error. Skipping "
                                         "page (index 0x%lx).", index);
                         nr_free -= PAGE_CACHE_SIZE * 8;
                         continue;
                 }
-                wait_on_page_locked(page);
-                /* Ignore pages which errored asynchronously. */
-                if (!PageUptodate(page)) {
-                        ntfs_debug("Async read_cache_page() error. Skipping "
-                                        "page (index 0x%lx).", index);
-                        page_cache_release(page);
-                        nr_free -= PAGE_CACHE_SIZE * 8;
-                        continue;
-                }
                 kaddr = (u32*)kmap_atomic(page, KM_USER0);
                 /*
                  * For each 4 bytes, subtract the number of set bits. If this
@@ -2562,7 +2551,6 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
 {
         u32 *kaddr;
         struct address_space *mapping = vol->mftbmp_ino->i_mapping;
-        filler_t *readpage = (filler_t*)mapping->a_ops->readpage;
         struct page *page;
         pgoff_t index;
 
@@ -2576,21 +2564,11 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
                  * Read the page from page cache, getting it from backing store
                  * if necessary, and increment the use count.
                  */
-                page = read_cache_page(mapping, index, (filler_t*)readpage,
-                                NULL);
+                page = read_mapping_page(mapping, index, NULL);
                 /* Ignore pages which errored synchronously. */
                 if (IS_ERR(page)) {
-                        ntfs_debug("Sync read_cache_page() error. Skipping "
-                                        "page (index 0x%lx).", index);
-                        nr_free -= PAGE_CACHE_SIZE * 8;
-                        continue;
-                }
-                wait_on_page_locked(page);
-                /* Ignore pages which errored asynchronously. */
-                if (!PageUptodate(page)) {
-                        ntfs_debug("Async read_cache_page() error. Skipping "
+                        ntfs_debug("read_mapping_page() error. Skipping "
                                         "page (index 0x%lx).", index);
-                        page_cache_release(page);
                         nr_free -= PAGE_CACHE_SIZE * 8;
                         continue;
                 }
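
This view is limited to fs/ntfs/super.c, so the mm/filemap.c half of the commit is not shown. As a rough sketch only, grounded in the commit message and in the error path deleted above, the wait-and-check step that callers such as NTFS used to perform presumably now happens once inside read_cache_page() itself, along these lines (the helper name below is made up, not taken from the patch):

```c
#include <linux/err.h>
#include <linux/pagemap.h>

/*
 * Sketch only, not the patch's code: given a page returned by a
 * possibly-asynchronous filler, this is the wait-and-check step that the
 * deleted NTFS lines used to carry and that, per the commit message, is
 * now performed inside read_cache_page() before it returns.
 */
static struct page *wait_for_filler_sketch(struct page *page)
{
        if (IS_ERR(page))
                return page;                    /* synchronous filler error */
        wait_on_page_locked(page);              /* let an async read finish */
        if (!PageUptodate(page)) {              /* asynchronous filler error */
                page_cache_release(page);
                return ERR_PTR(-EIO);
        }
        return page;                    /* guaranteed uptodate from here on */
}
```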