author | Hugh Dickins <hughd@google.com> | 2011-07-25 17:12:33 -0700
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-07-25 20:57:11 -0700
commit | 71f0e07a605fad1fb6b288e4dc1dd8dfa78f4872 |
tree | 1eac7c25f32ac88bba52a31179989e62773aa079 |
parent | 708e3508c2a2204cc276dcdb543009a441bfe91b |
tmpfs: refine shmem_file_splice_read
Tidy up shmem_file_splice_read():

- Remove readahead: okay, we could implement shmem readahead on swap,
  but have never done so before, swap being the slow exceptional path.
- Use shmem_getpage() instead of find_or_create_page() plus ->readpage()
  (see the excerpt below).
- Remove several comments: sorry, I found them more distracting than
  helpful, and this will not be the reference version of splice_read().
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
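The heart of the change is the new page-gathering loop: shmem_getpage() finds, allocates, or swaps in the page as needed and returns it locked and uptodate, so the old find/allocate/readahead/->readpage() dance collapses into a few lines. Excerpted from the patch below:

```c
/* New lookup loop (from the patch below): shmem_getpage() handles
 * lookup, allocation, and swap-in, and returns the page locked and
 * uptodate, so no readahead or ->readpage() call is needed here.
 */
while (spd.nr_pages < nr_pages) {
	page = NULL;
	error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
	if (error)
		break;
	unlock_page(page);
	spd.pages[spd.nr_pages++] = page;
	index++;
}
```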
-rw-r--r-- | mm/shmem.c | 138
1 file changed, 19 insertions(+), 119 deletions(-)
```diff
diff --git a/mm/shmem.c b/mm/shmem.c
index d176e488f04d..f96614526d1c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1850,6 +1850,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
 				unsigned int flags)
 {
 	struct address_space *mapping = in->f_mapping;
+	struct inode *inode = mapping->host;
 	unsigned int loff, nr_pages, req_pages;
 	struct page *pages[PIPE_DEF_BUFFERS];
 	struct partial_page partial[PIPE_DEF_BUFFERS];
@@ -1865,7 +1866,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
 		.spd_release = spd_release_page,
 	};
 
-	isize = i_size_read(in->f_mapping->host);
+	isize = i_size_read(inode);
 	if (unlikely(*ppos >= isize))
 		return 0;
 
@@ -1881,153 +1882,57 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
 	req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 	nr_pages = min(req_pages, pipe->buffers);
 
-	/*
-	 * Lookup the (hopefully) full range of pages we need.
-	 */
 	spd.nr_pages = find_get_pages_contig(mapping, index,
 						nr_pages, spd.pages);
 	index += spd.nr_pages;
-
-	/*
-	 * If find_get_pages_contig() returned fewer pages than we needed,
-	 * readahead/allocate the rest and fill in the holes.
-	 */
-	if (spd.nr_pages < nr_pages)
-		page_cache_sync_readahead(mapping, &in->f_ra, in,
-				index, req_pages - spd.nr_pages);
-
 	error = 0;
-	while (spd.nr_pages < nr_pages) {
-		/*
-		 * Page could be there, find_get_pages_contig() breaks on
-		 * the first hole.
-		 */
-		page = find_get_page(mapping, index);
-		if (!page) {
-			/*
-			 * page didn't exist, allocate one.
-			 */
-			page = page_cache_alloc_cold(mapping);
-			if (!page)
-				break;
-
-			error = add_to_page_cache_lru(page, mapping, index,
-						GFP_KERNEL);
-			if (unlikely(error)) {
-				page_cache_release(page);
-				if (error == -EEXIST)
-					continue;
-				break;
-			}
-			/*
-			 * add_to_page_cache() locks the page, unlock it
-			 * to avoid convoluting the logic below even more.
-			 */
-			unlock_page(page);
-		}
 
+	while (spd.nr_pages < nr_pages) {
+		page = NULL;
+		error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
+		if (error)
+			break;
+		unlock_page(page);
 		spd.pages[spd.nr_pages++] = page;
 		index++;
 	}
 
-	/*
-	 * Now loop over the map and see if we need to start IO on any
-	 * pages, fill in the partial map, etc.
-	 */
 	index = *ppos >> PAGE_CACHE_SHIFT;
 	nr_pages = spd.nr_pages;
 	spd.nr_pages = 0;
+
 	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
 		unsigned int this_len;
 
 		if (!len)
 			break;
 
-		/*
-		 * this_len is the max we'll use from this page
-		 */
 		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
 		page = spd.pages[page_nr];
 
-		if (PageReadahead(page))
-			page_cache_async_readahead(mapping, &in->f_ra, in,
-					page, index, req_pages - page_nr);
-
-		/*
-		 * If the page isn't uptodate, we may need to start io on it
-		 */
-		if (!PageUptodate(page)) {
-			lock_page(page);
-
-			/*
-			 * Page was truncated, or invalidated by the
-			 * filesystem.  Redo the find/create, but this time the
-			 * page is kept locked, so there's no chance of another
-			 * race with truncate/invalidate.
-			 */
-			if (!page->mapping) {
-				unlock_page(page);
-				page = find_or_create_page(mapping, index,
-						mapping_gfp_mask(mapping));
-
-				if (!page) {
-					error = -ENOMEM;
-					break;
-				}
-				page_cache_release(spd.pages[page_nr]);
-				spd.pages[page_nr] = page;
-			}
-			/*
-			 * page was already under io and is now done, great
-			 */
-			if (PageUptodate(page)) {
-				unlock_page(page);
-				goto fill_it;
-			}
-
-			/*
-			 * need to read in the page
-			 */
-			error = mapping->a_ops->readpage(in, page);
-			if (unlikely(error)) {
-				/*
-				 * We really should re-lookup the page here,
-				 * but it complicates things a lot. Instead
-				 * lets just do what we already stored, and
-				 * we'll get it the next time we are called.
-				 */
-				if (error == AOP_TRUNCATED_PAGE)
-					error = 0;
-
+		if (!PageUptodate(page) || page->mapping != mapping) {
+			page = NULL;
+			error = shmem_getpage(inode, index, &page,
+							SGP_CACHE, NULL);
+			if (error)
 				break;
-			}
+			unlock_page(page);
+			page_cache_release(spd.pages[page_nr]);
+			spd.pages[page_nr] = page;
 		}
-fill_it:
-		/*
-		 * i_size must be checked after PageUptodate.
-		 */
-		isize = i_size_read(mapping->host);
+
+		isize = i_size_read(inode);
 		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
 		if (unlikely(!isize || index > end_index))
 			break;
 
-		/*
-		 * if this is the last page, see if we need to shrink
-		 * the length and stop
-		 */
 		if (end_index == index) {
 			unsigned int plen;
 
-			/*
-			 * max good bytes in this page
-			 */
 			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
 			if (plen <= loff)
 				break;
 
-			/*
-			 * force quit after adding this page
-			 */
 			this_len = min(this_len, plen - loff);
 			len = this_len;
 		}
@@ -2040,13 +1945,8 @@ fill_it:
 		index++;
 	}
 
-	/*
-	 * Release any pages at the end, if we quit early. 'page_nr' is how far
-	 * we got, 'nr_pages' is how many pages are in the map.
-	 */
 	while (page_nr < nr_pages)
 		page_cache_release(spd.pages[page_nr++]);
-	in->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
 
 	if (spd.nr_pages)
 		error = splice_to_pipe(pipe, &spd);
```
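For context, shmem_file_splice_read() services splice(2) when the source is a tmpfs file. A minimal userspace sketch that exercises this path is below; the /dev/shm path is an assumption (any tmpfs mount works) and the buffer size is illustrative:

```c
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int pipefd[2];
	loff_t off = 0;
	ssize_t n;

	/* Assumes /dev/shm is tmpfs-backed, as on most Linux systems. */
	int fd = open("/dev/shm/splice-demo", O_RDWR | O_CREAT | O_TRUNC, 0600);
	if (fd < 0 || pipe(pipefd) < 0) {
		perror("setup");
		return 1;
	}
	if (write(fd, "hello from tmpfs\n", 17) != 17) {
		perror("write");
		return 1;
	}

	/* The read side of this splice is served by shmem_file_splice_read(). */
	n = splice(fd, &off, pipefd[1], NULL, sizeof(buf), 0);
	if (n < 0) {
		perror("splice");
		return 1;
	}

	/* Drain the pipe to show the spliced bytes arrived. */
	n = read(pipefd[0], buf, sizeof(buf));
	if (n > 0)
		fwrite(buf, 1, (size_t)n, stdout);

	close(pipefd[0]);
	close(pipefd[1]);
	close(fd);
	unlink("/dev/shm/splice-demo");
	return 0;
}
```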