| author | Steven Whitehouse <swhiteho@redhat.com> | 2013-12-18 14:14:52 +0000 |
|---|---|---|
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2014-01-09 12:24:24 -0800 |
| commit | 8553459e73c6da3e5b9da9239dd8ef017181252a (patch) | |
| tree | e20426b756ab2e9133cb6963e2081f358b00de65 /fs | |
| parent | 21b6291b88c534d8c6018ad483e232abab63f644 (diff) | |
| download | linux-3.10-8553459e73c6da3e5b9da9239dd8ef017181252a.tar.gz linux-3.10-8553459e73c6da3e5b9da9239dd8ef017181252a.tar.bz2 linux-3.10-8553459e73c6da3e5b9da9239dd8ef017181252a.zip | |
GFS2: Fix incorrect invalidation for DIO/buffered I/O
commit dfd11184d894cd0a92397b25cac18831a1a6a5bc upstream.
In patch 209806aba9d540dde3db0a5ce72307f85f33468f we allowed
local deferred locks to be granted against a cached exclusive
lock. That opened up a corner case which this patch now
fixes.
The solution to the problem is to check whether we have cached
pages each time we do direct I/O and, if so, to unmap, flush
and invalidate those pages. Since the glock state machine
normally does that for us, this code will mostly be a no-op.
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
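For orientation before reading the patch, here is a minimal, stand-alone sketch of the unmap/flush/invalidate sequence the fix inserts ahead of __blockdev_direct_IO(). This is an editor's illustration, not code from the patch: the helper name gfs2_dio_flush_cached_pages() is hypothetical, the GFS2-specific GIF_SW_PAGED handling is omitted, and the range arithmetic is simplified to plain page alignment. The real change lives inline in gfs2_direct_IO() in fs/gfs2/aops.c and appears in full below.

```c
/*
 * Editor's sketch (hypothetical helper, not part of the patch): flush and
 * drop any cached pages covering a direct I/O range so the DIO operates on
 * current on-disk data. Kernel APIs shown (unmap_shared_mapping_range,
 * filemap_write_and_wait_range, truncate_inode_pages_range) are the ones
 * used by the patch itself.
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static int gfs2_dio_flush_cached_pages(struct address_space *mapping,
				       loff_t offset, loff_t len)
{
	/* Page-aligned byte range covering the direct I/O request */
	loff_t lstart = offset & ~((loff_t)PAGE_CACHE_SIZE - 1);
	loff_t end = PAGE_ALIGN(offset + len) - 1;
	int ret;

	/* Common case: nothing is cached, so this is a no-op */
	if (len == 0 || mapping->nrpages == 0)
		return 0;

	/* Tear down shared mappings so the range cannot be redirtied */
	unmap_shared_mapping_range(mapping, offset, len);

	/* Write back any dirty cached data and wait for it to reach disk */
	ret = filemap_write_and_wait_range(mapping, lstart, end);
	if (ret)
		return ret;

	/* Invalidate the now-clean pages so the DIO sees on-disk contents */
	truncate_inode_pages_range(mapping, lstart, end);
	return 0;
}
```

The ordering is the point of the fix: mappings are torn down first so nothing can dirty the range again, writeback then pushes cached data to disk, and only after that are the pages invalidated, so the subsequent direct I/O cannot race with stale cached copies.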
Diffstat (limited to 'fs')
-rw-r--r-- | fs/gfs2/aops.c | 30 |
1 file changed, 30 insertions, 0 deletions
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 0bad69ed633..76251600cbe 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -999,6 +999,7 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
+	struct address_space *mapping = inode->i_mapping;
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_holder gh;
 	int rv;
@@ -1019,6 +1020,35 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
 	if (rv != 1)
 		goto out; /* dio not valid, fall back to buffered i/o */
 
+	/*
+	 * Now since we are holding a deferred (CW) lock at this point, you
+	 * might be wondering why this is ever needed. There is a case however
+	 * where we've granted a deferred local lock against a cached exclusive
+	 * glock. That is ok provided all granted local locks are deferred, but
+	 * it also means that it is possible to encounter pages which are
+	 * cached and possibly also mapped. So here we check for that and sort
+	 * them out ahead of the dio. The glock state machine will take care of
+	 * everything else.
+	 *
+	 * If in fact the cached glock state (gl->gl_state) is deferred (CW) in
+	 * the first place, mapping->nrpages will always be zero.
+	 */
+	if (mapping->nrpages) {
+		loff_t lstart = offset & (PAGE_CACHE_SIZE - 1);
+		loff_t len = iov_length(iov, nr_segs);
+		loff_t end = PAGE_ALIGN(offset + len) - 1;
+
+		rv = 0;
+		if (len == 0)
+			goto out;
+		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
+			unmap_shared_mapping_range(ip->i_inode.i_mapping, offset, len);
+		rv = filemap_write_and_wait_range(mapping, lstart, end);
+		if (rv)
+			return rv;
+		truncate_inode_pages_range(mapping, lstart, end);
+	}
+
 	rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
 				  iov, offset, nr_segs,
 				  gfs2_get_block_direct, NULL, NULL, 0);