author      Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>    2011-02-28 17:04:04 +0530
committer   Eric Van Hensbergen <ericvh@gmail.com>                2011-03-15 09:57:40 -0500
commit      e959b54901e835f062ac8d44107bc543b66f0364
tree        89e8dae4796391a7effecd07b2950a08c1b5e410
parent      fa6ea16160c72c448e2728dab4b6b0a133fdfc98
fs/9p: Add direct IO support in cached mode
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Venkateswararao Jujjuri <jvrao@linux.vnet.ibm.com>
Signed-off-by: Eric Van Hensbergen <ericvh@gmail.com>
 fs/9p/vfs_addr.c |   5
 fs/9p/vfs_file.c | 117
 2 files changed, 116 insertions, 6 deletions
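
The patch lets O_DIRECT opens work on a 9p mount that uses the cached file operations: a direct read first flushes any dirty page-cache data for the requested range and then reuses the uncached v9fs_file_read(), while a direct write flushes and invalidates the affected pages before calling v9fs_file_write(), falling back to a buffered do_sync_write() if a page cannot be invalidated (-EBUSY). A minimal userspace smoke test is sketched below; the mount point /mnt/9p, the test file name, and the 4096-byte alignment are illustrative assumptions, not part of the patch.

    /* Hypothetical smoke test for O_DIRECT on a cached 9p mount.
     * Assumes a 9p filesystem is already mounted at /mnt/9p. */
    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            void *buf;
            int fd;
            ssize_t n;

            /* O_DIRECT generally wants an aligned buffer and length. */
            if (posix_memalign(&buf, 4096, 4096))
                    return 1;
            memset(buf, 'x', 4096);

            fd = open("/mnt/9p/directio-test", O_CREAT | O_RDWR | O_DIRECT, 0644);
            if (fd < 0) {
                    perror("open");  /* fails if O_DIRECT is unsupported */
                    return 1;
            }

            n = write(fd, buf, 4096);        /* exercises the direct write path */
            printf("wrote %zd bytes\n", n);

            if (lseek(fd, 0, SEEK_SET) == 0) {
                    n = read(fd, buf, 4096); /* exercises the direct read path */
                    printf("read %zd bytes\n", n);
            }

            close(fd);
            free(buf);
            return 0;
    }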
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index ee455526ca5..2524e4cbb8e 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -254,8 +254,9 @@ static int v9fs_launder_page(struct page *page)
  * with an error.
  *
  */
-ssize_t v9fs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-               loff_t pos, unsigned long nr_segs)
+static ssize_t
+v9fs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
+               loff_t pos, unsigned long nr_segs)
 {
         /*
          * FIXME
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 3cff25e759e..78bcb97c342 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -579,15 +579,124 @@ out_unlock:
         return VM_FAULT_NOPAGE;
 }
 
+static ssize_t
+v9fs_direct_read(struct file *filp, char __user *udata, size_t count,
+                 loff_t *offsetp)
+{
+        loff_t size, offset;
+        struct inode *inode;
+        struct address_space *mapping;
+
+        offset = *offsetp;
+        mapping = filp->f_mapping;
+        inode = mapping->host;
+        if (!count)
+                return 0;
+        size = i_size_read(inode);
+        if (offset < size)
+                filemap_write_and_wait_range(mapping, offset,
+                                             offset + count - 1);
+
+        return v9fs_file_read(filp, udata, count, offsetp);
+}
+
+/**
+ * v9fs_cached_file_read - read from a file
+ * @filp: file pointer to read
+ * @udata: user data buffer to read data into
+ * @count: size of buffer
+ * @offset: offset at which to read data
+ *
+ */
+static ssize_t
+v9fs_cached_file_read(struct file *filp, char __user *data, size_t count,
+                      loff_t *offset)
+{
+        if (filp->f_flags & O_DIRECT)
+                return v9fs_direct_read(filp, data, count, offset);
+        return do_sync_read(filp, data, count, offset);
+}
+
+static ssize_t
+v9fs_direct_write(struct file *filp, const char __user * data,
+                  size_t count, loff_t *offsetp)
+{
+        loff_t offset;
+        ssize_t retval;
+        struct inode *inode;
+        struct address_space *mapping;
+
+        offset = *offsetp;
+        mapping = filp->f_mapping;
+        inode = mapping->host;
+        if (!count)
+                return 0;
+
+        mutex_lock(&inode->i_mutex);
+        retval = filemap_write_and_wait_range(mapping, offset,
+                                              offset + count - 1);
+        if (retval)
+                goto err_out;
+        /*
+         * After a write we want buffered reads to be sure to go to disk to get
+         * the new data. We invalidate clean cached page from the region we're
+         * about to write. We do this *before* the write so that if we fail
+         * here we fall back to buffered write
+         */
+        if (mapping->nrpages) {
+                pgoff_t pg_start = offset >> PAGE_CACHE_SHIFT;
+                pgoff_t pg_end   = (offset + count - 1) >> PAGE_CACHE_SHIFT;
+
+                retval = invalidate_inode_pages2_range(mapping,
+                                                       pg_start, pg_end);
+                /*
+                 * If a page can not be invalidated, fall back
+                 * to buffered write.
+                 */
+                if (retval) {
+                        if (retval == -EBUSY)
+                                goto buff_write;
+                        goto err_out;
+                }
+        }
+        retval = v9fs_file_write(filp, data, count, offsetp);
+err_out:
+        mutex_unlock(&inode->i_mutex);
+        return retval;
+
+buff_write:
+        mutex_unlock(&inode->i_mutex);
+        return do_sync_write(filp, data, count, offsetp);
+}
+
+/**
+ * v9fs_cached_file_write - write to a file
+ * @filp: file pointer to write
+ * @data: data buffer to write data from
+ * @count: size of buffer
+ * @offset: offset at which to write data
+ *
+ */
+static ssize_t
+v9fs_cached_file_write(struct file *filp, const char __user * data,
+                       size_t count, loff_t *offset)
+{
+
+        if (filp->f_flags & O_DIRECT)
+                return v9fs_direct_write(filp, data, count, offset);
+        return do_sync_write(filp, data, count, offset);
+}
+
 static const struct vm_operations_struct v9fs_file_vm_ops = {
         .fault = filemap_fault,
         .page_mkwrite = v9fs_vm_page_mkwrite,
 };
 
+
 const struct file_operations v9fs_cached_file_operations = {
         .llseek = generic_file_llseek,
-        .read = do_sync_read,
-        .write = do_sync_write,
+        .read = v9fs_cached_file_read,
+        .write = v9fs_cached_file_write,
         .aio_read = generic_file_aio_read,
         .aio_write = generic_file_aio_write,
         .open = v9fs_file_open,
@@ -599,8 +708,8 @@ const struct file_operations v9fs_cached_file_operations = {
 
 const struct file_operations v9fs_cached_file_operations_dotl = {
         .llseek = generic_file_llseek,
-        .read = do_sync_read,
-        .write = do_sync_write,
+        .read = v9fs_cached_file_read,
+        .write = v9fs_cached_file_write,
         .aio_read = generic_file_aio_read,
         .aio_write = generic_file_aio_write,
         .open = v9fs_file_open,