author		Wu Fengguang <fengguang.wu@intel.com>	2011-05-04 19:54:37 -0600
committer	Wu Fengguang <fengguang.wu@intel.com>	2011-07-09 22:09:01 -0700
commit		d46db3d58233be4be980eb1e42eebe7808bcabab (patch)
tree		6d813b33938d915f0c0633e8615d1ffdcc554c96 /mm/backing-dev.c
parent		36715cef0770b7e2547892b7c3197fc024274630 (diff)
writeback: make writeback_control.nr_to_write straight
Pass struct wb_writeback_work all the way down to writeback_sb_inodes(), and initialize the struct writeback_control there.

struct writeback_control is basically designed to control writeback of a single file, but we keep abusing it for writing multiple files in writeback_sb_inodes() and its callers.

This immediately cleans things up: for example, wbc.nr_to_write vs work->nr_pages suddenly starts to make sense, and instead of saving and restoring pages_skipped in writeback_sb_inodes() it can always start with a clean zero value.

It also makes a neat IO pattern change: large dirty files are now written in the full 4MB writeback chunk size, rather than whatever quota remained in wbc->nr_to_write.

Acked-by: Jan Kara <jack@suse.cz>
Proposed-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
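To illustrate the shape this gives the writeback path, here is a simplified sketch of writeback_sb_inodes() after the change: the per-file writeback_control is built locally from the work item instead of being threaded through the callers. The sketch is illustrative only; the exact set of fields copied from struct wb_writeback_work and the per-file chunking logic differ in the real tree.

/*
 * Simplified sketch, not the exact kernel code: writeback_sb_inodes()
 * now receives the work item and builds a fresh writeback_control for
 * its own use, so wbc.nr_to_write covers one per-file chunk while
 * work->nr_pages tracks the overall quota.
 */
static long writeback_sb_inodes(struct super_block *sb,
				struct bdi_writeback *wb,
				struct wb_writeback_work *work)
{
	struct writeback_control wbc = {
		.sync_mode	= work->sync_mode,
		.range_cyclic	= work->range_cyclic,
		/* pages_skipped starts at 0 here; no save/restore needed */
	};
	long wrote = 0;

	/*
	 * For each dirty inode on wb->b_io: set wbc.nr_to_write to the
	 * per-file chunk size, write the inode, then charge the pages
	 * written against work->nr_pages and add them to 'wrote'.
	 */

	return wrote;
}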
Diffstat (limited to 'mm/backing-dev.c')
-rw-r--r--	mm/backing-dev.c	17
1 file changed, 3 insertions(+), 14 deletions(-)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 5f6553ef1ba..7ba303be5e0 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -260,18 +260,6 @@ int bdi_has_dirty_io(struct backing_dev_info *bdi)
 	return wb_has_dirty_io(&bdi->wb);
 }
 
-static void bdi_flush_io(struct backing_dev_info *bdi)
-{
-	struct writeback_control wbc = {
-		.sync_mode		= WB_SYNC_NONE,
-		.older_than_this	= NULL,
-		.range_cyclic		= 1,
-		.nr_to_write		= 1024,
-	};
-
-	writeback_inodes_wb(&bdi->wb, &wbc);
-}
-
 /*
  * kupdated() used to do this. We cannot do it from the bdi_forker_thread()
  * or we risk deadlocking on ->s_umount. The longer term solution would be
@@ -457,9 +445,10 @@ static int bdi_forker_thread(void *ptr)
 			if (IS_ERR(task)) {
 				/*
 				 * If thread creation fails, force writeout of
-				 * the bdi from the thread.
+				 * the bdi from the thread. Hopefully 1024 is
+				 * large enough for efficient IO.
 				 */
-				bdi_flush_io(bdi);
+				writeback_inodes_wb(&bdi->wb, 1024);
 			} else {
 				/*
 				 * The spinlock makes sure we do not lose
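For context on the new call site above: the counterpart change in fs/fs-writeback.c (outside this file's diffstat) turns writeback_inodes_wb() into a function that takes a plain page count and builds the work item itself. A rough sketch of that shape, not the exact code in the tree:

/*
 * Rough sketch of the post-patch writeback_inodes_wb(): callers such as
 * bdi_forker_thread() pass a bare nr_pages (1024 above) and no longer
 * construct their own writeback_control.
 */
long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages)
{
	struct wb_writeback_work work = {
		.nr_pages	= nr_pages,
		.sync_mode	= WB_SYNC_NONE,
		.range_cyclic	= 1,
	};

	/*
	 * Walk the inodes queued on this bdi_writeback and hand them,
	 * together with &work, to writeback_sb_inodes(); the work item's
	 * nr_pages is decremented as pages go out.
	 */

	return nr_pages - work.nr_pages;
}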