author	NeilBrown <neilb@suse.de>	2012-10-11 13:32:13 +1100
committer	NeilBrown <neilb@suse.de>	2012-10-11 13:32:13 +1100
commit	57c67df48866d57b50d72eb198ffcc0cf7a6232d (patch)
tree	ef0ed816c8ff324a8b7f1635f5d2727bbac2d968 /drivers/md
parent	532a2a3fba8df076d65fdf17518eeb327b37a313 (diff)
download	linux-3.10-57c67df48866d57b50d72eb198ffcc0cf7a6232d.tar.gz
	linux-3.10-57c67df48866d57b50d72eb198ffcc0cf7a6232d.tar.bz2
	linux-3.10-57c67df48866d57b50d72eb198ffcc0cf7a6232d.zip
md/raid10: submit IO from originating thread instead of md thread.
Queuing writes to the md thread means that all requests go through the one processor, which may not be able to keep up with very high request rates.

So use the plugging infrastructure to submit all requests on unplug. If a 'schedule' is needed, we fall back on the old approach of handing the requests to the thread for it to handle.

This is nearly identical to a recent patch which provided similar functionality to RAID1.

Signed-off-by: NeilBrown <neilb@suse.de>
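For context, here is a minimal sketch (not part of this commit) of the per-task plugging pattern that the new raid10_unplug() callback hooks into. The submitter-side calls shown live in md's callers and the generic block layer rather than in raid10.c, and the helper name submit_batched() is illustrative only.

/*
 * Sketch: a task opens a plug, submits bios, then unplugs.  While the
 * plug is open, raid10's make_request() uses blk_check_plugged() to
 * queue writes on the per-task plug instead of conf->pending_bio_list.
 */
#include <linux/blkdev.h>
#include <linux/bio.h>

static void submit_batched(struct bio *bio)
{
	struct blk_plug plug;

	blk_start_plug(&plug);		/* open a per-task plug */
	generic_make_request(bio);	/* routed to raid10's make_request() */
	blk_finish_plug(&plug);		/* flush plug callbacks: raid10_unplug()
					 * runs with from_schedule == false and
					 * issues the queued writes directly;
					 * if the task schedules instead, the
					 * callback runs with from_schedule ==
					 * true and hands off to the md thread */
}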
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/raid10.c	57
1 file changed, 54 insertions(+), 3 deletions(-)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index f92e0ed59be..05dc96a950d 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1055,6 +1055,44 @@ static sector_t choose_data_offset(struct r10bio *r10_bio,
return rdev->new_data_offset;
}

+struct raid10_plug_cb {
+ struct blk_plug_cb cb;
+ struct bio_list pending;
+ int pending_cnt;
+};
+
+static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
+{
+ struct raid10_plug_cb *plug = container_of(cb, struct raid10_plug_cb,
+ cb);
+ struct mddev *mddev = plug->cb.data;
+ struct r10conf *conf = mddev->private;
+ struct bio *bio;
+
+ if (from_schedule) {
+ spin_lock_irq(&conf->device_lock);
+ bio_list_merge(&conf->pending_bio_list, &plug->pending);
+ conf->pending_count += plug->pending_cnt;
+ spin_unlock_irq(&conf->device_lock);
+ md_wakeup_thread(mddev->thread);
+ kfree(plug);
+ return;
+ }
+
+ /* we aren't scheduling, so we can do the write-out directly. */
+ bio = bio_list_get(&plug->pending);
+ bitmap_unplug(mddev->bitmap);
+ wake_up(&conf->wait_barrier);
+
+ while (bio) { /* submit pending writes */
+ struct bio *next = bio->bi_next;
+ bio->bi_next = NULL;
+ generic_make_request(bio);
+ bio = next;
+ }
+ kfree(plug);
+}
+
static void make_request(struct mddev *mddev, struct bio * bio)
{
struct r10conf *conf = mddev->private;
@@ -1070,6 +1108,8 @@ static void make_request(struct mddev *mddev, struct bio * bio)
& (REQ_DISCARD | REQ_SECURE));
unsigned long flags;
struct md_rdev *blocked_rdev;
+ struct blk_plug_cb *cb;
+ struct raid10_plug_cb *plug = NULL;
int sectors_handled;
int max_sectors;
int sectors;
@@ -1421,11 +1461,22 @@ retry_write:
mbio->bi_private = r10_bio;
atomic_inc(&r10_bio->remaining);
+
+ cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug));
+ if (cb)
+ plug = container_of(cb, struct raid10_plug_cb, cb);
+ else
+ plug = NULL;
spin_lock_irqsave(&conf->device_lock, flags);
- bio_list_add(&conf->pending_bio_list, mbio);
- conf->pending_count++;
+ if (plug) {
+ bio_list_add(&plug->pending, mbio);
+ plug->pending_cnt++;
+ } else {
+ bio_list_add(&conf->pending_bio_list, mbio);
+ conf->pending_count++;
+ }
spin_unlock_irqrestore(&conf->device_lock, flags);
- if (!mddev_check_plugged(mddev))
+ if (!plug)
md_wakeup_thread(mddev->thread);
if (!r10_bio->devs[i].repl_bio)