author    Jens Axboe <jaxboe@fusionio.com>  2011-03-10 08:52:07 +0100
committer Jens Axboe <jaxboe@fusionio.com>  2011-03-10 08:52:07 +0100
commit    7eaceaccab5f40bbfda044629a6298616aeaed50
tree      33954d12f63e25a47eb6d86ef3d3d0a5e62bf752 /drivers
parent    73c101011926c5832e6e141682180c4debe2cf45
block: remove per-queue plugging
Code has been converted over to the new explicit on-stack plugging, and delay users have been converted to use the new API for that. So let's kill off the old plugging along with aops->sync_page().

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
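For readers coming to this patch cold: the replacement model puts the plug on the submitter's stack instead of on the request queue. A minimal sketch of the pattern the converted call sites use, mirroring the target_core_iblock.c and dm-kcopyd.c hunks below (the function name is illustrative, not part of the patch):

	#include <linux/blkdev.h>
	#include <linux/bio.h>

	/* Illustrative only: batch a chain of bios under one on-stack plug. */
	static void example_submit_bio_chain(int rw, struct bio *bio)
	{
		struct blk_plug plug;

		blk_start_plug(&plug);		/* start batching for this task */
		while (bio) {
			struct bio *next = bio->bi_next;

			bio->bi_next = NULL;
			submit_bio(rw, bio);	/* queued under the plug */
			bio = next;
		}
		blk_finish_plug(&plug);		/* flush the batched requests */
	}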
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/cciss.c                |   6
-rw-r--r--  drivers/block/cpqarray.c             |   3
-rw-r--r--  drivers/block/drbd/drbd_actlog.c     |   2
-rw-r--r--  drivers/block/drbd/drbd_bitmap.c     |   1
-rw-r--r--  drivers/block/drbd/drbd_int.h        |  14
-rw-r--r--  drivers/block/drbd/drbd_main.c       |  33
-rw-r--r--  drivers/block/drbd/drbd_receiver.c   |  20
-rw-r--r--  drivers/block/drbd/drbd_req.c        |   4
-rw-r--r--  drivers/block/drbd/drbd_worker.c     |   1
-rw-r--r--  drivers/block/drbd/drbd_wrappers.h   |  18
-rw-r--r--  drivers/block/floppy.c               |   1
-rw-r--r--  drivers/block/loop.c                 |  13
-rw-r--r--  drivers/block/pktcdvd.c              |   2
-rw-r--r--  drivers/block/umem.c                 |  16
-rw-r--r--  drivers/ide/ide-atapi.c              |   3
-rw-r--r--  drivers/ide/ide-io.c                 |   4
-rw-r--r--  drivers/ide/ide-park.c               |   2
-rw-r--r--  drivers/md/bitmap.c                  |   3
-rw-r--r--  drivers/md/dm-crypt.c                |   9
-rw-r--r--  drivers/md/dm-kcopyd.c               |  52
-rw-r--r--  drivers/md/dm-raid.c                 |   2
-rw-r--r--  drivers/md/dm-raid1.c                |   2
-rw-r--r--  drivers/md/dm-table.c                |  24
-rw-r--r--  drivers/md/dm.c                      |  33
-rw-r--r--  drivers/md/linear.c                  |  17
-rw-r--r--  drivers/md/md.c                      |   7
-rw-r--r--  drivers/md/multipath.c               |  31
-rw-r--r--  drivers/md/raid0.c                   |  16
-rw-r--r--  drivers/md/raid1.c                   |  83
-rw-r--r--  drivers/md/raid10.c                  |  87
-rw-r--r--  drivers/md/raid5.c                   |  62
-rw-r--r--  drivers/md/raid5.h                   |   2
-rw-r--r--  drivers/message/i2o/i2o_block.c      |   6
-rw-r--r--  drivers/mmc/card/queue.c             |   3
-rw-r--r--  drivers/s390/block/dasd.c            |   2
-rw-r--r--  drivers/s390/char/tape_block.c       |   1
-rw-r--r--  drivers/scsi/scsi_transport_fc.c     |   2
-rw-r--r--  drivers/scsi/scsi_transport_sas.c    |   6
-rw-r--r--  drivers/target/target_core_iblock.c  |   7
39 files changed, 71 insertions(+), 529 deletions(-)
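Much of the driver churn below is one mechanical conversion: request_fn implementations no longer test blk_queue_plugged() before draining the queue. A hedged sketch of the resulting loop shape (compare the i2o_block, sas transport, and dasd hunks; the names and placeholder completion are illustrative only):

	static void example_request_fn(struct request_queue *q)
	{
		struct request *req;

		/* Old code bailed out early with: if (blk_queue_plugged(q)) return; */
		while ((req = blk_fetch_request(q)) != NULL) {
			/* hand req to the hardware here */
			__blk_end_request_all(req, 0);	/* placeholder completion */
		}
	}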
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 9279272b373..35658f445fc 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -3170,12 +3170,6 @@ static void do_cciss_request(struct request_queue *q)
int sg_index = 0;
int chained = 0;
- /* We call start_io here in case there is a command waiting on the
- * queue that has not been sent.
- */
- if (blk_queue_plugged(q))
- goto startio;
-
queue:
creq = blk_peek_request(q);
if (!creq)
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index 946dad4caef..b2fceb53e80 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -911,9 +911,6 @@ static void do_ida_request(struct request_queue *q)
struct scatterlist tmp_sg[SG_MAX];
int i, dir, seg;
- if (blk_queue_plugged(q))
- goto startio;
-
queue_next:
creq = blk_peek_request(q);
if (!creq)
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index ba95cba192b..2096628d6e6 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -689,8 +689,6 @@ void drbd_al_to_on_disk_bm(struct drbd_conf *mdev)
}
}
- drbd_blk_run_queue(bdev_get_queue(mdev->ldev->md_bdev));
-
/* always (try to) flush bitmap to stable storage */
drbd_md_flush(mdev);
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index fd42832f785..0645ca829a9 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -840,7 +840,6 @@ static int bm_rw(struct drbd_conf *mdev, int rw) __must_hold(local)
for (i = 0; i < num_pages; i++)
bm_page_io_async(mdev, b, i, rw);
- drbd_blk_run_queue(bdev_get_queue(mdev->ldev->md_bdev));
wait_event(b->bm_io_wait, atomic_read(&b->bm_async_io) == 0);
if (test_bit(BM_MD_IO_ERROR, &b->bm_flags)) {
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 3803a034893..0b5718e1958 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -2382,20 +2382,6 @@ static inline int drbd_queue_order_type(struct drbd_conf *mdev)
return QUEUE_ORDERED_NONE;
}
-static inline void drbd_blk_run_queue(struct request_queue *q)
-{
- if (q && q->unplug_fn)
- q->unplug_fn(q);
-}
-
-static inline void drbd_kick_lo(struct drbd_conf *mdev)
-{
- if (get_ldev(mdev)) {
- drbd_blk_run_queue(bdev_get_queue(mdev->ldev->backing_bdev));
- put_ldev(mdev);
- }
-}
-
static inline void drbd_md_flush(struct drbd_conf *mdev)
{
int r;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 29cd0dc9fe4..6049cb85310 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2719,35 +2719,6 @@ static int drbd_release(struct gendisk *gd, fmode_t mode)
return 0;
}
-static void drbd_unplug_fn(struct request_queue *q)
-{
- struct drbd_conf *mdev = q->queuedata;
-
- /* unplug FIRST */
- spin_lock_irq(q->queue_lock);
- blk_remove_plug(q);
- spin_unlock_irq(q->queue_lock);
-
- /* only if connected */
- spin_lock_irq(&mdev->req_lock);
- if (mdev->state.pdsk >= D_INCONSISTENT && mdev->state.conn >= C_CONNECTED) {
- D_ASSERT(mdev->state.role == R_PRIMARY);
- if (test_and_clear_bit(UNPLUG_REMOTE, &mdev->flags)) {
- /* add to the data.work queue,
- * unless already queued.
- * XXX this might be a good addition to drbd_queue_work
- * anyways, to detect "double queuing" ... */
- if (list_empty(&mdev->unplug_work.list))
- drbd_queue_work(&mdev->data.work,
- &mdev->unplug_work);
- }
- }
- spin_unlock_irq(&mdev->req_lock);
-
- if (mdev->state.disk >= D_INCONSISTENT)
- drbd_kick_lo(mdev);
-}
-
static void drbd_set_defaults(struct drbd_conf *mdev)
{
/* This way we get a compile error when sync_conf grows,
@@ -3222,9 +3193,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE);
blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
blk_queue_merge_bvec(q, drbd_merge_bvec);
- q->queue_lock = &mdev->req_lock; /* needed since we use */
- /* plugging on a queue, that actually has no requests! */
- q->unplug_fn = drbd_unplug_fn;
+ q->queue_lock = &mdev->req_lock;
mdev->md_io_page = alloc_page(GFP_KERNEL);
if (!mdev->md_io_page)
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 24487d4fb20..84132f8bf8a 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -187,15 +187,6 @@ static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int
return NULL;
}
-/* kick lower level device, if we have more than (arbitrary number)
- * reference counts on it, which typically are locally submitted io
- * requests. don't use unacked_cnt, so we speed up proto A and B, too. */
-static void maybe_kick_lo(struct drbd_conf *mdev)
-{
- if (atomic_read(&mdev->local_cnt) >= mdev->net_conf->unplug_watermark)
- drbd_kick_lo(mdev);
-}
-
static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
{
struct drbd_epoch_entry *e;
@@ -219,7 +210,6 @@ static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
LIST_HEAD(reclaimed);
struct drbd_epoch_entry *e, *t;
- maybe_kick_lo(mdev);
spin_lock_irq(&mdev->req_lock);
reclaim_net_ee(mdev, &reclaimed);
spin_unlock_irq(&mdev->req_lock);
@@ -436,8 +426,7 @@ void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
while (!list_empty(head)) {
prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
spin_unlock_irq(&mdev->req_lock);
- drbd_kick_lo(mdev);
- schedule();
+ io_schedule();
finish_wait(&mdev->ee_wait, &wait);
spin_lock_irq(&mdev->req_lock);
}
@@ -1147,7 +1136,6 @@ next_bio:
drbd_generic_make_request(mdev, fault_type, bio);
} while (bios);
- maybe_kick_lo(mdev);
return 0;
fail:
@@ -1167,9 +1155,6 @@ static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsign
inc_unacked(mdev);
- if (mdev->net_conf->wire_protocol != DRBD_PROT_C)
- drbd_kick_lo(mdev);
-
mdev->current_epoch->barrier_nr = p->barrier;
rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);
@@ -3556,9 +3541,6 @@ static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
- if (mdev->state.disk >= D_INCONSISTENT)
- drbd_kick_lo(mdev);
-
/* Make sure we've acked all the TCP data associated
* with the data requests being unplugged */
drbd_tcp_quickack(mdev->data.socket);
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 11a75d32a2e..ad3fc6228f2 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -960,10 +960,6 @@ allocate_barrier:
bio_endio(req->private_bio, -EIO);
}
- /* we need to plug ALWAYS since we possibly need to kick lo_dev.
- * we plug after submit, so we won't miss an unplug event */
- drbd_plug_device(mdev);
-
return 0;
fail_conflicting:
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 34f224b018b..e027446590d 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -792,7 +792,6 @@ int drbd_resync_finished(struct drbd_conf *mdev)
* queue (or even the read operations for those packets
* is not finished by now). Retry in 100ms. */
- drbd_kick_lo(mdev);
__set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(HZ / 10);
w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC);
diff --git a/drivers/block/drbd/drbd_wrappers.h b/drivers/block/drbd/drbd_wrappers.h
index defdb5013ea..53586fa5ae1 100644
--- a/drivers/block/drbd/drbd_wrappers.h
+++ b/drivers/block/drbd/drbd_wrappers.h
@@ -45,24 +45,6 @@ static inline void drbd_generic_make_request(struct drbd_conf *mdev,
generic_make_request(bio);
}
-static inline void drbd_plug_device(struct drbd_conf *mdev)
-{
- struct request_queue *q;
- q = bdev_get_queue(mdev->this_bdev);
-
- spin_lock_irq(q->queue_lock);
-
-/* XXX the check on !blk_queue_plugged is redundant,
- * implicitly checked in blk_plug_device */
-
- if (!blk_queue_plugged(q)) {
- blk_plug_device(q);
- del_timer(&q->unplug_timer);
- /* unplugging should not happen automatically... */
- }
- spin_unlock_irq(q->queue_lock);
-}
-
static inline int drbd_crypto_is_hash(struct crypto_tfm *tfm)
{
return (crypto_tfm_alg_type(tfm) & CRYPTO_ALG_TYPE_HASH_MASK)
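The drbd hunks above drop the explicit drbd_kick_lo()/schedule() pairs in wait loops: under the new model a task's plug is flushed for it when it sleeps, so a plain io_schedule() suffices. A sketch of that wait shape, with illustrative names (not drbd's actual wait-queue or counter):

	#include <linux/sched.h>
	#include <linux/wait.h>

	/* Illustrative: wait until all in-flight requests have completed. */
	static void example_wait_for_inflight(wait_queue_head_t *wq, atomic_t *inflight)
	{
		DEFINE_WAIT(wait);

		while (atomic_read(inflight) > 0) {
			prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
			if (atomic_read(inflight) > 0)
				io_schedule();	/* sleeping flushes this task's plug */
			finish_wait(wq, &wait);
		}
	}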
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index b9ba04fc2b3..271142b9e2c 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3837,7 +3837,6 @@ static int __floppy_read_block_0(struct block_device *bdev)
bio.bi_end_io = floppy_rb0_complete;
submit_bio(READ, &bio);
- generic_unplug_device(bdev_get_queue(bdev));
process_fd_request();
wait_for_completion(&complete);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 49e6a545eb6..01b8e4a87c9 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -541,17 +541,6 @@ out:
return 0;
}
-/*
- * kick off io on the underlying address space
- */
-static void loop_unplug(struct request_queue *q)
-{
- struct loop_device *lo = q->queuedata;
-
- queue_flag_clear_unlocked(QUEUE_FLAG_PLUGGED, q);
- blk_run_address_space(lo->lo_backing_file->f_mapping);
-}
-
struct switch_request {
struct file *file;
struct completion wait;
@@ -918,7 +907,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
*/
blk_queue_make_request(lo->lo_queue, loop_make_request);
lo->lo_queue->queuedata = lo;
- lo->lo_queue->unplug_fn = loop_unplug;
if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
blk_queue_flush(lo->lo_queue, REQ_FLUSH);
@@ -1020,7 +1008,6 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
kthread_stop(lo->lo_thread);
- lo->lo_queue->unplug_fn = NULL;
lo->lo_backing_file = NULL;
loop_release_xfer(lo);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 77d70eebb6b..d20e13f8000 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1606,8 +1606,6 @@ static int kcdrwd(void *foobar)
min_sleep_time = pkt->sleep_time;
}
- generic_unplug_device(bdev_get_queue(pd->bdev));
-
VPRINTK("kcdrwd: sleeping\n");
residue = schedule_timeout(min_sleep_time);
VPRINTK("kcdrwd: wake up\n");
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 8be57151f5d..653439faa72 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -241,8 +241,7 @@ static void dump_dmastat(struct cardinfo *card, unsigned int dmastat)
*
* Whenever IO on the active page completes, the Ready page is activated
* and the ex-Active page is clean out and made Ready.
- * Otherwise the Ready page is only activated when it becomes full, or
- * when mm_unplug_device is called via the unplug_io_fn.
+ * Otherwise the Ready page is only activated when it becomes full.
*
* If a request arrives while both pages a full, it is queued, and b_rdev is
* overloaded to record whether it was a read or a write.
@@ -333,17 +332,6 @@ static inline void reset_page(struct mm_page *page)
page->biotail = &page->bio;
}
-static void mm_unplug_device(struct request_queue *q)
-{
- struct cardinfo *card = q->queuedata;
- unsigned long flags;
-
- spin_lock_irqsave(&card->lock, flags);
- if (blk_remove_plug(q))
- activate(card);
- spin_unlock_irqrestore(&card->lock, flags);
-}
-
/*
* If there is room on Ready page, take
* one bh off list and add it.
@@ -535,7 +523,6 @@ static int mm_make_request(struct request_queue *q, struct bio *bio)
*card->biotail = bio;
bio->bi_next = NULL;
card->biotail = &bio->bi_next;
- blk_plug_device(q);
spin_unlock_irq(&card->lock);
return 0;
@@ -907,7 +894,6 @@ static int __devinit mm_pci_probe(struct pci_dev *dev,
blk_queue_make_request(card->queue, mm_make_request);
card->queue->queue_lock = &card->lock;
card->queue->queuedata = card;
- card->queue->unplug_fn = mm_unplug_device;
tasklet_init(&card->tasklet, process_page, (unsigned long)card);
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index e88a2cf1771..6f218e014e9 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -233,8 +233,7 @@ int ide_queue_sense_rq(ide_drive_t *drive, void *special)
drive->hwif->rq = NULL;
- elv_add_request(drive->queue, &drive->sense_rq,
- ELEVATOR_INSERT_FRONT, 0);
+ elv_add_request(drive->queue, &drive->sense_rq, ELEVATOR_INSERT_FRONT);
return 0;
}
EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 999dac054bc..f4077840d3a 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -549,8 +549,6 @@ plug_device_2:
if (rq)
blk_requeue_request(q, rq);
- if (!elv_queue_empty(q))
- blk_plug_device(q);
}
void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
@@ -562,8 +560,6 @@ void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
if (rq)
blk_requeue_request(q, rq);
- if (!elv_queue_empty(q))
- blk_plug_device(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index 88a380c5a47..6ab9ab2a508 100644
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -52,7 +52,7 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
rq->cmd[0] = REQ_UNPARK_HEADS;
rq->cmd_len = 1;
rq->cmd_type = REQ_TYPE_SPECIAL;
- elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 1);
+ elv_add_request(q, rq, ELEVATOR_INSERT_FRONT);
out:
return;
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 9a35320fb59..54bfc274b39 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1339,8 +1339,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
prepare_to_wait(&bitmap->overflow_wait, &__wait,
TASK_UNINTERRUPTIBLE);
spin_unlock_irq(&bitmap->lock);
- md_unplug(bitmap->mddev);
- schedule();
+ io_schedule();
finish_wait(&bitmap->overflow_wait, &__wait);
continue;
}
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 4e054bd9166..2c62c1169f7 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -991,11 +991,6 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
clone->bi_destructor = dm_crypt_bio_destructor;
}
-static void kcryptd_unplug(struct crypt_config *cc)
-{
- blk_unplug(bdev_get_queue(cc->dev->bdev));
-}
-
static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
{
struct crypt_config *cc = io->target->private;
@@ -1008,10 +1003,8 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
* one in order to decrypt the whole bio data *afterwards*.
*/
clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs);
- if (!clone) {
- kcryptd_unplug(cc);
+ if (!clone)
return 1;
- }
crypt_inc_pending(io);
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 924f5f0084c..400cf35094a 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -37,13 +37,6 @@ struct dm_kcopyd_client {
unsigned int nr_pages;
unsigned int nr_free_pages;
- /*
- * Block devices to unplug.
- * Non-NULL pointer means that a block device has some pending requests
- * and needs to be unplugged.
- */
- struct block_device *unplug[2];
-
struct dm_io_client *io_client;
wait_queue_head_t destroyq;
@@ -315,31 +308,6 @@ static int run_complete_job(struct kcopyd_job *job)
return 0;
}
-/*
- * Unplug the block device at the specified index.
- */
-static void unplug(struct dm_kcopyd_client *kc, int rw)
-{
- if (kc->unplug[rw] != NULL) {
- blk_unplug(bdev_get_queue(kc->unplug[rw]));
- kc->unplug[rw] = NULL;
- }
-}
-
-/*
- * Prepare block device unplug. If there's another device
- * to be unplugged at the same array index, we unplug that
- * device first.
- */
-static void prepare_unplug(struct dm_kcopyd_client *kc, int rw,
- struct block_device *bdev)
-{
- if (likely(kc->unplug[rw] == bdev))
- return;
- unplug(kc, rw);
- kc->unplug[rw] = bdev;
-}
-
static void complete_io(unsigned long error, void *context)
{
struct kcopyd_job *job = (struct kcopyd_job *) context;
@@ -386,15 +354,12 @@ static int run_io_job(struct kcopyd_job *job)
.client = job->kc->io_client,
};
- if (job->rw == READ) {
+ if (job->rw == READ)
r = dm_io(&io_req, 1, &job->source, NULL);
- prepare_unplug(job->kc, READ, job->source.bdev);
- } else {
+ else {
if (job->num_dests > 1)
io_req.bi_rw |= REQ_UNPLUG;
r = dm_io(&io_req, job->num_dests, job->dests, NULL);
- if (!(io_req.bi_rw & REQ_UNPLUG))
- prepare_unplug(job->kc, WRITE, job->dests[0].bdev);
}
return r;
@@ -466,6 +431,7 @@ static void do_work(struct work_struct *work)
{
struct dm_kcopyd_client *kc = container_of(work,
struct dm_kcopyd_client, kcopyd_work);
+ struct blk_plug plug;
/*
* The order that these are called is *very* important.
@@ -473,18 +439,12 @@ static void do_work(struct work_struct *work)
* Pages jobs when successful will jump onto the io jobs
* list. io jobs call wake when they complete and it all
* starts again.
- *
- * Note that io_jobs add block devices to the unplug array,
- * this array is cleared with "unplug" calls. It is thus
- * forbidden to run complete_jobs after io_jobs and before
- * unplug because the block device could be destroyed in
- * job completion callback.
*/
+ blk_start_plug(&plug);
process_jobs(&kc->complete_jobs, kc, run_complete_job);
process_jobs(&kc->pages_jobs, kc, run_pages_job);
process_jobs(&kc->io_jobs, kc, run_io_job);
- unplug(kc, READ);
- unplug(kc, WRITE);
+ blk_finish_plug(&plug);
}
/*
@@ -665,8 +625,6 @@ int dm_kcopyd_client_create(unsigned int nr_pages,
INIT_LIST_HEAD(&kc->io_jobs);
INIT_LIST_HEAD(&kc->pages_jobs);
- memset(kc->unplug, 0, sizeof(kc->unplug));
-
kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
if (!kc->job_pool)
goto bad_slab;
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index b9e1e15ef11..5ef136cdba9 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -394,7 +394,7 @@ static void raid_unplug(struct dm_target_callbacks *cb)
{
struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
- md_raid5_unplug_device(rs->md.private);
+ md_raid5_kick_device(rs->md.private);
}
/*
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index dee326775c6..976ad4688af 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -842,8 +842,6 @@ static void do_mirror(struct work_struct *work)
do_reads(ms, &reads);
do_writes(ms, &writes);
do_failures(ms, &failures);
-
- dm_table_unplug_all(ms->ti->table);
}
/*-----------------------------------------------------------------
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 38e4eb1bb96..f50a7b95225 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1275,29 +1275,6 @@ int dm_table_any_busy_target(struct dm_table *t)
return 0;
}
-void dm_table_unplug_all(struct dm_table *t)
-{
- struct dm_dev_internal *dd;
- struct list_head *devices = dm_table_get_devices(t);
- struct dm_target_callbacks *cb;
-
- list_for_each_entry(dd, devices, list) {
- struct request_queue *q = bdev_get_queue(dd->dm_dev.bdev);
- char b[BDEVNAME_SIZE];
-
- if (likely(q))
- blk_unplug(q);
- else
- DMWARN_LIMIT("%s: Cannot unplug nonexistent device %s",
- dm_device_name(t->md),
- bdevname(dd->dm_dev.bdev, b));
- }
-
- list_for_each_entry(cb, &t->target_callbacks, list)
- if (cb->unplug_fn)
- cb->unplug_fn(cb);
-}
-
struct mapped_device *dm_table_get_md(struct dm_table *t)
{
return t->md;
@@ -1345,4 +1322,3 @@ EXPORT_SYMBOL(dm_table_get_mode);
EXPORT_SYMBOL(dm_table_get_md);
EXPORT_SYMBOL(dm_table_put);
EXPORT_SYMBOL(dm_table_get);
-EXPORT_SYMBOL(dm_table_unplug_all);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index eaa3af0e063..d22b9905c16 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -807,8 +807,6 @@ void dm_requeue_unmapped_request(struct request *clone)
dm_unprep_request(rq);
spin_lock_irqsave(q->queue_lock, flags);
- if (elv_queue_empty(q))
- blk_plug_device(q);
blk_requeue_request(q, rq);
spin_unlock_irqrestore(q->queue_lock, flags);
@@ -1613,10 +1611,10 @@ static void dm_request_fn(struct request_queue *q)
* number of in-flight I/Os after the queue is stopped in
* dm_suspend().
*/
- while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) {
+ while (!blk_queue_stopped(q)) {
rq = blk_peek_request(q);
if (!rq)
- goto plug_and_out;
+ goto delay_and_out;
/* always use block 0 to find the target for flushes for now */
pos = 0;
@@ -1627,7 +1625,7 @@ static void dm_request_fn(struct request_queue *q)
BUG_ON(!dm_target_is_valid(ti));
if (ti->type->busy && ti->type->busy(ti))
- goto plug_and_out;
+ goto delay_and_out;
blk_start_request(rq);
clone = rq->special;
@@ -1647,11 +1645,8 @@ requeued:
BUG_ON(!irqs_disabled());
spin_lock(q->queue_lock);
-plug_and_out:
- if (!elv_queue_empty(q))
- /* Some requests still remain, retry later */
- blk_plug_device(q);
-
+delay_and_out:
+ blk_delay_queue(q, HZ / 10);
out:
dm_table_put(map);
@@ -1680,20 +1675,6 @@ static int dm_lld_busy(struct request_queue *q)
return r;
}
-static void dm_unplug_all(struct request_queue *q)
-{
- struct mapped_device *md = q->queuedata;
- struct dm_table *map = dm_get_live_table(md);
-
- if (map) {
- if (dm_request_based(md))
- generic_unplug_device(q);
-
- dm_table_unplug_all(map);
- dm_table_put(map);
- }
-}
-
static int dm_any_congested(void *congested_data, int bdi_bits)
{
int r = bdi_bits;
@@ -1817,7 +1798,6 @@ static void dm_init_md_queue(struct mapped_device *md)
md->queue->backing_dev_info.congested_data = md;
blk_queue_make_request(md->queue, dm_request);
blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
- md->queue->unplug_fn = dm_unplug_all;
blk_queue_merge_bvec(md->queue, dm_merge_bvec);
blk_queue_flush(md->queue, REQ_FLUSH | REQ_FUA);
}
@@ -2263,8 +2243,6 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
int r = 0;
DECLARE_WAITQUEUE(wait, current);
- dm_unplug_all(md->queue);
-
add_wait_queue(&md->wait, &wait);
while (1) {
@@ -2539,7 +2517,6 @@ int dm_resume(struct mapped_device *md)
clear_bit(DMF_SUSPENDED, &md->flags);
- dm_table_unplug_all(map);
r = 0;
out:
dm_table_put(map);
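Where the old code plugged the queue in order to retry later (the dm.c request_fn above), the series substitutes blk_delay_queue(), which re-invokes the request_fn after the given delay. A sketch of that retry shape, using the same HZ / 10 value the patch passes (the placeholder completion stands in for real dispatch):

	static void example_delaying_request_fn(struct request_queue *q)
	{
		struct request *rq;

		while (!blk_queue_stopped(q)) {
			rq = blk_peek_request(q);
			if (!rq) {
				/* nothing dispatchable: re-run the queue later */
				blk_delay_queue(q, HZ / 10);
				return;
			}
			blk_start_request(rq);
			__blk_end_request_all(rq, 0);	/* placeholder completion */
		}
	}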
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 8a2f767f26d..38861b5b9d9 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -87,22 +87,6 @@ static int linear_mergeable_bvec(struct request_queue *q,
return maxsectors << 9;
}
-static void linear_unplug(struct request_queue *q)
-{
- mddev_t *mddev = q->queuedata;
- linear_conf_t *conf;
- int i;
-
- rcu_read_lock();
- conf = rcu_dereference(mddev->private);
-
- for (i=0; i < mddev->raid_disks; i++) {
- struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
- blk_unplug(r_queue);
- }
- rcu_read_unlock();
-}
-
static int linear_congested(void *data, int bits)
{
mddev_t *mddev = data;
@@ -225,7 +209,6 @@ static int linear_run (mddev_t *mddev)
md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
- mddev->queue->unplug_fn = linear_unplug;
mddev->queue->backing_dev_info.congested_fn = linear_congested;
mddev->queue->backing_dev_info.congested_data = mddev;
md_integrity_register(mddev);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 0cc30ecda4c..ca0d79c264b 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -4812,7 +4812,6 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
__md_stop_writes(mddev);
md_stop(mddev);
mddev->queue->merge_bvec_fn = NULL;
- mddev->queue->unplug_fn = NULL;
mddev->queue->backing_dev_info.congested_fn = NULL;
/* tell userspace to handle 'inactive' */
@@ -6669,8 +6668,6 @@ EXPORT_SYMBOL_GPL(md_allow_write);
void md_unplug(mddev_t *mddev)
{
- if (mddev->queue)
- blk_unplug(mddev->queue);
if (mddev->plug)
mddev->plug->unplug_fn(mddev->plug);
}
@@ -6853,7 +6850,6 @@ void md_do_sync(mddev_t *mddev)
>= mddev->resync_max - mddev->curr_resync_completed
)) {
/* time to update curr_resync_completed */
- md_unplug(mddev);
wait_event(mddev->recovery_wait,
atomic_read(&mddev->recovery_active) == 0);
mddev->curr_resync_completed = j;
@@ -6929,7 +6925,6 @@ void md_do_sync(mddev_t *mddev)
* about not overloading the IO subsystem. (things like an
* e2fsck being done on the RAID array should execute fast)
*/
- md_unplug(mddev);
cond_resched();
currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
@@ -6948,8 +6943,6 @@ void md_do_sync(mddev_t *mddev)
* this also signals 'finished resyncing' to md_stop
*/
out:
- md_unplug(mddev);
-
wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
/* tell personality that we are finished */
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 6d7ddf32ef2..1cc8ed44e4a 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -106,36 +106,6 @@ static void multipath_end_request(struct bio *bio, int error)
rdev_dec_pending(rdev, conf->mddev);
}
-static void unplug_slaves(mddev_t *mddev)
-{
- multipath_conf_t *conf = mddev->private;
- int i;
-
- rcu_read_lock();
- for (i=0; i<mddev->raid_disks; i++) {
- mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
- if (rdev && !test_bit(Faulty, &rdev->flags)
- && atomic_read(&rdev->nr_pending)) {
- struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
-
- atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
-
- blk_unplug(r_queue);
-
- rdev_dec_pending(rdev, mddev);
- rcu_read_lock();
- }
- }
- rcu_read_unlock();
-}
-
-static void multipath_unplug(struct request_queue *q)
-{
- unplug_slaves(q->queuedata);
-}
-
-
static int multipath_make_request(mddev_t *mddev, struct bio * bio)
{
multipath_conf_t *conf = mddev->private;
@@ -518,7 +488,6 @@ static int multipath_run (mddev_t *mddev)
*/
md_set_array_sectors(mddev, multipath_size(mddev, 0, 0));
- mddev->queue->unplug_fn = multipath_unplug;
mddev->queue->backing_dev_info.congested_fn = multipath_congested;
mddev->queue->backing_dev_info.congested_data = mddev;
md_integrity_register(mddev);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 637a96855ed..6338c0fe620 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -25,21 +25,6 @@
#include "raid0.h"
#include "raid5.h"
-static void raid0_unplug(struct request_queue *q)
-{
- mddev_t *mddev = q->queuedata;
- raid0_conf_t *conf = mddev->private;
- mdk_rdev_t **devlist = conf->devlist;
- int raid_disks = conf->strip_zone[0].nb_dev;
- int i;
-
- for (i=0; i < raid_disks; i++) {
- struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);
-
- blk_unplug(r_queue);
- }
-}
-
static int raid0_congested(void *data, int bits)
{
mddev_t *mddev = data;
@@ -272,7 +257,6 @@ static int create_strip_zones(mddev_t *mddev, raid0_conf_t **private_conf)
mdname(mddev),
(unsigned long long)smallest->sectors);
}
- mddev->queue->unplug_fn = raid0_unplug;
mddev->queue->backing_dev_info.congested_fn = raid0_congested;
mddev->queue->backing_dev_info.congested_data = mddev;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a23ffa397ba..b67d822d57a 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -52,23 +52,16 @@
#define NR_RAID1_BIOS 256
-static void unplug_slaves(mddev_t *mddev);
-
static void allow_barrier(conf_t *conf);
static void lower_barrier(conf_t *conf);
static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
struct pool_info *pi = data;
- r1bio_t *r1_bio;
int size = offsetof(r1bio_t, bios[pi->raid_disks]);
/* allocate a r1bio with room for raid_disks entries in the bios array */
- r1_bio = kzalloc(size, gfp_flags);
- if (!r1_bio && pi->mddev)
- unplug_slaves(pi->mddev);
-
- return r1_bio;
+ return kzalloc(size, gfp_flags);
}
static void r1bio_pool_free(void *r1_bio, void *data)
@@ -91,10 +84,8 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
int i, j;
r1_bio = r1bio_pool_alloc(gfp_flags, pi);
- if (!r1_bio) {
- unplug_slaves(pi->mddev);
+ if (!r1_bio)
return NULL;
- }
/*
* Allocate bios : 1 for reading, n-1 for writing
@@ -520,37 +511,6 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
return new_disk;
}
-static void unplug_slaves(mddev_t *mddev)
-{
- conf_t *conf = mddev->private;
- int i;
-
- rcu_read_lock();
- for (i=0; i<mddev->raid_disks; i++) {
- mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
- if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
- struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
-
- atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
-
- blk_unplug(r_queue);
-
- rdev_dec_pending(rdev, mddev);
- rcu_read_lock();
- }
- }
- rcu_read_unlock();
-}
-
-static void raid1_unplug(struct request_queue *q)
-{
- mddev_t *mddev = q->queuedata;
-
- unplug_slaves(mddev);
- md_wakeup_thread(mddev->thread);
-}
-
static int raid1_congested(void *data, int bits)
{
mddev_t *mddev = data;
@@ -580,20 +540,16 @@ static int raid1_congested(void *data, int bits)
}
-static int flush_pending_writes(conf_t *conf)
+static void flush_pending_writes(conf_t *conf)
{
/* Any writes that have been queued but are awaiting
* bitmap updates get flushed here.
- * We return 1 if any requests were actually submitted.
*/
- int rv = 0;
-
spin_lock_irq(&conf->device_lock);
if (conf->pending_bio_list.head) {
struct bio *bio;
bio = bio_list_get(&conf->pending_bio_list);
- blk_remove_plug(conf->mddev->queue);
spin_unlock_irq(&conf->device_lock);
/* flush any pending bitmap writes to
* disk before proceeding w/ I/O */
@@ -605,10 +561,14 @@ static int flush_pending_writes(conf_t *conf)
generic_make_request(bio);
bio = next;
}
- rv = 1;
} else
spin_unlock_irq(&conf->device_lock);
- return rv;
+}
+
+static void md_kick_device(mddev_t *mddev)
+{
+ blk_flush_plug(current);
+ md_wakeup_thread(mddev->thread);
}
/* Barriers....
@@ -640,8 +600,7 @@ static void raise_barrier(conf_t *conf)
/* Wait until no block IO is waiting */
wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
- conf->resync_lock,
- raid1_unplug(conf->mddev->queue));
+ conf->resync_lock, md_kick_device(conf->mddev));
/* block any new IO from starting */
conf->barrier++;
@@ -649,8 +608,7 @@ static void raise_barrier(conf_t *conf)
/* Now wait for all pending IO to complete */
wait_event_lock_irq(conf->wait_barrier,
!conf->nr_pending && conf->barrier < RESYNC_DEPTH,
- conf->resync_lock,
- raid1_unplug(conf->mddev->queue));
+ conf->resync_lock, md_kick_device(conf->mddev));
spin_unlock_irq(&conf->resync_lock);
}
@@ -672,7 +630,7 @@ static void wait_barrier(conf_t *conf)
conf->nr_waiting++;
wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
conf->resync_lock,
- raid1_unplug(conf->mddev->queue));
+ md_kick_device(conf->mddev));
conf->nr_waiting--;
}
conf->nr_pending++;
@@ -709,7 +667,7 @@ static void freeze_array(conf_t *conf)
conf->nr_pending == conf->nr_queued+1,
conf->resync_lock,
({ flush_pending_writes(conf);
- raid1_unplug(conf->mddev->queue); }));
+ md_kick_device(conf->mddev); }));
spin_unlock_irq(&conf->resync_lock);
}
static void unfreeze_array(conf_t *conf)
@@ -959,7 +917,6 @@ static int make_request(mddev_t *mddev, struct bio * bio)
atomic_inc(&r1_bio->remaining);
spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
- blk_plug_device(mddev->queue);
spin_unlock_irqrestore(&conf->device_lock, flags);
}
r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL);
@@ -968,7 +925,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
/* In case raid1d snuck in to freeze_array */
wake_up(&conf->wait_barrier);
- if (do_sync)
+ if (do_sync || !bitmap)
md_wakeup_thread(mddev->thread);
return 0;
@@ -1558,7 +1515,6 @@ static void raid1d(mddev_t *mddev)
unsigned long flags;
conf_t *conf = mddev->private;
struct list_head *head = &conf->retry_list;
- int unplug=0;
mdk_rdev_t *rdev;
md_check_recovery(mddev);
@@ -1566,7 +1522,7 @@ static void raid1d(mddev_t *mddev)
for (;;) {
char b[BDEVNAME_SIZE];
- unplug += flush_pending_writes(conf);
+ flush_pending_writes(conf);
spin_lock_irqsave(&conf->device_lock, flags);
if (list_empty(head)) {
@@ -1580,10 +1536,9 @@ static void raid1d(mddev_t *mddev)
mddev = r1_bio->mddev;
conf = mddev->private;
- if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
+ if (test_bit(R1BIO_IsSync, &r1_bio->state))
sync_request_write(mddev, r1_bio);
- unplug = 1;
- } else {
+ else {
int disk;
/* we got a read error. Maybe the drive is bad. Maybe just
@@ -1633,14 +1588,11 @@ static void raid1d(mddev_t *mddev)
bio->bi_end_io = raid1_end_read_request;
bio->bi_rw = READ | do_sync;
bio->bi_private = r1_bio;
- unplug = 1;
generic_make_request(bio);
}
}
cond_resched();
}
- if (unplug)
- unplug_slaves(mddev);
}
@@ -2064,7 +2016,6 @@ static int run(mddev_t *mddev)
md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
- mddev->queue->unplug_fn = raid1_unplug;
mddev->queue->backing_dev_info.congested_fn = raid1_congested;
mddev->queue->backing_dev_info.congested_data = mddev;
md_integrity_register(mddev);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 3b607b28741..e79f1c5bf71 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -57,23 +57,16 @@
*/
#define NR_RAID10_BIOS 256
-static void unplug_slaves(mddev_t *mddev);
-
static void allow_barrier(conf_t *conf);
static void lower_barrier(conf_t *conf);
static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
{
conf_t *conf = data;
- r10bio_t *r10_bio;
int size = offsetof(struct r10bio_s, devs[conf->copies]);
/* allocate a r10bio with room for raid_disks entries in the bios array */
- r10_bio = kzalloc(size, gfp_flags);
- if (!r10_bio && conf->mddev)
- unplug_slaves(conf->mddev);
-
- return r10_bio;
+ return kzalloc(size, gfp_flags);
}
static void r10bio_pool_free(void *r10_bio, void *data)
@@ -106,10 +99,8 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
int nalloc;
r10_bio = r10bio_pool_alloc(gfp_flags, conf);
- if (!r10_bio) {
- unplug_slaves(conf->mddev);
+ if (!r10_bio)
return NULL;
- }
if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
nalloc = conf->copies; /* resync */
@@ -597,37 +588,6 @@ rb_out:
return disk;
}
-static void unplug_slaves(mddev_t *mddev)
-{
- conf_t *conf = mddev->private;
- int i;
-
- rcu_read_lock();
- for (i=0; i < conf->raid_disks; i++) {
- mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
- if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
- struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
-
- atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
-
- blk_unplug(r_queue);
-
- rdev_dec_pending(rdev, mddev);
- rcu_read_lock();
- }
- }
- rcu_read_unlock();
-}
-
-static void raid10_unplug(struct request_queue *q)
-{
- mddev_t *mddev = q->queuedata;
-
- unplug_slaves(q->queuedata);
- md_wakeup_thread(mddev->thread);
-}
-
static int raid10_congested(void *data, int bits)
{
mddev_t *mddev = data;
@@ -649,20 +609,16 @@ static int raid10_congested(void *data, int bits)
return ret;
}
-static int flush_pending_writes(conf_t *conf)
+static void flush_pending_writes(conf_t *conf)
{
/* Any writes that have been queued but are awaiting
* bitmap updates get flushed here.
- * We return 1 if any requests were actually submitted.
*/
- int rv = 0;
-
spin_lock_irq(&conf->device_lock);
if (conf->pending_bio_list.head) {
struct bio *bio;
bio = bio_list_get(&conf->pending_bio_list);
- blk_remove_plug(conf->mddev->queue);
spin_unlock_irq(&conf->device_lock);
/* flush any pending bitmap writes to disk
* before proceeding w/ I/O */
@@ -674,11 +630,16 @@ static int flush_pending_writes(conf_t *conf)
generic_make_request(bio);
bio = next;
}
- rv = 1;
} else
spin_unlock_irq(&conf->device_lock);
- return rv;
}
+
+static void md_kick_device(mddev_t *mddev)
+{
+ blk_flush_plug(current);
+ md_wakeup_thread(mddev->thread);
+}
+
/* Barriers....
* Sometimes we need to suspend IO while we do something else,
* either some resync/recovery, or reconfigure the array.
@@ -708,8 +669,7 @@ static void raise_barrier(conf_t *conf, int force)
/* Wait until no block IO is waiting (unless 'force') */
wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
- conf->resync_lock,
- raid10_unplug(conf->mddev->queue));
+ conf->resync_lock, md_kick_device(conf->mddev));
/* block any new IO from starting */
conf->barrier++;
@@ -717,8 +677,7 @@ static void raise_barrier(conf_t *conf, int force)
/* Now wait for all pending IO to complete */
wait_event_lock_irq(conf->wait_barrier,
!conf->nr_pending && conf->barrier < RESYNC_DEPTH,
- conf->resync_lock,
- raid10_unplug(conf->mddev->queue));
+ conf->resync_lock, md_kick_device(conf->mddev));
spin_unlock_irq(&conf->resync_lock);
}
@@ -739,7 +698,7 @@ static void wait_barrier(conf_t *conf)
conf->nr_waiting++;
wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
conf->resync_lock,
- raid10_unplug(conf->mddev->queue));
+ md_kick_device(conf->mddev));
conf->nr_waiting--;
}
conf->nr_pending++;
@@ -776,7 +735,7 @@ static void freeze_array(conf_t *conf)
conf->nr_pending == conf->nr_queued+1,
conf->resync_lock,
({ flush_pending_writes(conf);
- raid10_unplug(conf->mddev->queue); }));
+ md_kick_device(conf->mddev); }));
spin_unlock_irq(&conf->resync_lock);
}
@@ -971,7 +930,6 @@ static int make_request(mddev_t *mddev, struct bio * bio)
atomic_inc(&r10_bio->remaining);
spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
- blk_plug_device(mddev->queue);
spin_unlock_irqrestore(&conf->device_lock, flags);
}
@@ -988,7 +946,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
/* In case raid10d snuck in to freeze_array */
wake_up(&conf->wait_barrier);
- if (do_sync)
+ if (do_sync || !mddev->bitmap)
md_wakeup_thread(mddev->thread);
return 0;
@@ -1681,7 +1639,6 @@ static void raid10d(mddev_t *mddev)
unsigned long flags;
conf_t *conf = mddev->private;
struct list_head *head = &conf->retry_list;
- int unplug=0;
mdk_rdev_t *rdev;
md_check_recovery(mddev);
@@ -1689,7 +1646,7 @@ static void raid10d(mddev_t *mddev)
for (;;) {
char b[BDEVNAME_SIZE];
- unplug += flush_pending_writes(conf);
+ flush_pending_writes(conf);
spin_lock_irqsave(&conf->device_lock, flags);
if (list_empty(head)) {
@@ -1703,13 +1660,11 @@ static void raid10d(mddev_t *mddev)
mddev = r10_bio->mddev;
conf = mddev->private;
- if (test_bit(R10BIO_IsSync, &r10_bio->state)) {
+ if (test_bit(R10BIO_IsSync, &r10_bio->state))
sync_request_write(mddev, r10_bio);
- unplug = 1;
- } else if (test_bit(R10BIO_IsRecover, &r10_bio->state)) {
+ else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
recovery_request_write(mddev, r10_bio);
- unplug = 1;
- } else {
+ else {
int mirror;
/* we got a read error. Maybe the drive is bad. Maybe just
* the block and we can fix it.
@@ -1756,14 +1711,11 @@ static void raid10d(mddev_t *mddev)
bio->bi_rw = READ | do_sync;
bio->bi_private = r10_bio;
bio->bi_end_io = raid10_end_read_request;
- unplug = 1;
generic_make_request(bio);
}
}
cond_resched();
}
- if (unplug)
- unplug_slaves(mddev);
}
@@ -2376,7 +2328,6 @@ static int run(mddev_t *mddev)
md_set_array_sectors(mddev, size);
mddev->resync_max_sectors = size;
- mddev->queue->unplug_fn = raid10_unplug;
mddev->queue->backing_dev_info.congested_fn = raid10_congested;
mddev->queue->backing_dev_info.congested_data = mddev;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 70281282419..e867ee42b15 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -433,8 +433,6 @@ static int has_failed(raid5_conf_t *conf)
return 0;
}
-static void unplug_slaves(mddev_t *mddev);
-
static struct stripe_head *
get_active_stripe(raid5_conf_t *conf, sector_t sector,
int previous, int noblock, int noquiesce)
@@ -463,8 +461,7 @@ get_active_stripe(raid5_conf_t *conf, sector_t sector,
< (conf->max_nr_stripes *3/4)
|| !conf->inactive_blocked),
conf->device_lock,
- md_raid5_unplug_device(conf)
- );
+ md_raid5_kick_device(conf));
conf->inactive_blocked = 0;
} else
init_stripe(sh, sector, previous);
@@ -1473,8 +1470,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
wait_event_lock_irq(conf->wait_for_stripe,
!list_empty(&conf->inactive_list),
conf->device_lock,
- unplug_slaves(conf->mddev)
- );
+ blk_flush_plug(current));
osh = get_free_stripe(conf);
spin_unlock_irq(&conf->device_lock);
atomic_set(&nsh->count, 1);
@@ -3645,58 +3641,19 @@ static void activate_bit_delay(raid5_conf_t *conf)
}
}
-static void unplug_slaves(mddev_t *mddev)
+void md_raid5_kick_device(raid5_conf_t *conf)
{
- raid5_conf_t *conf = mddev->private;
- int i;
- int devs = max(conf->raid_disks, conf->previous_raid_disks);
-
- rcu_read_lock();
- for (i = 0; i < devs; i++) {
- mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
- if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
- struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
-
- atomic_inc(&rdev->nr_pending);
- rcu_read_unlock();
-
- blk_unplug(r_queue);
-
- rdev_dec_pending(rdev, mddev);
- rcu_read_lock();
- }
- }
- rcu_read_unlock();
-}
-
-void md_raid5_unplug_device(raid5_conf_t *conf)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&conf->device_lock, flags);
-
- if (plugger_remove_plug(&conf->plug)) {
- conf->seq_flush++;
- raid5_activate_delayed(conf);
- }
+ blk_flush_plug(current);
+ raid5_activate_delayed(conf);
md_wakeup_thread(conf->mddev->thread);
-
- spin_unlock_irqrestore(&conf->device_lock, flags);
-
- unplug_slaves(conf->mddev);
}
-EXPORT_SYMBOL_GPL(md_raid5_unplug_device);
+EXPORT_SYMBOL_GPL(md_raid5_kick_device);
static void raid5_unplug(struct plug_handle *plug)
{
raid5_conf_t *conf = container_of(plug, raid5_conf_t, plug);
- md_raid5_unplug_device(conf);
-}
-static void raid5_unplug_queue(struct request_queue *q)
-{
- mddev_t *mddev = q->queuedata;
- md_raid5_unplug_device(mddev->private);
+ md_raid5_kick_device(conf);
}
int md_raid5_congested(mddev_t *mddev, int bits)
@@ -4100,7 +4057,7 @@ static int make_request(mddev_t *mddev, struct bio * bi)
* add failed due to overlap. Flush everything
* and wait a while
*/
- md_raid5_unplug_device(conf);
+ md_raid5_kick_device(conf);
release_stripe(sh);
schedule();
goto retry;
@@ -4365,7 +4322,6 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski
if (sector_nr >= max_sector) {
/* just being told to finish up .. nothing much to do */
- unplug_slaves(mddev);
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
end_reshape(conf);
@@ -4569,7 +4525,6 @@ static void raid5d(mddev_t *mddev)
spin_unlock_irq(&conf->device_lock);
async_tx_issue_pending_all();
- unplug_slaves(mddev);
pr_debug("--- raid5d inactive\n");
}
@@ -5205,7 +5160,6 @@ static int run(mddev_t *mddev)
mddev->queue->backing_dev_info.congested_data = mddev;
mddev->queue->backing_dev_info.congested_fn = raid5_congested;
mddev->queue->queue_lock = &conf->device_lock;
- mddev->queue->unplug_fn = raid5_unplug_queue;
chunk_size = mddev->chunk_sectors << 9;
blk_queue_io_min(mddev->queue, chunk_size);
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 2ace0582b40..8d563a4f022 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -503,6 +503,6 @@ static inline int algorithm_is_DDF(int layout)
}
extern int md_raid5_congested(mddev_t *mddev, int bits);
-extern void md_raid5_unplug_device(raid5_conf_t *conf);
+extern void md_raid5_kick_device(raid5_conf_t *conf);
extern int raid5_set_cache_size(mddev_t *mddev, int size);
#endif
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index ae7cad18589..b29eb4eaa86 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -895,11 +895,7 @@ static void i2o_block_request_fn(struct request_queue *q)
{
struct request *req;
- while (!blk_queue_plugged(q)) {
- req = blk_peek_request(q);
- if (!req)
- break;
-
+ while ((req = blk_peek_request(q)) != NULL) {
if (req->cmd_type == REQ_TYPE_FS) {
struct i2o_block_delayed_request *dreq;
struct i2o_block_request *ireq = req->special;
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 4e42d030e09..2ae727568df 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -55,8 +55,7 @@ static int mmc_queue_thread(void *d)
spin_lock_irq(q->queue_lock);
set_current_state(TASK_INTERRUPTIBLE);
- if (!blk_queue_plugged(q))
- req = blk_fetch_request(q);
+ req = blk_fetch_request(q);
mq->req = req;
spin_unlock_irq(q->queue_lock);
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 794bfd96226..4d2df2f76ea 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1917,7 +1917,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
return;
}
/* Now we try to fetch requests from the request queue */
- while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) {
+ while ((req = blk_peek_request(queue))) {
if (basedev->features & DASD_FEATURE_READONLY &&
rq_data_dir(req) == WRITE) {
DBF_DEV_EVENT(DBF_ERR, basedev,
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 55d2d0f4eab..f061b2527b4 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -161,7 +161,6 @@ tapeblock_requeue(struct work_struct *work) {
spin_lock_irq(&device->blk_data.request_queue_lock);
while (
- !blk_queue_plugged(queue) &&
blk_peek_request(queue) &&
nr_queued < TAPEBLOCK_MIN_REQUEUE
) {
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 998c01be323..2cefabd5bdb 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3913,7 +3913,7 @@ fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost,
if (!get_device(dev))
return;
- while (!blk_queue_plugged(q)) {
+ while (1) {
if (rport && (rport->port_state == FC_PORTSTATE_BLOCKED) &&
!(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
break;
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 927e99cb722..c6fcf76cade 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -173,11 +173,7 @@ static void sas_smp_request(struct request_queue *q, struct Scsi_Host *shost,
int ret;
int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *);
- while (!blk_queue_plugged(q)) {
- req = blk_fetch_request(q);
- if (!req)
- break;
-
+ while ((req = blk_fetch_request(q)) != NULL) {
spin_unlock_irq(q->queue_lock);
handler = to_sas_internal(shost->transportt)->f->smp_handler;
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 67f0c09983c..c1b539d7b0d 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -392,9 +392,8 @@ static int iblock_do_task(struct se_task *task)
{
struct se_device *dev = task->task_se_cmd->se_dev;
struct iblock_req *req = IBLOCK_REQ(task);
- struct iblock_dev *ibd = (struct iblock_dev *)req->ib_dev;
- struct request_queue *q = bdev_get_queue(ibd->ibd_bd);
struct bio *bio = req->ib_bio, *nbio = NULL;
+ struct blk_plug plug;
int rw;
if (task->task_data_direction == DMA_TO_DEVICE) {
@@ -412,6 +411,7 @@ static int iblock_do_task(struct se_task *task)
rw = READ;
}
+ blk_start_plug(&plug);
while (bio) {
nbio = bio->bi_next;
bio->bi_next = NULL;
@@ -421,9 +421,8 @@ static int iblock_do_task(struct se_task *task)
submit_bio(rw, bio);
bio = nbio;
}
+ blk_finish_plug(&plug);
- if (q->unplug_fn)
- q->unplug_fn(q);
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}