author    | Jens Axboe <jaxboe@fusionio.com> | 2011-03-07 09:40:21 +0100
committer | Jens Axboe <jaxboe@fusionio.com> | 2011-03-07 09:40:21 +0100
commit    | b873c5d692d4d5453cceed18bb06c62bb1a73ac0 (patch)
tree      | f9d5816bc26ced37f187a141ee6c74c7b8e00fcf /block
parent    | a60327107b56573c305ecc78e471dbdbb4d2f426 (diff)
parent    | e83a46bbb1d4c03defd733a64b727632a40059ad (diff)
Merge branch 'block-for-2.6.39-core' of ssh://master.kernel.org/pub/scm/linux/kernel/git/tj/misc into for-2.6.39/core
Diffstat (limited to 'block')

-rw-r--r-- | block/blk-core.c     | 18
-rw-r--r-- | block/blk-flush.c    | 18
-rw-r--r-- | block/blk-lib.c      |  2
-rw-r--r-- | block/blk-throttle.c | 29
-rw-r--r-- | block/cfq-iosched.c  |  6
-rw-r--r-- | block/elevator.c     |  4
-rw-r--r-- | block/genhd.c        |  2
-rw-r--r-- | block/ioctl.c        |  8

8 files changed, 49 insertions, 38 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index accff29ad67..74d496ccf4d 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -342,7 +342,7 @@ void blk_start_queue(struct request_queue *q)
     WARN_ON(!irqs_disabled());
 
     queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-    __blk_run_queue(q);
+    __blk_run_queue(q, false);
 }
 EXPORT_SYMBOL(blk_start_queue);
 
@@ -396,13 +396,14 @@ EXPORT_SYMBOL(blk_sync_queue);
 /**
  * __blk_run_queue - run a single device queue
  * @q: The queue to run
+ * @force_kblockd: Don't run @q->request_fn directly. Use kblockd.
  *
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
  *    held and interrupts disabled.
  *
  */
-void __blk_run_queue(struct request_queue *q)
+void __blk_run_queue(struct request_queue *q, bool force_kblockd)
 {
     blk_remove_plug(q);
 
@@ -416,7 +417,7 @@ void __blk_run_queue(struct request_queue *q)
      * Only recurse once to avoid overrunning the stack, let the unplug
      * handling reinvoke the handler shortly if we already got there.
      */
-    if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+    if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
         q->request_fn(q);
         queue_flag_clear(QUEUE_FLAG_REENTER, q);
     } else {
@@ -439,7 +440,7 @@ void blk_run_queue(struct request_queue *q)
     unsigned long flags;
 
     spin_lock_irqsave(q->queue_lock, flags);
-    __blk_run_queue(q);
+    __blk_run_queue(q, false);
     spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
@@ -1085,7 +1086,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
     drive_stat_acct(rq, 1);
     __elv_add_request(q, rq, where, 0);
-    __blk_run_queue(q);
+    __blk_run_queue(q, false);
     spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_insert_request);
@@ -2642,13 +2643,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 }
 EXPORT_SYMBOL(kblockd_schedule_work);
 
-int kblockd_schedule_delayed_work(struct request_queue *q,
-                  struct delayed_work *dwork, unsigned long delay)
-{
-    return queue_delayed_work(kblockd_workqueue, dwork, delay);
-}
-EXPORT_SYMBOL(kblockd_schedule_delayed_work);
-
 int __init blk_dev_init(void)
 {
     BUILD_BUG_ON(__REQ_NR_BITS > 8 *
diff --git a/block/blk-flush.c b/block/blk-flush.c
index a867e3f524f..0bd8c9c5d6e 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -212,9 +212,14 @@ static void flush_end_io(struct request *flush_rq, int error)
         queued |= blk_flush_complete_seq(rq, seq, error);
     }
 
-    /* after populating an empty queue, kick it to avoid stall */
+    /*
+     * Moving a request silently to empty queue_head may stall the
+     * queue. Kick the queue in those cases. This function is called
+     * from request completion path and calling directly into
+     * request_fn may confuse the driver. Always use kblockd.
+     */
     if (queued && was_empty)
-        __blk_run_queue(q);
+        __blk_run_queue(q, true);
 }
 
 /**
@@ -257,7 +262,7 @@ static bool blk_kick_flush(struct request_queue *q)
     q->flush_rq.end_io = flush_end_io;
 
     q->flush_pending_idx ^= 1;
-    elv_insert(q, &q->flush_rq, ELEVATOR_INSERT_FRONT);
+    elv_insert(q, &q->flush_rq, ELEVATOR_INSERT_REQUEUE);
     return true;
 }
 
@@ -266,9 +271,12 @@ static void flush_data_end_io(struct request *rq, int error)
     struct request_queue *q = rq->q;
     bool was_empty = elv_queue_empty(q);
 
-    /* after populating an empty queue, kick it to avoid stall */
+    /*
+     * After populating an empty queue, kick it to avoid stall. Read
+     * the comment in flush_end_io().
+     */
     if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error) && was_empty)
-        __blk_run_queue(q);
+        __blk_run_queue(q, true);
 }
 
 /**
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 1a320d2406b..eec78becb35 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -132,7 +132,7 @@ static void bio_batch_end_io(struct bio *bio, int err)
 }
 
 /**
- * blkdev_issue_zeroout generate number of zero filed write bios
+ * blkdev_issue_zeroout - generate number of zero filed write bios
  * @bdev:   blockdev to issue
  * @sector: start sector
  * @nr_sects: number of sectors to write
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index c0f62374216..061dee66e2a 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -20,6 +20,11 @@ static int throtl_quantum = 32;
 /* Throttling is performed over 100ms slice and after that slice is renewed */
 static unsigned long throtl_slice = HZ/10; /* 100 ms */
 
+/* A workqueue to queue throttle related work */
+static struct workqueue_struct *kthrotld_workqueue;
+static void throtl_schedule_delayed_work(struct throtl_data *td,
+                unsigned long delay);
+
 struct throtl_rb_root {
     struct rb_root rb;
     struct rb_node *left;
@@ -345,10 +350,9 @@ static void throtl_schedule_next_dispatch(struct throtl_data *td)
     update_min_dispatch_time(st);
 
     if (time_before_eq(st->min_disptime, jiffies))
-        throtl_schedule_delayed_work(td->queue, 0);
+        throtl_schedule_delayed_work(td, 0);
     else
-        throtl_schedule_delayed_work(td->queue,
-                (st->min_disptime - jiffies));
+        throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
 }
 
 static inline void
@@ -815,10 +819,10 @@ void blk_throtl_work(struct work_struct *work)
 }
 
 /* Call with queue lock held */
-void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay)
+static void
+throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
 {
-    struct throtl_data *td = q->td;
     struct delayed_work *dwork = &td->throtl_work;
 
     if (total_nr_queued(td) > 0) {
@@ -827,12 +831,11 @@ void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay)
          * Cancel that and schedule a new one.
          */
         __cancel_delayed_work(dwork);
-        kblockd_schedule_delayed_work(q, dwork, delay);
+        queue_delayed_work(kthrotld_workqueue, dwork, delay);
         throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
                delay, jiffies);
     }
 }
-EXPORT_SYMBOL(throtl_schedule_delayed_work);
 
 static void
 throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
@@ -920,7 +923,7 @@ static void throtl_update_blkio_group_read_bps(void *key,
     smp_mb__after_atomic_inc();
 
     /* Schedule a work now to process the limit change */
-    throtl_schedule_delayed_work(td->queue, 0);
+    throtl_schedule_delayed_work(td, 0);
 }
 
 static void throtl_update_blkio_group_write_bps(void *key,
@@ -934,7 +937,7 @@ static void throtl_update_blkio_group_write_bps(void *key,
     smp_mb__before_atomic_inc();
     atomic_inc(&td->limits_changed);
     smp_mb__after_atomic_inc();
-    throtl_schedule_delayed_work(td->queue, 0);
+    throtl_schedule_delayed_work(td, 0);
 }
 
 static void throtl_update_blkio_group_read_iops(void *key,
@@ -948,7 +951,7 @@ static void throtl_update_blkio_group_read_iops(void *key,
     smp_mb__before_atomic_inc();
     atomic_inc(&td->limits_changed);
     smp_mb__after_atomic_inc();
-    throtl_schedule_delayed_work(td->queue, 0);
+    throtl_schedule_delayed_work(td, 0);
 }
 
 static void throtl_update_blkio_group_write_iops(void *key,
@@ -962,7 +965,7 @@ static void throtl_update_blkio_group_write_iops(void *key,
     smp_mb__before_atomic_inc();
     atomic_inc(&td->limits_changed);
     smp_mb__after_atomic_inc();
-    throtl_schedule_delayed_work(td->queue, 0);
+    throtl_schedule_delayed_work(td, 0);
 }
 
 static void throtl_shutdown_wq(struct request_queue *q)
@@ -1135,6 +1138,10 @@ void blk_throtl_exit(struct request_queue *q)
 
 static int __init throtl_init(void)
 {
+    kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
+    if (!kthrotld_workqueue)
+        panic("Failed to create kthrotld\n");
+
     blkio_policy_register(&blkio_policy_throtl);
     return 0;
 }
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 938ae52aa92..9697053f80b 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3344,7 +3344,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
             cfqd->busy_queues > 1) {
             cfq_del_timer(cfqd, cfqq);
             cfq_clear_cfqq_wait_request(cfqq);
-            __blk_run_queue(cfqd->queue);
+            __blk_run_queue(cfqd->queue, false);
         } else {
             cfq_blkiocg_update_idle_time_stats(
                     &cfqq->cfqg->blkg);
@@ -3359,7 +3359,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
          * this new queue is RT and the current one is BE
          */
         cfq_preempt_queue(cfqd, cfqq);
-        __blk_run_queue(cfqd->queue);
+        __blk_run_queue(cfqd->queue, false);
     }
 }
 
@@ -3719,7 +3719,7 @@ static void cfq_kick_queue(struct work_struct *work)
     struct request_queue *q = cfqd->queue;
 
     spin_lock_irq(q->queue_lock);
-    __blk_run_queue(cfqd->queue);
+    __blk_run_queue(cfqd->queue, false);
     spin_unlock_irq(q->queue_lock);
 }
 
diff --git a/block/elevator.c b/block/elevator.c
index f98e92edc93..fabf3675c91 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -602,7 +602,7 @@ void elv_quiesce_start(struct request_queue *q)
      */
     elv_drain_elevator(q);
     while (q->rq.elvpriv) {
-        __blk_run_queue(q);
+        __blk_run_queue(q, false);
         spin_unlock_irq(q->queue_lock);
         msleep(10);
         spin_lock_irq(q->queue_lock);
@@ -651,7 +651,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
          *   with anything. There's no point in delaying queue
          *   processing.
          */
-        __blk_run_queue(q);
+        __blk_run_queue(q, false);
         break;
 
     case ELEVATOR_INSERT_SORT:
diff --git a/block/genhd.c b/block/genhd.c
index 73d85a8e3c8..3e2b57b55e3 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1355,7 +1355,7 @@ int invalidate_partition(struct gendisk *disk, int partno)
     struct block_device *bdev = bdget_disk(disk, partno);
     if (bdev) {
         fsync_bdev(bdev);
-        res = __invalidate_device(bdev);
+        res = __invalidate_device(bdev, true);
         bdput(bdev);
     }
     return res;
diff --git a/block/ioctl.c b/block/ioctl.c
index 9049d460fa8..1124cd29726 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -294,9 +294,11 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
             return -EINVAL;
         if (get_user(n, (int __user *) arg))
             return -EFAULT;
-        if (!(mode & FMODE_EXCL) &&
-            blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
-            return -EBUSY;
+        if (!(mode & FMODE_EXCL)) {
+            bdgrab(bdev);
+            if (blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
+                return -EBUSY;
+        }
         ret = set_blocksize(bdev, n);
         if (!(mode & FMODE_EXCL))
             blkdev_put(bdev, mode | FMODE_EXCL);
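The change threaded through most of these files is the new second parameter to __blk_run_queue(). Process-context callers that hold q->queue_lock pass false and may have q->request_fn() invoked directly (bounded by the QUEUE_FLAG_REENTER recursion guard), while completion-path callers such as the flush machinery pass true so the queue is only ever kicked via kblockd. A minimal sketch of the two calling conventions; my_kick_queue() and my_flush_done() are hypothetical driver callbacks, not part of this merge:

```c
#include <linux/blkdev.h>
#include <linux/spinlock.h>

/* Process context: take the queue lock, then run the queue directly. */
static void my_kick_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	/* false: __blk_run_queue() may call q->request_fn() inline */
	__blk_run_queue(q, false);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/*
 * Completion context: rq_end_io callbacks of this era run with
 * q->queue_lock already held; recursing into request_fn from here
 * could confuse the driver, so force the run through kblockd,
 * mirroring what flush_end_io() does in this patch.
 */
static void my_flush_done(struct request *rq, int error)
{
	__blk_run_queue(rq->q, true);
}
```

Passing true trades a small scheduling latency for the guarantee that request_fn never nests inside a driver's completion path.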
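blk-throttle also stops piggybacking on kblockd: kblockd_schedule_delayed_work() is deleted from blk-core.c and the throttle code gets its own kthrotld workqueue, created with WQ_MEM_RECLAIM so dispatch can make progress under memory pressure. The cancel-then-requeue idiom in throtl_schedule_delayed_work() is the standard way to re-arm a delayed work with a new delay. A self-contained sketch of the same pattern using the workqueue API of this kernel era, with hypothetical names (my_wq, my_work) in place of the throttle data structures:

```c
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;
static struct delayed_work my_work;

static void my_work_fn(struct work_struct *work)
{
	/* dispatch deferred items here */
}

static int __init my_init(void)
{
	/*
	 * WQ_MEM_RECLAIM provides a rescuer thread so the work can
	 * still run during reclaim -- important when I/O forward
	 * progress depends on it.
	 */
	my_wq = alloc_workqueue("my_wq", WQ_MEM_RECLAIM, 0);
	if (!my_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&my_work, my_work_fn);
	return 0;
}

/*
 * Re-arm with a new delay: cancel any pending instance first, exactly
 * as throtl_schedule_delayed_work() does with __cancel_delayed_work()
 * (the non-sleeping cancel, usable under the queue lock).
 */
static void my_schedule(unsigned long delay)
{
	__cancel_delayed_work(&my_work);
	queue_delayed_work(my_wq, &my_work, delay);
}
```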
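Finally, the BLKBSZSET hunk in ioctl.c adapts to the exclusive-open refcounting rules of this series: blkdev_get() consumes the reference it is passed and drops it on failure, so a caller that wants to keep using its own reference must take an extra one with bdgrab() first. A condensed sketch of the resulting pattern, wrapped in a hypothetical helper (set_blocksize_excl() is not in the patch; the surrounding switch and get_user() handling are omitted):

```c
#include <linux/fs.h>

static int set_blocksize_excl(struct block_device *bdev, fmode_t mode, int n)
{
	int ret;

	if (!(mode & FMODE_EXCL)) {
		bdgrab(bdev);	/* extra ref: blkdev_get() consumes one */
		if (blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
			return -EBUSY;	/* ref already dropped on failure */
	}

	ret = set_blocksize(bdev, n);

	if (!(mode & FMODE_EXCL))
		blkdev_put(bdev, mode | FMODE_EXCL);	/* releases the ref */
	return ret;
}
```

Without the bdgrab(), a failed exclusive open would eat the caller's only reference and the subsequent blkdev_put() would underflow the count.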