| author    | Kim Kibum <kb0929.kim@samsung.com> | 2012-04-29 16:59:19 +0900 |
| committer | Kim Kibum <kb0929.kim@samsung.com> | 2012-04-29 16:59:19 +0900 |
| commit    | c1775d1a93a77a57380a4ce87ac3a8f807c944b2 (patch) |
| tree      | e1f233f2af38ee247a677082198dd3a69a12a5a1 /block |
| parent    | 2c2dcd5ffef2e97176e6a55e45512177e55e6fb9 (diff) |
| download  | linux-2.6.36-master.tar.gz linux-2.6.36-master.tar.bz2 linux-2.6.36-master.zip |
Diffstat (limited to 'block')
| -rw-r--r-- | block/Kconfig | 87 |
| -rw-r--r-- | block/Kconfig.iosched | 72 |
| -rw-r--r-- | block/Makefile | 17 |
| -rw-r--r-- | block/blk-barrier.c | 350 |
| -rw-r--r-- | block/blk-cgroup.c | 1051 |
| -rw-r--r-- | block/blk-cgroup.h | 258 |
| -rw-r--r-- | block/blk-core.c | 2598 |
| -rw-r--r-- | block/blk-exec.c | 105 |
| -rw-r--r-- | block/blk-integrity.c | 387 |
| -rw-r--r-- | block/blk-ioc.c | 180 |
| -rw-r--r-- | block/blk-iopoll.c | 227 |
| -rw-r--r-- | block/blk-lib.c | 231 |
| -rw-r--r-- | block/blk-map.c | 328 |
| -rw-r--r-- | block/blk-merge.c | 461 |
| -rw-r--r-- | block/blk-settings.c | 786 |
| -rw-r--r-- | block/blk-softirq.c | 175 |
| -rw-r--r-- | block/blk-sysfs.c | 535 |
| -rw-r--r-- | block/blk-tag.c | 406 |
| -rw-r--r-- | block/blk-timeout.c | 237 |
| -rw-r--r-- | block/blk.h | 174 |
| -rw-r--r-- | block/bsg.c | 1121 |
| -rw-r--r-- | block/cfq-iosched.c | 4155 |
| -rw-r--r-- | block/cfq.h | 115 |
| -rw-r--r-- | block/compat_ioctl.c | 768 |
| -rw-r--r-- | block/deadline-iosched.c | 481 |
| -rw-r--r-- | block/elevator.c | 1173 |
| -rw-r--r-- | block/genhd.c | 1296 |
| -rw-r--r-- | block/ioctl.c | 327 |
| -rw-r--r-- | block/noop-iosched.c | 125 |
| -rw-r--r-- | block/scsi_ioctl.c | 699 |
30 files changed, 18925 insertions, 0 deletions
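Before the patch body: the commit adds blkdev_issue_flush() in block/blk-barrier.c below, whose kernel-doc notes that a caller may pass a WAIT flag and optionally collect an error sector. As context, here is a minimal sketch of how a caller in this tree might use it; the BLKDEV_IFL_WAIT flag name and the EOPNOTSUPP handling are assumptions based on the 2.6.36-era blkdev.h, not part of this commit's diff.

```c
#include <linux/blkdev.h>
#include <linux/fs.h>

/*
 * Hypothetical caller: flush a block device's write cache and wait for
 * completion via the blkdev_issue_flush() added by this commit.
 * BLKDEV_IFL_WAIT is assumed to be defined in include/linux/blkdev.h of
 * the same tree; the error-sector argument may be left NULL if unused.
 */
static int example_sync_cache(struct block_device *bdev)
{
	sector_t error_sector;
	int ret;

	ret = blkdev_issue_flush(bdev, GFP_KERNEL, &error_sector,
				 BLKDEV_IFL_WAIT);
	if (ret == -EOPNOTSUPP)
		ret = 0;	/* device has no volatile cache to flush */

	return ret;
}
```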
diff --git a/block/Kconfig b/block/Kconfig new file mode 100644 index 00000000..9be0b56e --- /dev/null +++ b/block/Kconfig @@ -0,0 +1,87 @@ +# +# Block layer core configuration +# +menuconfig BLOCK + bool "Enable the block layer" if EMBEDDED + default y + help + Provide block layer support for the kernel. + + Disable this option to remove the block layer support from the + kernel. This may be useful for embedded devices. + + If this option is disabled: + + - block device files will become unusable + - some filesystems (such as ext3) will become unavailable. + + Also, SCSI character devices and USB storage will be disabled since + they make use of various block layer definitions and facilities. + + Say Y here unless you know you really don't want to mount disks and + suchlike. + +if BLOCK + +config LBDAF + bool "Support for large (2TB+) block devices and files" + depends on !64BIT + default y + help + Enable block devices or files of size 2TB and larger. + + This option is required to support the full capacity of large + (2TB+) block devices, including RAID, disk, Network Block Device, + Logical Volume Manager (LVM) and loopback. + + This option also enables support for single files larger than + 2TB. + + The ext4 filesystem requires that this feature be enabled in + order to support filesystems that have the huge_file feature + enabled. Otherwise, it will refuse to mount in the read-write + mode any filesystems that use the huge_file feature, which is + enabled by default by mke2fs.ext4. + + The GFS2 filesystem also requires this feature. + + If unsure, say Y. + +config BLK_DEV_BSG + bool "Block layer SG support v4" + default y + help + Saying Y here will enable generic SG (SCSI generic) v4 support + for any block device. + + Unlike SG v3 (aka block/scsi_ioctl.c drivers/scsi/sg.c), SG v4 + can handle complicated SCSI commands: tagged variable length cdbs + with bidirectional data transfers and generic request/response + protocols (e.g. Task Management Functions and SMP in Serial + Attached SCSI). + + This option is required by recent UDEV versions to properly + access device serial numbers, etc. + + If unsure, say Y. + +config BLK_DEV_INTEGRITY + bool "Block layer data integrity support" + ---help--- + Some storage devices allow extra information to be + stored/retrieved to help protect the data. The block layer + data integrity option provides hooks which can be used by + filesystems to ensure better data integrity. + + Say yes here if you have a storage device that provides the + T10/SCSI Data Integrity Field or the T13/ATA External Path + Protection. If in doubt, say N. + +endif # BLOCK + +config BLOCK_COMPAT + bool + depends on BLOCK && COMPAT + default y + +source block/Kconfig.iosched diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched new file mode 100644 index 00000000..3199b76f --- /dev/null +++ b/block/Kconfig.iosched @@ -0,0 +1,72 @@ +if BLOCK + +menu "IO Schedulers" + +config IOSCHED_NOOP + bool + default y + ---help--- + The no-op I/O scheduler is a minimal scheduler that does basic merging + and sorting. Its main uses include non-disk based block devices like + memory devices, and specialised software or hardware environments + that do their own scheduling and require only minimal assistance from + the kernel. + +config IOSCHED_DEADLINE + tristate "Deadline I/O scheduler" + default y + ---help--- + The deadline I/O scheduler is simple and compact. 
It will provide + CSCAN service with FIFO expiration of requests, switching to + a new point in the service tree and doing a batch of IO from there + in case of expiry. + +config IOSCHED_CFQ + tristate "CFQ I/O scheduler" + # If BLK_CGROUP is a module, CFQ has to be built as module. + depends on (BLK_CGROUP=m && m) || !BLK_CGROUP || BLK_CGROUP=y + default y + ---help--- + The CFQ I/O scheduler tries to distribute bandwidth equally + among all processes in the system. It should provide a fair + and low latency working environment, suitable for both desktop + and server systems. + + This is the default I/O scheduler. + + Note: If BLK_CGROUP=m, then CFQ can be built only as module. + +config CFQ_GROUP_IOSCHED + bool "CFQ Group Scheduling support" + depends on IOSCHED_CFQ && BLK_CGROUP + default n + ---help--- + Enable group IO scheduling in CFQ. + +choice + prompt "Default I/O scheduler" + default DEFAULT_CFQ + help + Select the I/O scheduler which will be used by default for all + block devices. + + config DEFAULT_DEADLINE + bool "Deadline" if IOSCHED_DEADLINE=y + + config DEFAULT_CFQ + bool "CFQ" if IOSCHED_CFQ=y + + config DEFAULT_NOOP + bool "No-op" + +endchoice + +config DEFAULT_IOSCHED + string + default "deadline" if DEFAULT_DEADLINE + default "cfq" if DEFAULT_CFQ + default "noop" if DEFAULT_NOOP + +endmenu + +endif diff --git a/block/Makefile b/block/Makefile new file mode 100644 index 00000000..0bb499a7 --- /dev/null +++ b/block/Makefile @@ -0,0 +1,17 @@ +# +# Makefile for the kernel block layer +# + +obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \ + blk-barrier.o blk-settings.o blk-ioc.o blk-map.o \ + blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \ + blk-iopoll.o blk-lib.o ioctl.o genhd.o scsi_ioctl.o + +obj-$(CONFIG_BLK_DEV_BSG) += bsg.o +obj-$(CONFIG_BLK_CGROUP) += blk-cgroup.o +obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o +obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o +obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o + +obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o +obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o diff --git a/block/blk-barrier.c b/block/blk-barrier.c new file mode 100644 index 00000000..f0faefca --- /dev/null +++ b/block/blk-barrier.c @@ -0,0 +1,350 @@ +/* + * Functions related to barrier IO handling + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/bio.h> +#include <linux/blkdev.h> +#include <linux/gfp.h> + +#include "blk.h" + +/** + * blk_queue_ordered - does this queue support ordered writes + * @q: the request queue + * @ordered: one of QUEUE_ORDERED_* + * + * Description: + * For journalled file systems, doing ordered writes on a commit + * block instead of explicitly doing wait_on_buffer (which is bad + * for performance) can be a big win. Block drivers supporting this + * feature should call this function and indicate so. 
+ * + **/ +int blk_queue_ordered(struct request_queue *q, unsigned ordered) +{ + if (ordered != QUEUE_ORDERED_NONE && + ordered != QUEUE_ORDERED_DRAIN && + ordered != QUEUE_ORDERED_DRAIN_FLUSH && + ordered != QUEUE_ORDERED_DRAIN_FUA && + ordered != QUEUE_ORDERED_TAG && + ordered != QUEUE_ORDERED_TAG_FLUSH && + ordered != QUEUE_ORDERED_TAG_FUA) { + printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered); + return -EINVAL; + } + + q->ordered = ordered; + q->next_ordered = ordered; + + return 0; +} +EXPORT_SYMBOL(blk_queue_ordered); + +/* + * Cache flushing for ordered writes handling + */ +unsigned blk_ordered_cur_seq(struct request_queue *q) +{ + if (!q->ordseq) + return 0; + return 1 << ffz(q->ordseq); +} + +unsigned blk_ordered_req_seq(struct request *rq) +{ + struct request_queue *q = rq->q; + + BUG_ON(q->ordseq == 0); + + if (rq == &q->pre_flush_rq) + return QUEUE_ORDSEQ_PREFLUSH; + if (rq == &q->bar_rq) + return QUEUE_ORDSEQ_BAR; + if (rq == &q->post_flush_rq) + return QUEUE_ORDSEQ_POSTFLUSH; + + /* + * !fs requests don't need to follow barrier ordering. Always + * put them at the front. This fixes the following deadlock. + * + * http://thread.gmane.org/gmane.linux.kernel/537473 + */ + if (rq->cmd_type != REQ_TYPE_FS) + return QUEUE_ORDSEQ_DRAIN; + + if ((rq->cmd_flags & REQ_ORDERED_COLOR) == + (q->orig_bar_rq->cmd_flags & REQ_ORDERED_COLOR)) + return QUEUE_ORDSEQ_DRAIN; + else + return QUEUE_ORDSEQ_DONE; +} + +bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error) +{ + struct request *rq; + + if (error && !q->orderr) + q->orderr = error; + + BUG_ON(q->ordseq & seq); + q->ordseq |= seq; + + if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE) + return false; + + /* + * Okay, sequence complete. + */ + q->ordseq = 0; + rq = q->orig_bar_rq; + __blk_end_request_all(rq, q->orderr); + return true; +} + +static void pre_flush_end_io(struct request *rq, int error) +{ + elv_completed_request(rq->q, rq); + blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_PREFLUSH, error); +} + +static void bar_end_io(struct request *rq, int error) +{ + elv_completed_request(rq->q, rq); + blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_BAR, error); +} + +static void post_flush_end_io(struct request *rq, int error) +{ + elv_completed_request(rq->q, rq); + blk_ordered_complete_seq(rq->q, QUEUE_ORDSEQ_POSTFLUSH, error); +} + +static void queue_flush(struct request_queue *q, unsigned which) +{ + struct request *rq; + rq_end_io_fn *end_io; + + if (which == QUEUE_ORDERED_DO_PREFLUSH) { + rq = &q->pre_flush_rq; + end_io = pre_flush_end_io; + } else { + rq = &q->post_flush_rq; + end_io = post_flush_end_io; + } + + blk_rq_init(q, rq); + rq->cmd_type = REQ_TYPE_FS; + rq->cmd_flags = REQ_HARDBARRIER | REQ_FLUSH; + rq->rq_disk = q->orig_bar_rq->rq_disk; + rq->end_io = end_io; + + elv_insert(q, rq, ELEVATOR_INSERT_FRONT); +} + +static inline bool start_ordered(struct request_queue *q, struct request **rqp) +{ + struct request *rq = *rqp; + unsigned skip = 0; + + q->orderr = 0; + q->ordered = q->next_ordered; + q->ordseq |= QUEUE_ORDSEQ_STARTED; + + /* + * For an empty barrier, there's no actual BAR request, which + * in turn makes POSTFLUSH unnecessary. Mask them off. + */ + if (!blk_rq_sectors(rq)) { + q->ordered &= ~(QUEUE_ORDERED_DO_BAR | + QUEUE_ORDERED_DO_POSTFLUSH); + /* + * Empty barrier on a write-through device w/ ordered + * tag has no command to issue and without any command + * to issue, ordering by tag can't be used. Drain + * instead. 
+ */ + if ((q->ordered & QUEUE_ORDERED_BY_TAG) && + !(q->ordered & QUEUE_ORDERED_DO_PREFLUSH)) { + q->ordered &= ~QUEUE_ORDERED_BY_TAG; + q->ordered |= QUEUE_ORDERED_BY_DRAIN; + } + } + + /* stash away the original request */ + blk_dequeue_request(rq); + q->orig_bar_rq = rq; + rq = NULL; + + /* + * Queue ordered sequence. As we stack them at the head, we + * need to queue in reverse order. Note that we rely on that + * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs + * request gets inbetween ordered sequence. + */ + if (q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) { + queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH); + rq = &q->post_flush_rq; + } else + skip |= QUEUE_ORDSEQ_POSTFLUSH; + + if (q->ordered & QUEUE_ORDERED_DO_BAR) { + rq = &q->bar_rq; + + /* initialize proxy request and queue it */ + blk_rq_init(q, rq); + if (bio_data_dir(q->orig_bar_rq->bio) == WRITE) + rq->cmd_flags |= REQ_WRITE; + if (q->ordered & QUEUE_ORDERED_DO_FUA) + rq->cmd_flags |= REQ_FUA; + init_request_from_bio(rq, q->orig_bar_rq->bio); + rq->end_io = bar_end_io; + + elv_insert(q, rq, ELEVATOR_INSERT_FRONT); + } else + skip |= QUEUE_ORDSEQ_BAR; + + if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) { + queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH); + rq = &q->pre_flush_rq; + } else + skip |= QUEUE_ORDSEQ_PREFLUSH; + + if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && queue_in_flight(q)) + rq = NULL; + else + skip |= QUEUE_ORDSEQ_DRAIN; + + *rqp = rq; + + /* + * Complete skipped sequences. If whole sequence is complete, + * return false to tell elevator that this request is gone. + */ + return !blk_ordered_complete_seq(q, skip, 0); +} + +bool blk_do_ordered(struct request_queue *q, struct request **rqp) +{ + struct request *rq = *rqp; + const int is_barrier = rq->cmd_type == REQ_TYPE_FS && + (rq->cmd_flags & REQ_HARDBARRIER); + + if (!q->ordseq) { + if (!is_barrier) + return true; + + if (q->next_ordered != QUEUE_ORDERED_NONE) + return start_ordered(q, rqp); + else { + /* + * Queue ordering not supported. Terminate + * with prejudice. + */ + blk_dequeue_request(rq); + __blk_end_request_all(rq, -EOPNOTSUPP); + *rqp = NULL; + return false; + } + } + + /* + * Ordered sequence in progress + */ + + /* Special requests are not subject to ordering rules. */ + if (rq->cmd_type != REQ_TYPE_FS && + rq != &q->pre_flush_rq && rq != &q->post_flush_rq) + return true; + + if (q->ordered & QUEUE_ORDERED_BY_TAG) { + /* Ordered by tag. Blocking the next barrier is enough. */ + if (is_barrier && rq != &q->bar_rq) + *rqp = NULL; + } else { + /* Ordered by draining. Wait for turn. */ + WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q)); + if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q)) + *rqp = NULL; + } + + return true; +} + +static void bio_end_empty_barrier(struct bio *bio, int err) +{ + if (err) { + if (err == -EOPNOTSUPP) + set_bit(BIO_EOPNOTSUPP, &bio->bi_flags); + clear_bit(BIO_UPTODATE, &bio->bi_flags); + } + if (bio->bi_private) + complete(bio->bi_private); + bio_put(bio); +} + +/** + * blkdev_issue_flush - queue a flush + * @bdev: blockdev to issue flush for + * @gfp_mask: memory allocation flags (for bio_alloc) + * @error_sector: error sector + * @flags: BLKDEV_IFL_* flags to control behaviour + * + * Description: + * Issue a flush for the block device in question. Caller can supply + * room for storing the error offset in case of a flush error, if they + * wish to. If WAIT flag is not passed then caller may check only what + * request was pushed in some internal queue for later handling. 
+ */ +int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, + sector_t *error_sector, unsigned long flags) +{ + DECLARE_COMPLETION_ONSTACK(wait); + struct request_queue *q; + struct bio *bio; + int ret = 0; + + if (bdev->bd_disk == NULL) + return -ENXIO; + + q = bdev_get_queue(bdev); + if (!q) + return -ENXIO; + + /* + * some block devices may not have their queue correctly set up here + * (e.g. loop device without a backing file) and so issuing a flush + * here will panic. Ensure there is a request function before issuing + * the barrier. + */ + if (!q->make_request_fn) + return -ENXIO; + + bio = bio_alloc(gfp_mask, 0); + bio->bi_end_io = bio_end_empty_barrier; + bio->bi_bdev = bdev; + if (test_bit(BLKDEV_WAIT, &flags)) + bio->bi_private = &wait; + + bio_get(bio); + submit_bio(WRITE_BARRIER, bio); + if (test_bit(BLKDEV_WAIT, &flags)) { + wait_for_completion(&wait); + /* + * The driver must store the error location in ->bi_sector, if + * it supports it. For non-stacked drivers, this should be + * copied from blk_rq_pos(rq). + */ + if (error_sector) + *error_sector = bio->bi_sector; + } + + if (bio_flagged(bio, BIO_EOPNOTSUPP)) + ret = -EOPNOTSUPP; + else if (!bio_flagged(bio, BIO_UPTODATE)) + ret = -EIO; + + bio_put(bio); + return ret; +} +EXPORT_SYMBOL(blkdev_issue_flush); diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c new file mode 100644 index 00000000..5d2dcbad --- /dev/null +++ b/block/blk-cgroup.c @@ -0,0 +1,1051 @@ +/* + * Common Block IO controller cgroup interface + * + * Based on ideas and code from CFQ, CFS and BFQ: + * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk> + * + * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it> + * Paolo Valente <paolo.valente@unimore.it> + * + * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com> + * Nauman Rafique <nauman@google.com> + */ +#include <linux/ioprio.h> +#include <linux/seq_file.h> +#include <linux/kdev_t.h> +#include <linux/module.h> +#include <linux/err.h> +#include <linux/blkdev.h> +#include <linux/slab.h> +#include "blk-cgroup.h" +#include <linux/genhd.h> + +#define MAX_KEY_LEN 100 + +static DEFINE_SPINLOCK(blkio_list_lock); +static LIST_HEAD(blkio_list); + +struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT }; +EXPORT_SYMBOL_GPL(blkio_root_cgroup); + +static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *, + struct cgroup *); +static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *, + struct task_struct *, bool); +static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *, + struct cgroup *, struct task_struct *, bool); +static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *); +static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *); + +struct cgroup_subsys blkio_subsys = { + .name = "blkio", + .create = blkiocg_create, + .can_attach = blkiocg_can_attach, + .attach = blkiocg_attach, + .destroy = blkiocg_destroy, + .populate = blkiocg_populate, +#ifdef CONFIG_BLK_CGROUP + /* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */ + .subsys_id = blkio_subsys_id, +#endif + .use_id = 1, + .module = THIS_MODULE, +}; +EXPORT_SYMBOL_GPL(blkio_subsys); + +static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg, + struct blkio_policy_node *pn) +{ + list_add(&pn->node, &blkcg->policy_list); +} + +/* Must be called with blkcg->lock held */ +static inline void blkio_policy_delete_node(struct blkio_policy_node *pn) +{ + list_del(&pn->node); +} + +/* Must be called with blkcg->lock held */ +static struct 
blkio_policy_node * +blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev) +{ + struct blkio_policy_node *pn; + + list_for_each_entry(pn, &blkcg->policy_list, node) { + if (pn->dev == dev) + return pn; + } + + return NULL; +} + +struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup) +{ + return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id), + struct blkio_cgroup, css); +} +EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup); + +/* + * Add to the appropriate stat variable depending on the request type. + * This should be called with the blkg->stats_lock held. + */ +static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction, + bool sync) +{ + if (direction) + stat[BLKIO_STAT_WRITE] += add; + else + stat[BLKIO_STAT_READ] += add; + if (sync) + stat[BLKIO_STAT_SYNC] += add; + else + stat[BLKIO_STAT_ASYNC] += add; +} + +/* + * Decrements the appropriate stat variable if non-zero depending on the + * request type. Panics on value being zero. + * This should be called with the blkg->stats_lock held. + */ +static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync) +{ + if (direction) { + BUG_ON(stat[BLKIO_STAT_WRITE] == 0); + stat[BLKIO_STAT_WRITE]--; + } else { + BUG_ON(stat[BLKIO_STAT_READ] == 0); + stat[BLKIO_STAT_READ]--; + } + if (sync) { + BUG_ON(stat[BLKIO_STAT_SYNC] == 0); + stat[BLKIO_STAT_SYNC]--; + } else { + BUG_ON(stat[BLKIO_STAT_ASYNC] == 0); + stat[BLKIO_STAT_ASYNC]--; + } +} + +#ifdef CONFIG_DEBUG_BLK_CGROUP +/* This should be called with the blkg->stats_lock held. */ +static void blkio_set_start_group_wait_time(struct blkio_group *blkg, + struct blkio_group *curr_blkg) +{ + if (blkio_blkg_waiting(&blkg->stats)) + return; + if (blkg == curr_blkg) + return; + blkg->stats.start_group_wait_time = sched_clock(); + blkio_mark_blkg_waiting(&blkg->stats); +} + +/* This should be called with the blkg->stats_lock held. */ +static void blkio_update_group_wait_time(struct blkio_group_stats *stats) +{ + unsigned long long now; + + if (!blkio_blkg_waiting(stats)) + return; + + now = sched_clock(); + if (time_after64(now, stats->start_group_wait_time)) + stats->group_wait_time += now - stats->start_group_wait_time; + blkio_clear_blkg_waiting(stats); +} + +/* This should be called with the blkg->stats_lock held. 
*/ +static void blkio_end_empty_time(struct blkio_group_stats *stats) +{ + unsigned long long now; + + if (!blkio_blkg_empty(stats)) + return; + + now = sched_clock(); + if (time_after64(now, stats->start_empty_time)) + stats->empty_time += now - stats->start_empty_time; + blkio_clear_blkg_empty(stats); +} + +void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg) +{ + unsigned long flags; + + spin_lock_irqsave(&blkg->stats_lock, flags); + BUG_ON(blkio_blkg_idling(&blkg->stats)); + blkg->stats.start_idle_time = sched_clock(); + blkio_mark_blkg_idling(&blkg->stats); + spin_unlock_irqrestore(&blkg->stats_lock, flags); +} +EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats); + +void blkiocg_update_idle_time_stats(struct blkio_group *blkg) +{ + unsigned long flags; + unsigned long long now; + struct blkio_group_stats *stats; + + spin_lock_irqsave(&blkg->stats_lock, flags); + stats = &blkg->stats; + if (blkio_blkg_idling(stats)) { + now = sched_clock(); + if (time_after64(now, stats->start_idle_time)) + stats->idle_time += now - stats->start_idle_time; + blkio_clear_blkg_idling(stats); + } + spin_unlock_irqrestore(&blkg->stats_lock, flags); +} +EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats); + +void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg) +{ + unsigned long flags; + struct blkio_group_stats *stats; + + spin_lock_irqsave(&blkg->stats_lock, flags); + stats = &blkg->stats; + stats->avg_queue_size_sum += + stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] + + stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]; + stats->avg_queue_size_samples++; + blkio_update_group_wait_time(stats); + spin_unlock_irqrestore(&blkg->stats_lock, flags); +} +EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats); + +void blkiocg_set_start_empty_time(struct blkio_group *blkg) +{ + unsigned long flags; + struct blkio_group_stats *stats; + + spin_lock_irqsave(&blkg->stats_lock, flags); + stats = &blkg->stats; + + if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] || + stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) { + spin_unlock_irqrestore(&blkg->stats_lock, flags); + return; + } + + /* + * group is already marked empty. This can happen if cfqq got new + * request in parent group and moved to this group while being added + * to service tree. Just ignore the event and move on. 
+ */ + if(blkio_blkg_empty(stats)) { + spin_unlock_irqrestore(&blkg->stats_lock, flags); + return; + } + + stats->start_empty_time = sched_clock(); + blkio_mark_blkg_empty(stats); + spin_unlock_irqrestore(&blkg->stats_lock, flags); +} +EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time); + +void blkiocg_update_dequeue_stats(struct blkio_group *blkg, + unsigned long dequeue) +{ + blkg->stats.dequeue += dequeue; +} +EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats); +#else +static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg, + struct blkio_group *curr_blkg) {} +static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {} +#endif + +void blkiocg_update_io_add_stats(struct blkio_group *blkg, + struct blkio_group *curr_blkg, bool direction, + bool sync) +{ + unsigned long flags; + + spin_lock_irqsave(&blkg->stats_lock, flags); + blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction, + sync); + blkio_end_empty_time(&blkg->stats); + blkio_set_start_group_wait_time(blkg, curr_blkg); + spin_unlock_irqrestore(&blkg->stats_lock, flags); +} +EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats); + +void blkiocg_update_io_remove_stats(struct blkio_group *blkg, + bool direction, bool sync) +{ + unsigned long flags; + + spin_lock_irqsave(&blkg->stats_lock, flags); + blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], + direction, sync); + spin_unlock_irqrestore(&blkg->stats_lock, flags); +} +EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats); + +void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time) +{ + unsigned long flags; + + spin_lock_irqsave(&blkg->stats_lock, flags); + blkg->stats.time += time; + spin_unlock_irqrestore(&blkg->stats_lock, flags); +} +EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used); + +void blkiocg_update_dispatch_stats(struct blkio_group *blkg, + uint64_t bytes, bool direction, bool sync) +{ + struct blkio_group_stats *stats; + unsigned long flags; + + spin_lock_irqsave(&blkg->stats_lock, flags); + stats = &blkg->stats; + stats->sectors += bytes >> 9; + blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICED], 1, direction, + sync); + blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_BYTES], bytes, + direction, sync); + spin_unlock_irqrestore(&blkg->stats_lock, flags); +} +EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats); + +void blkiocg_update_completion_stats(struct blkio_group *blkg, + uint64_t start_time, uint64_t io_start_time, bool direction, bool sync) +{ + struct blkio_group_stats *stats; + unsigned long flags; + unsigned long long now = sched_clock(); + + spin_lock_irqsave(&blkg->stats_lock, flags); + stats = &blkg->stats; + if (time_after64(now, io_start_time)) + blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME], + now - io_start_time, direction, sync); + if (time_after64(io_start_time, start_time)) + blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME], + io_start_time - start_time, direction, sync); + spin_unlock_irqrestore(&blkg->stats_lock, flags); +} +EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats); + +void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction, + bool sync) +{ + unsigned long flags; + + spin_lock_irqsave(&blkg->stats_lock, flags); + blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_MERGED], 1, direction, + sync); + spin_unlock_irqrestore(&blkg->stats_lock, flags); +} +EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats); + +void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, + struct blkio_group *blkg, void *key, dev_t dev) +{ + unsigned long flags; 
+ + spin_lock_irqsave(&blkcg->lock, flags); + spin_lock_init(&blkg->stats_lock); + rcu_assign_pointer(blkg->key, key); + blkg->blkcg_id = css_id(&blkcg->css); + hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list); + spin_unlock_irqrestore(&blkcg->lock, flags); + /* Need to take css reference ? */ + cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path)); + blkg->dev = dev; +} +EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group); + +static void __blkiocg_del_blkio_group(struct blkio_group *blkg) +{ + hlist_del_init_rcu(&blkg->blkcg_node); + blkg->blkcg_id = 0; +} + +/* + * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1 + * indicating that blk_group was unhashed by the time we got to it. + */ +int blkiocg_del_blkio_group(struct blkio_group *blkg) +{ + struct blkio_cgroup *blkcg; + unsigned long flags; + struct cgroup_subsys_state *css; + int ret = 1; + + rcu_read_lock(); + css = css_lookup(&blkio_subsys, blkg->blkcg_id); + if (css) { + blkcg = container_of(css, struct blkio_cgroup, css); + spin_lock_irqsave(&blkcg->lock, flags); + if (!hlist_unhashed(&blkg->blkcg_node)) { + __blkiocg_del_blkio_group(blkg); + ret = 0; + } + spin_unlock_irqrestore(&blkcg->lock, flags); + } + + rcu_read_unlock(); + return ret; +} +EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group); + +/* called under rcu_read_lock(). */ +struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key) +{ + struct blkio_group *blkg; + struct hlist_node *n; + void *__key; + + hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) { + __key = blkg->key; + if (__key == key) + return blkg; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(blkiocg_lookup_group); + +#define SHOW_FUNCTION(__VAR) \ +static u64 blkiocg_##__VAR##_read(struct cgroup *cgroup, \ + struct cftype *cftype) \ +{ \ + struct blkio_cgroup *blkcg; \ + \ + blkcg = cgroup_to_blkio_cgroup(cgroup); \ + return (u64)blkcg->__VAR; \ +} + +SHOW_FUNCTION(weight); +#undef SHOW_FUNCTION + +static int +blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val) +{ + struct blkio_cgroup *blkcg; + struct blkio_group *blkg; + struct hlist_node *n; + struct blkio_policy_type *blkiop; + struct blkio_policy_node *pn; + + if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX) + return -EINVAL; + + blkcg = cgroup_to_blkio_cgroup(cgroup); + spin_lock(&blkio_list_lock); + spin_lock_irq(&blkcg->lock); + blkcg->weight = (unsigned int)val; + + hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) { + pn = blkio_policy_search_node(blkcg, blkg->dev); + + if (pn) + continue; + + list_for_each_entry(blkiop, &blkio_list, list) + blkiop->ops.blkio_update_group_weight_fn(blkg, + blkcg->weight); + } + spin_unlock_irq(&blkcg->lock); + spin_unlock(&blkio_list_lock); + return 0; +} + +static int +blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val) +{ + struct blkio_cgroup *blkcg; + struct blkio_group *blkg; + struct blkio_group_stats *stats; + struct hlist_node *n; + uint64_t queued[BLKIO_STAT_TOTAL]; + int i; +#ifdef CONFIG_DEBUG_BLK_CGROUP + bool idling, waiting, empty; + unsigned long long now = sched_clock(); +#endif + + blkcg = cgroup_to_blkio_cgroup(cgroup); + spin_lock_irq(&blkcg->lock); + hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) { + spin_lock(&blkg->stats_lock); + stats = &blkg->stats; +#ifdef CONFIG_DEBUG_BLK_CGROUP + idling = blkio_blkg_idling(stats); + waiting = blkio_blkg_waiting(stats); + empty = blkio_blkg_empty(stats); +#endif + for (i = 0; i < BLKIO_STAT_TOTAL; i++) + queued[i] = 
stats->stat_arr[BLKIO_STAT_QUEUED][i]; + memset(stats, 0, sizeof(struct blkio_group_stats)); + for (i = 0; i < BLKIO_STAT_TOTAL; i++) + stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i]; +#ifdef CONFIG_DEBUG_BLK_CGROUP + if (idling) { + blkio_mark_blkg_idling(stats); + stats->start_idle_time = now; + } + if (waiting) { + blkio_mark_blkg_waiting(stats); + stats->start_group_wait_time = now; + } + if (empty) { + blkio_mark_blkg_empty(stats); + stats->start_empty_time = now; + } +#endif + spin_unlock(&blkg->stats_lock); + } + spin_unlock_irq(&blkcg->lock); + return 0; +} + +static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str, + int chars_left, bool diskname_only) +{ + snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev)); + chars_left -= strlen(str); + if (chars_left <= 0) { + printk(KERN_WARNING + "Possibly incorrect cgroup stat display format"); + return; + } + if (diskname_only) + return; + switch (type) { + case BLKIO_STAT_READ: + strlcat(str, " Read", chars_left); + break; + case BLKIO_STAT_WRITE: + strlcat(str, " Write", chars_left); + break; + case BLKIO_STAT_SYNC: + strlcat(str, " Sync", chars_left); + break; + case BLKIO_STAT_ASYNC: + strlcat(str, " Async", chars_left); + break; + case BLKIO_STAT_TOTAL: + strlcat(str, " Total", chars_left); + break; + default: + strlcat(str, " Invalid", chars_left); + } +} + +static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val, + struct cgroup_map_cb *cb, dev_t dev) +{ + blkio_get_key_name(0, dev, str, chars_left, true); + cb->fill(cb, str, val); + return val; +} + +/* This should be called with blkg->stats_lock held */ +static uint64_t blkio_get_stat(struct blkio_group *blkg, + struct cgroup_map_cb *cb, dev_t dev, enum stat_type type) +{ + uint64_t disk_total; + char key_str[MAX_KEY_LEN]; + enum stat_sub_type sub_type; + + if (type == BLKIO_STAT_TIME) + return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, + blkg->stats.time, cb, dev); + if (type == BLKIO_STAT_SECTORS) + return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, + blkg->stats.sectors, cb, dev); +#ifdef CONFIG_DEBUG_BLK_CGROUP + if (type == BLKIO_STAT_AVG_QUEUE_SIZE) { + uint64_t sum = blkg->stats.avg_queue_size_sum; + uint64_t samples = blkg->stats.avg_queue_size_samples; + if (samples) + do_div(sum, samples); + else + sum = 0; + return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev); + } + if (type == BLKIO_STAT_GROUP_WAIT_TIME) + return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, + blkg->stats.group_wait_time, cb, dev); + if (type == BLKIO_STAT_IDLE_TIME) + return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, + blkg->stats.idle_time, cb, dev); + if (type == BLKIO_STAT_EMPTY_TIME) + return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, + blkg->stats.empty_time, cb, dev); + if (type == BLKIO_STAT_DEQUEUE) + return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, + blkg->stats.dequeue, cb, dev); +#endif + + for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL; + sub_type++) { + blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false); + cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]); + } + disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] + + blkg->stats.stat_arr[type][BLKIO_STAT_WRITE]; + blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false); + cb->fill(cb, key_str, disk_total); + return disk_total; +} + +#define SHOW_FUNCTION_PER_GROUP(__VAR, type, show_total) \ +static int blkiocg_##__VAR##_read(struct cgroup *cgroup, \ + struct cftype *cftype, struct cgroup_map_cb *cb) \ +{ \ + struct blkio_cgroup *blkcg; 
\ + struct blkio_group *blkg; \ + struct hlist_node *n; \ + uint64_t cgroup_total = 0; \ + \ + if (!cgroup_lock_live_group(cgroup)) \ + return -ENODEV; \ + \ + blkcg = cgroup_to_blkio_cgroup(cgroup); \ + rcu_read_lock(); \ + hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\ + if (blkg->dev) { \ + spin_lock_irq(&blkg->stats_lock); \ + cgroup_total += blkio_get_stat(blkg, cb, \ + blkg->dev, type); \ + spin_unlock_irq(&blkg->stats_lock); \ + } \ + } \ + if (show_total) \ + cb->fill(cb, "Total", cgroup_total); \ + rcu_read_unlock(); \ + cgroup_unlock(); \ + return 0; \ +} + +SHOW_FUNCTION_PER_GROUP(time, BLKIO_STAT_TIME, 0); +SHOW_FUNCTION_PER_GROUP(sectors, BLKIO_STAT_SECTORS, 0); +SHOW_FUNCTION_PER_GROUP(io_service_bytes, BLKIO_STAT_SERVICE_BYTES, 1); +SHOW_FUNCTION_PER_GROUP(io_serviced, BLKIO_STAT_SERVICED, 1); +SHOW_FUNCTION_PER_GROUP(io_service_time, BLKIO_STAT_SERVICE_TIME, 1); +SHOW_FUNCTION_PER_GROUP(io_wait_time, BLKIO_STAT_WAIT_TIME, 1); +SHOW_FUNCTION_PER_GROUP(io_merged, BLKIO_STAT_MERGED, 1); +SHOW_FUNCTION_PER_GROUP(io_queued, BLKIO_STAT_QUEUED, 1); +#ifdef CONFIG_DEBUG_BLK_CGROUP +SHOW_FUNCTION_PER_GROUP(dequeue, BLKIO_STAT_DEQUEUE, 0); +SHOW_FUNCTION_PER_GROUP(avg_queue_size, BLKIO_STAT_AVG_QUEUE_SIZE, 0); +SHOW_FUNCTION_PER_GROUP(group_wait_time, BLKIO_STAT_GROUP_WAIT_TIME, 0); +SHOW_FUNCTION_PER_GROUP(idle_time, BLKIO_STAT_IDLE_TIME, 0); +SHOW_FUNCTION_PER_GROUP(empty_time, BLKIO_STAT_EMPTY_TIME, 0); +#endif +#undef SHOW_FUNCTION_PER_GROUP + +static int blkio_check_dev_num(dev_t dev) +{ + int part = 0; + struct gendisk *disk; + + disk = get_gendisk(dev, &part); + if (!disk || part) + return -ENODEV; + + return 0; +} + +static int blkio_policy_parse_and_set(char *buf, + struct blkio_policy_node *newpn) +{ + char *s[4], *p, *major_s = NULL, *minor_s = NULL; + int ret; + unsigned long major, minor, temp; + int i = 0; + dev_t dev; + + memset(s, 0, sizeof(s)); + + while ((p = strsep(&buf, " ")) != NULL) { + if (!*p) + continue; + + s[i++] = p; + + /* Prevent from inputing too many things */ + if (i == 3) + break; + } + + if (i != 2) + return -EINVAL; + + p = strsep(&s[0], ":"); + if (p != NULL) + major_s = p; + else + return -EINVAL; + + minor_s = s[0]; + if (!minor_s) + return -EINVAL; + + ret = strict_strtoul(major_s, 10, &major); + if (ret) + return -EINVAL; + + ret = strict_strtoul(minor_s, 10, &minor); + if (ret) + return -EINVAL; + + dev = MKDEV(major, minor); + + ret = blkio_check_dev_num(dev); + if (ret) + return ret; + + newpn->dev = dev; + + if (s[1] == NULL) + return -EINVAL; + + ret = strict_strtoul(s[1], 10, &temp); + if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) || + temp > BLKIO_WEIGHT_MAX) + return -EINVAL; + + newpn->weight = temp; + + return 0; +} + +unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg, + dev_t dev) +{ + struct blkio_policy_node *pn; + + pn = blkio_policy_search_node(blkcg, dev); + if (pn) + return pn->weight; + else + return blkcg->weight; +} +EXPORT_SYMBOL_GPL(blkcg_get_weight); + + +static int blkiocg_weight_device_write(struct cgroup *cgrp, struct cftype *cft, + const char *buffer) +{ + int ret = 0; + char *buf; + struct blkio_policy_node *newpn, *pn; + struct blkio_cgroup *blkcg; + struct blkio_group *blkg; + int keep_newpn = 0; + struct hlist_node *n; + struct blkio_policy_type *blkiop; + + buf = kstrdup(buffer, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + newpn = kzalloc(sizeof(*newpn), GFP_KERNEL); + if (!newpn) { + ret = -ENOMEM; + goto free_buf; + } + + ret = blkio_policy_parse_and_set(buf, newpn); + if (ret) 
+ goto free_newpn; + + blkcg = cgroup_to_blkio_cgroup(cgrp); + + spin_lock_irq(&blkcg->lock); + + pn = blkio_policy_search_node(blkcg, newpn->dev); + if (!pn) { + if (newpn->weight != 0) { + blkio_policy_insert_node(blkcg, newpn); + keep_newpn = 1; + } + spin_unlock_irq(&blkcg->lock); + goto update_io_group; + } + + if (newpn->weight == 0) { + /* weight == 0 means deleteing a specific weight */ + blkio_policy_delete_node(pn); + spin_unlock_irq(&blkcg->lock); + goto update_io_group; + } + spin_unlock_irq(&blkcg->lock); + + pn->weight = newpn->weight; + +update_io_group: + /* update weight for each cfqg */ + spin_lock(&blkio_list_lock); + spin_lock_irq(&blkcg->lock); + + hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) { + if (newpn->dev == blkg->dev) { + list_for_each_entry(blkiop, &blkio_list, list) + blkiop->ops.blkio_update_group_weight_fn(blkg, + newpn->weight ? + newpn->weight : + blkcg->weight); + } + } + + spin_unlock_irq(&blkcg->lock); + spin_unlock(&blkio_list_lock); + +free_newpn: + if (!keep_newpn) + kfree(newpn); +free_buf: + kfree(buf); + return ret; +} + +static int blkiocg_weight_device_read(struct cgroup *cgrp, struct cftype *cft, + struct seq_file *m) +{ + struct blkio_cgroup *blkcg; + struct blkio_policy_node *pn; + + seq_printf(m, "dev\tweight\n"); + + blkcg = cgroup_to_blkio_cgroup(cgrp); + if (!list_empty(&blkcg->policy_list)) { + spin_lock_irq(&blkcg->lock); + list_for_each_entry(pn, &blkcg->policy_list, node) { + seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev), + MINOR(pn->dev), pn->weight); + } + spin_unlock_irq(&blkcg->lock); + } + + return 0; +} + +struct cftype blkio_files[] = { + { + .name = "weight_device", + .read_seq_string = blkiocg_weight_device_read, + .write_string = blkiocg_weight_device_write, + .max_write_len = 256, + }, + { + .name = "weight", + .read_u64 = blkiocg_weight_read, + .write_u64 = blkiocg_weight_write, + }, + { + .name = "time", + .read_map = blkiocg_time_read, + }, + { + .name = "sectors", + .read_map = blkiocg_sectors_read, + }, + { + .name = "io_service_bytes", + .read_map = blkiocg_io_service_bytes_read, + }, + { + .name = "io_serviced", + .read_map = blkiocg_io_serviced_read, + }, + { + .name = "io_service_time", + .read_map = blkiocg_io_service_time_read, + }, + { + .name = "io_wait_time", + .read_map = blkiocg_io_wait_time_read, + }, + { + .name = "io_merged", + .read_map = blkiocg_io_merged_read, + }, + { + .name = "io_queued", + .read_map = blkiocg_io_queued_read, + }, + { + .name = "reset_stats", + .write_u64 = blkiocg_reset_stats, + }, +#ifdef CONFIG_DEBUG_BLK_CGROUP + { + .name = "avg_queue_size", + .read_map = blkiocg_avg_queue_size_read, + }, + { + .name = "group_wait_time", + .read_map = blkiocg_group_wait_time_read, + }, + { + .name = "idle_time", + .read_map = blkiocg_idle_time_read, + }, + { + .name = "empty_time", + .read_map = blkiocg_empty_time_read, + }, + { + .name = "dequeue", + .read_map = blkiocg_dequeue_read, + }, +#endif +}; + +static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup) +{ + return cgroup_add_files(cgroup, subsys, blkio_files, + ARRAY_SIZE(blkio_files)); +} + +static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup) +{ + struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup); + unsigned long flags; + struct blkio_group *blkg; + void *key; + struct blkio_policy_type *blkiop; + struct blkio_policy_node *pn, *pntmp; + + rcu_read_lock(); + do { + spin_lock_irqsave(&blkcg->lock, flags); + + if (hlist_empty(&blkcg->blkg_list)) { + 
spin_unlock_irqrestore(&blkcg->lock, flags); + break; + } + + blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group, + blkcg_node); + key = rcu_dereference(blkg->key); + __blkiocg_del_blkio_group(blkg); + + spin_unlock_irqrestore(&blkcg->lock, flags); + + /* + * This blkio_group is being unlinked as associated cgroup is + * going away. Let all the IO controlling policies know about + * this event. Currently this is static call to one io + * controlling policy. Once we have more policies in place, we + * need some dynamic registration of callback function. + */ + spin_lock(&blkio_list_lock); + list_for_each_entry(blkiop, &blkio_list, list) + blkiop->ops.blkio_unlink_group_fn(key, blkg); + spin_unlock(&blkio_list_lock); + } while (1); + + list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) { + blkio_policy_delete_node(pn); + kfree(pn); + } + + free_css_id(&blkio_subsys, &blkcg->css); + rcu_read_unlock(); + if (blkcg != &blkio_root_cgroup) + kfree(blkcg); +} + +static struct cgroup_subsys_state * +blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup) +{ + struct blkio_cgroup *blkcg; + struct cgroup *parent = cgroup->parent; + + if (!parent) { + blkcg = &blkio_root_cgroup; + goto done; + } + + /* Currently we do not support hierarchy deeper than two level (0,1) */ + if (parent != cgroup->top_cgroup) + return ERR_PTR(-EPERM); + + blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL); + if (!blkcg) + return ERR_PTR(-ENOMEM); + + blkcg->weight = BLKIO_WEIGHT_DEFAULT; +done: + spin_lock_init(&blkcg->lock); + INIT_HLIST_HEAD(&blkcg->blkg_list); + + INIT_LIST_HEAD(&blkcg->policy_list); + return &blkcg->css; +} + +/* + * We cannot support shared io contexts, as we have no mean to support + * two tasks with the same ioc in two different groups without major rework + * of the main cic data structures. For now we allow a task to change + * its cgroup only if it's the only owner of its ioc. 
+ */ +static int blkiocg_can_attach(struct cgroup_subsys *subsys, + struct cgroup *cgroup, struct task_struct *tsk, + bool threadgroup) +{ + struct io_context *ioc; + int ret = 0; + + /* task_lock() is needed to avoid races with exit_io_context() */ + task_lock(tsk); + ioc = tsk->io_context; + if (ioc && atomic_read(&ioc->nr_tasks) > 1) + ret = -EINVAL; + task_unlock(tsk); + + return ret; +} + +static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup, + struct cgroup *prev, struct task_struct *tsk, + bool threadgroup) +{ + struct io_context *ioc; + + task_lock(tsk); + ioc = tsk->io_context; + if (ioc) + ioc->cgroup_changed = 1; + task_unlock(tsk); +} + +void blkio_policy_register(struct blkio_policy_type *blkiop) +{ + spin_lock(&blkio_list_lock); + list_add_tail(&blkiop->list, &blkio_list); + spin_unlock(&blkio_list_lock); +} +EXPORT_SYMBOL_GPL(blkio_policy_register); + +void blkio_policy_unregister(struct blkio_policy_type *blkiop) +{ + spin_lock(&blkio_list_lock); + list_del_init(&blkiop->list); + spin_unlock(&blkio_list_lock); +} +EXPORT_SYMBOL_GPL(blkio_policy_unregister); + +static int __init init_cgroup_blkio(void) +{ + return cgroup_load_subsys(&blkio_subsys); +} + +static void __exit exit_cgroup_blkio(void) +{ + cgroup_unload_subsys(&blkio_subsys); +} +#ifdef CONFIG_FAST_RESUME +beforeresume_initcall(init_cgroup_blkio); +#else +module_init(init_cgroup_blkio); +#endif +module_exit(exit_cgroup_blkio); +MODULE_LICENSE("GPL"); diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h new file mode 100644 index 00000000..2b866ec1 --- /dev/null +++ b/block/blk-cgroup.h @@ -0,0 +1,258 @@ +#ifndef _BLK_CGROUP_H +#define _BLK_CGROUP_H +/* + * Common Block IO controller cgroup interface + * + * Based on ideas and code from CFQ, CFS and BFQ: + * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk> + * + * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it> + * Paolo Valente <paolo.valente@unimore.it> + * + * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com> + * Nauman Rafique <nauman@google.com> + */ + +#include <linux/cgroup.h> + +#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE) + +#ifndef CONFIG_BLK_CGROUP +/* When blk-cgroup is a module, its subsys_id isn't a compile-time constant */ +extern struct cgroup_subsys blkio_subsys; +#define blkio_subsys_id blkio_subsys.subsys_id +#endif + +enum stat_type { + /* Total time spent (in ns) between request dispatch to the driver and + * request completion for IOs doen by this cgroup. This may not be + * accurate when NCQ is turned on. 
*/ + BLKIO_STAT_SERVICE_TIME = 0, + /* Total bytes transferred */ + BLKIO_STAT_SERVICE_BYTES, + /* Total IOs serviced, post merge */ + BLKIO_STAT_SERVICED, + /* Total time spent waiting in scheduler queue in ns */ + BLKIO_STAT_WAIT_TIME, + /* Number of IOs merged */ + BLKIO_STAT_MERGED, + /* Number of IOs queued up */ + BLKIO_STAT_QUEUED, + /* All the single valued stats go below this */ + BLKIO_STAT_TIME, + BLKIO_STAT_SECTORS, +#ifdef CONFIG_DEBUG_BLK_CGROUP + BLKIO_STAT_AVG_QUEUE_SIZE, + BLKIO_STAT_IDLE_TIME, + BLKIO_STAT_EMPTY_TIME, + BLKIO_STAT_GROUP_WAIT_TIME, + BLKIO_STAT_DEQUEUE +#endif +}; + +enum stat_sub_type { + BLKIO_STAT_READ = 0, + BLKIO_STAT_WRITE, + BLKIO_STAT_SYNC, + BLKIO_STAT_ASYNC, + BLKIO_STAT_TOTAL +}; + +/* blkg state flags */ +enum blkg_state_flags { + BLKG_waiting = 0, + BLKG_idling, + BLKG_empty, +}; + +struct blkio_cgroup { + struct cgroup_subsys_state css; + unsigned int weight; + spinlock_t lock; + struct hlist_head blkg_list; + struct list_head policy_list; /* list of blkio_policy_node */ +}; + +struct blkio_group_stats { + /* total disk time and nr sectors dispatched by this group */ + uint64_t time; + uint64_t sectors; + uint64_t stat_arr[BLKIO_STAT_QUEUED + 1][BLKIO_STAT_TOTAL]; +#ifdef CONFIG_DEBUG_BLK_CGROUP + /* Sum of number of IOs queued across all samples */ + uint64_t avg_queue_size_sum; + /* Count of samples taken for average */ + uint64_t avg_queue_size_samples; + /* How many times this group has been removed from service tree */ + unsigned long dequeue; + + /* Total time spent waiting for it to be assigned a timeslice. */ + uint64_t group_wait_time; + uint64_t start_group_wait_time; + + /* Time spent idling for this blkio_group */ + uint64_t idle_time; + uint64_t start_idle_time; + /* + * Total time when we have requests queued and do not contain the + * current active queue. 
+ */ + uint64_t empty_time; + uint64_t start_empty_time; + uint16_t flags; +#endif +}; + +struct blkio_group { + /* An rcu protected unique identifier for the group */ + void *key; + struct hlist_node blkcg_node; + unsigned short blkcg_id; + /* Store cgroup path */ + char path[128]; + /* The device MKDEV(major, minor), this group has been created for */ + dev_t dev; + + /* Need to serialize the stats in the case of reset/update */ + spinlock_t stats_lock; + struct blkio_group_stats stats; +}; + +struct blkio_policy_node { + struct list_head node; + dev_t dev; + unsigned int weight; +}; + +extern unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg, + dev_t dev); + +typedef void (blkio_unlink_group_fn) (void *key, struct blkio_group *blkg); +typedef void (blkio_update_group_weight_fn) (struct blkio_group *blkg, + unsigned int weight); + +struct blkio_policy_ops { + blkio_unlink_group_fn *blkio_unlink_group_fn; + blkio_update_group_weight_fn *blkio_update_group_weight_fn; +}; + +struct blkio_policy_type { + struct list_head list; + struct blkio_policy_ops ops; +}; + +/* Blkio controller policy registration */ +extern void blkio_policy_register(struct blkio_policy_type *); +extern void blkio_policy_unregister(struct blkio_policy_type *); + +static inline char *blkg_path(struct blkio_group *blkg) +{ + return blkg->path; +} + +#else + +struct blkio_group { +}; + +struct blkio_policy_type { +}; + +static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { } +static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { } + +static inline char *blkg_path(struct blkio_group *blkg) { return NULL; } + +#endif + +#define BLKIO_WEIGHT_MIN 100 +#define BLKIO_WEIGHT_MAX 1000 +#define BLKIO_WEIGHT_DEFAULT 500 + +#ifdef CONFIG_DEBUG_BLK_CGROUP +void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg); +void blkiocg_update_dequeue_stats(struct blkio_group *blkg, + unsigned long dequeue); +void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg); +void blkiocg_update_idle_time_stats(struct blkio_group *blkg); +void blkiocg_set_start_empty_time(struct blkio_group *blkg); + +#define BLKG_FLAG_FNS(name) \ +static inline void blkio_mark_blkg_##name( \ + struct blkio_group_stats *stats) \ +{ \ + stats->flags |= (1 << BLKG_##name); \ +} \ +static inline void blkio_clear_blkg_##name( \ + struct blkio_group_stats *stats) \ +{ \ + stats->flags &= ~(1 << BLKG_##name); \ +} \ +static inline int blkio_blkg_##name(struct blkio_group_stats *stats) \ +{ \ + return (stats->flags & (1 << BLKG_##name)) != 0; \ +} \ + +BLKG_FLAG_FNS(waiting) +BLKG_FLAG_FNS(idling) +BLKG_FLAG_FNS(empty) +#undef BLKG_FLAG_FNS +#else +static inline void blkiocg_update_avg_queue_size_stats( + struct blkio_group *blkg) {} +static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg, + unsigned long dequeue) {} +static inline void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg) +{} +static inline void blkiocg_update_idle_time_stats(struct blkio_group *blkg) {} +static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg) {} +#endif + +#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE) +extern struct blkio_cgroup blkio_root_cgroup; +extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup); +extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, + struct blkio_group *blkg, void *key, dev_t dev); +extern int blkiocg_del_blkio_group(struct blkio_group *blkg); +extern struct blkio_group *blkiocg_lookup_group(struct 
blkio_cgroup *blkcg, + void *key); +void blkiocg_update_timeslice_used(struct blkio_group *blkg, + unsigned long time); +void blkiocg_update_dispatch_stats(struct blkio_group *blkg, uint64_t bytes, + bool direction, bool sync); +void blkiocg_update_completion_stats(struct blkio_group *blkg, + uint64_t start_time, uint64_t io_start_time, bool direction, bool sync); +void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction, + bool sync); +void blkiocg_update_io_add_stats(struct blkio_group *blkg, + struct blkio_group *curr_blkg, bool direction, bool sync); +void blkiocg_update_io_remove_stats(struct blkio_group *blkg, + bool direction, bool sync); +#else +struct cgroup; +static inline struct blkio_cgroup * +cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; } + +static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, + struct blkio_group *blkg, void *key, dev_t dev) {} + +static inline int +blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; } + +static inline struct blkio_group * +blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key) { return NULL; } +static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg, + unsigned long time) {} +static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg, + uint64_t bytes, bool direction, bool sync) {} +static inline void blkiocg_update_completion_stats(struct blkio_group *blkg, + uint64_t start_time, uint64_t io_start_time, bool direction, + bool sync) {} +static inline void blkiocg_update_io_merged_stats(struct blkio_group *blkg, + bool direction, bool sync) {} +static inline void blkiocg_update_io_add_stats(struct blkio_group *blkg, + struct blkio_group *curr_blkg, bool direction, bool sync) {} +static inline void blkiocg_update_io_remove_stats(struct blkio_group *blkg, + bool direction, bool sync) {} +#endif +#endif /* _BLK_CGROUP_H */ diff --git a/block/blk-core.c b/block/blk-core.c new file mode 100644 index 00000000..32a1c123 --- /dev/null +++ b/block/blk-core.c @@ -0,0 +1,2598 @@ +/* + * Copyright (C) 1991, 1992 Linus Torvalds + * Copyright (C) 1994, Karl Keyte: Added support for disk statistics + * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE + * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de> + * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> + * - July2000 + * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001 + */ + +/* + * This handles all read/write requests to block devices + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/backing-dev.h> +#include <linux/bio.h> +#include <linux/blkdev.h> +#include <linux/highmem.h> +#include <linux/mm.h> +#include <linux/kernel_stat.h> +#include <linux/string.h> +#include <linux/init.h> +#include <linux/completion.h> +#include <linux/slab.h> +#include <linux/swap.h> +#include <linux/writeback.h> +#include <linux/task_io_accounting_ops.h> +#include <linux/fault-inject.h> + +#define CREATE_TRACE_POINTS +#include <trace/events/block.h> + +#include "blk.h" + +EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap); +EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap); +EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete); + +static int __make_request(struct request_queue *q, struct bio *bio); + +/* + * For the allocated request tables + */ +static struct kmem_cache *request_cachep; + +/* + * For queue allocation + */ +struct kmem_cache *blk_requestq_cachep; + +/* + * Controlling structure to kblockd + */ +static struct 
workqueue_struct *kblockd_workqueue; + +static void drive_stat_acct(struct request *rq, int new_io) +{ + struct hd_struct *part; + int rw = rq_data_dir(rq); + int cpu; + + if (!blk_do_io_stat(rq)) + return; + + cpu = part_stat_lock(); + part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq)); + + if (!new_io) + part_stat_inc(cpu, part, merges[rw]); + else { + part_round_stats(cpu, part); + part_inc_in_flight(part, rw); + } + + part_stat_unlock(); +} + +void blk_queue_congestion_threshold(struct request_queue *q) +{ + int nr; + + nr = q->nr_requests - (q->nr_requests / 8) + 1; + if (nr > q->nr_requests) + nr = q->nr_requests; + q->nr_congestion_on = nr; + + nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1; + if (nr < 1) + nr = 1; + q->nr_congestion_off = nr; +} + +/** + * blk_get_backing_dev_info - get the address of a queue's backing_dev_info + * @bdev: device + * + * Locates the passed device's request queue and returns the address of its + * backing_dev_info + * + * Will return NULL if the request queue cannot be located. + */ +struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev) +{ + struct backing_dev_info *ret = NULL; + struct request_queue *q = bdev_get_queue(bdev); + + if (q) + ret = &q->backing_dev_info; + return ret; +} +EXPORT_SYMBOL(blk_get_backing_dev_info); + +void blk_rq_init(struct request_queue *q, struct request *rq) +{ + memset(rq, 0, sizeof(*rq)); + + INIT_LIST_HEAD(&rq->queuelist); + INIT_LIST_HEAD(&rq->timeout_list); + rq->cpu = -1; + rq->q = q; + rq->__sector = (sector_t) -1; + INIT_HLIST_NODE(&rq->hash); + RB_CLEAR_NODE(&rq->rb_node); + rq->cmd = rq->__cmd; + rq->cmd_len = BLK_MAX_CDB; + rq->tag = -1; + rq->ref_count = 1; + rq->start_time = jiffies; + set_start_time_ns(rq); +} +EXPORT_SYMBOL(blk_rq_init); + +static void req_bio_endio(struct request *rq, struct bio *bio, + unsigned int nbytes, int error) +{ + struct request_queue *q = rq->q; + + if (&q->bar_rq != rq) { + if (error) + clear_bit(BIO_UPTODATE, &bio->bi_flags); + else if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) + error = -EIO; + + if (unlikely(nbytes > bio->bi_size)) { + printk(KERN_ERR "%s: want %u bytes done, %u left\n", + __func__, nbytes, bio->bi_size); + nbytes = bio->bi_size; + } + + if (unlikely(rq->cmd_flags & REQ_QUIET)) + set_bit(BIO_QUIET, &bio->bi_flags); + + bio->bi_size -= nbytes; + bio->bi_sector += (nbytes >> 9); + + if (bio_integrity(bio)) + bio_integrity_advance(bio, nbytes); + + if (bio->bi_size == 0) + bio_endio(bio, error); + } else { + + /* + * Okay, this is the barrier request in progress, just + * record the error; + */ + if (error && !q->orderr) + q->orderr = error; + } +} + +void blk_dump_rq_flags(struct request *rq, char *msg) +{ + int bit; + + printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg, + rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type, + rq->cmd_flags); + + printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n", + (unsigned long long)blk_rq_pos(rq), + blk_rq_sectors(rq), blk_rq_cur_sectors(rq)); + printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n", + rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq)); + + if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { + printk(KERN_INFO " cdb: "); + for (bit = 0; bit < BLK_MAX_CDB; bit++) + printk("%02x ", rq->cmd[bit]); + printk("\n"); + } +} +EXPORT_SYMBOL(blk_dump_rq_flags); + +/* + * "plug" the device if there are no outstanding requests: this will + * force the transfer to start only after we have put all the requests + * on the list. 
+ * + * This is called with interrupts off and no requests on the queue and + * with the queue lock held. + */ +void blk_plug_device(struct request_queue *q) +{ + WARN_ON(!irqs_disabled()); + + /* + * don't plug a stopped queue, it must be paired with blk_start_queue() + * which will restart the queueing + */ + if (blk_queue_stopped(q)) + return; + + if (!queue_flag_test_and_set(QUEUE_FLAG_PLUGGED, q)) { + mod_timer(&q->unplug_timer, jiffies + q->unplug_delay); + trace_block_plug(q); + } +} +EXPORT_SYMBOL(blk_plug_device); + +/** + * blk_plug_device_unlocked - plug a device without queue lock held + * @q: The &struct request_queue to plug + * + * Description: + * Like @blk_plug_device(), but grabs the queue lock and disables + * interrupts. + **/ +void blk_plug_device_unlocked(struct request_queue *q) +{ + unsigned long flags; + + spin_lock_irqsave(q->queue_lock, flags); + blk_plug_device(q); + spin_unlock_irqrestore(q->queue_lock, flags); +} +EXPORT_SYMBOL(blk_plug_device_unlocked); + +/* + * remove the queue from the plugged list, if present. called with + * queue lock held and interrupts disabled. + */ +int blk_remove_plug(struct request_queue *q) +{ + WARN_ON(!irqs_disabled()); + + if (!queue_flag_test_and_clear(QUEUE_FLAG_PLUGGED, q)) + return 0; + + del_timer(&q->unplug_timer); + return 1; +} +EXPORT_SYMBOL(blk_remove_plug); + +/* + * remove the plug and let it rip.. + */ +void __generic_unplug_device(struct request_queue *q) +{ + if (unlikely(blk_queue_stopped(q))) + return; + if (!blk_remove_plug(q) && !blk_queue_nonrot(q)) + return; + + q->request_fn(q); +} + +/** + * generic_unplug_device - fire a request queue + * @q: The &struct request_queue in question + * + * Description: + * Linux uses plugging to build bigger requests queues before letting + * the device have at them. If a queue is plugged, the I/O scheduler + * is still adding and merging requests on the queue. Once the queue + * gets unplugged, the request_fn defined for the queue is invoked and + * transfers started. + **/ +void generic_unplug_device(struct request_queue *q) +{ + if (blk_queue_plugged(q)) { + spin_lock_irq(q->queue_lock); + __generic_unplug_device(q); + spin_unlock_irq(q->queue_lock); + } +} +EXPORT_SYMBOL(generic_unplug_device); + +static void blk_backing_dev_unplug(struct backing_dev_info *bdi, + struct page *page) +{ + struct request_queue *q = bdi->unplug_io_data; + + blk_unplug(q); +} + +void blk_unplug_work(struct work_struct *work) +{ + struct request_queue *q = + container_of(work, struct request_queue, unplug_work); + + trace_block_unplug_io(q); + q->unplug_fn(q); +} + +void blk_unplug_timeout(unsigned long data) +{ + struct request_queue *q = (struct request_queue *)data; + + trace_block_unplug_timer(q); + kblockd_schedule_work(q, &q->unplug_work); +} + +void blk_unplug(struct request_queue *q) +{ + /* + * devices don't necessarily have an ->unplug_fn defined + */ + if (q->unplug_fn) { + trace_block_unplug_io(q); + q->unplug_fn(q); + } +} +EXPORT_SYMBOL(blk_unplug); + +/** + * blk_start_queue - restart a previously stopped queue + * @q: The &struct request_queue in question + * + * Description: + * blk_start_queue() will clear the stop flag on the queue, and call + * the request_fn for the queue if it was in a stopped state when + * entered. Also see blk_stop_queue(). Queue lock must be held. 
+ **/ +void blk_start_queue(struct request_queue *q) +{ + WARN_ON(!irqs_disabled()); + + queue_flag_clear(QUEUE_FLAG_STOPPED, q); + __blk_run_queue(q); +} +EXPORT_SYMBOL(blk_start_queue); + +/** + * blk_stop_queue - stop a queue + * @q: The &struct request_queue in question + * + * Description: + * The Linux block layer assumes that a block driver will consume all + * entries on the request queue when the request_fn strategy is called. + * Often this will not happen, because of hardware limitations (queue + * depth settings). If a device driver gets a 'queue full' response, + * or if it simply chooses not to queue more I/O at one point, it can + * call this function to prevent the request_fn from being called until + * the driver has signalled it's ready to go again. This happens by calling + * blk_start_queue() to restart queue operations. Queue lock must be held. + **/ +void blk_stop_queue(struct request_queue *q) +{ + blk_remove_plug(q); + queue_flag_set(QUEUE_FLAG_STOPPED, q); +} +EXPORT_SYMBOL(blk_stop_queue); + +/** + * blk_sync_queue - cancel any pending callbacks on a queue + * @q: the queue + * + * Description: + * The block layer may perform asynchronous callback activity + * on a queue, such as calling the unplug function after a timeout. + * A block device may call blk_sync_queue to ensure that any + * such activity is cancelled, thus allowing it to release resources + * that the callbacks might use. The caller must already have made sure + * that its ->make_request_fn will not re-add plugging prior to calling + * this function. + * + */ +void blk_sync_queue(struct request_queue *q) +{ + del_timer_sync(&q->unplug_timer); + del_timer_sync(&q->timeout); + cancel_work_sync(&q->unplug_work); +} +EXPORT_SYMBOL(blk_sync_queue); + +/** + * __blk_run_queue - run a single device queue + * @q: The queue to run + * + * Description: + * See @blk_run_queue. This variant must be called with the queue lock + * held and interrupts disabled. + * + */ +void __blk_run_queue(struct request_queue *q) +{ + blk_remove_plug(q); + + if (unlikely(blk_queue_stopped(q))) + return; + + if (elv_queue_empty(q)) + return; + + /* + * Only recurse once to avoid overrunning the stack, let the unplug + * handling reinvoke the handler shortly if we already got there. + */ + if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) { + q->request_fn(q); + queue_flag_clear(QUEUE_FLAG_REENTER, q); + } else { + queue_flag_set(QUEUE_FLAG_PLUGGED, q); + kblockd_schedule_work(q, &q->unplug_work); + } +} +EXPORT_SYMBOL(__blk_run_queue); + +/** + * blk_run_queue - run a single device queue + * @q: The queue to run + * + * Description: + * Invoke request handling on this queue, if it has pending work to do. + * May be used to restart queueing when a request has completed. + */ +void blk_run_queue(struct request_queue *q) +{ + unsigned long flags; + + spin_lock_irqsave(q->queue_lock, flags); + __blk_run_queue(q); + spin_unlock_irqrestore(q->queue_lock, flags); +} +EXPORT_SYMBOL(blk_run_queue); + +void blk_put_queue(struct request_queue *q) +{ + kobject_put(&q->kobj); +} + +void blk_cleanup_queue(struct request_queue *q) +{ + /* + * We know we have process context here, so we can be a little + * cautious and ensure that pending block actions on this device + * are done before moving on. Going into this function, we should + * not have processes doing IO to this device. 
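/*
 * Illustrative sketch, not part of this patch: the usual way a low-level
 * driver pairs blk_stop_queue() with blk_start_queue().  The request_fn
 * stops the queue when the controller is full; the completion interrupt
 * restarts it.  struct mydev and mydev_reap_completions() are hypothetical.
 */
struct mydev {
	struct request_queue	*queue;
	/* ... controller state ... */
};

static irqreturn_t mydev_irq(int irq, void *data)
{
	struct mydev *dev = data;
	unsigned long flags;

	mydev_reap_completions(dev);		/* hypothetical helper */

	/* a command slot is free again; interrupts are off under this
	 * lock, which blk_start_queue() requires */
	spin_lock_irqsave(dev->queue->queue_lock, flags);
	blk_start_queue(dev->queue);
	spin_unlock_irqrestore(dev->queue->queue_lock, flags);

	return IRQ_HANDLED;
}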
+ */ + blk_sync_queue(q); + + del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer); + mutex_lock(&q->sysfs_lock); + queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q); + mutex_unlock(&q->sysfs_lock); + + if (q->elevator) + elevator_exit(q->elevator); + + blk_put_queue(q); +} +EXPORT_SYMBOL(blk_cleanup_queue); + +static int blk_init_free_list(struct request_queue *q) +{ + struct request_list *rl = &q->rq; + + if (unlikely(rl->rq_pool)) + return 0; + + rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0; + rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0; + rl->elvpriv = 0; + init_waitqueue_head(&rl->wait[BLK_RW_SYNC]); + init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]); + + rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, + mempool_free_slab, request_cachep, q->node); + + if (!rl->rq_pool) + return -ENOMEM; + + return 0; +} + +struct request_queue *blk_alloc_queue(gfp_t gfp_mask) +{ + return blk_alloc_queue_node(gfp_mask, -1); +} +EXPORT_SYMBOL(blk_alloc_queue); + +struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) +{ + struct request_queue *q; + int err; + + q = kmem_cache_alloc_node(blk_requestq_cachep, + gfp_mask | __GFP_ZERO, node_id); + if (!q) + return NULL; + + q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug; + q->backing_dev_info.unplug_io_data = q; + q->backing_dev_info.ra_pages = + (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; + q->backing_dev_info.state = 0; + q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY; + q->backing_dev_info.name = "block"; + + err = bdi_init(&q->backing_dev_info); + if (err) { + kmem_cache_free(blk_requestq_cachep, q); + return NULL; + } + + setup_timer(&q->backing_dev_info.laptop_mode_wb_timer, + laptop_mode_timer_fn, (unsigned long) q); + init_timer(&q->unplug_timer); + setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q); + INIT_LIST_HEAD(&q->timeout_list); + INIT_WORK(&q->unplug_work, blk_unplug_work); + + kobject_init(&q->kobj, &blk_queue_ktype); + + mutex_init(&q->sysfs_lock); + spin_lock_init(&q->__queue_lock); + + return q; +} +EXPORT_SYMBOL(blk_alloc_queue_node); + +/** + * blk_init_queue - prepare a request queue for use with a block device + * @rfn: The function to be called to process requests that have been + * placed on the queue. + * @lock: Request queue spin lock + * + * Description: + * If a block device wishes to use the standard request handling procedures, + * which sorts requests and coalesces adjacent requests, then it must + * call blk_init_queue(). The function @rfn will be called when there + * are requests on the queue that need to be processed. If the device + * supports plugging, then @rfn may not be called immediately when requests + * are available on the queue, but may be called at some time later instead. + * Plugged queues are generally unplugged when a buffer belonging to one + * of the requests on the queue is needed, or due to memory pressure. + * + * @rfn is not required, or even expected, to remove all requests off the + * queue, but only as many as it can handle at a time. If it does leave + * requests on the queue, it is responsible for arranging that the requests + * get dealt with eventually. + * + * The queue spin lock must be held while manipulating the requests on the + * request queue; this lock will be taken also from interrupt context, so irq + * disabling is needed for it. + * + * Function returns a pointer to the initialized request queue, or %NULL if + * it didn't succeed. 
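/*
 * Illustrative sketch, not part of this patch: the blk_init_queue() /
 * blk_cleanup_queue() pairing described here, as seen from a simple
 * request-based driver.  mydev_request_fn() is a hypothetical strategy
 * routine; the limits are arbitrary example values.
 */
static DEFINE_SPINLOCK(mydev_queue_lock);

static struct request_queue *mydev_init_queue(void)
{
	struct request_queue *q;

	q = blk_init_queue(mydev_request_fn, &mydev_queue_lock);
	if (!q)
		return NULL;

	blk_queue_logical_block_size(q, 512);
	blk_queue_max_hw_sectors(q, 256);	/* 128 KiB per request */
	return q;
}

static void mydev_exit_queue(struct request_queue *q)
{
	blk_cleanup_queue(q);		/* see the pairing note below */
}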
+ * + * Note: + * blk_init_queue() must be paired with a blk_cleanup_queue() call + * when the block device is deactivated (such as at module unload). + **/ + +struct request_queue *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock) +{ + return blk_init_queue_node(rfn, lock, -1); +} +EXPORT_SYMBOL(blk_init_queue); + +struct request_queue * +blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) +{ + struct request_queue *uninit_q, *q; + + uninit_q = blk_alloc_queue_node(GFP_KERNEL, node_id); + if (!uninit_q) + return NULL; + + q = blk_init_allocated_queue_node(uninit_q, rfn, lock, node_id); + if (!q) + blk_cleanup_queue(uninit_q); + + return q; +} +EXPORT_SYMBOL(blk_init_queue_node); + +struct request_queue * +blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn, + spinlock_t *lock) +{ + return blk_init_allocated_queue_node(q, rfn, lock, -1); +} +EXPORT_SYMBOL(blk_init_allocated_queue); + +struct request_queue * +blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn, + spinlock_t *lock, int node_id) +{ + if (!q) + return NULL; + + q->node = node_id; + if (blk_init_free_list(q)) + return NULL; + + q->request_fn = rfn; + q->prep_rq_fn = NULL; + q->unprep_rq_fn = NULL; + q->unplug_fn = generic_unplug_device; + q->queue_flags = QUEUE_FLAG_DEFAULT; + q->queue_lock = lock; + + /* + * This also sets hw/phys segments, boundary and size + */ + blk_queue_make_request(q, __make_request); + + q->sg_reserved_size = INT_MAX; + + /* + * all done + */ + if (!elevator_init(q, NULL)) { + blk_queue_congestion_threshold(q); + return q; + } + + return NULL; +} +EXPORT_SYMBOL(blk_init_allocated_queue_node); + +int blk_get_queue(struct request_queue *q) +{ + if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) { + kobject_get(&q->kobj); + return 0; + } + + return 1; +} + +static inline void blk_free_request(struct request_queue *q, struct request *rq) +{ + if (rq->cmd_flags & REQ_ELVPRIV) + elv_put_request(q, rq); + mempool_free(rq, q->rq.rq_pool); +} + +static struct request * +blk_alloc_request(struct request_queue *q, int flags, int priv, gfp_t gfp_mask) +{ + struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); + + if (!rq) + return NULL; + + blk_rq_init(q, rq); + + rq->cmd_flags = flags | REQ_ALLOCED; + + if (priv) { + if (unlikely(elv_set_request(q, rq, gfp_mask))) { + mempool_free(rq, q->rq.rq_pool); + return NULL; + } + rq->cmd_flags |= REQ_ELVPRIV; + } + + return rq; +} + +/* + * ioc_batching returns true if the ioc is a valid batching request and + * should be given priority access to a request. + */ +static inline int ioc_batching(struct request_queue *q, struct io_context *ioc) +{ + if (!ioc) + return 0; + + /* + * Make sure the process is able to allocate at least 1 request + * even if the batch times out, otherwise we could theoretically + * lose wakeups. + */ + return ioc->nr_batch_requests == q->nr_batching || + (ioc->nr_batch_requests > 0 + && time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME)); +} + +/* + * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This + * will cause the process to be a "batcher" on all queues in the system. This + * is the behaviour we want though - once it gets a wakeup it should be given + * a nice run. 
+ */ +static void ioc_set_batching(struct request_queue *q, struct io_context *ioc) +{ + if (!ioc || ioc_batching(q, ioc)) + return; + + ioc->nr_batch_requests = q->nr_batching; + ioc->last_waited = jiffies; +} + +static void __freed_request(struct request_queue *q, int sync) +{ + struct request_list *rl = &q->rq; + + if (rl->count[sync] < queue_congestion_off_threshold(q)) + blk_clear_queue_congested(q, sync); + + if (rl->count[sync] + 1 <= q->nr_requests) { + if (waitqueue_active(&rl->wait[sync])) + wake_up(&rl->wait[sync]); + + blk_clear_queue_full(q, sync); + } +} + +/* + * A request has just been released. Account for it, update the full and + * congestion status, wake up any waiters. Called under q->queue_lock. + */ +static void freed_request(struct request_queue *q, int sync, int priv) +{ + struct request_list *rl = &q->rq; + + rl->count[sync]--; + if (priv) + rl->elvpriv--; + + __freed_request(q, sync); + + if (unlikely(rl->starved[sync ^ 1])) + __freed_request(q, sync ^ 1); +} + +/* + * Get a free request, queue_lock must be held. + * Returns NULL on failure, with queue_lock held. + * Returns !NULL on success, with queue_lock *not held*. + */ +static struct request *get_request(struct request_queue *q, int rw_flags, + struct bio *bio, gfp_t gfp_mask) +{ + struct request *rq = NULL; + struct request_list *rl = &q->rq; + struct io_context *ioc = NULL; + const bool is_sync = rw_is_sync(rw_flags) != 0; + int may_queue, priv; + + may_queue = elv_may_queue(q, rw_flags); + if (may_queue == ELV_MQUEUE_NO) + goto rq_starved; + + if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) { + if (rl->count[is_sync]+1 >= q->nr_requests) { + ioc = current_io_context(GFP_ATOMIC, q->node); + /* + * The queue will fill after this allocation, so set + * it as full, and mark this process as "batching". + * This process will be allowed to complete a batch of + * requests, others will be blocked. + */ + if (!blk_queue_full(q, is_sync)) { + ioc_set_batching(q, ioc); + blk_set_queue_full(q, is_sync); + } else { + if (may_queue != ELV_MQUEUE_MUST + && !ioc_batching(q, ioc)) { + /* + * The queue is full and the allocating + * process is not a "batcher", and not + * exempted by the IO scheduler + */ + goto out; + } + } + } + blk_set_queue_congested(q, is_sync); + } + + /* + * Only allow batching queuers to allocate up to 50% over the defined + * limit of requests, otherwise we could have thousands of requests + * allocated with any setting of ->nr_requests + */ + if (rl->count[is_sync] >= (3 * q->nr_requests / 2)) + goto out; + + rl->count[is_sync]++; + rl->starved[is_sync] = 0; + + priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); + if (priv) + rl->elvpriv++; + + if (blk_queue_io_stat(q)) + rw_flags |= REQ_IO_STAT; + spin_unlock_irq(q->queue_lock); + + rq = blk_alloc_request(q, rw_flags, priv, gfp_mask); + if (unlikely(!rq)) { + /* + * Allocation failed presumably due to memory. Undo anything + * we might have messed up. + * + * Allocating task should really be put onto the front of the + * wait queue, but this is pretty rare. + */ + spin_lock_irq(q->queue_lock); + freed_request(q, is_sync, priv); + + /* + * in the very unlikely event that allocation failed and no + * requests for this direction was pending, mark us starved + * so that freeing of a request in the other direction will + * notice us. 
another possible fix would be to split the + * rq mempool into READ and WRITE + */ +rq_starved: + if (unlikely(rl->count[is_sync] == 0)) + rl->starved[is_sync] = 1; + + goto out; + } + + /* + * ioc may be NULL here, and ioc_batching will be false. That's + * OK, if the queue is under the request limit then requests need + * not count toward the nr_batch_requests limit. There will always + * be some limit enforced by BLK_BATCH_TIME. + */ + if (ioc_batching(q, ioc)) + ioc->nr_batch_requests--; + + trace_block_getrq(q, bio, rw_flags & 1); +out: + return rq; +} + +/* + * No available requests for this queue, unplug the device and wait for some + * requests to become available. + * + * Called with q->queue_lock held, and returns with it unlocked. + */ +static struct request *get_request_wait(struct request_queue *q, int rw_flags, + struct bio *bio) +{ + const bool is_sync = rw_is_sync(rw_flags) != 0; + struct request *rq; + + rq = get_request(q, rw_flags, bio, GFP_NOIO); + while (!rq) { + DEFINE_WAIT(wait); + struct io_context *ioc; + struct request_list *rl = &q->rq; + + prepare_to_wait_exclusive(&rl->wait[is_sync], &wait, + TASK_UNINTERRUPTIBLE); + + trace_block_sleeprq(q, bio, rw_flags & 1); + + __generic_unplug_device(q); + spin_unlock_irq(q->queue_lock); + io_schedule(); + + /* + * After sleeping, we become a "batching" process and + * will be able to allocate at least one request, and + * up to a big batch of them for a small period time. + * See ioc_batching, ioc_set_batching + */ + ioc = current_io_context(GFP_NOIO, q->node); + ioc_set_batching(q, ioc); + + spin_lock_irq(q->queue_lock); + finish_wait(&rl->wait[is_sync], &wait); + + rq = get_request(q, rw_flags, bio, GFP_NOIO); + }; + + return rq; +} + +struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask) +{ + struct request *rq; + + BUG_ON(rw != READ && rw != WRITE); + + spin_lock_irq(q->queue_lock); + if (gfp_mask & __GFP_WAIT) { + rq = get_request_wait(q, rw, NULL); + } else { + rq = get_request(q, rw, NULL, gfp_mask); + if (!rq) + spin_unlock_irq(q->queue_lock); + } + /* q->queue_lock is unlocked at this point */ + + return rq; +} +EXPORT_SYMBOL(blk_get_request); + +/** + * blk_make_request - given a bio, allocate a corresponding struct request. + * @q: target request queue + * @bio: The bio describing the memory mappings that will be submitted for IO. + * It may be a chained-bio properly constructed by block/bio layer. + * @gfp_mask: gfp flags to be used for memory allocation + * + * blk_make_request is the parallel of generic_make_request for BLOCK_PC + * type commands. Where the struct request needs to be farther initialized by + * the caller. It is passed a &struct bio, which describes the memory info of + * the I/O transfer. + * + * The caller of blk_make_request must make sure that bi_io_vec + * are set to describe the memory buffers. That bio_data_dir() will return + * the needed direction of the request. (And all bio's in the passed bio-chain + * are properly set accordingly) + * + * If called under none-sleepable conditions, mapped bio buffers must not + * need bouncing, by calling the appropriate masked or flagged allocator, + * suitable for the target device. Otherwise the call to blk_queue_bounce will + * BUG. + * + * WARNING: When allocating/cloning a bio-chain, careful consideration should be + * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for + * anything but the first bio in the chain. 
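/*
 * Illustrative sketch, not part of this patch: allocating a request with
 * blk_get_request() and executing it synchronously.  blk_rq_map_kern()
 * and blk_execute_rq() are the block-layer helpers from blk-map.c and
 * blk-exec.c; mydev_send_pc() and the empty CDB are placeholders.
 */
static int mydev_send_pc(struct request_queue *q, struct gendisk *disk,
			 void *buf, unsigned int len)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, READ, GFP_KERNEL);	/* may sleep */
	if (!rq)
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->timeout = 30 * HZ;
	/* fill rq->cmd[] and rq->cmd_len with the real CDB here */

	err = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (!err)
		err = blk_execute_rq(q, disk, rq, 0);

	blk_put_request(rq);
	return err;
}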
Otherwise you risk waiting for IO + * completion of a bio that hasn't been submitted yet, thus resulting in a + * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead + * of bio_alloc(), as that avoids the mempool deadlock. + * If possible a big IO should be split into smaller parts when allocation + * fails. Partial allocation should not be an error, or you risk a live-lock. + */ +struct request *blk_make_request(struct request_queue *q, struct bio *bio, + gfp_t gfp_mask) +{ + struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask); + + if (unlikely(!rq)) + return ERR_PTR(-ENOMEM); + + for_each_bio(bio) { + struct bio *bounce_bio = bio; + int ret; + + blk_queue_bounce(q, &bounce_bio); + ret = blk_rq_append_bio(q, rq, bounce_bio); + if (unlikely(ret)) { + blk_put_request(rq); + return ERR_PTR(ret); + } + } + + return rq; +} +EXPORT_SYMBOL(blk_make_request); + +/** + * blk_requeue_request - put a request back on queue + * @q: request queue where request should be inserted + * @rq: request to be inserted + * + * Description: + * Drivers often keep queueing requests until the hardware cannot accept + * more, when that condition happens we need to put the request back + * on the queue. Must be called with queue lock held. + */ +void blk_requeue_request(struct request_queue *q, struct request *rq) +{ + blk_delete_timer(rq); + blk_clear_rq_complete(rq); + trace_block_rq_requeue(q, rq); + + if (blk_rq_tagged(rq)) + blk_queue_end_tag(q, rq); + + BUG_ON(blk_queued_rq(rq)); + + elv_requeue_request(q, rq); +} +EXPORT_SYMBOL(blk_requeue_request); + +/** + * blk_insert_request - insert a special request into a request queue + * @q: request queue where request should be inserted + * @rq: request to be inserted + * @at_head: insert request at head or tail of queue + * @data: private data + * + * Description: + * Many block devices need to execute commands asynchronously, so they don't + * block the whole kernel from preemption during request execution. This is + * accomplished normally by inserting aritficial requests tagged as + * REQ_TYPE_SPECIAL in to the corresponding request queue, and letting them + * be scheduled for actual execution by the request queue. + * + * We have the option of inserting the head or the tail of the queue. + * Typically we use the tail for new ioctls and so forth. We use the head + * of the queue for things like a QUEUE_FULL message from a device, or a + * host that is unable to accept a particular command. + */ +void blk_insert_request(struct request_queue *q, struct request *rq, + int at_head, void *data) +{ + int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; + unsigned long flags; + + /* + * tell I/O scheduler that this isn't a regular read/write (ie it + * must not attempt merges on this) and that it acts as a soft + * barrier + */ + rq->cmd_type = REQ_TYPE_SPECIAL; + + rq->special = data; + + spin_lock_irqsave(q->queue_lock, flags); + + /* + * If command is tagged, release the tag + */ + if (blk_rq_tagged(rq)) + blk_queue_end_tag(q, rq); + + drive_stat_acct(rq, 1); + __elv_add_request(q, rq, where, 0); + __blk_run_queue(q); + spin_unlock_irqrestore(q->queue_lock, flags); +} +EXPORT_SYMBOL(blk_insert_request); + +/* + * add-request adds a request to the linked list. + * queue lock is held and interrupts disabled, as we muck with the + * request queue list. 
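/*
 * Illustrative sketch, not part of this patch: blk_requeue_request() as a
 * driver typically uses it when the hardware refuses a command.  Runs in
 * the request_fn, so the queue lock is already held.  mydev_queue_command()
 * is hypothetical.
 */
static void mydev_dispatch(struct request_queue *q, struct request *rq)
{
	struct mydev *dev = q->queuedata;

	if (mydev_queue_command(dev, rq) == -EBUSY) {
		/* undo the dequeue; the completion IRQ restarts the queue */
		blk_requeue_request(q, rq);
		blk_stop_queue(q);
	}
}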
+ */ +static inline void add_request(struct request_queue *q, struct request *req) +{ + drive_stat_acct(req, 1); + + /* + * elevator indicated where it wants this request to be + * inserted at elevator_merge time + */ + __elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0); +} + +static void part_round_stats_single(int cpu, struct hd_struct *part, + unsigned long now) +{ + if (now == part->stamp) + return; + + if (part_in_flight(part)) { + __part_stat_add(cpu, part, time_in_queue, + part_in_flight(part) * (now - part->stamp)); + __part_stat_add(cpu, part, io_ticks, (now - part->stamp)); + } + part->stamp = now; +} + +/** + * part_round_stats() - Round off the performance stats on a struct disk_stats. + * @cpu: cpu number for stats access + * @part: target partition + * + * The average IO queue length and utilisation statistics are maintained + * by observing the current state of the queue length and the amount of + * time it has been in this state for. + * + * Normally, that accounting is done on IO completion, but that can result + * in more than a second's worth of IO being accounted for within any one + * second, leading to >100% utilisation. To deal with that, we call this + * function to do a round-off before returning the results when reading + * /proc/diskstats. This accounts immediately for all queue usage up to + * the current jiffies and restarts the counters again. + */ +void part_round_stats(int cpu, struct hd_struct *part) +{ + unsigned long now = jiffies; + + if (part->partno) + part_round_stats_single(cpu, &part_to_disk(part)->part0, now); + part_round_stats_single(cpu, part, now); +} +EXPORT_SYMBOL_GPL(part_round_stats); + +/* + * queue lock must be held + */ +void __blk_put_request(struct request_queue *q, struct request *req) +{ + if (unlikely(!q)) + return; + if (unlikely(--req->ref_count)) + return; + + elv_completed_request(q, req); + + /* this is a bio leak */ + WARN_ON(req->bio != NULL); + + /* + * Request may not have originated from ll_rw_blk. if not, + * it didn't come out of our reserved rq pools + */ + if (req->cmd_flags & REQ_ALLOCED) { + int is_sync = rq_is_sync(req) != 0; + int priv = req->cmd_flags & REQ_ELVPRIV; + + BUG_ON(!list_empty(&req->queuelist)); + BUG_ON(!hlist_unhashed(&req->hash)); + + blk_free_request(q, req); + freed_request(q, is_sync, priv); + } +} +EXPORT_SYMBOL_GPL(__blk_put_request); + +void blk_put_request(struct request *req) +{ + unsigned long flags; + struct request_queue *q = req->q; + + spin_lock_irqsave(q->queue_lock, flags); + __blk_put_request(q, req); + spin_unlock_irqrestore(q->queue_lock, flags); +} +EXPORT_SYMBOL(blk_put_request); + +/** + * blk_add_request_payload - add a payload to a request + * @rq: request to update + * @page: page backing the payload + * @len: length of the payload. + * + * This allows to later add a payload to an already submitted request by + * a block driver. The driver needs to take care of freeing the payload + * itself. + * + * Note that this is a quite horrible hack and nothing but handling of + * discard requests should ever use it. 
+ */ +void blk_add_request_payload(struct request *rq, struct page *page, + unsigned int len) +{ + struct bio *bio = rq->bio; + + bio->bi_io_vec->bv_page = page; + bio->bi_io_vec->bv_offset = 0; + bio->bi_io_vec->bv_len = len; + + bio->bi_size = len; + bio->bi_vcnt = 1; + bio->bi_phys_segments = 1; + + rq->__data_len = rq->resid_len = len; + rq->nr_phys_segments = 1; + rq->buffer = bio_data(bio); +} +EXPORT_SYMBOL_GPL(blk_add_request_payload); + +void init_request_from_bio(struct request *req, struct bio *bio) +{ + req->cpu = bio->bi_comp_cpu; + req->cmd_type = REQ_TYPE_FS; + + req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK; + if (bio->bi_rw & REQ_RAHEAD) + req->cmd_flags |= REQ_FAILFAST_MASK; + + req->errors = 0; + req->__sector = bio->bi_sector; + req->ioprio = bio_prio(bio); + blk_rq_bio_prep(req->q, req, bio); +} + +/* + * Only disabling plugging for non-rotational devices if it does tagging + * as well, otherwise we do need the proper merging + */ +static inline bool queue_should_plug(struct request_queue *q) +{ + return !(blk_queue_nonrot(q) && blk_queue_tagged(q)); +} + +static int __make_request(struct request_queue *q, struct bio *bio) +{ + struct request *req; + int el_ret; + unsigned int bytes = bio->bi_size; + const unsigned short prio = bio_prio(bio); + const bool sync = !!(bio->bi_rw & REQ_SYNC); + const bool unplug = !!(bio->bi_rw & REQ_UNPLUG); + const unsigned long ff = bio->bi_rw & REQ_FAILFAST_MASK; + int rw_flags; + + if ((bio->bi_rw & REQ_HARDBARRIER) && + (q->next_ordered == QUEUE_ORDERED_NONE)) { + bio_endio(bio, -EOPNOTSUPP); + return 0; + } + /* + * low level driver can indicate that it wants pages above a + * certain limit bounced to low memory (ie for highmem, or even + * ISA dma in theory) + */ + blk_queue_bounce(q, &bio); + + spin_lock_irq(q->queue_lock); + + if (unlikely((bio->bi_rw & REQ_HARDBARRIER)) || elv_queue_empty(q)) + goto get_rq; + + el_ret = elv_merge(q, &req, bio); + switch (el_ret) { + case ELEVATOR_BACK_MERGE: + BUG_ON(!rq_mergeable(req)); + + if (!ll_back_merge_fn(q, req, bio)) + break; + + trace_block_bio_backmerge(q, bio); + + if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) + blk_rq_set_mixed_merge(req); + + req->biotail->bi_next = bio; + req->biotail = bio; + req->__data_len += bytes; + req->ioprio = ioprio_best(req->ioprio, prio); + if (!blk_rq_cpu_valid(req)) + req->cpu = bio->bi_comp_cpu; + drive_stat_acct(req, 0); + elv_bio_merged(q, req, bio); + if (!attempt_back_merge(q, req)) + elv_merged_request(q, req, el_ret); + goto out; + + case ELEVATOR_FRONT_MERGE: + BUG_ON(!rq_mergeable(req)); + + if (!ll_front_merge_fn(q, req, bio)) + break; + + trace_block_bio_frontmerge(q, bio); + + if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) { + blk_rq_set_mixed_merge(req); + req->cmd_flags &= ~REQ_FAILFAST_MASK; + req->cmd_flags |= ff; + } + + bio->bi_next = req->bio; + req->bio = bio; + + /* + * may not be valid. if the low level driver said + * it didn't need a bounce buffer then it better + * not touch req->buffer either... + */ + req->buffer = bio_data(bio); + req->__sector = bio->bi_sector; + req->__data_len += bytes; + req->ioprio = ioprio_best(req->ioprio, prio); + if (!blk_rq_cpu_valid(req)) + req->cpu = bio->bi_comp_cpu; + drive_stat_acct(req, 0); + elv_bio_merged(q, req, bio); + if (!attempt_front_merge(q, req)) + elv_merged_request(q, req, el_ret); + goto out; + + /* ELV_NO_MERGE: elevator says don't/can't merge. 
*/ + default: + ; + } + +get_rq: + /* + * This sync check and mask will be re-done in init_request_from_bio(), + * but we need to set it earlier to expose the sync flag to the + * rq allocator and io schedulers. + */ + rw_flags = bio_data_dir(bio); + if (sync) + rw_flags |= REQ_SYNC; + + /* + * Grab a free request. This is might sleep but can not fail. + * Returns with the queue unlocked. + */ + req = get_request_wait(q, rw_flags, bio); + + /* + * After dropping the lock and possibly sleeping here, our request + * may now be mergeable after it had proven unmergeable (above). + * We don't worry about that case for efficiency. It won't happen + * often, and the elevators are able to handle it. + */ + init_request_from_bio(req, bio); + + spin_lock_irq(q->queue_lock); + if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) || + bio_flagged(bio, BIO_CPU_AFFINE)) + req->cpu = blk_cpu_to_group(smp_processor_id()); + if (queue_should_plug(q) && elv_queue_empty(q)) + blk_plug_device(q); + add_request(q, req); +out: + if (unplug || !queue_should_plug(q)) + __generic_unplug_device(q); + spin_unlock_irq(q->queue_lock); + return 0; +} + +/* + * If bio->bi_dev is a partition, remap the location + */ +static inline void blk_partition_remap(struct bio *bio) +{ + struct block_device *bdev = bio->bi_bdev; + + if (bio_sectors(bio) && bdev != bdev->bd_contains) { + struct hd_struct *p = bdev->bd_part; + + bio->bi_sector += p->start_sect; + bio->bi_bdev = bdev->bd_contains; + + trace_block_remap(bdev_get_queue(bio->bi_bdev), bio, + bdev->bd_dev, + bio->bi_sector - p->start_sect); + } +} + +static void handle_bad_sector(struct bio *bio) +{ + char b[BDEVNAME_SIZE]; + + printk(KERN_INFO "attempt to access beyond end of device\n"); + printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n", + bdevname(bio->bi_bdev, b), + bio->bi_rw, + (unsigned long long)bio->bi_sector + bio_sectors(bio), + (long long)(bio->bi_bdev->bd_inode->i_size >> 9)); + + set_bit(BIO_EOF, &bio->bi_flags); +} + +#ifdef CONFIG_FAIL_MAKE_REQUEST + +static DECLARE_FAULT_ATTR(fail_make_request); + +static int __init setup_fail_make_request(char *str) +{ + return setup_fault_attr(&fail_make_request, str); +} +__setup("fail_make_request=", setup_fail_make_request); + +static int should_fail_request(struct bio *bio) +{ + struct hd_struct *part = bio->bi_bdev->bd_part; + + if (part_to_disk(part)->part0.make_it_fail || part->make_it_fail) + return should_fail(&fail_make_request, bio->bi_size); + + return 0; +} + +static int __init fail_make_request_debugfs(void) +{ + return init_fault_attr_dentries(&fail_make_request, + "fail_make_request"); +} + +late_initcall(fail_make_request_debugfs); + +#else /* CONFIG_FAIL_MAKE_REQUEST */ + +static inline int should_fail_request(struct bio *bio) +{ + return 0; +} + +#endif /* CONFIG_FAIL_MAKE_REQUEST */ + +/* + * Check whether this bio extends beyond the end of the device. + */ +static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors) +{ + sector_t maxsector; + + if (!nr_sectors) + return 0; + + /* Test device or partition size, when known. */ + maxsector = bio->bi_bdev->bd_inode->i_size >> 9; + if (maxsector) { + sector_t sector = bio->bi_sector; + + if (maxsector < nr_sectors || maxsector - nr_sectors < sector) { + /* + * This may well happen - the kernel calls bread() + * without checking the size of the device, e.g., when + * mounting a device. 
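/*
 * Illustrative sketch, not part of this patch: a bio-based driver that
 * never goes through __make_request() because it installs its own
 * make_request_fn (the approach taken by brd, loop and md).
 * mybrd_xfer_bio() is hypothetical.
 */
static int mybrd_make_request(struct request_queue *q, struct bio *bio)
{
	int err = mybrd_xfer_bio(q->queuedata, bio);

	bio_endio(bio, err);
	return 0;	/* 0: handled, generic_make_request stops remapping */
}

static struct request_queue *mybrd_alloc_queue(void)
{
	struct request_queue *q = blk_alloc_queue(GFP_KERNEL);

	if (q)
		blk_queue_make_request(q, mybrd_make_request);
	return q;
}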
+ */ + handle_bad_sector(bio); + return 1; + } + } + + return 0; +} + +/** + * generic_make_request - hand a buffer to its device driver for I/O + * @bio: The bio describing the location in memory and on the device. + * + * generic_make_request() is used to make I/O requests of block + * devices. It is passed a &struct bio, which describes the I/O that needs + * to be done. + * + * generic_make_request() does not return any status. The + * success/failure status of the request, along with notification of + * completion, is delivered asynchronously through the bio->bi_end_io + * function described (one day) else where. + * + * The caller of generic_make_request must make sure that bi_io_vec + * are set to describe the memory buffer, and that bi_dev and bi_sector are + * set to describe the device address, and the + * bi_end_io and optionally bi_private are set to describe how + * completion notification should be signaled. + * + * generic_make_request and the drivers it calls may use bi_next if this + * bio happens to be merged with someone else, and may change bi_dev and + * bi_sector for remaps as it sees fit. So the values of these fields + * should NOT be depended on after the call to generic_make_request. + */ +static inline void __generic_make_request(struct bio *bio) +{ + struct request_queue *q; + sector_t old_sector; + int ret, nr_sectors = bio_sectors(bio); + dev_t old_dev; + int err = -EIO; + + might_sleep(); + + if (bio_check_eod(bio, nr_sectors)) + goto end_io; + + /* + * Resolve the mapping until finished. (drivers are + * still free to implement/resolve their own stacking + * by explicitly returning 0) + * + * NOTE: we don't repeat the blk_size check for each new device. + * Stacking drivers are expected to know what they are doing. + */ + old_sector = -1; + old_dev = 0; + do { + char b[BDEVNAME_SIZE]; + + q = bdev_get_queue(bio->bi_bdev); + if (unlikely(!q)) { + printk(KERN_ERR + "generic_make_request: Trying to access " + "nonexistent block-device %s (%Lu)\n", + bdevname(bio->bi_bdev, b), + (long long) bio->bi_sector); + goto end_io; + } + + if (unlikely(!(bio->bi_rw & REQ_DISCARD) && + nr_sectors > queue_max_hw_sectors(q))) { + printk(KERN_ERR "bio too big device %s (%u > %u)\n", + bdevname(bio->bi_bdev, b), + bio_sectors(bio), + queue_max_hw_sectors(q)); + goto end_io; + } + + if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) + goto end_io; + + if (should_fail_request(bio)) + goto end_io; + + /* + * If this device has partitions, remap block n + * of partition p to block n+start(p) of the disk. + */ + blk_partition_remap(bio); + + if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) + goto end_io; + + if (old_sector != -1) + trace_block_remap(q, bio, old_dev, old_sector); + + old_sector = bio->bi_sector; + old_dev = bio->bi_bdev->bd_dev; + + if (bio_check_eod(bio, nr_sectors)) + goto end_io; + + if ((bio->bi_rw & REQ_DISCARD) && + (!blk_queue_discard(q) || + ((bio->bi_rw & REQ_SECURE) && + !blk_queue_secdiscard(q)))) { + err = -EOPNOTSUPP; + goto end_io; + } + + trace_block_bio_queue(q, bio); + + ret = q->make_request_fn(q, bio); + } while (ret); + + return; + +end_io: + bio_endio(bio, err); +} + +/* + * We only want one ->make_request_fn to be active at a time, + * else stack usage with stacked devices could be a problem. + * So use current->bio_list to keep a list of requests + * submited by a make_request_fn function. + * current->bio_list is also used as a flag to say if + * generic_make_request is currently active in this task or not. 
+ * If it is NULL, then no make_request is active. If it is non-NULL, + * then a make_request is active, and new requests should be added + * at the tail + */ +void generic_make_request(struct bio *bio) +{ + struct bio_list bio_list_on_stack; + + if (current->bio_list) { + /* make_request is active */ + bio_list_add(current->bio_list, bio); + return; + } + /* following loop may be a bit non-obvious, and so deserves some + * explanation. + * Before entering the loop, bio->bi_next is NULL (as all callers + * ensure that) so we have a list with a single bio. + * We pretend that we have just taken it off a longer list, so + * we assign bio_list to a pointer to the bio_list_on_stack, + * thus initialising the bio_list of new bios to be + * added. __generic_make_request may indeed add some more bios + * through a recursive call to generic_make_request. If it + * did, we find a non-NULL value in bio_list and re-enter the loop + * from the top. In this case we really did just take the bio + * of the top of the list (no pretending) and so remove it from + * bio_list, and call into __generic_make_request again. + * + * The loop was structured like this to make only one call to + * __generic_make_request (which is important as it is large and + * inlined) and to keep the structure simple. + */ + BUG_ON(bio->bi_next); + bio_list_init(&bio_list_on_stack); + current->bio_list = &bio_list_on_stack; + do { + __generic_make_request(bio); + bio = bio_list_pop(current->bio_list); + } while (bio); + current->bio_list = NULL; /* deactivate */ +} +EXPORT_SYMBOL(generic_make_request); + +/** + * submit_bio - submit a bio to the block device layer for I/O + * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead) + * @bio: The &struct bio which describes the I/O + * + * submit_bio() is very similar in purpose to generic_make_request(), and + * uses that function to do most of the work. Both are fairly rough + * interfaces; @bio must be presetup and ready for I/O. + * + */ +void submit_bio(int rw, struct bio *bio) +{ + int count = bio_sectors(bio); + + bio->bi_rw |= rw; + + /* + * If it's a regular read/write or a barrier with data attached, + * go through the normal accounting stuff before submission. + */ + if (bio_has_data(bio) && !(rw & REQ_DISCARD)) { + if (rw & WRITE) { + count_vm_events(PGPGOUT, count); + } else { + task_io_account_read(bio->bi_size); + count_vm_events(PGPGIN, count); + } + + if (unlikely(block_dump)) { + char b[BDEVNAME_SIZE]; + printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n", + current->comm, task_pid_nr(current), + (rw & WRITE) ? "WRITE" : "READ", + (unsigned long long)bio->bi_sector, + bdevname(bio->bi_bdev, b)); + } + } + + generic_make_request(bio); +} +EXPORT_SYMBOL(submit_bio); + +/** + * blk_rq_check_limits - Helper function to check a request for the queue limit + * @q: the queue + * @rq: the request being checked + * + * Description: + * @rq may have been made based on weaker limitations of upper-level queues + * in request stacking drivers, and it may violate the limitation of @q. + * Since the block layer and the underlying device driver trust @rq + * after it is inserted to @q, it should be checked against @q before + * the insertion using this generic function. + * + * This function should also be useful for request stacking drivers + * in some cases below, so export this fuction. + * Request stacking drivers like request-based dm may change the queue + * limits while requests are in the queue (e.g. dm's table swapping). 
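/*
 * Illustrative sketch, not part of this patch: building a single-page bio
 * by hand and pushing it through submit_bio(), then waiting for the
 * completion callback.  Error propagation from the callback is omitted
 * to keep the example short.
 */
static void my_readpage_endio(struct bio *bio, int error)
{
	struct completion *done = bio->bi_private;

	bio_put(bio);
	complete(done);
}

static int my_readpage(struct block_device *bdev, struct page *page,
		       sector_t sector)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	if (!bio)
		return -ENOMEM;

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio->bi_end_io = my_readpage_endio;
	bio->bi_private = &done;

	if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) {
		bio_put(bio);
		return -EIO;
	}

	submit_bio(READ, bio);
	wait_for_completion(&done);
	return 0;
}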
+ * Such request stacking drivers should check those requests agaist + * the new queue limits again when they dispatch those requests, + * although such checkings are also done against the old queue limits + * when submitting requests. + */ +int blk_rq_check_limits(struct request_queue *q, struct request *rq) +{ + if (rq->cmd_flags & REQ_DISCARD) + return 0; + + if (blk_rq_sectors(rq) > queue_max_sectors(q) || + blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) { + printk(KERN_ERR "%s: over max size limit.\n", __func__); + return -EIO; + } + + /* + * queue's settings related to segment counting like q->bounce_pfn + * may differ from that of other stacking queues. + * Recalculate it to check the request correctly on this queue's + * limitation. + */ + blk_recalc_rq_segments(rq); + if (rq->nr_phys_segments > queue_max_segments(q)) { + printk(KERN_ERR "%s: over max segments limit.\n", __func__); + return -EIO; + } + + return 0; +} +EXPORT_SYMBOL_GPL(blk_rq_check_limits); + +/** + * blk_insert_cloned_request - Helper for stacking drivers to submit a request + * @q: the queue to submit the request + * @rq: the request being queued + */ +int blk_insert_cloned_request(struct request_queue *q, struct request *rq) +{ + unsigned long flags; + + if (blk_rq_check_limits(q, rq)) + return -EIO; + +#ifdef CONFIG_FAIL_MAKE_REQUEST + if (rq->rq_disk && rq->rq_disk->part0.make_it_fail && + should_fail(&fail_make_request, blk_rq_bytes(rq))) + return -EIO; +#endif + + spin_lock_irqsave(q->queue_lock, flags); + + /* + * Submitting request must be dequeued before calling this function + * because it will be linked to another request_queue + */ + BUG_ON(blk_queued_rq(rq)); + + drive_stat_acct(rq, 1); + __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0); + + spin_unlock_irqrestore(q->queue_lock, flags); + + return 0; +} +EXPORT_SYMBOL_GPL(blk_insert_cloned_request); + +/** + * blk_rq_err_bytes - determine number of bytes till the next failure boundary + * @rq: request to examine + * + * Description: + * A request could be merge of IOs which require different failure + * handling. This function determines the number of bytes which + * can be failed from the beginning of the request without + * crossing into area which need to be retried further. + * + * Return: + * The number of bytes to fail. + * + * Context: + * queue_lock must be held. + */ +unsigned int blk_rq_err_bytes(const struct request *rq) +{ + unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK; + unsigned int bytes = 0; + struct bio *bio; + + if (!(rq->cmd_flags & REQ_MIXED_MERGE)) + return blk_rq_bytes(rq); + + /* + * Currently the only 'mixing' which can happen is between + * different fastfail types. We can safely fail portions + * which have all the failfast bits that the first one has - + * the ones which are at least as eager to fail as the first + * one. 
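/*
 * Illustrative sketch, not part of this patch: how a request-stacking
 * driver (request-based dm is the in-tree user) hands a clone to the
 * queue below it.  The clone is assumed to have been prepared with
 * blk_rq_prep_clone(); "my-stacker" is a made-up name.
 */
static int my_stacker_dispatch(struct request_queue *lower_q,
			       struct request *clone)
{
	int err = blk_insert_cloned_request(lower_q, clone);

	if (err)	/* blk_rq_check_limits() rejected it for this queue */
		printk(KERN_ERR "my-stacker: clone dispatch failed %d\n", err);
	return err;
}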
+ */ + for (bio = rq->bio; bio; bio = bio->bi_next) { + if ((bio->bi_rw & ff) != ff) + break; + bytes += bio->bi_size; + } + + /* this could lead to infinite loop */ + BUG_ON(blk_rq_bytes(rq) && !bytes); + return bytes; +} +EXPORT_SYMBOL_GPL(blk_rq_err_bytes); + +static void blk_account_io_completion(struct request *req, unsigned int bytes) +{ + if (blk_do_io_stat(req)) { + const int rw = rq_data_dir(req); + struct hd_struct *part; + int cpu; + + cpu = part_stat_lock(); + part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req)); + part_stat_add(cpu, part, sectors[rw], bytes >> 9); + part_stat_unlock(); + } +} + +static void blk_account_io_done(struct request *req) +{ + /* + * Account IO completion. bar_rq isn't accounted as a normal + * IO on queueing nor completion. Accounting the containing + * request is enough. + */ + if (blk_do_io_stat(req) && req != &req->q->bar_rq) { + unsigned long duration = jiffies - req->start_time; + const int rw = rq_data_dir(req); + struct hd_struct *part; + int cpu; + + cpu = part_stat_lock(); + part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req)); + + part_stat_inc(cpu, part, ios[rw]); + part_stat_add(cpu, part, ticks[rw], duration); + part_round_stats(cpu, part); + part_dec_in_flight(part, rw); + + part_stat_unlock(); + } +} + +/** + * blk_peek_request - peek at the top of a request queue + * @q: request queue to peek at + * + * Description: + * Return the request at the top of @q. The returned request + * should be started using blk_start_request() before LLD starts + * processing it. + * + * Return: + * Pointer to the request at the top of @q if available. Null + * otherwise. + * + * Context: + * queue_lock must be held. + */ +struct request *blk_peek_request(struct request_queue *q) +{ + struct request *rq; + int ret; + + while ((rq = __elv_next_request(q)) != NULL) { + if (!(rq->cmd_flags & REQ_STARTED)) { + /* + * This is the first time the device driver + * sees this request (possibly after + * requeueing). Notify IO scheduler. + */ + if (rq->cmd_flags & REQ_SORTED) + elv_activate_rq(q, rq); + + /* + * just mark as started even if we don't start + * it, a request that has been delayed should + * not be passed by new incoming requests + */ + rq->cmd_flags |= REQ_STARTED; + trace_block_rq_issue(q, rq); + } + + if (!q->boundary_rq || q->boundary_rq == rq) { + q->end_sector = rq_end_sector(rq); + q->boundary_rq = NULL; + } + + if (rq->cmd_flags & REQ_DONTPREP) + break; + + if (q->dma_drain_size && blk_rq_bytes(rq)) { + /* + * make sure space for the drain appears we + * know we can do this because max_hw_segments + * has been adjusted to be one fewer than the + * device can handle + */ + rq->nr_phys_segments++; + } + + if (!q->prep_rq_fn) + break; + + ret = q->prep_rq_fn(q, rq); + if (ret == BLKPREP_OK) { + break; + } else if (ret == BLKPREP_DEFER) { + /* + * the request may have been (partially) prepped. + * we need to keep this request in the front to + * avoid resource deadlock. REQ_STARTED will + * prevent other fs requests from passing this one. + */ + if (q->dma_drain_size && blk_rq_bytes(rq) && + !(rq->cmd_flags & REQ_DONTPREP)) { + /* + * remove the space for the drain we added + * so that we don't add it again + */ + --rq->nr_phys_segments; + } + + rq = NULL; + break; + } else if (ret == BLKPREP_KILL) { + rq->cmd_flags |= REQ_QUIET; + /* + * Mark this request as started so we don't trigger + * any debug logic in the end I/O path. 
+ */ + blk_start_request(rq); + __blk_end_request_all(rq, -EIO); + } else { + printk(KERN_ERR "%s: bad return=%d\n", __func__, ret); + break; + } + } + + return rq; +} +EXPORT_SYMBOL(blk_peek_request); + +void blk_dequeue_request(struct request *rq) +{ + struct request_queue *q = rq->q; + + BUG_ON(list_empty(&rq->queuelist)); + BUG_ON(ELV_ON_HASH(rq)); + + list_del_init(&rq->queuelist); + + /* + * the time frame between a request being removed from the lists + * and to it is freed is accounted as io that is in progress at + * the driver side. + */ + if (blk_account_rq(rq)) { + q->in_flight[rq_is_sync(rq)]++; + set_io_start_time_ns(rq); + } +} + +/** + * blk_start_request - start request processing on the driver + * @req: request to dequeue + * + * Description: + * Dequeue @req and start timeout timer on it. This hands off the + * request to the driver. + * + * Block internal functions which don't want to start timer should + * call blk_dequeue_request(). + * + * Context: + * queue_lock must be held. + */ +void blk_start_request(struct request *req) +{ + blk_dequeue_request(req); + + /* + * We are now handing the request to the hardware, initialize + * resid_len to full count and add the timeout handler. + */ + req->resid_len = blk_rq_bytes(req); + if (unlikely(blk_bidi_rq(req))) + req->next_rq->resid_len = blk_rq_bytes(req->next_rq); + + blk_add_timer(req); +} +EXPORT_SYMBOL(blk_start_request); + +/** + * blk_fetch_request - fetch a request from a request queue + * @q: request queue to fetch a request from + * + * Description: + * Return the request at the top of @q. The request is started on + * return and LLD can start processing it immediately. + * + * Return: + * Pointer to the request at the top of @q if available. Null + * otherwise. + * + * Context: + * queue_lock must be held. + */ +struct request *blk_fetch_request(struct request_queue *q) +{ + struct request *rq; + + rq = blk_peek_request(q); + if (rq) + blk_start_request(rq); + return rq; +} +EXPORT_SYMBOL(blk_fetch_request); + +/** + * blk_update_request - Special helper function for request stacking drivers + * @req: the request being processed + * @error: %0 for success, < %0 for error + * @nr_bytes: number of bytes to complete @req + * + * Description: + * Ends I/O on a number of bytes attached to @req, but doesn't complete + * the request structure even if @req doesn't have leftover. + * If @req has leftover, sets it up for the next range of segments. + * + * This special helper function is only for request stacking drivers + * (e.g. request-based dm) so that they can handle partial completion. + * Actual device drivers should use blk_end_request instead. + * + * Passing the result of blk_rq_bytes() as @nr_bytes guarantees + * %false return from this function. + * + * Return: + * %false - this request doesn't have any more data + * %true - this request has more data + **/ +bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) +{ + int total_bytes, bio_nbytes, next_idx = 0; + struct bio *bio; + + if (!req->bio) + return false; + + trace_block_rq_complete(req->q, req); + + /* + * For fs requests, rq is just carrier of independent bio's + * and each partial completion should be handled separately. + * Reset per-request error on each partial completion. + * + * TODO: tj: This is too subtle. It would be better to let + * low level drivers do what they see fit. 
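/*
 * Illustrative sketch, not part of this patch: the smallest useful
 * request_fn built on blk_fetch_request().  The queue lock is held here,
 * so the __-prefixed completion helpers are the correct ones.
 */
static void mydev_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		if (rq->cmd_type != REQ_TYPE_FS) {
			__blk_end_request_all(rq, -EIO);
			continue;
		}
		/* a real driver would start the transfer here; this one
		 * just claims instant success */
		__blk_end_request_all(rq, 0);
	}
}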
+ */ + if (req->cmd_type == REQ_TYPE_FS) + req->errors = 0; + + if (error && req->cmd_type == REQ_TYPE_FS && + !(req->cmd_flags & REQ_QUIET)) { + printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n", + req->rq_disk ? req->rq_disk->disk_name : "?", + (unsigned long long)blk_rq_pos(req)); + } + + blk_account_io_completion(req, nr_bytes); + + total_bytes = bio_nbytes = 0; + while ((bio = req->bio) != NULL) { + int nbytes; + + if (nr_bytes >= bio->bi_size) { + req->bio = bio->bi_next; + nbytes = bio->bi_size; + req_bio_endio(req, bio, nbytes, error); + next_idx = 0; + bio_nbytes = 0; + } else { + int idx = bio->bi_idx + next_idx; + + if (unlikely(idx >= bio->bi_vcnt)) { + blk_dump_rq_flags(req, "__end_that"); + printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n", + __func__, idx, bio->bi_vcnt); + break; + } + + nbytes = bio_iovec_idx(bio, idx)->bv_len; + BIO_BUG_ON(nbytes > bio->bi_size); + + /* + * not a complete bvec done + */ + if (unlikely(nbytes > nr_bytes)) { + bio_nbytes += nr_bytes; + total_bytes += nr_bytes; + break; + } + + /* + * advance to the next vector + */ + next_idx++; + bio_nbytes += nbytes; + } + + total_bytes += nbytes; + nr_bytes -= nbytes; + + bio = req->bio; + if (bio) { + /* + * end more in this run, or just return 'not-done' + */ + if (unlikely(nr_bytes <= 0)) + break; + } + } + + /* + * completely done + */ + if (!req->bio) { + /* + * Reset counters so that the request stacking driver + * can find how many bytes remain in the request + * later. + */ + req->__data_len = 0; + return false; + } + + /* + * if the request wasn't completed, update state + */ + if (bio_nbytes) { + req_bio_endio(req, bio, bio_nbytes, error); + bio->bi_idx += next_idx; + bio_iovec(bio)->bv_offset += nr_bytes; + bio_iovec(bio)->bv_len -= nr_bytes; + } + + req->__data_len -= total_bytes; + req->buffer = bio_data(req->bio); + + /* update sector only for requests with clear definition of sector */ + if (req->cmd_type == REQ_TYPE_FS || (req->cmd_flags & REQ_DISCARD)) + req->__sector += total_bytes >> 9; + + /* mixed attributes always follow the first bio */ + if (req->cmd_flags & REQ_MIXED_MERGE) { + req->cmd_flags &= ~REQ_FAILFAST_MASK; + req->cmd_flags |= req->bio->bi_rw & REQ_FAILFAST_MASK; + } + + /* + * If total number of sectors is less than the first segment + * size, something has gone terribly wrong. + */ + if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) { + printk(KERN_ERR "blk: request botched\n"); + req->__data_len = blk_rq_cur_bytes(req); + } + + /* recalculate the number of segments */ + blk_recalc_rq_segments(req); + + return true; +} +EXPORT_SYMBOL_GPL(blk_update_request); + +static bool blk_update_bidi_request(struct request *rq, int error, + unsigned int nr_bytes, + unsigned int bidi_bytes) +{ + if (blk_update_request(rq, error, nr_bytes)) + return true; + + /* Bidi request must be completed as a whole */ + if (unlikely(blk_bidi_rq(rq)) && + blk_update_request(rq->next_rq, error, bidi_bytes)) + return true; + + if (blk_queue_add_random(rq->q)) + add_disk_randomness(rq->rq_disk); + + return false; +} + +/** + * blk_unprep_request - unprepare a request + * @req: the request + * + * This function makes a request ready for complete resubmission (or + * completion). It happens only after all error handling is complete, + * so represents the appropriate moment to deallocate any resources + * that were allocated to the request in the prep_rq_fn. The queue + * lock is held when calling this. 
+ */ +void blk_unprep_request(struct request *req) +{ + struct request_queue *q = req->q; + + req->cmd_flags &= ~REQ_DONTPREP; + if (q->unprep_rq_fn) + q->unprep_rq_fn(q, req); +} +EXPORT_SYMBOL_GPL(blk_unprep_request); + +/* + * queue lock must be held + */ +static void blk_finish_request(struct request *req, int error) +{ + if (blk_rq_tagged(req)) + blk_queue_end_tag(req->q, req); + + BUG_ON(blk_queued_rq(req)); + + if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS) + laptop_io_completion(&req->q->backing_dev_info); + + blk_delete_timer(req); + + if (req->cmd_flags & REQ_DONTPREP) + blk_unprep_request(req); + + + blk_account_io_done(req); + + if (req->end_io) + req->end_io(req, error); + else { + if (blk_bidi_rq(req)) + __blk_put_request(req->next_rq->q, req->next_rq); + + __blk_put_request(req->q, req); + } +} + +/** + * blk_end_bidi_request - Complete a bidi request + * @rq: the request to complete + * @error: %0 for success, < %0 for error + * @nr_bytes: number of bytes to complete @rq + * @bidi_bytes: number of bytes to complete @rq->next_rq + * + * Description: + * Ends I/O on a number of bytes attached to @rq and @rq->next_rq. + * Drivers that supports bidi can safely call this member for any + * type of request, bidi or uni. In the later case @bidi_bytes is + * just ignored. + * + * Return: + * %false - we are done with this request + * %true - still buffers pending for this request + **/ +static bool blk_end_bidi_request(struct request *rq, int error, + unsigned int nr_bytes, unsigned int bidi_bytes) +{ + struct request_queue *q = rq->q; + unsigned long flags; + + if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) + return true; + + spin_lock_irqsave(q->queue_lock, flags); + blk_finish_request(rq, error); + spin_unlock_irqrestore(q->queue_lock, flags); + + return false; +} + +/** + * __blk_end_bidi_request - Complete a bidi request with queue lock held + * @rq: the request to complete + * @error: %0 for success, < %0 for error + * @nr_bytes: number of bytes to complete @rq + * @bidi_bytes: number of bytes to complete @rq->next_rq + * + * Description: + * Identical to blk_end_bidi_request() except that queue lock is + * assumed to be locked on entry and remains so on return. + * + * Return: + * %false - we are done with this request + * %true - still buffers pending for this request + **/ +static bool __blk_end_bidi_request(struct request *rq, int error, + unsigned int nr_bytes, unsigned int bidi_bytes) +{ + if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) + return true; + + blk_finish_request(rq, error); + + return false; +} + +/** + * blk_end_request - Helper function for drivers to complete the request. + * @rq: the request being processed + * @error: %0 for success, < %0 for error + * @nr_bytes: number of bytes to complete + * + * Description: + * Ends I/O on a number of bytes attached to @rq. + * If @rq has leftover, sets it up for the next range of segments. + * + * Return: + * %false - we are done with this request + * %true - still buffers pending for this request + **/ +bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes) +{ + return blk_end_bidi_request(rq, error, nr_bytes, 0); +} +EXPORT_SYMBOL(blk_end_request); + +/** + * blk_end_request_all - Helper function for drives to finish the request. + * @rq: the request to finish + * @error: %0 for success, < %0 for error + * + * Description: + * Completely finish @rq. 
+ */ +void blk_end_request_all(struct request *rq, int error) +{ + bool pending; + unsigned int bidi_bytes = 0; + + if (unlikely(blk_bidi_rq(rq))) + bidi_bytes = blk_rq_bytes(rq->next_rq); + + pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); + BUG_ON(pending); +} +EXPORT_SYMBOL(blk_end_request_all); + +/** + * blk_end_request_cur - Helper function to finish the current request chunk. + * @rq: the request to finish the current chunk for + * @error: %0 for success, < %0 for error + * + * Description: + * Complete the current consecutively mapped chunk from @rq. + * + * Return: + * %false - we are done with this request + * %true - still buffers pending for this request + */ +bool blk_end_request_cur(struct request *rq, int error) +{ + return blk_end_request(rq, error, blk_rq_cur_bytes(rq)); +} +EXPORT_SYMBOL(blk_end_request_cur); + +/** + * blk_end_request_err - Finish a request till the next failure boundary. + * @rq: the request to finish till the next failure boundary for + * @error: must be negative errno + * + * Description: + * Complete @rq till the next failure boundary. + * + * Return: + * %false - we are done with this request + * %true - still buffers pending for this request + */ +bool blk_end_request_err(struct request *rq, int error) +{ + WARN_ON(error >= 0); + return blk_end_request(rq, error, blk_rq_err_bytes(rq)); +} +EXPORT_SYMBOL_GPL(blk_end_request_err); + +/** + * __blk_end_request - Helper function for drivers to complete the request. + * @rq: the request being processed + * @error: %0 for success, < %0 for error + * @nr_bytes: number of bytes to complete + * + * Description: + * Must be called with queue lock held unlike blk_end_request(). + * + * Return: + * %false - we are done with this request + * %true - still buffers pending for this request + **/ +bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes) +{ + return __blk_end_bidi_request(rq, error, nr_bytes, 0); +} +EXPORT_SYMBOL(__blk_end_request); + +/** + * __blk_end_request_all - Helper function for drives to finish the request. + * @rq: the request to finish + * @error: %0 for success, < %0 for error + * + * Description: + * Completely finish @rq. Must be called with queue lock held. + */ +void __blk_end_request_all(struct request *rq, int error) +{ + bool pending; + unsigned int bidi_bytes = 0; + + if (unlikely(blk_bidi_rq(rq))) + bidi_bytes = blk_rq_bytes(rq->next_rq); + + pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); + BUG_ON(pending); +} +EXPORT_SYMBOL(__blk_end_request_all); + +/** + * __blk_end_request_cur - Helper function to finish the current request chunk. + * @rq: the request to finish the current chunk for + * @error: %0 for success, < %0 for error + * + * Description: + * Complete the current consecutively mapped chunk from @rq. Must + * be called with queue lock held. + * + * Return: + * %false - we are done with this request + * %true - still buffers pending for this request + */ +bool __blk_end_request_cur(struct request *rq, int error) +{ + return __blk_end_request(rq, error, blk_rq_cur_bytes(rq)); +} +EXPORT_SYMBOL(__blk_end_request_cur); + +/** + * __blk_end_request_err - Finish a request till the next failure boundary. + * @rq: the request to finish till the next failure boundary for + * @error: must be negative errno + * + * Description: + * Complete @rq till the next failure boundary. Must be called + * with queue lock held. 
+ * + * Return: + * %false - we are done with this request + * %true - still buffers pending for this request + */ +bool __blk_end_request_err(struct request *rq, int error) +{ + WARN_ON(error >= 0); + return __blk_end_request(rq, error, blk_rq_err_bytes(rq)); +} +EXPORT_SYMBOL_GPL(__blk_end_request_err); + +void blk_rq_bio_prep(struct request_queue *q, struct request *rq, + struct bio *bio) +{ + /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */ + rq->cmd_flags |= bio->bi_rw & REQ_WRITE; + + if (bio_has_data(bio)) { + rq->nr_phys_segments = bio_phys_segments(q, bio); + rq->buffer = bio_data(bio); + } + rq->__data_len = bio->bi_size; + rq->bio = rq->biotail = bio; + + if (bio->bi_bdev) + rq->rq_disk = bio->bi_bdev->bd_disk; +} + +#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE +/** + * rq_flush_dcache_pages - Helper function to flush all pages in a request + * @rq: the request to be flushed + * + * Description: + * Flush all pages in @rq. + */ +void rq_flush_dcache_pages(struct request *rq) +{ + struct req_iterator iter; + struct bio_vec *bvec; + + rq_for_each_segment(bvec, rq, iter) + flush_dcache_page(bvec->bv_page); +} +EXPORT_SYMBOL_GPL(rq_flush_dcache_pages); +#endif + +/** + * blk_lld_busy - Check if underlying low-level drivers of a device are busy + * @q : the queue of the device being checked + * + * Description: + * Check if underlying low-level drivers of a device are busy. + * If the drivers want to export their busy state, they must set own + * exporting function using blk_queue_lld_busy() first. + * + * Basically, this function is used only by request stacking drivers + * to stop dispatching requests to underlying devices when underlying + * devices are busy. This behavior helps more I/O merging on the queue + * of the request stacking driver and prevents I/O throughput regression + * on burst I/O load. + * + * Return: + * 0 - Not busy (The request stacking driver should dispatch request) + * 1 - Busy (The request stacking driver should stop dispatching request) + */ +int blk_lld_busy(struct request_queue *q) +{ + if (q->lld_busy_fn) + return q->lld_busy_fn(q); + + return 0; +} +EXPORT_SYMBOL_GPL(blk_lld_busy); + +/** + * blk_rq_unprep_clone - Helper function to free all bios in a cloned request + * @rq: the clone request to be cleaned up + * + * Description: + * Free all bios in @rq for a cloned request. + */ +void blk_rq_unprep_clone(struct request *rq) +{ + struct bio *bio; + + while ((bio = rq->bio) != NULL) { + rq->bio = bio->bi_next; + + bio_put(bio); + } +} +EXPORT_SYMBOL_GPL(blk_rq_unprep_clone); + +/* + * Copy attributes of the original request to the clone request. + * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied. + */ +static void __blk_rq_prep_clone(struct request *dst, struct request *src) +{ + dst->cpu = src->cpu; + dst->cmd_flags = (rq_data_dir(src) | REQ_NOMERGE); + if (src->cmd_flags & REQ_DISCARD) + dst->cmd_flags |= REQ_DISCARD; + dst->cmd_type = src->cmd_type; + dst->__sector = blk_rq_pos(src); + dst->__data_len = blk_rq_bytes(src); + dst->nr_phys_segments = src->nr_phys_segments; + dst->ioprio = src->ioprio; + dst->extra_len = src->extra_len; +} + +/** + * blk_rq_prep_clone - Helper function to setup clone request + * @rq: the request to be setup + * @rq_src: original request to be cloned + * @bs: bio_set that bios for clone are allocated from + * @gfp_mask: memory allocation mask for bio + * @bio_ctr: setup function to be called for each clone bio. + * Returns %0 for success, non %0 for failure. 
+ * @data: private data to be passed to @bio_ctr + * + * Description: + * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq. + * The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense) + * are not copied, and copying such parts is the caller's responsibility. + * Also, pages which the original bios are pointing to are not copied + * and the cloned bios just point same pages. + * So cloned bios must be completed before original bios, which means + * the caller must complete @rq before @rq_src. + */ +int blk_rq_prep_clone(struct request *rq, struct request *rq_src, + struct bio_set *bs, gfp_t gfp_mask, + int (*bio_ctr)(struct bio *, struct bio *, void *), + void *data) +{ + struct bio *bio, *bio_src; + + if (!bs) + bs = fs_bio_set; + + blk_rq_init(NULL, rq); + + __rq_for_each_bio(bio_src, rq_src) { + bio = bio_alloc_bioset(gfp_mask, bio_src->bi_max_vecs, bs); + if (!bio) + goto free_and_out; + + __bio_clone(bio, bio_src); + + if (bio_integrity(bio_src) && + bio_integrity_clone(bio, bio_src, gfp_mask, bs)) + goto free_and_out; + + if (bio_ctr && bio_ctr(bio, bio_src, data)) + goto free_and_out; + + if (rq->bio) { + rq->biotail->bi_next = bio; + rq->biotail = bio; + } else + rq->bio = rq->biotail = bio; + } + + __blk_rq_prep_clone(rq, rq_src); + + return 0; + +free_and_out: + if (bio) + bio_free(bio, bs); + blk_rq_unprep_clone(rq); + + return -ENOMEM; +} +EXPORT_SYMBOL_GPL(blk_rq_prep_clone); + +int kblockd_schedule_work(struct request_queue *q, struct work_struct *work) +{ + return queue_work(kblockd_workqueue, work); +} +EXPORT_SYMBOL(kblockd_schedule_work); + +int __init blk_dev_init(void) +{ + BUILD_BUG_ON(__REQ_NR_BITS > 8 * + sizeof(((struct request *)0)->cmd_flags)); + + kblockd_workqueue = create_workqueue("kblockd"); + if (!kblockd_workqueue) + panic("Failed to create kblockd\n"); + + request_cachep = kmem_cache_create("blkdev_requests", + sizeof(struct request), 0, SLAB_PANIC, NULL); + + blk_requestq_cachep = kmem_cache_create("blkdev_queue", + sizeof(struct request_queue), 0, SLAB_PANIC, NULL); + + return 0; +} diff --git a/block/blk-exec.c b/block/blk-exec.c new file mode 100644 index 00000000..e1672f14 --- /dev/null +++ b/block/blk-exec.c @@ -0,0 +1,105 @@ +/* + * Functions related to setting various queue properties from drivers + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/bio.h> +#include <linux/blkdev.h> + +#include "blk.h" + +/* + * for max sense size + */ +#include <scsi/scsi_cmnd.h> + +/** + * blk_end_sync_rq - executes a completion event on a request + * @rq: request to complete + * @error: end I/O status of the request + */ +static void blk_end_sync_rq(struct request *rq, int error) +{ + struct completion *waiting = rq->end_io_data; + + rq->end_io_data = NULL; + __blk_put_request(rq->q, rq); + + /* + * complete last, if this is a stack request the process (and thus + * the rq pointer) could be invalid right after this complete() + */ + complete(waiting); +} + +/** + * blk_execute_rq_nowait - insert a request into queue for execution + * @q: queue to insert the request in + * @bd_disk: matching gendisk + * @rq: request to insert + * @at_head: insert request at head or tail of queue + * @done: I/O completion handler + * + * Description: + * Insert a fully prepared request at the back of the I/O scheduler queue + * for execution. Don't wait for completion. 
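 *
 * A minimal sketch of asynchronous use (my_done, my_handle_result and
 * my_ctx are hypothetical; error handling is omitted):
 *
 *    static void my_done(struct request *rq, int error)
 *    {
 *        my_handle_result(rq->end_io_data, error);
 *        __blk_put_request(rq->q, rq);
 *    }
 *
 *    struct request *rq = blk_get_request(q, READ, GFP_KERNEL);
 *
 *    rq->cmd_type = REQ_TYPE_SPECIAL;
 *    rq->end_io_data = my_ctx;
 *    blk_execute_rq_nowait(q, NULL, rq, 1, my_done);
 *
 * The completion callback runs with the queue lock held, hence the use of
 * __blk_put_request() rather than blk_put_request(), mirroring
 * blk_end_sync_rq() above.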
+ */ +void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, + struct request *rq, int at_head, + rq_end_io_fn *done) +{ + int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; + + rq->rq_disk = bd_disk; + rq->end_io = done; + WARN_ON(irqs_disabled()); + spin_lock_irq(q->queue_lock); + __elv_add_request(q, rq, where, 1); + __generic_unplug_device(q); + /* the queue is stopped so it won't be plugged+unplugged */ + if (rq->cmd_type == REQ_TYPE_PM_RESUME) + q->request_fn(q); + spin_unlock_irq(q->queue_lock); +} +EXPORT_SYMBOL_GPL(blk_execute_rq_nowait); + +/** + * blk_execute_rq - insert a request into queue for execution + * @q: queue to insert the request in + * @bd_disk: matching gendisk + * @rq: request to insert + * @at_head: insert request at head or tail of queue + * + * Description: + * Insert a fully prepared request at the back of the I/O scheduler queue + * for execution and wait for completion. + */ +int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk, + struct request *rq, int at_head) +{ + DECLARE_COMPLETION_ONSTACK(wait); + char sense[SCSI_SENSE_BUFFERSIZE]; + int err = 0; + + /* + * we need an extra reference to the request, so we can look at + * it after io completion + */ + rq->ref_count++; + + if (!rq->sense) { + memset(sense, 0, sizeof(sense)); + rq->sense = sense; + rq->sense_len = 0; + } + + rq->end_io_data = &wait; + blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq); + wait_for_completion(&wait); + + if (rq->errors) + err = -EIO; + + return err; +} +EXPORT_SYMBOL(blk_execute_rq); diff --git a/block/blk-integrity.c b/block/blk-integrity.c new file mode 100644 index 00000000..edce1ef7 --- /dev/null +++ b/block/blk-integrity.c @@ -0,0 +1,387 @@ +/* + * blk-integrity.c - Block layer data integrity extensions + * + * Copyright (C) 2007, 2008 Oracle Corporation + * Written by: Martin K. Petersen <martin.petersen@oracle.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, + * USA. + * + */ + +#include <linux/blkdev.h> +#include <linux/mempool.h> +#include <linux/bio.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> + +#include "blk.h" + +static struct kmem_cache *integrity_cachep; + +/** + * blk_rq_count_integrity_sg - Count number of integrity scatterlist elements + * @rq: request with integrity metadata attached + * + * Description: Returns the number of elements required in a + * scatterlist corresponding to the integrity metadata in a request. 
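 *
 * Illustrative use by a low-level driver preparing a protection
 * scatterlist (my_prot_sgl is a hypothetical table assumed to have room
 * for the counted entries):
 *
 *    nents = blk_rq_count_integrity_sg(rq);
 *    sg_init_table(my_prot_sgl, nents);
 *    nents = blk_rq_map_integrity_sg(rq, my_prot_sgl);
 *
 * blk_rq_map_integrity_sg() is defined below.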
+ */ +int blk_rq_count_integrity_sg(struct request *rq) +{ + struct bio_vec *iv, *ivprv; + struct req_iterator iter; + unsigned int segments; + + ivprv = NULL; + segments = 0; + + rq_for_each_integrity_segment(iv, rq, iter) { + + if (!ivprv || !BIOVEC_PHYS_MERGEABLE(ivprv, iv)) + segments++; + + ivprv = iv; + } + + return segments; +} +EXPORT_SYMBOL(blk_rq_count_integrity_sg); + +/** + * blk_rq_map_integrity_sg - Map integrity metadata into a scatterlist + * @rq: request with integrity metadata attached + * @sglist: target scatterlist + * + * Description: Map the integrity vectors in request into a + * scatterlist. The scatterlist must be big enough to hold all + * elements. I.e. sized using blk_rq_count_integrity_sg(). + */ +int blk_rq_map_integrity_sg(struct request *rq, struct scatterlist *sglist) +{ + struct bio_vec *iv, *ivprv; + struct req_iterator iter; + struct scatterlist *sg; + unsigned int segments; + + ivprv = NULL; + sg = NULL; + segments = 0; + + rq_for_each_integrity_segment(iv, rq, iter) { + + if (ivprv) { + if (!BIOVEC_PHYS_MERGEABLE(ivprv, iv)) + goto new_segment; + + sg->length += iv->bv_len; + } else { +new_segment: + if (!sg) + sg = sglist; + else { + sg->page_link &= ~0x02; + sg = sg_next(sg); + } + + sg_set_page(sg, iv->bv_page, iv->bv_len, iv->bv_offset); + segments++; + } + + ivprv = iv; + } + + if (sg) + sg_mark_end(sg); + + return segments; +} +EXPORT_SYMBOL(blk_rq_map_integrity_sg); + +/** + * blk_integrity_compare - Compare integrity profile of two disks + * @gd1: Disk to compare + * @gd2: Disk to compare + * + * Description: Meta-devices like DM and MD need to verify that all + * sub-devices use the same integrity format before advertising to + * upper layers that they can send/receive integrity metadata. This + * function can be used to check whether two gendisk devices have + * compatible integrity formats. 
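 *
 * For example, a hypothetical stacking driver adding new_disk to a set
 * whose existing members are represented by first_disk could refuse a
 * mismatched member with:
 *
 *    if (blk_integrity_compare(first_disk, new_disk) < 0)
 *        return -EINVAL;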
+ */ +int blk_integrity_compare(struct gendisk *gd1, struct gendisk *gd2) +{ + struct blk_integrity *b1 = gd1->integrity; + struct blk_integrity *b2 = gd2->integrity; + + if (!b1 && !b2) + return 0; + + if (!b1 || !b2) + return -1; + + if (b1->sector_size != b2->sector_size) { + printk(KERN_ERR "%s: %s/%s sector sz %u != %u\n", __func__, + gd1->disk_name, gd2->disk_name, + b1->sector_size, b2->sector_size); + return -1; + } + + if (b1->tuple_size != b2->tuple_size) { + printk(KERN_ERR "%s: %s/%s tuple sz %u != %u\n", __func__, + gd1->disk_name, gd2->disk_name, + b1->tuple_size, b2->tuple_size); + return -1; + } + + if (b1->tag_size && b2->tag_size && (b1->tag_size != b2->tag_size)) { + printk(KERN_ERR "%s: %s/%s tag sz %u != %u\n", __func__, + gd1->disk_name, gd2->disk_name, + b1->tag_size, b2->tag_size); + return -1; + } + + if (strcmp(b1->name, b2->name)) { + printk(KERN_ERR "%s: %s/%s type %s != %s\n", __func__, + gd1->disk_name, gd2->disk_name, + b1->name, b2->name); + return -1; + } + + return 0; +} +EXPORT_SYMBOL(blk_integrity_compare); + +struct integrity_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct blk_integrity *, char *); + ssize_t (*store)(struct blk_integrity *, const char *, size_t); +}; + +static ssize_t integrity_attr_show(struct kobject *kobj, struct attribute *attr, + char *page) +{ + struct blk_integrity *bi = + container_of(kobj, struct blk_integrity, kobj); + struct integrity_sysfs_entry *entry = + container_of(attr, struct integrity_sysfs_entry, attr); + + return entry->show(bi, page); +} + +static ssize_t integrity_attr_store(struct kobject *kobj, + struct attribute *attr, const char *page, + size_t count) +{ + struct blk_integrity *bi = + container_of(kobj, struct blk_integrity, kobj); + struct integrity_sysfs_entry *entry = + container_of(attr, struct integrity_sysfs_entry, attr); + ssize_t ret = 0; + + if (entry->store) + ret = entry->store(bi, page, count); + + return ret; +} + +static ssize_t integrity_format_show(struct blk_integrity *bi, char *page) +{ + if (bi != NULL && bi->name != NULL) + return sprintf(page, "%s\n", bi->name); + else + return sprintf(page, "none\n"); +} + +static ssize_t integrity_tag_size_show(struct blk_integrity *bi, char *page) +{ + if (bi != NULL) + return sprintf(page, "%u\n", bi->tag_size); + else + return sprintf(page, "0\n"); +} + +static ssize_t integrity_read_store(struct blk_integrity *bi, + const char *page, size_t count) +{ + char *p = (char *) page; + unsigned long val = simple_strtoul(p, &p, 10); + + if (val) + bi->flags |= INTEGRITY_FLAG_READ; + else + bi->flags &= ~INTEGRITY_FLAG_READ; + + return count; +} + +static ssize_t integrity_read_show(struct blk_integrity *bi, char *page) +{ + return sprintf(page, "%d\n", (bi->flags & INTEGRITY_FLAG_READ) != 0); +} + +static ssize_t integrity_write_store(struct blk_integrity *bi, + const char *page, size_t count) +{ + char *p = (char *) page; + unsigned long val = simple_strtoul(p, &p, 10); + + if (val) + bi->flags |= INTEGRITY_FLAG_WRITE; + else + bi->flags &= ~INTEGRITY_FLAG_WRITE; + + return count; +} + +static ssize_t integrity_write_show(struct blk_integrity *bi, char *page) +{ + return sprintf(page, "%d\n", (bi->flags & INTEGRITY_FLAG_WRITE) != 0); +} + +static struct integrity_sysfs_entry integrity_format_entry = { + .attr = { .name = "format", .mode = S_IRUGO }, + .show = integrity_format_show, +}; + +static struct integrity_sysfs_entry integrity_tag_size_entry = { + .attr = { .name = "tag_size", .mode = S_IRUGO }, + .show = integrity_tag_size_show, +}; 
+ +static struct integrity_sysfs_entry integrity_read_entry = { + .attr = { .name = "read_verify", .mode = S_IRUGO | S_IWUSR }, + .show = integrity_read_show, + .store = integrity_read_store, +}; + +static struct integrity_sysfs_entry integrity_write_entry = { + .attr = { .name = "write_generate", .mode = S_IRUGO | S_IWUSR }, + .show = integrity_write_show, + .store = integrity_write_store, +}; + +static struct attribute *integrity_attrs[] = { + &integrity_format_entry.attr, + &integrity_tag_size_entry.attr, + &integrity_read_entry.attr, + &integrity_write_entry.attr, + NULL, +}; + +static const struct sysfs_ops integrity_ops = { + .show = &integrity_attr_show, + .store = &integrity_attr_store, +}; + +static int __init blk_dev_integrity_init(void) +{ + integrity_cachep = kmem_cache_create("blkdev_integrity", + sizeof(struct blk_integrity), + 0, SLAB_PANIC, NULL); + return 0; +} +subsys_initcall(blk_dev_integrity_init); + +static void blk_integrity_release(struct kobject *kobj) +{ + struct blk_integrity *bi = + container_of(kobj, struct blk_integrity, kobj); + + kmem_cache_free(integrity_cachep, bi); +} + +static struct kobj_type integrity_ktype = { + .default_attrs = integrity_attrs, + .sysfs_ops = &integrity_ops, + .release = blk_integrity_release, +}; + +/** + * blk_integrity_register - Register a gendisk as being integrity-capable + * @disk: struct gendisk pointer to make integrity-aware + * @template: optional integrity profile to register + * + * Description: When a device needs to advertise itself as being able + * to send/receive integrity metadata it must use this function to + * register the capability with the block layer. The template is a + * blk_integrity struct with values appropriate for the underlying + * hardware. If template is NULL the new profile is allocated but + * not filled out. See Documentation/block/data-integrity.txt. + */ +int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template) +{ + struct blk_integrity *bi; + + BUG_ON(disk == NULL); + + if (disk->integrity == NULL) { + bi = kmem_cache_alloc(integrity_cachep, + GFP_KERNEL | __GFP_ZERO); + if (!bi) + return -1; + + if (kobject_init_and_add(&bi->kobj, &integrity_ktype, + &disk_to_dev(disk)->kobj, + "%s", "integrity")) { + kmem_cache_free(integrity_cachep, bi); + return -1; + } + + kobject_uevent(&bi->kobj, KOBJ_ADD); + + bi->flags |= INTEGRITY_FLAG_READ | INTEGRITY_FLAG_WRITE; + bi->sector_size = queue_logical_block_size(disk->queue); + disk->integrity = bi; + } else + bi = disk->integrity; + + /* Use the provided profile as template */ + if (template != NULL) { + bi->name = template->name; + bi->generate_fn = template->generate_fn; + bi->verify_fn = template->verify_fn; + bi->tuple_size = template->tuple_size; + bi->set_tag_fn = template->set_tag_fn; + bi->get_tag_fn = template->get_tag_fn; + bi->tag_size = template->tag_size; + } else + bi->name = "unsupported"; + + return 0; +} +EXPORT_SYMBOL(blk_integrity_register); + +/** + * blk_integrity_unregister - Remove block integrity profile + * @disk: disk whose integrity profile to deallocate + * + * Description: This function frees all memory used by the block + * integrity profile. To be called at device teardown. 
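 *
 * Typical pairing with blk_integrity_register(), sketched with a
 * hypothetical T10 DIF-style profile (the my_dif_* names and the tuple
 * size are illustrative only). At probe time:
 *
 *    static struct blk_integrity my_dif_profile = {
 *        .name        = "MY-DIF-TYPE1-CRC",
 *        .generate_fn = my_dif_generate,
 *        .verify_fn   = my_dif_verify,
 *        .tuple_size  = 8,
 *        .tag_size    = 0,
 *    };
 *
 *    blk_integrity_register(disk, &my_dif_profile);
 *
 * and at teardown:
 *
 *    blk_integrity_unregister(disk);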
+ */ +void blk_integrity_unregister(struct gendisk *disk) +{ + struct blk_integrity *bi; + + if (!disk || !disk->integrity) + return; + + bi = disk->integrity; + + kobject_uevent(&bi->kobj, KOBJ_REMOVE); + kobject_del(&bi->kobj); + kobject_put(&bi->kobj); + kmem_cache_free(integrity_cachep, bi); + disk->integrity = NULL; +} +EXPORT_SYMBOL(blk_integrity_unregister); diff --git a/block/blk-ioc.c b/block/blk-ioc.c new file mode 100644 index 00000000..68c1c0ea --- /dev/null +++ b/block/blk-ioc.c @@ -0,0 +1,180 @@ +/* + * Functions related to io context handling + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/bio.h> +#include <linux/blkdev.h> +#include <linux/bootmem.h> /* for max_pfn/max_low_pfn */ +#include <linux/slab.h> + +#include "blk.h" + +/* + * For io context allocations + */ +static struct kmem_cache *iocontext_cachep; + +static void cfq_dtor(struct io_context *ioc) +{ + if (!hlist_empty(&ioc->cic_list)) { + struct cfq_io_context *cic; + + cic = list_entry(ioc->cic_list.first, struct cfq_io_context, + cic_list); + cic->dtor(ioc); + } +} + +/* + * IO Context helper functions. put_io_context() returns 1 if there are no + * more users of this io context, 0 otherwise. + */ +int put_io_context(struct io_context *ioc) +{ + if (ioc == NULL) + return 1; + + BUG_ON(atomic_long_read(&ioc->refcount) == 0); + + if (atomic_long_dec_and_test(&ioc->refcount)) { + rcu_read_lock(); + cfq_dtor(ioc); + rcu_read_unlock(); + + kmem_cache_free(iocontext_cachep, ioc); + return 1; + } + return 0; +} +EXPORT_SYMBOL(put_io_context); + +static void cfq_exit(struct io_context *ioc) +{ + rcu_read_lock(); + + if (!hlist_empty(&ioc->cic_list)) { + struct cfq_io_context *cic; + + cic = list_entry(ioc->cic_list.first, struct cfq_io_context, + cic_list); + cic->exit(ioc); + } + rcu_read_unlock(); +} + +/* Called by the exitting task */ +void exit_io_context(struct task_struct *task) +{ + struct io_context *ioc; + + task_lock(task); + ioc = task->io_context; + task->io_context = NULL; + task_unlock(task); + + if (atomic_dec_and_test(&ioc->nr_tasks)) { + cfq_exit(ioc); + + } + put_io_context(ioc); +} + +struct io_context *alloc_io_context(gfp_t gfp_flags, int node) +{ + struct io_context *ret; + + ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node); + if (ret) { + atomic_long_set(&ret->refcount, 1); + atomic_set(&ret->nr_tasks, 1); + spin_lock_init(&ret->lock); + ret->ioprio_changed = 0; + ret->ioprio = 0; + ret->last_waited = 0; /* doesn't matter... */ + ret->nr_batch_requests = 0; /* because this is 0 */ + INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH); + INIT_HLIST_HEAD(&ret->cic_list); + ret->ioc_data = NULL; + } + + return ret; +} + +/* + * If the current task has no IO context then create one and initialise it. + * Otherwise, return its existing IO context. + * + * This returned IO context doesn't have a specifically elevated refcount, + * but since the current task itself holds a reference, the context can be + * used in general code, so long as it stays within `current` context. + */ +struct io_context *current_io_context(gfp_t gfp_flags, int node) +{ + struct task_struct *tsk = current; + struct io_context *ret; + + ret = tsk->io_context; + if (likely(ret)) + return ret; + + ret = alloc_io_context(gfp_flags, node); + if (ret) { + /* make sure set_task_ioprio() sees the settings above */ + smp_wmb(); + tsk->io_context = ret; + } + + return ret; +} + +/* + * If the current task has no IO context then create one and initialise it. 
+ * If it does have a context, take a ref on it. + * + * This is always called in the context of the task which submitted the I/O. + */ +struct io_context *get_io_context(gfp_t gfp_flags, int node) +{ + struct io_context *ret = NULL; + + /* + * Check for unlikely race with exiting task. ioc ref count is + * zero when ioc is being detached. + */ + do { + ret = current_io_context(gfp_flags, node); + if (unlikely(!ret)) + break; + } while (!atomic_long_inc_not_zero(&ret->refcount)); + + return ret; +} +EXPORT_SYMBOL(get_io_context); + +void copy_io_context(struct io_context **pdst, struct io_context **psrc) +{ + struct io_context *src = *psrc; + struct io_context *dst = *pdst; + + if (src) { + BUG_ON(atomic_long_read(&src->refcount) == 0); + atomic_long_inc(&src->refcount); + put_io_context(dst); + *pdst = src; + } +} +EXPORT_SYMBOL(copy_io_context); + +static int __init blk_ioc_init(void) +{ + iocontext_cachep = kmem_cache_create("blkdev_ioc", + sizeof(struct io_context), 0, SLAB_PANIC, NULL); + return 0; +} +#ifdef CONFIG_FAST_RESUME +beforeresume_initcall(blk_ioc_init); +#else +subsys_initcall(blk_ioc_init); +#endif diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c new file mode 100644 index 00000000..58916afb --- /dev/null +++ b/block/blk-iopoll.c @@ -0,0 +1,227 @@ +/* + * Functions related to interrupt-poll handling in the block layer. This + * is similar to NAPI for network devices. + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/bio.h> +#include <linux/blkdev.h> +#include <linux/interrupt.h> +#include <linux/cpu.h> +#include <linux/blk-iopoll.h> +#include <linux/delay.h> + +#include "blk.h" + +int blk_iopoll_enabled = 1; +EXPORT_SYMBOL(blk_iopoll_enabled); + +static unsigned int blk_iopoll_budget __read_mostly = 256; + +static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll); + +/** + * blk_iopoll_sched - Schedule a run of the iopoll handler + * @iop: The parent iopoll structure + * + * Description: + * Add this blk_iopoll structure to the pending poll list and trigger the + * raise of the blk iopoll softirq. The driver must already have gotten a + * successful return from blk_iopoll_sched_prep() before calling this. + **/ +void blk_iopoll_sched(struct blk_iopoll *iop) +{ + unsigned long flags; + + local_irq_save(flags); + list_add_tail(&iop->list, &__get_cpu_var(blk_cpu_iopoll)); + __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); + local_irq_restore(flags); +} +EXPORT_SYMBOL(blk_iopoll_sched); + +/** + * __blk_iopoll_complete - Mark this @iop as un-polled again + * @iop: The parent iopoll structure + * + * Description: + * See blk_iopoll_complete(). This function must be called with interrupts + * disabled. + **/ +void __blk_iopoll_complete(struct blk_iopoll *iop) +{ + list_del(&iop->list); + smp_mb__before_clear_bit(); + clear_bit_unlock(IOPOLL_F_SCHED, &iop->state); +} +EXPORT_SYMBOL(__blk_iopoll_complete); + +/** + * blk_iopoll_complete - Mark this @iop as un-polled again + * @iop: The parent iopoll structure + * + * Description: + * If a driver consumes less than the assigned budget in its run of the + * iopoll handler, it'll end the polled mode by calling this function. The + * iopoll handler will not be invoked again before blk_iopoll_sched_prep() + * is called. 
+ **/ +void blk_iopoll_complete(struct blk_iopoll *iopoll) +{ + unsigned long flags; + + local_irq_save(flags); + __blk_iopoll_complete(iopoll); + local_irq_restore(flags); +} +EXPORT_SYMBOL(blk_iopoll_complete); + +static void blk_iopoll_softirq(struct softirq_action *h) +{ + struct list_head *list = &__get_cpu_var(blk_cpu_iopoll); + int rearm = 0, budget = blk_iopoll_budget; + unsigned long start_time = jiffies; + + local_irq_disable(); + + while (!list_empty(list)) { + struct blk_iopoll *iop; + int work, weight; + + /* + * If softirq window is exhausted then punt. + */ + if (budget <= 0 || time_after(jiffies, start_time)) { + rearm = 1; + break; + } + + local_irq_enable(); + + /* Even though interrupts have been re-enabled, this + * access is safe because interrupts can only add new + * entries to the tail of this list, and only ->poll() + * calls can remove this head entry from the list. + */ + iop = list_entry(list->next, struct blk_iopoll, list); + + weight = iop->weight; + work = 0; + if (test_bit(IOPOLL_F_SCHED, &iop->state)) + work = iop->poll(iop, weight); + + budget -= work; + + local_irq_disable(); + + /* + * Drivers must not modify the iopoll state, if they + * consume their assigned weight (or more, some drivers can't + * easily just stop processing, they have to complete an + * entire mask of commands).In such cases this code + * still "owns" the iopoll instance and therefore can + * move the instance around on the list at-will. + */ + if (work >= weight) { + if (blk_iopoll_disable_pending(iop)) + __blk_iopoll_complete(iop); + else + list_move_tail(&iop->list, list); + } + } + + if (rearm) + __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); + + local_irq_enable(); +} + +/** + * blk_iopoll_disable - Disable iopoll on this @iop + * @iop: The parent iopoll structure + * + * Description: + * Disable io polling and wait for any pending callbacks to have completed. + **/ +void blk_iopoll_disable(struct blk_iopoll *iop) +{ + set_bit(IOPOLL_F_DISABLE, &iop->state); + while (test_and_set_bit(IOPOLL_F_SCHED, &iop->state)) + msleep(1); + clear_bit(IOPOLL_F_DISABLE, &iop->state); +} +EXPORT_SYMBOL(blk_iopoll_disable); + +/** + * blk_iopoll_enable - Enable iopoll on this @iop + * @iop: The parent iopoll structure + * + * Description: + * Enable iopoll on this @iop. Note that the handler run will not be + * scheduled, it will only mark it as active. + **/ +void blk_iopoll_enable(struct blk_iopoll *iop) +{ + BUG_ON(!test_bit(IOPOLL_F_SCHED, &iop->state)); + smp_mb__before_clear_bit(); + clear_bit_unlock(IOPOLL_F_SCHED, &iop->state); +} +EXPORT_SYMBOL(blk_iopoll_enable); + +/** + * blk_iopoll_init - Initialize this @iop + * @iop: The parent iopoll structure + * @weight: The default weight (or command completion budget) + * @poll_fn: The handler to invoke + * + * Description: + * Initialize this blk_iopoll structure. Before being actively used, the + * driver must call blk_iopoll_enable(). 
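 *
 * A rough sketch of the expected usage (struct my_dev and the my_*
 * helpers are hypothetical; the weight of 32 is arbitrary):
 *
 *    static int my_iopoll_fn(struct blk_iopoll *iop, int budget)
 *    {
 *        struct my_dev *dev = container_of(iop, struct my_dev, iopoll);
 *        int done = my_process_completions(dev, budget);
 *
 *        if (done < budget) {
 *            blk_iopoll_complete(iop);
 *            my_enable_device_irq(dev);
 *        }
 *        return done;
 *    }
 *
 * setup:
 *
 *    blk_iopoll_init(&dev->iopoll, 32, my_iopoll_fn);
 *    blk_iopoll_enable(&dev->iopoll);
 *
 * interrupt handler:
 *
 *    my_disable_device_irq(dev);
 *    if (blk_iopoll_sched_prep(&dev->iopoll))
 *        blk_iopoll_sched(&dev->iopoll);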
+ **/ +void blk_iopoll_init(struct blk_iopoll *iop, int weight, blk_iopoll_fn *poll_fn) +{ + memset(iop, 0, sizeof(*iop)); + INIT_LIST_HEAD(&iop->list); + iop->weight = weight; + iop->poll = poll_fn; + set_bit(IOPOLL_F_SCHED, &iop->state); +} +EXPORT_SYMBOL(blk_iopoll_init); + +static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + /* + * If a CPU goes away, splice its entries to the current CPU + * and trigger a run of the softirq + */ + if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { + int cpu = (unsigned long) hcpu; + + local_irq_disable(); + list_splice_init(&per_cpu(blk_cpu_iopoll, cpu), + &__get_cpu_var(blk_cpu_iopoll)); + __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); + local_irq_enable(); + } + + return NOTIFY_OK; +} + +static struct notifier_block __cpuinitdata blk_iopoll_cpu_notifier = { + .notifier_call = blk_iopoll_cpu_notify, +}; + +static __init int blk_iopoll_setup(void) +{ + int i; + + for_each_possible_cpu(i) + INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, i)); + + open_softirq(BLOCK_IOPOLL_SOFTIRQ, blk_iopoll_softirq); + register_hotcpu_notifier(&blk_iopoll_cpu_notifier); + return 0; +} +subsys_initcall(blk_iopoll_setup); diff --git a/block/blk-lib.c b/block/blk-lib.c new file mode 100644 index 00000000..c392029a --- /dev/null +++ b/block/blk-lib.c @@ -0,0 +1,231 @@ +/* + * Functions related to generic helpers functions + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/bio.h> +#include <linux/blkdev.h> +#include <linux/scatterlist.h> + +#include "blk.h" + +static void blkdev_discard_end_io(struct bio *bio, int err) +{ + if (err) { + if (err == -EOPNOTSUPP) + set_bit(BIO_EOPNOTSUPP, &bio->bi_flags); + clear_bit(BIO_UPTODATE, &bio->bi_flags); + } + + if (bio->bi_private) + complete(bio->bi_private); + + bio_put(bio); +} + +/** + * blkdev_issue_discard - queue a discard + * @bdev: blockdev to issue discard for + * @sector: start sector + * @nr_sects: number of sectors to discard + * @gfp_mask: memory allocation flags (for bio_alloc) + * @flags: BLKDEV_IFL_* flags to control behaviour + * + * Description: + * Issue a discard request for the sectors in question. + */ +int blkdev_issue_discard(struct block_device *bdev, sector_t sector, + sector_t nr_sects, gfp_t gfp_mask, unsigned long flags) +{ + DECLARE_COMPLETION_ONSTACK(wait); + struct request_queue *q = bdev_get_queue(bdev); + int type = flags & BLKDEV_IFL_BARRIER ? 
+ DISCARD_BARRIER : DISCARD_NOBARRIER; + unsigned int max_discard_sectors; + struct bio *bio; + int ret = 0; + + if (!q) + return -ENXIO; + + if (!blk_queue_discard(q)) + return -EOPNOTSUPP; + + /* + * Ensure that max_discard_sectors is of the proper + * granularity + */ + max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9); + if (q->limits.discard_granularity) { + unsigned int disc_sects = q->limits.discard_granularity >> 9; + + max_discard_sectors &= ~(disc_sects - 1); + } + + if (flags & BLKDEV_IFL_SECURE) { + if (!blk_queue_secdiscard(q)) + return -EOPNOTSUPP; + type |= DISCARD_SECURE; + } + + while (nr_sects && !ret) { + bio = bio_alloc(gfp_mask, 1); + if (!bio) { + ret = -ENOMEM; + break; + } + + bio->bi_sector = sector; + bio->bi_end_io = blkdev_discard_end_io; + bio->bi_bdev = bdev; + if (flags & BLKDEV_IFL_WAIT) + bio->bi_private = &wait; + + if (nr_sects > max_discard_sectors) { + bio->bi_size = max_discard_sectors << 9; + nr_sects -= max_discard_sectors; + sector += max_discard_sectors; + } else { + bio->bi_size = nr_sects << 9; + nr_sects = 0; + } + + bio_get(bio); + submit_bio(type, bio); + + if (flags & BLKDEV_IFL_WAIT) + wait_for_completion(&wait); + + if (bio_flagged(bio, BIO_EOPNOTSUPP)) + ret = -EOPNOTSUPP; + else if (!bio_flagged(bio, BIO_UPTODATE)) + ret = -EIO; + bio_put(bio); + } + + return ret; +} +EXPORT_SYMBOL(blkdev_issue_discard); + +struct bio_batch +{ + atomic_t done; + unsigned long flags; + struct completion *wait; + bio_end_io_t *end_io; +}; + +static void bio_batch_end_io(struct bio *bio, int err) +{ + struct bio_batch *bb = bio->bi_private; + + if (err) { + if (err == -EOPNOTSUPP) + set_bit(BIO_EOPNOTSUPP, &bb->flags); + else + clear_bit(BIO_UPTODATE, &bb->flags); + } + if (bb) { + if (bb->end_io) + bb->end_io(bio, err); + atomic_inc(&bb->done); + complete(bb->wait); + } + bio_put(bio); +} + +/** + * blkdev_issue_zeroout generate number of zero filed write bios + * @bdev: blockdev to issue + * @sector: start sector + * @nr_sects: number of sectors to write + * @gfp_mask: memory allocation flags (for bio_alloc) + * @flags: BLKDEV_IFL_* flags to control behaviour + * + * Description: + * Generate and issue number of bios with zerofiled pages. + * Send barrier at the beginning and at the end if requested. This guarantie + * correct request ordering. Empty barrier allow us to avoid post queue flush. 
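 *
 * Illustrative caller (bdev, sector and nr_sects are assumed to come from
 * the filesystem): try to discard a range and fall back to writing zeroes
 * when the device lacks discard support:
 *
 *    err = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
 *                               BLKDEV_IFL_WAIT);
 *    if (err == -EOPNOTSUPP)
 *        err = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_NOFS,
 *                                   BLKDEV_IFL_WAIT);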
+ */ + +int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, + sector_t nr_sects, gfp_t gfp_mask, unsigned long flags) +{ + int ret; + struct bio *bio; + struct bio_batch bb; + unsigned int sz, issued = 0; + DECLARE_COMPLETION_ONSTACK(wait); + + atomic_set(&bb.done, 0); + bb.flags = 1 << BIO_UPTODATE; + bb.wait = &wait; + bb.end_io = NULL; + + if (flags & BLKDEV_IFL_BARRIER) { + /* issue async barrier before the data */ + ret = blkdev_issue_flush(bdev, gfp_mask, NULL, 0); + if (ret) + return ret; + } +submit: + ret = 0; + while (nr_sects != 0) { + bio = bio_alloc(gfp_mask, + min(nr_sects, (sector_t)BIO_MAX_PAGES)); + if (!bio) { + ret = -ENOMEM; + break; + } + + bio->bi_sector = sector; + bio->bi_bdev = bdev; + bio->bi_end_io = bio_batch_end_io; + if (flags & BLKDEV_IFL_WAIT) + bio->bi_private = &bb; + + while (nr_sects != 0) { + sz = min((sector_t) PAGE_SIZE >> 9 , nr_sects); + if (sz == 0) + /* bio has maximum size possible */ + break; + ret = bio_add_page(bio, ZERO_PAGE(0), sz << 9, 0); + nr_sects -= ret >> 9; + sector += ret >> 9; + if (ret < (sz << 9)) + break; + } + ret = 0; + issued++; + submit_bio(WRITE, bio); + } + /* + * When all data bios are in flight. Send final barrier if requeted. + */ + if (nr_sects == 0 && flags & BLKDEV_IFL_BARRIER) + ret = blkdev_issue_flush(bdev, gfp_mask, NULL, + flags & BLKDEV_IFL_WAIT); + + + if (flags & BLKDEV_IFL_WAIT) + /* Wait for bios in-flight */ + while ( issued != atomic_read(&bb.done)) + wait_for_completion(&wait); + + if (!test_bit(BIO_UPTODATE, &bb.flags)) + /* One of bios in the batch was completed with error.*/ + ret = -EIO; + + if (ret) + goto out; + + if (test_bit(BIO_EOPNOTSUPP, &bb.flags)) { + ret = -EOPNOTSUPP; + goto out; + } + if (nr_sects != 0) + goto submit; +out: + return ret; +} +EXPORT_SYMBOL(blkdev_issue_zeroout); diff --git a/block/blk-map.c b/block/blk-map.c new file mode 100644 index 00000000..267a57b7 --- /dev/null +++ b/block/blk-map.c @@ -0,0 +1,328 @@ +/* + * Functions related to mapping data to requests + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/bio.h> +#include <linux/blkdev.h> +#include <scsi/sg.h> /* for struct sg_iovec */ + +#include "blk.h" + +int blk_rq_append_bio(struct request_queue *q, struct request *rq, + struct bio *bio) +{ + if (!rq->bio) + blk_rq_bio_prep(q, rq, bio); + else if (!ll_back_merge_fn(q, rq, bio)) + return -EINVAL; + else { + rq->biotail->bi_next = bio; + rq->biotail = bio; + + rq->__data_len += bio->bi_size; + } + return 0; +} + +static int __blk_rq_unmap_user(struct bio *bio) +{ + int ret = 0; + + if (bio) { + if (bio_flagged(bio, BIO_USER_MAPPED)) + bio_unmap_user(bio); + else + ret = bio_uncopy_user(bio); + } + + return ret; +} + +static int __blk_rq_map_user(struct request_queue *q, struct request *rq, + struct rq_map_data *map_data, void __user *ubuf, + unsigned int len, gfp_t gfp_mask) +{ + unsigned long uaddr; + struct bio *bio, *orig_bio; + int reading, ret; + + reading = rq_data_dir(rq) == READ; + + /* + * if alignment requirement is satisfied, map in user pages for + * direct dma. 
else, set up kernel bounce buffers + */ + uaddr = (unsigned long) ubuf; + if (blk_rq_aligned(q, ubuf, len) && !map_data) + bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask); + else + bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask); + + if (IS_ERR(bio)) + return PTR_ERR(bio); + + if (map_data && map_data->null_mapped) + bio->bi_flags |= (1 << BIO_NULL_MAPPED); + + orig_bio = bio; + blk_queue_bounce(q, &bio); + + /* + * We link the bounce buffer in and could have to traverse it + * later so we have to get a ref to prevent it from being freed + */ + bio_get(bio); + + ret = blk_rq_append_bio(q, rq, bio); + if (!ret) + return bio->bi_size; + + /* if it was boucned we must call the end io function */ + bio_endio(bio, 0); + __blk_rq_unmap_user(orig_bio); + bio_put(bio); + return ret; +} + +/** + * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage + * @q: request queue where request should be inserted + * @rq: request structure to fill + * @map_data: pointer to the rq_map_data holding pages (if necessary) + * @ubuf: the user buffer + * @len: length of user data + * @gfp_mask: memory allocation flags + * + * Description: + * Data will be mapped directly for zero copy I/O, if possible. Otherwise + * a kernel bounce buffer is used. + * + * A matching blk_rq_unmap_user() must be issued at the end of I/O, while + * still in process context. + * + * Note: The mapped bio may need to be bounced through blk_queue_bounce() + * before being submitted to the device, as pages mapped may be out of + * reach. It's the callers responsibility to make sure this happens. The + * original bio must be passed back in to blk_rq_unmap_user() for proper + * unmapping. + */ +int blk_rq_map_user(struct request_queue *q, struct request *rq, + struct rq_map_data *map_data, void __user *ubuf, + unsigned long len, gfp_t gfp_mask) +{ + unsigned long bytes_read = 0; + struct bio *bio = NULL; + int ret; + + if (len > (queue_max_hw_sectors(q) << 9)) + return -EINVAL; + if (!len) + return -EINVAL; + + if (!ubuf && (!map_data || !map_data->null_mapped)) + return -EINVAL; + + while (bytes_read != len) { + unsigned long map_len, end, start; + + map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE); + end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1) + >> PAGE_SHIFT; + start = (unsigned long)ubuf >> PAGE_SHIFT; + + /* + * A bad offset could cause us to require BIO_MAX_PAGES + 1 + * pages. If this happens we just lower the requested + * mapping len by a page so that we can fit + */ + if (end - start > BIO_MAX_PAGES) + map_len -= PAGE_SIZE; + + ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len, + gfp_mask); + if (ret < 0) + goto unmap_rq; + if (!bio) + bio = rq->bio; + bytes_read += ret; + ubuf += ret; + + if (map_data) + map_data->offset += ret; + } + + if (!bio_flagged(bio, BIO_USER_MAPPED)) + rq->cmd_flags |= REQ_COPY_USER; + + rq->buffer = NULL; + return 0; +unmap_rq: + blk_rq_unmap_user(bio); + rq->bio = NULL; + return ret; +} +EXPORT_SYMBOL(blk_rq_map_user); + +/** + * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage + * @q: request queue where request should be inserted + * @rq: request to map data to + * @map_data: pointer to the rq_map_data holding pages (if necessary) + * @iov: pointer to the iovec + * @iov_count: number of elements in the iovec + * @len: I/O byte count + * @gfp_mask: memory allocation flags + * + * Description: + * Data will be mapped directly for zero copy I/O, if possible. 
Otherwise + * a kernel bounce buffer is used. + * + * A matching blk_rq_unmap_user() must be issued at the end of I/O, while + * still in process context. + * + * Note: The mapped bio may need to be bounced through blk_queue_bounce() + * before being submitted to the device, as pages mapped may be out of + * reach. It's the callers responsibility to make sure this happens. The + * original bio must be passed back in to blk_rq_unmap_user() for proper + * unmapping. + */ +int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, + struct rq_map_data *map_data, struct sg_iovec *iov, + int iov_count, unsigned int len, gfp_t gfp_mask) +{ + struct bio *bio; + int i, read = rq_data_dir(rq) == READ; + int unaligned = 0; + + if (!iov || iov_count <= 0) + return -EINVAL; + + for (i = 0; i < iov_count; i++) { + unsigned long uaddr = (unsigned long)iov[i].iov_base; + + if (uaddr & queue_dma_alignment(q)) { + unaligned = 1; + break; + } + if (!iov[i].iov_len) + return -EINVAL; + } + + if (unaligned || (q->dma_pad_mask & len) || map_data) + bio = bio_copy_user_iov(q, map_data, iov, iov_count, read, + gfp_mask); + else + bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask); + + if (IS_ERR(bio)) + return PTR_ERR(bio); + + if (bio->bi_size != len) { + /* + * Grab an extra reference to this bio, as bio_unmap_user() + * expects to be able to drop it twice as it happens on the + * normal IO completion path + */ + bio_get(bio); + bio_endio(bio, 0); + __blk_rq_unmap_user(bio); + return -EINVAL; + } + + if (!bio_flagged(bio, BIO_USER_MAPPED)) + rq->cmd_flags |= REQ_COPY_USER; + + blk_queue_bounce(q, &bio); + bio_get(bio); + blk_rq_bio_prep(q, rq, bio); + rq->buffer = NULL; + return 0; +} +EXPORT_SYMBOL(blk_rq_map_user_iov); + +/** + * blk_rq_unmap_user - unmap a request with user data + * @bio: start of bio list + * + * Description: + * Unmap a rq previously mapped by blk_rq_map_user(). The caller must + * supply the original rq->bio from the blk_rq_map_user() return, since + * the I/O completion may have changed rq->bio. + */ +int blk_rq_unmap_user(struct bio *bio) +{ + struct bio *mapped_bio; + int ret = 0, ret2; + + while (bio) { + mapped_bio = bio; + if (unlikely(bio_flagged(bio, BIO_BOUNCED))) + mapped_bio = bio->bi_private; + + ret2 = __blk_rq_unmap_user(mapped_bio); + if (ret2 && !ret) + ret = ret2; + + mapped_bio = bio; + bio = bio->bi_next; + bio_put(mapped_bio); + } + + return ret; +} +EXPORT_SYMBOL(blk_rq_unmap_user); + +/** + * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage + * @q: request queue where request should be inserted + * @rq: request to fill + * @kbuf: the kernel buffer + * @len: length of user data + * @gfp_mask: memory allocation flags + * + * Description: + * Data will be mapped directly if possible. Otherwise a bounce + * buffer is used. Can be called multple times to append multple + * buffers. 
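 *
 * A sketch of the usual passthrough pattern (cdb, cdb_len, buffer and
 * buf_len are the caller's; error handling is trimmed), similar to how
 * the SCSI midlayer issues internal commands:
 *
 *    struct request *rq = blk_get_request(q, WRITE, GFP_KERNEL);
 *
 *    rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *    rq->cmd_len = cdb_len;
 *    memcpy(rq->cmd, cdb, cdb_len);
 *
 *    err = blk_rq_map_kern(q, rq, buffer, buf_len, GFP_KERNEL);
 *    if (!err)
 *        err = blk_execute_rq(q, NULL, rq, 0);
 *    blk_put_request(rq);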
+ */ +int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, + unsigned int len, gfp_t gfp_mask) +{ + int reading = rq_data_dir(rq) == READ; + int do_copy = 0; + struct bio *bio; + int ret; + + if (len > (queue_max_hw_sectors(q) << 9)) + return -EINVAL; + if (!len || !kbuf) + return -EINVAL; + + do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf); + if (do_copy) + bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading); + else + bio = bio_map_kern(q, kbuf, len, gfp_mask); + + if (IS_ERR(bio)) + return PTR_ERR(bio); + + if (rq_data_dir(rq) == WRITE) + bio->bi_rw |= REQ_WRITE; + + if (do_copy) + rq->cmd_flags |= REQ_COPY_USER; + + ret = blk_rq_append_bio(q, rq, bio); + if (unlikely(ret)) { + /* request is too big */ + bio_put(bio); + return ret; + } + + blk_queue_bounce(q, &rq->bio); + rq->buffer = NULL; + return 0; +} +EXPORT_SYMBOL(blk_rq_map_kern); diff --git a/block/blk-merge.c b/block/blk-merge.c new file mode 100644 index 00000000..c24bf43d --- /dev/null +++ b/block/blk-merge.c @@ -0,0 +1,461 @@ +/* + * Functions related to segment and merge handling + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/bio.h> +#include <linux/blkdev.h> +#include <linux/scatterlist.h> + +#include "blk.h" + +static unsigned int __blk_recalc_rq_segments(struct request_queue *q, + struct bio *bio) +{ + struct bio_vec *bv, *bvprv = NULL; + int cluster, i, high, highprv = 1; + unsigned int seg_size, nr_phys_segs; + struct bio *fbio, *bbio; + + if (!bio) + return 0; + + fbio = bio; + cluster = blk_queue_cluster(q); + seg_size = 0; + nr_phys_segs = 0; + for_each_bio(bio) { + bio_for_each_segment(bv, bio, i) { + /* + * the trick here is making sure that a high page is + * never considered part of another segment, since that + * might change with the bounce page. 
+ */ + high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q); + if (high || highprv) + goto new_segment; + if (cluster) { + if (seg_size + bv->bv_len + > queue_max_segment_size(q)) + goto new_segment; + if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv)) + goto new_segment; + if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv)) + goto new_segment; + + seg_size += bv->bv_len; + bvprv = bv; + continue; + } +new_segment: + if (nr_phys_segs == 1 && seg_size > + fbio->bi_seg_front_size) + fbio->bi_seg_front_size = seg_size; + + nr_phys_segs++; + bvprv = bv; + seg_size = bv->bv_len; + highprv = high; + } + bbio = bio; + } + + if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size) + fbio->bi_seg_front_size = seg_size; + if (seg_size > bbio->bi_seg_back_size) + bbio->bi_seg_back_size = seg_size; + + return nr_phys_segs; +} + +void blk_recalc_rq_segments(struct request *rq) +{ + rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio); +} + +void blk_recount_segments(struct request_queue *q, struct bio *bio) +{ + struct bio *nxt = bio->bi_next; + + bio->bi_next = NULL; + bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio); + bio->bi_next = nxt; + bio->bi_flags |= (1 << BIO_SEG_VALID); +} +EXPORT_SYMBOL(blk_recount_segments); + +static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, + struct bio *nxt) +{ + if (!blk_queue_cluster(q)) + return 0; + + if (bio->bi_seg_back_size + nxt->bi_seg_front_size > + queue_max_segment_size(q)) + return 0; + + if (!bio_has_data(bio)) + return 1; + + if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt))) + return 0; + + /* + * bio and nxt are contiguous in memory; check if the queue allows + * these two to be merged into one + */ + if (BIO_SEG_BOUNDARY(q, bio, nxt)) + return 1; + + return 0; +} + +/* + * map a request to scatterlist, return number of sg entries setup. Caller + * must make sure sg can hold rq->nr_phys_segments entries + */ +int blk_rq_map_sg(struct request_queue *q, struct request *rq, + struct scatterlist *sglist) +{ + struct bio_vec *bvec, *bvprv; + struct req_iterator iter; + struct scatterlist *sg; + int nsegs, cluster; + + nsegs = 0; + cluster = blk_queue_cluster(q); + + /* + * for each bio in rq + */ + bvprv = NULL; + sg = NULL; + rq_for_each_segment(bvec, rq, iter) { + int nbytes = bvec->bv_len; + + if (bvprv && cluster) { + if (sg->length + nbytes > queue_max_segment_size(q)) + goto new_segment; + + if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) + goto new_segment; + if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec)) + goto new_segment; + + sg->length += nbytes; + } else { +new_segment: + if (!sg) + sg = sglist; + else { + /* + * If the driver previously mapped a shorter + * list, we could see a termination bit + * prematurely unless it fully inits the sg + * table on each mapping. We KNOW that there + * must be more entries here or the driver + * would be buggy, so force clear the + * termination bit to avoid doing a full + * sg_init_table() in drivers for each command. 
+ */ + sg->page_link &= ~0x02; + sg = sg_next(sg); + } + + sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset); + nsegs++; + } + bvprv = bvec; + } /* segments in rq */ + + + if (unlikely(rq->cmd_flags & REQ_COPY_USER) && + (blk_rq_bytes(rq) & q->dma_pad_mask)) { + unsigned int pad_len = + (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1; + + sg->length += pad_len; + rq->extra_len += pad_len; + } + + if (q->dma_drain_size && q->dma_drain_needed(rq)) { + if (rq->cmd_flags & REQ_WRITE) + memset(q->dma_drain_buffer, 0, q->dma_drain_size); + + sg->page_link &= ~0x02; + sg = sg_next(sg); + sg_set_page(sg, virt_to_page(q->dma_drain_buffer), + q->dma_drain_size, + ((unsigned long)q->dma_drain_buffer) & + (PAGE_SIZE - 1)); + nsegs++; + rq->extra_len += q->dma_drain_size; + } + + if (sg) + sg_mark_end(sg); + + return nsegs; +} +EXPORT_SYMBOL(blk_rq_map_sg); + +static inline int ll_new_hw_segment(struct request_queue *q, + struct request *req, + struct bio *bio) +{ + int nr_phys_segs = bio_phys_segments(q, bio); + + if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q)) { + req->cmd_flags |= REQ_NOMERGE; + if (req == q->last_merge) + q->last_merge = NULL; + return 0; + } + + /* + * This will form the start of a new hw segment. Bump both + * counters. + */ + req->nr_phys_segments += nr_phys_segs; + return 1; +} + +int ll_back_merge_fn(struct request_queue *q, struct request *req, + struct bio *bio) +{ + unsigned short max_sectors; + + if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC)) + max_sectors = queue_max_hw_sectors(q); + else + max_sectors = queue_max_sectors(q); + + if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) { + req->cmd_flags |= REQ_NOMERGE; + if (req == q->last_merge) + q->last_merge = NULL; + return 0; + } + if (!bio_flagged(req->biotail, BIO_SEG_VALID)) + blk_recount_segments(q, req->biotail); + if (!bio_flagged(bio, BIO_SEG_VALID)) + blk_recount_segments(q, bio); + + return ll_new_hw_segment(q, req, bio); +} + +int ll_front_merge_fn(struct request_queue *q, struct request *req, + struct bio *bio) +{ + unsigned short max_sectors; + + if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC)) + max_sectors = queue_max_hw_sectors(q); + else + max_sectors = queue_max_sectors(q); + + + if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) { + req->cmd_flags |= REQ_NOMERGE; + if (req == q->last_merge) + q->last_merge = NULL; + return 0; + } + if (!bio_flagged(bio, BIO_SEG_VALID)) + blk_recount_segments(q, bio); + if (!bio_flagged(req->bio, BIO_SEG_VALID)) + blk_recount_segments(q, req->bio); + + return ll_new_hw_segment(q, req, bio); +} + +static int ll_merge_requests_fn(struct request_queue *q, struct request *req, + struct request *next) +{ + int total_phys_segments; + unsigned int seg_size = + req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size; + + /* + * First check if the either of the requests are re-queued + * requests. Can't merge them if they are. + */ + if (req->special || next->special) + return 0; + + /* + * Will it become too large? + */ + if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q)) + return 0; + + total_phys_segments = req->nr_phys_segments + next->nr_phys_segments; + if (blk_phys_contig_segment(q, req->biotail, next->bio)) { + if (req->nr_phys_segments == 1) + req->bio->bi_seg_front_size = seg_size; + if (next->nr_phys_segments == 1) + next->biotail->bi_seg_back_size = seg_size; + total_phys_segments--; + } + + if (total_phys_segments > queue_max_segments(q)) + return 0; + + /* Merge is OK... 
*/ + req->nr_phys_segments = total_phys_segments; + return 1; +} + +/** + * blk_rq_set_mixed_merge - mark a request as mixed merge + * @rq: request to mark as mixed merge + * + * Description: + * @rq is about to be mixed merged. Make sure the attributes + * which can be mixed are set in each bio and mark @rq as mixed + * merged. + */ +void blk_rq_set_mixed_merge(struct request *rq) +{ + unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK; + struct bio *bio; + + if (rq->cmd_flags & REQ_MIXED_MERGE) + return; + + /* + * @rq will no longer represent mixable attributes for all the + * contained bios. It will just track those of the first one. + * Distributes the attributs to each bio. + */ + for (bio = rq->bio; bio; bio = bio->bi_next) { + WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) && + (bio->bi_rw & REQ_FAILFAST_MASK) != ff); + bio->bi_rw |= ff; + } + rq->cmd_flags |= REQ_MIXED_MERGE; +} + +static void blk_account_io_merge(struct request *req) +{ + if (blk_do_io_stat(req)) { + struct hd_struct *part; + int cpu; + + cpu = part_stat_lock(); + part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req)); + + part_round_stats(cpu, part); + part_dec_in_flight(part, rq_data_dir(req)); + + part_stat_unlock(); + } +} + +/* + * Has to be called with the request spinlock acquired + */ +static int attempt_merge(struct request_queue *q, struct request *req, + struct request *next) +{ + if (!rq_mergeable(req) || !rq_mergeable(next)) + return 0; + + /* + * Don't merge file system requests and discard requests + */ + if ((req->cmd_flags & REQ_DISCARD) != (next->cmd_flags & REQ_DISCARD)) + return 0; + + /* + * Don't merge discard requests and secure discard requests + */ + if ((req->cmd_flags & REQ_SECURE) != (next->cmd_flags & REQ_SECURE)) + return 0; + + /* + * not contiguous + */ + if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next)) + return 0; + + if (rq_data_dir(req) != rq_data_dir(next) + || req->rq_disk != next->rq_disk + || next->special) + return 0; + + if (blk_integrity_rq(req) != blk_integrity_rq(next)) + return 0; + + /* + * If we are allowed to merge, then append bio list + * from next to rq and release next. merge_requests_fn + * will have updated segment counts, update sector + * counts here. + */ + if (!ll_merge_requests_fn(q, req, next)) + return 0; + + /* + * If failfast settings disagree or any of the two is already + * a mixed merge, mark both as mixed before proceeding. This + * makes sure that all involved bios have mixable attributes + * set properly. + */ + if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE || + (req->cmd_flags & REQ_FAILFAST_MASK) != + (next->cmd_flags & REQ_FAILFAST_MASK)) { + blk_rq_set_mixed_merge(req); + blk_rq_set_mixed_merge(next); + } + + /* + * At this point we have either done a back merge + * or front merge. We need the smaller start_time of + * the merged requests to be the current request + * for accounting purposes. 
+ */ + if (time_after(req->start_time, next->start_time)) + req->start_time = next->start_time; + + req->biotail->bi_next = next->bio; + req->biotail = next->biotail; + + req->__data_len += blk_rq_bytes(next); + + elv_merge_requests(q, req, next); + + /* + * 'next' is going away, so update stats accordingly + */ + blk_account_io_merge(next); + + req->ioprio = ioprio_best(req->ioprio, next->ioprio); + if (blk_rq_cpu_valid(next)) + req->cpu = next->cpu; + + /* owner-ship of bio passed from next to req */ + next->bio = NULL; + __blk_put_request(q, next); + return 1; +} + +int attempt_back_merge(struct request_queue *q, struct request *rq) +{ + struct request *next = elv_latter_request(q, rq); + + if (next) + return attempt_merge(q, rq, next); + + return 0; +} + +int attempt_front_merge(struct request_queue *q, struct request *rq) +{ + struct request *prev = elv_former_request(q, rq); + + if (prev) + return attempt_merge(q, prev, rq); + + return 0; +} diff --git a/block/blk-settings.c b/block/blk-settings.c new file mode 100644 index 00000000..e8b3dc5c --- /dev/null +++ b/block/blk-settings.c @@ -0,0 +1,786 @@ +/* + * Functions related to setting various queue properties from drivers + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/bio.h> +#include <linux/blkdev.h> +#include <linux/bootmem.h> /* for max_pfn/max_low_pfn */ +#include <linux/gcd.h> +#include <linux/lcm.h> +#include <linux/jiffies.h> +#include <linux/gfp.h> + +#include "blk.h" + +unsigned long blk_max_low_pfn; +EXPORT_SYMBOL(blk_max_low_pfn); + +unsigned long blk_max_pfn; + +/** + * blk_queue_prep_rq - set a prepare_request function for queue + * @q: queue + * @pfn: prepare_request function + * + * It's possible for a queue to register a prepare_request callback which + * is invoked before the request is handed to the request_fn. The goal of + * the function is to prepare a request for I/O, it can be used to build a + * cdb from the request data for instance. + * + */ +void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn) +{ + q->prep_rq_fn = pfn; +} +EXPORT_SYMBOL(blk_queue_prep_rq); + +/** + * blk_queue_unprep_rq - set an unprepare_request function for queue + * @q: queue + * @ufn: unprepare_request function + * + * It's possible for a queue to register an unprepare_request callback + * which is invoked before the request is finally completed. The goal + * of the function is to deallocate any data that was allocated in the + * prepare_request callback. + * + */ +void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn) +{ + q->unprep_rq_fn = ufn; +} +EXPORT_SYMBOL(blk_queue_unprep_rq); + +/** + * blk_queue_merge_bvec - set a merge_bvec function for queue + * @q: queue + * @mbfn: merge_bvec_fn + * + * Usually queues have static limitations on the max sectors or segments that + * we can put in a request. Stacking drivers may have some settings that + * are dynamic, and thus we have to query the queue whether it is ok to + * add a new bio_vec to a bio at a given offset or not. If the block device + * has such limitations, it needs to register a merge_bvec_fn to control + * the size of bio's sent to it. Note that a block device *must* allow a + * single page to be added to an empty bio. The block device driver may want + * to use the bio_split() function to deal with these bio's. By default + * no merge_bvec_fn is defined for a queue, and only the fixed limits are + * honored. 
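
Editorial aside, not part of this commit: blk_queue_prep_rq(), defined earlier in this file, is typically used to build a command block before the request reaches the driver's request_fn. A rough sketch for a hypothetical device with 32-bit LBAs; the MYDRV_OP_* opcodes are assumptions:

	#include <linux/blkdev.h>
	#include <asm/unaligned.h>

	static int mydrv_prep_rq(struct request_queue *q, struct request *rq)
	{
		if (rq->cmd_type != REQ_TYPE_FS)
			return BLKPREP_OK;	/* leave pass-through requests alone */

		/* build a simple 10-byte CDB from the request geometry */
		rq->cmd[0] = rq_data_dir(rq) == WRITE ? MYDRV_OP_WRITE : MYDRV_OP_READ;
		put_unaligned_be32(blk_rq_pos(rq), &rq->cmd[2]);
		put_unaligned_be16(blk_rq_sectors(rq), &rq->cmd[7]);
		rq->cmd_len = 10;

		return BLKPREP_OK;
	}

	/* during queue setup */
	blk_queue_prep_rq(q, mydrv_prep_rq);
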
+ */ +void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn) +{ + q->merge_bvec_fn = mbfn; +} +EXPORT_SYMBOL(blk_queue_merge_bvec); + +void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn) +{ + q->softirq_done_fn = fn; +} +EXPORT_SYMBOL(blk_queue_softirq_done); + +void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout) +{ + q->rq_timeout = timeout; +} +EXPORT_SYMBOL_GPL(blk_queue_rq_timeout); + +void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn) +{ + q->rq_timed_out_fn = fn; +} +EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out); + +void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn) +{ + q->lld_busy_fn = fn; +} +EXPORT_SYMBOL_GPL(blk_queue_lld_busy); + +/** + * blk_set_default_limits - reset limits to default values + * @lim: the queue_limits structure to reset + * + * Description: + * Returns a queue_limit struct to its default state. Can be used by + * stacking drivers like DM that stage table swaps and reuse an + * existing device queue. + */ +void blk_set_default_limits(struct queue_limits *lim) +{ + lim->max_segments = BLK_MAX_SEGMENTS; + lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; + lim->max_segment_size = BLK_MAX_SEGMENT_SIZE; + lim->max_sectors = BLK_DEF_MAX_SECTORS; + lim->max_hw_sectors = INT_MAX; + lim->max_discard_sectors = 0; + lim->discard_granularity = 0; + lim->discard_alignment = 0; + lim->discard_misaligned = 0; + lim->discard_zeroes_data = -1; + lim->logical_block_size = lim->physical_block_size = lim->io_min = 512; + lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT); + lim->alignment_offset = 0; + lim->io_opt = 0; + lim->misaligned = 0; + lim->cluster = 1; +} +EXPORT_SYMBOL(blk_set_default_limits); + +/** + * blk_queue_make_request - define an alternate make_request function for a device + * @q: the request queue for the device to be affected + * @mfn: the alternate make_request function + * + * Description: + * The normal way for &struct bios to be passed to a device + * driver is for them to be collected into requests on a request + * queue, and then to allow the device driver to select requests + * off that queue when it is ready. This works well for many block + * devices. However some block devices (typically virtual devices + * such as md or lvm) do not benefit from the processing on the + * request queue, and are served best by having the requests passed + * directly to them. This can be achieved by providing a function + * to blk_queue_make_request(). + * + * Caveat: + * The driver that does this *must* be able to deal appropriately + * with buffers in "highmemory". This can be accomplished by either calling + * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling + * blk_queue_bounce() to create a buffer in normal memory. 
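
Editorial aside, not part of this commit: a virtual device that wants to bypass the request queue entirely, in the md/loop spirit described here, registers its own make_request function roughly as follows; mydev_handle_bio() stands in for the real work:

	static int mydev_make_request(struct request_queue *q, struct bio *bio)
	{
		struct mydev *dev = q->queuedata;

		/* service (or remap and resubmit) the bio directly */
		if (mydev_handle_bio(dev, bio))
			bio_io_error(bio);
		else
			bio_endio(bio, 0);
		return 0;
	}

	/* during device setup */
	q = blk_alloc_queue(GFP_KERNEL);
	if (!q)
		return -ENOMEM;
	q->queuedata = dev;
	blk_queue_make_request(q, mydev_make_request);
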
+ **/ +void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn) +{ + /* + * set defaults + */ + q->nr_requests = BLKDEV_MAX_RQ; + + q->make_request_fn = mfn; + blk_queue_dma_alignment(q, 511); + blk_queue_congestion_threshold(q); + q->nr_batching = BLK_BATCH_REQ; + + q->unplug_thresh = 4; /* hmm */ + q->unplug_delay = msecs_to_jiffies(3); /* 3 milliseconds */ + if (q->unplug_delay == 0) + q->unplug_delay = 1; + + q->unplug_timer.function = blk_unplug_timeout; + q->unplug_timer.data = (unsigned long)q; + + blk_set_default_limits(&q->limits); + blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS); + + /* + * If the caller didn't supply a lock, fall back to our embedded + * per-queue locks + */ + if (!q->queue_lock) + q->queue_lock = &q->__queue_lock; + + /* + * by default assume old behaviour and bounce for any highmem page + */ + blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); +} +EXPORT_SYMBOL(blk_queue_make_request); + +/** + * blk_queue_bounce_limit - set bounce buffer limit for queue + * @q: the request queue for the device + * @dma_mask: the maximum address the device can handle + * + * Description: + * Different hardware can have different requirements as to what pages + * it can do I/O directly to. A low level driver can call + * blk_queue_bounce_limit to have lower memory pages allocated as bounce + * buffers for doing I/O to pages residing above @dma_mask. + **/ +void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask) +{ + unsigned long b_pfn = dma_mask >> PAGE_SHIFT; + int dma = 0; + + q->bounce_gfp = GFP_NOIO; +#if BITS_PER_LONG == 64 + /* + * Assume anything <= 4GB can be handled by IOMMU. Actually + * some IOMMUs can handle everything, but I don't know of a + * way to test this here. + */ + if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT)) + dma = 1; + q->limits.bounce_pfn = max_low_pfn; +#else + if (b_pfn < blk_max_low_pfn) + dma = 1; + q->limits.bounce_pfn = b_pfn; +#endif + if (dma) { + init_emergency_isa_pool(); + q->bounce_gfp = GFP_NOIO | GFP_DMA; + q->limits.bounce_pfn = b_pfn; + } +} +EXPORT_SYMBOL(blk_queue_bounce_limit); + +/** + * blk_queue_max_hw_sectors - set max sectors for a request for this queue + * @q: the request queue for the device + * @max_hw_sectors: max hardware sectors in the usual 512b unit + * + * Description: + * Enables a low level driver to set a hard upper limit, + * max_hw_sectors, on the size of requests. max_hw_sectors is set by + * the device driver based upon the combined capabilities of I/O + * controller and storage device. + * + * max_sectors is a soft limit imposed by the block layer for + * filesystem type requests. This value can be overridden on a + * per-device basis in /sys/block/<device>/queue/max_sectors_kb. + * The soft limit can not exceed max_hw_sectors. 
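
Editorial aside, not part of this commit: in practice a driver calls a handful of these limit setters in one burst while initializing its queue. A fragment for a hypothetical controller restricted to 32-bit DMA, 64 scatter/gather entries and 128 KiB per command:

	blk_queue_bounce_limit(q, DMA_BIT_MASK(32));	/* bounce pages above 4 GB */
	blk_queue_max_hw_sectors(q, 256);		/* 256 * 512 B = 128 KiB */
	blk_queue_max_segments(q, 64);			/* hardware S/G table size */
	blk_queue_max_segment_size(q, 64 * 1024);
	blk_queue_rq_timeout(q, 30 * HZ);		/* 30 seconds per command */
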
+ **/ +void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors) +{ + if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) { + max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9); + printk(KERN_INFO "%s: set to minimum %d\n", + __func__, max_hw_sectors); + } + + q->limits.max_hw_sectors = max_hw_sectors; + q->limits.max_sectors = min_t(unsigned int, max_hw_sectors, + BLK_DEF_MAX_SECTORS); +} +EXPORT_SYMBOL(blk_queue_max_hw_sectors); + +/** + * blk_queue_max_discard_sectors - set max sectors for a single discard + * @q: the request queue for the device + * @max_discard_sectors: maximum number of sectors to discard + **/ +void blk_queue_max_discard_sectors(struct request_queue *q, + unsigned int max_discard_sectors) +{ + q->limits.max_discard_sectors = max_discard_sectors; +} +EXPORT_SYMBOL(blk_queue_max_discard_sectors); + +/** + * blk_queue_max_segments - set max hw segments for a request for this queue + * @q: the request queue for the device + * @max_segments: max number of segments + * + * Description: + * Enables a low level driver to set an upper limit on the number of + * hw data segments in a request. + **/ +void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments) +{ + if (!max_segments) { + max_segments = 1; + printk(KERN_INFO "%s: set to minimum %d\n", + __func__, max_segments); + } + + q->limits.max_segments = max_segments; +} +EXPORT_SYMBOL(blk_queue_max_segments); + +/** + * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg + * @q: the request queue for the device + * @max_size: max size of segment in bytes + * + * Description: + * Enables a low level driver to set an upper limit on the size of a + * coalesced segment + **/ +void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size) +{ + if (max_size < PAGE_CACHE_SIZE) { + max_size = PAGE_CACHE_SIZE; + printk(KERN_INFO "%s: set to minimum %d\n", + __func__, max_size); + } + + q->limits.max_segment_size = max_size; +} +EXPORT_SYMBOL(blk_queue_max_segment_size); + +/** + * blk_queue_logical_block_size - set logical block size for the queue + * @q: the request queue for the device + * @size: the logical block size, in bytes + * + * Description: + * This should be set to the lowest possible block size that the + * storage device can address. The default of 512 covers most + * hardware. + **/ +void blk_queue_logical_block_size(struct request_queue *q, unsigned short size) +{ + q->limits.logical_block_size = size; + + if (q->limits.physical_block_size < size) + q->limits.physical_block_size = size; + + if (q->limits.io_min < q->limits.physical_block_size) + q->limits.io_min = q->limits.physical_block_size; +} +EXPORT_SYMBOL(blk_queue_logical_block_size); + +/** + * blk_queue_physical_block_size - set physical block size for the queue + * @q: the request queue for the device + * @size: the physical block size, in bytes + * + * Description: + * This should be set to the lowest possible sector size that the + * hardware can operate on without reverting to read-modify-write + * operations. 
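
Editorial aside, not part of this commit: a 4 KiB-sector "advanced format" disk that still accepts 512-byte addressing would advertise itself through these hints, for example:

	blk_queue_logical_block_size(q, 512);	/* smallest addressable unit */
	blk_queue_physical_block_size(q, 4096);	/* internal sector size */
	blk_queue_alignment_offset(q, 0);	/* LBA 0 starts a physical sector */
	blk_queue_io_min(q, 4096);		/* avoid read-modify-write cycles */
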
+ */ +void blk_queue_physical_block_size(struct request_queue *q, unsigned int size) +{ + q->limits.physical_block_size = size; + + if (q->limits.physical_block_size < q->limits.logical_block_size) + q->limits.physical_block_size = q->limits.logical_block_size; + + if (q->limits.io_min < q->limits.physical_block_size) + q->limits.io_min = q->limits.physical_block_size; +} +EXPORT_SYMBOL(blk_queue_physical_block_size); + +/** + * blk_queue_alignment_offset - set physical block alignment offset + * @q: the request queue for the device + * @offset: alignment offset in bytes + * + * Description: + * Some devices are naturally misaligned to compensate for things like + * the legacy DOS partition table 63-sector offset. Low-level drivers + * should call this function for devices whose first sector is not + * naturally aligned. + */ +void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset) +{ + q->limits.alignment_offset = + offset & (q->limits.physical_block_size - 1); + q->limits.misaligned = 0; +} +EXPORT_SYMBOL(blk_queue_alignment_offset); + +/** + * blk_limits_io_min - set minimum request size for a device + * @limits: the queue limits + * @min: smallest I/O size in bytes + * + * Description: + * Some devices have an internal block size bigger than the reported + * hardware sector size. This function can be used to signal the + * smallest I/O the device can perform without incurring a performance + * penalty. + */ +void blk_limits_io_min(struct queue_limits *limits, unsigned int min) +{ + limits->io_min = min; + + if (limits->io_min < limits->logical_block_size) + limits->io_min = limits->logical_block_size; + + if (limits->io_min < limits->physical_block_size) + limits->io_min = limits->physical_block_size; +} +EXPORT_SYMBOL(blk_limits_io_min); + +/** + * blk_queue_io_min - set minimum request size for the queue + * @q: the request queue for the device + * @min: smallest I/O size in bytes + * + * Description: + * Storage devices may report a granularity or preferred minimum I/O + * size which is the smallest request the device can perform without + * incurring a performance penalty. For disk drives this is often the + * physical block size. For RAID arrays it is often the stripe chunk + * size. A properly aligned multiple of minimum_io_size is the + * preferred request size for workloads where a high number of I/O + * operations is desired. + */ +void blk_queue_io_min(struct request_queue *q, unsigned int min) +{ + blk_limits_io_min(&q->limits, min); +} +EXPORT_SYMBOL(blk_queue_io_min); + +/** + * blk_limits_io_opt - set optimal request size for a device + * @limits: the queue limits + * @opt: smallest I/O size in bytes + * + * Description: + * Storage devices may report an optimal I/O size, which is the + * device's preferred unit for sustained I/O. This is rarely reported + * for disk drives. For RAID arrays it is usually the stripe width or + * the internal track size. A properly aligned multiple of + * optimal_io_size is the preferred request size for workloads where + * sustained throughput is desired. + */ +void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt) +{ + limits->io_opt = opt; +} +EXPORT_SYMBOL(blk_limits_io_opt); + +/** + * blk_queue_io_opt - set optimal request size for the queue + * @q: the request queue for the device + * @opt: optimal request size in bytes + * + * Description: + * Storage devices may report an optimal I/O size, which is the + * device's preferred unit for sustained I/O. This is rarely reported + * for disk drives. 
For RAID arrays it is usually the stripe width or + * the internal track size. A properly aligned multiple of + * optimal_io_size is the preferred request size for workloads where + * sustained throughput is desired. + */ +void blk_queue_io_opt(struct request_queue *q, unsigned int opt) +{ + blk_limits_io_opt(&q->limits, opt); +} +EXPORT_SYMBOL(blk_queue_io_opt); + +/* + * Returns the minimum that is _not_ zero, unless both are zero. + */ +#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r)) + +/** + * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers + * @t: the stacking driver (top) + * @b: the underlying device (bottom) + **/ +void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) +{ + blk_stack_limits(&t->limits, &b->limits, 0); +} +EXPORT_SYMBOL(blk_queue_stack_limits); + +/** + * blk_stack_limits - adjust queue_limits for stacked devices + * @t: the stacking driver limits (top device) + * @b: the underlying queue limits (bottom, component device) + * @start: first data sector within component device + * + * Description: + * This function is used by stacking drivers like MD and DM to ensure + * that all component devices have compatible block sizes and + * alignments. The stacking driver must provide a queue_limits + * struct (top) and then iteratively call the stacking function for + * all component (bottom) devices. The stacking function will + * attempt to combine the values and ensure proper alignment. + * + * Returns 0 if the top and bottom queue_limits are compatible. The + * top device's block sizes and alignment offsets may be adjusted to + * ensure alignment with the bottom device. If no compatible sizes + * and alignments exist, -1 is returned and the resulting top + * queue_limits will have the misaligned flag set to indicate that + * the alignment_offset is undefined. + */ +int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, + sector_t start) +{ + unsigned int top, bottom, alignment, ret = 0; + + t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); + t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors); + t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn); + + t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, + b->seg_boundary_mask); + + t->max_segments = min_not_zero(t->max_segments, b->max_segments); + + t->max_segment_size = min_not_zero(t->max_segment_size, + b->max_segment_size); + + t->misaligned |= b->misaligned; + + alignment = queue_limit_alignment_offset(b, start); + + /* Bottom device has different alignment. Check that it is + * compatible with the current top alignment. + */ + if (t->alignment_offset != alignment) { + + top = max(t->physical_block_size, t->io_min) + + t->alignment_offset; + bottom = max(b->physical_block_size, b->io_min) + alignment; + + /* Verify that top and bottom intervals line up */ + if (max(top, bottom) & (min(top, bottom) - 1)) { + t->misaligned = 1; + ret = -1; + } + } + + t->logical_block_size = max(t->logical_block_size, + b->logical_block_size); + + t->physical_block_size = max(t->physical_block_size, + b->physical_block_size); + + t->io_min = max(t->io_min, b->io_min); + t->io_opt = lcm(t->io_opt, b->io_opt); + + t->cluster &= b->cluster; + t->discard_zeroes_data &= b->discard_zeroes_data; + + /* Physical block size a multiple of the logical block size? 
*/ + if (t->physical_block_size & (t->logical_block_size - 1)) { + t->physical_block_size = t->logical_block_size; + t->misaligned = 1; + ret = -1; + } + + /* Minimum I/O a multiple of the physical block size? */ + if (t->io_min & (t->physical_block_size - 1)) { + t->io_min = t->physical_block_size; + t->misaligned = 1; + ret = -1; + } + + /* Optimal I/O a multiple of the physical block size? */ + if (t->io_opt & (t->physical_block_size - 1)) { + t->io_opt = 0; + t->misaligned = 1; + ret = -1; + } + + /* Find lowest common alignment_offset */ + t->alignment_offset = lcm(t->alignment_offset, alignment) + & (max(t->physical_block_size, t->io_min) - 1); + + /* Verify that new alignment_offset is on a logical block boundary */ + if (t->alignment_offset & (t->logical_block_size - 1)) { + t->misaligned = 1; + ret = -1; + } + + /* Discard alignment and granularity */ + if (b->discard_granularity) { + alignment = queue_limit_discard_alignment(b, start); + + if (t->discard_granularity != 0 && + t->discard_alignment != alignment) { + top = t->discard_granularity + t->discard_alignment; + bottom = b->discard_granularity + alignment; + + /* Verify that top and bottom intervals line up */ + if (max(top, bottom) & (min(top, bottom) - 1)) + t->discard_misaligned = 1; + } + + t->max_discard_sectors = min_not_zero(t->max_discard_sectors, + b->max_discard_sectors); + t->discard_granularity = max(t->discard_granularity, + b->discard_granularity); + t->discard_alignment = lcm(t->discard_alignment, alignment) & + (t->discard_granularity - 1); + } + + return ret; +} +EXPORT_SYMBOL(blk_stack_limits); + +/** + * bdev_stack_limits - adjust queue limits for stacked drivers + * @t: the stacking driver limits (top device) + * @bdev: the component block_device (bottom) + * @start: first data sector within component device + * + * Description: + * Merges queue limits for a top device and a block_device. Returns + * 0 if alignment didn't change. Returns -1 if adding the bottom + * device caused misalignment. + */ +int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev, + sector_t start) +{ + struct request_queue *bq = bdev_get_queue(bdev); + + start += get_start_sect(bdev); + + return blk_stack_limits(t, &bq->limits, start); +} +EXPORT_SYMBOL(bdev_stack_limits); + +/** + * disk_stack_limits - adjust queue limits for stacked drivers + * @disk: MD/DM gendisk (top) + * @bdev: the underlying block device (bottom) + * @offset: offset to beginning of data within component device + * + * Description: + * Merges the limits for a top level gendisk and a bottom level + * block_device. + */ +void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, + sector_t offset) +{ + struct request_queue *t = disk->queue; + + if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) { + char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE]; + + disk_name(disk, 0, top); + bdevname(bdev, bottom); + + printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n", + top, bottom); + } +} +EXPORT_SYMBOL(disk_stack_limits); + +/** + * blk_queue_dma_pad - set pad mask + * @q: the request queue for the device + * @mask: pad mask + * + * Set dma pad mask. + * + * Appending pad buffer to a request modifies the last entry of a + * scatter list such that it includes the pad buffer. 
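
Editorial aside, not part of this commit: a stacking driver folds every component device into its own limits with the helpers above. A DM/MD-flavoured fragment, where the member list and the data_offset field are assumptions:

	struct stripe_member *m;	/* hypothetical per-component bookkeeping */

	blk_set_default_limits(&array->queue->limits);
	list_for_each_entry(m, &array->members, list)
		disk_stack_limits(array->gendisk, m->bdev,
				  m->data_offset << 9);	/* byte offset of the data area */
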
+ **/ +void blk_queue_dma_pad(struct request_queue *q, unsigned int mask) +{ + q->dma_pad_mask = mask; +} +EXPORT_SYMBOL(blk_queue_dma_pad); + +/** + * blk_queue_update_dma_pad - update pad mask + * @q: the request queue for the device + * @mask: pad mask + * + * Update dma pad mask. + * + * Appending pad buffer to a request modifies the last entry of a + * scatter list such that it includes the pad buffer. + **/ +void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask) +{ + if (mask > q->dma_pad_mask) + q->dma_pad_mask = mask; +} +EXPORT_SYMBOL(blk_queue_update_dma_pad); + +/** + * blk_queue_dma_drain - Set up a drain buffer for excess dma. + * @q: the request queue for the device + * @dma_drain_needed: fn which returns non-zero if drain is necessary + * @buf: physically contiguous buffer + * @size: size of the buffer in bytes + * + * Some devices have excess DMA problems and can't simply discard (or + * zero fill) the unwanted piece of the transfer. They have to have a + * real area of memory to transfer it into. The use case for this is + * ATAPI devices in DMA mode. If the packet command causes a transfer + * bigger than the transfer size some HBAs will lock up if there + * aren't DMA elements to contain the excess transfer. What this API + * does is adjust the queue so that the buf is always appended + * silently to the scatterlist. + * + * Note: This routine adjusts max_hw_segments to make room for appending + * the drain buffer. If you call blk_queue_max_segments() after calling + * this routine, you must set the limit to one fewer than your device + * can support otherwise there won't be room for the drain buffer. + */ +int blk_queue_dma_drain(struct request_queue *q, + dma_drain_needed_fn *dma_drain_needed, + void *buf, unsigned int size) +{ + if (queue_max_segments(q) < 2) + return -EINVAL; + /* make room for appending the drain */ + blk_queue_max_segments(q, queue_max_segments(q) - 1); + q->dma_drain_needed = dma_drain_needed; + q->dma_drain_buffer = buf; + q->dma_drain_size = size; + + return 0; +} +EXPORT_SYMBOL_GPL(blk_queue_dma_drain); + +/** + * blk_queue_segment_boundary - set boundary rules for segment merging + * @q: the request queue for the device + * @mask: the memory boundary mask + **/ +void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask) +{ + if (mask < PAGE_CACHE_SIZE - 1) { + mask = PAGE_CACHE_SIZE - 1; + printk(KERN_INFO "%s: set to minimum %lx\n", + __func__, mask); + } + + q->limits.seg_boundary_mask = mask; +} +EXPORT_SYMBOL(blk_queue_segment_boundary); + +/** + * blk_queue_dma_alignment - set dma length and memory alignment + * @q: the request queue for the device + * @mask: alignment mask + * + * description: + * set required memory and length alignment for direct dma transactions. + * this is used when building direct io requests for the queue. + * + **/ +void blk_queue_dma_alignment(struct request_queue *q, int mask) +{ + q->dma_alignment = mask; +} +EXPORT_SYMBOL(blk_queue_dma_alignment); + +/** + * blk_queue_update_dma_alignment - update dma length and memory alignment + * @q: the request queue for the device + * @mask: alignment mask + * + * description: + * update required memory and length alignment for direct dma transactions. + * If the requested alignment is larger than the current alignment, then + * the current queue alignment is updated to the new value, otherwise it + * is left alone. 
The design of this is to allow multiple objects + * (driver, device, transport etc) to set their respective + * alignments without having them interfere. + * + **/ +void blk_queue_update_dma_alignment(struct request_queue *q, int mask) +{ + BUG_ON(mask > PAGE_SIZE); + + if (mask > q->dma_alignment) + q->dma_alignment = mask; +} +EXPORT_SYMBOL(blk_queue_update_dma_alignment); + +static int __init blk_settings_init(void) +{ + blk_max_low_pfn = max_low_pfn - 1; + blk_max_pfn = max_pfn - 1; + return 0; +} +#ifdef CONFIG_FAST_RESUME +beforeresume_initcall(blk_settings_init); +#else +subsys_initcall(blk_settings_init); +#endif diff --git a/block/blk-softirq.c b/block/blk-softirq.c new file mode 100644 index 00000000..ee9c2160 --- /dev/null +++ b/block/blk-softirq.c @@ -0,0 +1,175 @@ +/* + * Functions related to softirq rq completions + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/bio.h> +#include <linux/blkdev.h> +#include <linux/interrupt.h> +#include <linux/cpu.h> + +#include "blk.h" + +static DEFINE_PER_CPU(struct list_head, blk_cpu_done); + +/* + * Softirq action handler - move entries to local list and loop over them + * while passing them to the queue registered handler. + */ +static void blk_done_softirq(struct softirq_action *h) +{ + struct list_head *cpu_list, local_list; + + local_irq_disable(); + cpu_list = &__get_cpu_var(blk_cpu_done); + list_replace_init(cpu_list, &local_list); + local_irq_enable(); + + while (!list_empty(&local_list)) { + struct request *rq; + + rq = list_entry(local_list.next, struct request, csd.list); + list_del_init(&rq->csd.list); + rq->q->softirq_done_fn(rq); + } +} + +#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS) +static void trigger_softirq(void *data) +{ + struct request *rq = data; + unsigned long flags; + struct list_head *list; + + local_irq_save(flags); + list = &__get_cpu_var(blk_cpu_done); + list_add_tail(&rq->csd.list, list); + + if (list->next == &rq->csd.list) + raise_softirq_irqoff(BLOCK_SOFTIRQ); + + local_irq_restore(flags); +} + +/* + * Setup and invoke a run of 'trigger_softirq' on the given cpu. 
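
Editorial aside, not part of this commit: the pad and drain hooks defined in blk-settings.c above exist for hardware that always transfers a little more than requested, ATAPI being the classic case. A setup sketch; the mydrv_* names and MYDRV_DRAIN_SIZE are assumptions:

	static int mydrv_drain_needed(struct request *rq)
	{
		/* only pass-through packet commands over-fetch on this hardware */
		return rq->cmd_type == REQ_TYPE_BLOCK_PC;
	}

	/* during queue setup */
	blk_queue_update_dma_pad(q, 3);		/* round transfers up to 4 bytes */
	drain_buf = kmalloc(MYDRV_DRAIN_SIZE, GFP_KERNEL);
	if (!drain_buf ||
	    blk_queue_dma_drain(q, mydrv_drain_needed, drain_buf, MYDRV_DRAIN_SIZE))
		goto fail;
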
+ */ +static int raise_blk_irq(int cpu, struct request *rq) +{ + if (cpu_online(cpu)) { + struct call_single_data *data = &rq->csd; + + data->func = trigger_softirq; + data->info = rq; + data->flags = 0; + + __smp_call_function_single(cpu, data, 0); + return 0; + } + + return 1; +} +#else /* CONFIG_SMP && CONFIG_USE_GENERIC_SMP_HELPERS */ +static int raise_blk_irq(int cpu, struct request *rq) +{ + return 1; +} +#endif + +static int __cpuinit blk_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + /* + * If a CPU goes away, splice its entries to the current CPU + * and trigger a run of the softirq + */ + if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { + int cpu = (unsigned long) hcpu; + + local_irq_disable(); + list_splice_init(&per_cpu(blk_cpu_done, cpu), + &__get_cpu_var(blk_cpu_done)); + raise_softirq_irqoff(BLOCK_SOFTIRQ); + local_irq_enable(); + } + + return NOTIFY_OK; +} + +static struct notifier_block __cpuinitdata blk_cpu_notifier = { + .notifier_call = blk_cpu_notify, +}; + +void __blk_complete_request(struct request *req) +{ + struct request_queue *q = req->q; + unsigned long flags; + int ccpu, cpu, group_cpu; + + BUG_ON(!q->softirq_done_fn); + + local_irq_save(flags); + cpu = smp_processor_id(); + group_cpu = blk_cpu_to_group(cpu); + + /* + * Select completion CPU + */ + if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && req->cpu != -1) + ccpu = req->cpu; + else + ccpu = cpu; + + if (ccpu == cpu || ccpu == group_cpu) { + struct list_head *list; +do_local: + list = &__get_cpu_var(blk_cpu_done); + list_add_tail(&req->csd.list, list); + + /* + * if the list only contains our just added request, + * signal a raise of the softirq. If there are already + * entries there, someone already raised the irq but it + * hasn't run yet. + */ + if (list->next == &req->csd.list) + raise_softirq_irqoff(BLOCK_SOFTIRQ); + } else if (raise_blk_irq(ccpu, req)) + goto do_local; + + local_irq_restore(flags); +} + +/** + * blk_complete_request - end I/O on a request + * @req: the request being processed + * + * Description: + * Ends all I/O on a request. It does not handle partial completions, + * unless the driver actually implements this in its completion callback + * through requeueing. The actual completion happens out-of-order, + * through a softirq handler. The user must have registered a completion + * callback through blk_queue_softirq_done(). 
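
Editorial aside, not part of this commit: the softirq completion path is driven entirely by the driver, which registers a softirq_done handler at init time and then calls blk_complete_request() from its hard interrupt handler. A minimal sketch:

	static void mydrv_softirq_done(struct request *rq)
	{
		/* runs later in BLOCK_SOFTIRQ context; takes the queue lock itself */
		blk_end_request_all(rq, rq->errors ? -EIO : 0);
	}

	/* during queue setup */
	blk_queue_softirq_done(q, mydrv_softirq_done);

	/* in the interrupt handler, once the hardware reports rq finished */
	rq->errors = hw_status;			/* hw_status is hypothetical */
	blk_complete_request(rq);
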
+ **/ +void blk_complete_request(struct request *req) +{ + if (unlikely(blk_should_fake_timeout(req->q))) + return; + if (!blk_mark_rq_complete(req)) + __blk_complete_request(req); +} +EXPORT_SYMBOL(blk_complete_request); + +static __init int blk_softirq_init(void) +{ + int i; + + for_each_possible_cpu(i) + INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i)); + + open_softirq(BLOCK_SOFTIRQ, blk_done_softirq); + register_hotcpu_notifier(&blk_cpu_notifier); + return 0; +} +subsys_initcall(blk_softirq_init); diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c new file mode 100644 index 00000000..a26c930d --- /dev/null +++ b/block/blk-sysfs.c @@ -0,0 +1,535 @@ +/* + * Functions related to sysfs handling + */ +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/bio.h> +#include <linux/blkdev.h> +#include <linux/blktrace_api.h> + +#include "blk.h" + +struct queue_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct request_queue *, char *); + ssize_t (*store)(struct request_queue *, const char *, size_t); +}; + +static ssize_t +queue_var_show(unsigned long var, char *page) +{ + return sprintf(page, "%lu\n", var); +} + +static ssize_t +queue_var_store(unsigned long *var, const char *page, size_t count) +{ + char *p = (char *) page; + + *var = simple_strtoul(p, &p, 10); + return count; +} + +static ssize_t queue_requests_show(struct request_queue *q, char *page) +{ + return queue_var_show(q->nr_requests, (page)); +} + +static ssize_t +queue_requests_store(struct request_queue *q, const char *page, size_t count) +{ + struct request_list *rl = &q->rq; + unsigned long nr; + int ret; + + if (!q->request_fn) + return -EINVAL; + + ret = queue_var_store(&nr, page, count); + if (nr < BLKDEV_MIN_RQ) + nr = BLKDEV_MIN_RQ; + + spin_lock_irq(q->queue_lock); + q->nr_requests = nr; + blk_queue_congestion_threshold(q); + + if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q)) + blk_set_queue_congested(q, BLK_RW_SYNC); + else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q)) + blk_clear_queue_congested(q, BLK_RW_SYNC); + + if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q)) + blk_set_queue_congested(q, BLK_RW_ASYNC); + else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q)) + blk_clear_queue_congested(q, BLK_RW_ASYNC); + + if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { + blk_set_queue_full(q, BLK_RW_SYNC); + } else if (rl->count[BLK_RW_SYNC]+1 <= q->nr_requests) { + blk_clear_queue_full(q, BLK_RW_SYNC); + wake_up(&rl->wait[BLK_RW_SYNC]); + } + + if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { + blk_set_queue_full(q, BLK_RW_ASYNC); + } else if (rl->count[BLK_RW_ASYNC]+1 <= q->nr_requests) { + blk_clear_queue_full(q, BLK_RW_ASYNC); + wake_up(&rl->wait[BLK_RW_ASYNC]); + } + spin_unlock_irq(q->queue_lock); + return ret; +} + +static ssize_t queue_ra_show(struct request_queue *q, char *page) +{ + unsigned long ra_kb = q->backing_dev_info.ra_pages << + (PAGE_CACHE_SHIFT - 10); + + return queue_var_show(ra_kb, (page)); +} + +static ssize_t +queue_ra_store(struct request_queue *q, const char *page, size_t count) +{ + unsigned long ra_kb; + ssize_t ret = queue_var_store(&ra_kb, page, count); + + q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10); + + return ret; +} + +static ssize_t queue_max_sectors_show(struct request_queue *q, char *page) +{ + int max_sectors_kb = queue_max_sectors(q) >> 1; + + return queue_var_show(max_sectors_kb, (page)); +} + +static ssize_t queue_max_segments_show(struct request_queue *q, 
char *page) +{ + return queue_var_show(queue_max_segments(q), (page)); +} + +static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page) +{ + if (blk_queue_cluster(q)) + return queue_var_show(queue_max_segment_size(q), (page)); + + return queue_var_show(PAGE_CACHE_SIZE, (page)); +} + +static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page) +{ + return queue_var_show(queue_logical_block_size(q), page); +} + +static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page) +{ + return queue_var_show(queue_physical_block_size(q), page); +} + +static ssize_t queue_io_min_show(struct request_queue *q, char *page) +{ + return queue_var_show(queue_io_min(q), page); +} + +static ssize_t queue_io_opt_show(struct request_queue *q, char *page) +{ + return queue_var_show(queue_io_opt(q), page); +} + +static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page) +{ + return queue_var_show(q->limits.discard_granularity, page); +} + +static ssize_t queue_discard_max_show(struct request_queue *q, char *page) +{ + return queue_var_show(q->limits.max_discard_sectors << 9, page); +} + +static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page) +{ + return queue_var_show(queue_discard_zeroes_data(q), page); +} + +static ssize_t +queue_max_sectors_store(struct request_queue *q, const char *page, size_t count) +{ + unsigned long max_sectors_kb, + max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1, + page_kb = 1 << (PAGE_CACHE_SHIFT - 10); + ssize_t ret = queue_var_store(&max_sectors_kb, page, count); + + if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb) + return -EINVAL; + + spin_lock_irq(q->queue_lock); + q->limits.max_sectors = max_sectors_kb << 1; + spin_unlock_irq(q->queue_lock); + + return ret; +} + +static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page) +{ + int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1; + + return queue_var_show(max_hw_sectors_kb, (page)); +} + +#define QUEUE_SYSFS_BIT_FNS(name, flag, neg) \ +static ssize_t \ +queue_show_##name(struct request_queue *q, char *page) \ +{ \ + int bit; \ + bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags); \ + return queue_var_show(neg ? 
!bit : bit, page); \ +} \ +static ssize_t \ +queue_store_##name(struct request_queue *q, const char *page, size_t count) \ +{ \ + unsigned long val; \ + ssize_t ret; \ + ret = queue_var_store(&val, page, count); \ + if (neg) \ + val = !val; \ + \ + spin_lock_irq(q->queue_lock); \ + if (val) \ + queue_flag_set(QUEUE_FLAG_##flag, q); \ + else \ + queue_flag_clear(QUEUE_FLAG_##flag, q); \ + spin_unlock_irq(q->queue_lock); \ + return ret; \ +} + +QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1); +QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0); +QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0); +#undef QUEUE_SYSFS_BIT_FNS + +static ssize_t queue_nomerges_show(struct request_queue *q, char *page) +{ + return queue_var_show((blk_queue_nomerges(q) << 1) | + blk_queue_noxmerges(q), page); +} + +static ssize_t queue_nomerges_store(struct request_queue *q, const char *page, + size_t count) +{ + unsigned long nm; + ssize_t ret = queue_var_store(&nm, page, count); + + spin_lock_irq(q->queue_lock); + queue_flag_clear(QUEUE_FLAG_NOMERGES, q); + queue_flag_clear(QUEUE_FLAG_NOXMERGES, q); + if (nm == 2) + queue_flag_set(QUEUE_FLAG_NOMERGES, q); + else if (nm) + queue_flag_set(QUEUE_FLAG_NOXMERGES, q); + spin_unlock_irq(q->queue_lock); + + return ret; +} + +static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page) +{ + bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags); + + return queue_var_show(set, page); +} + +static ssize_t +queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count) +{ + ssize_t ret = -EINVAL; +#if defined(CONFIG_USE_GENERIC_SMP_HELPERS) + unsigned long val; + + ret = queue_var_store(&val, page, count); + spin_lock_irq(q->queue_lock); + if (val) + queue_flag_set(QUEUE_FLAG_SAME_COMP, q); + else + queue_flag_clear(QUEUE_FLAG_SAME_COMP, q); + spin_unlock_irq(q->queue_lock); +#endif + return ret; +} + +static struct queue_sysfs_entry queue_requests_entry = { + .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR }, + .show = queue_requests_show, + .store = queue_requests_store, +}; + +static struct queue_sysfs_entry queue_ra_entry = { + .attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR }, + .show = queue_ra_show, + .store = queue_ra_store, +}; + +static struct queue_sysfs_entry queue_max_sectors_entry = { + .attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR }, + .show = queue_max_sectors_show, + .store = queue_max_sectors_store, +}; + +static struct queue_sysfs_entry queue_max_hw_sectors_entry = { + .attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO }, + .show = queue_max_hw_sectors_show, +}; + +static struct queue_sysfs_entry queue_max_segments_entry = { + .attr = {.name = "max_segments", .mode = S_IRUGO }, + .show = queue_max_segments_show, +}; + +static struct queue_sysfs_entry queue_max_segment_size_entry = { + .attr = {.name = "max_segment_size", .mode = S_IRUGO }, + .show = queue_max_segment_size_show, +}; + +static struct queue_sysfs_entry queue_iosched_entry = { + .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR }, + .show = elv_iosched_show, + .store = elv_iosched_store, +}; + +static struct queue_sysfs_entry queue_hw_sector_size_entry = { + .attr = {.name = "hw_sector_size", .mode = S_IRUGO }, + .show = queue_logical_block_size_show, +}; + +static struct queue_sysfs_entry queue_logical_block_size_entry = { + .attr = {.name = "logical_block_size", .mode = S_IRUGO }, + .show = queue_logical_block_size_show, +}; + +static struct queue_sysfs_entry queue_physical_block_size_entry = { + .attr = {.name = "physical_block_size", 
.mode = S_IRUGO }, + .show = queue_physical_block_size_show, +}; + +static struct queue_sysfs_entry queue_io_min_entry = { + .attr = {.name = "minimum_io_size", .mode = S_IRUGO }, + .show = queue_io_min_show, +}; + +static struct queue_sysfs_entry queue_io_opt_entry = { + .attr = {.name = "optimal_io_size", .mode = S_IRUGO }, + .show = queue_io_opt_show, +}; + +static struct queue_sysfs_entry queue_discard_granularity_entry = { + .attr = {.name = "discard_granularity", .mode = S_IRUGO }, + .show = queue_discard_granularity_show, +}; + +static struct queue_sysfs_entry queue_discard_max_entry = { + .attr = {.name = "discard_max_bytes", .mode = S_IRUGO }, + .show = queue_discard_max_show, +}; + +static struct queue_sysfs_entry queue_discard_zeroes_data_entry = { + .attr = {.name = "discard_zeroes_data", .mode = S_IRUGO }, + .show = queue_discard_zeroes_data_show, +}; + +static struct queue_sysfs_entry queue_nonrot_entry = { + .attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR }, + .show = queue_show_nonrot, + .store = queue_store_nonrot, +}; + +static struct queue_sysfs_entry queue_nomerges_entry = { + .attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR }, + .show = queue_nomerges_show, + .store = queue_nomerges_store, +}; + +static struct queue_sysfs_entry queue_rq_affinity_entry = { + .attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR }, + .show = queue_rq_affinity_show, + .store = queue_rq_affinity_store, +}; + +static struct queue_sysfs_entry queue_iostats_entry = { + .attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR }, + .show = queue_show_iostats, + .store = queue_store_iostats, +}; + +static struct queue_sysfs_entry queue_random_entry = { + .attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR }, + .show = queue_show_random, + .store = queue_store_random, +}; + +static struct attribute *default_attrs[] = { + &queue_requests_entry.attr, + &queue_ra_entry.attr, + &queue_max_hw_sectors_entry.attr, + &queue_max_sectors_entry.attr, + &queue_max_segments_entry.attr, + &queue_max_segment_size_entry.attr, + &queue_iosched_entry.attr, + &queue_hw_sector_size_entry.attr, + &queue_logical_block_size_entry.attr, + &queue_physical_block_size_entry.attr, + &queue_io_min_entry.attr, + &queue_io_opt_entry.attr, + &queue_discard_granularity_entry.attr, + &queue_discard_max_entry.attr, + &queue_discard_zeroes_data_entry.attr, + &queue_nonrot_entry.attr, + &queue_nomerges_entry.attr, + &queue_rq_affinity_entry.attr, + &queue_iostats_entry.attr, + &queue_random_entry.attr, + NULL, +}; + +#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr) + +static ssize_t +queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page) +{ + struct queue_sysfs_entry *entry = to_queue(attr); + struct request_queue *q = + container_of(kobj, struct request_queue, kobj); + ssize_t res; + + if (!entry->show) + return -EIO; + mutex_lock(&q->sysfs_lock); + if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) { + mutex_unlock(&q->sysfs_lock); + return -ENOENT; + } + res = entry->show(q, page); + mutex_unlock(&q->sysfs_lock); + return res; +} + +static ssize_t +queue_attr_store(struct kobject *kobj, struct attribute *attr, + const char *page, size_t length) +{ + struct queue_sysfs_entry *entry = to_queue(attr); + struct request_queue *q; + ssize_t res; + + if (!entry->store) + return -EIO; + + q = container_of(kobj, struct request_queue, kobj); + mutex_lock(&q->sysfs_lock); + if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) { + mutex_unlock(&q->sysfs_lock); + return -ENOENT; + } + 
res = entry->store(q, page, length); + mutex_unlock(&q->sysfs_lock); + return res; +} + +/** + * blk_cleanup_queue: - release a &struct request_queue when it is no longer needed + * @kobj: the kobj belonging of the request queue to be released + * + * Description: + * blk_cleanup_queue is the pair to blk_init_queue() or + * blk_queue_make_request(). It should be called when a request queue is + * being released; typically when a block device is being de-registered. + * Currently, its primary task it to free all the &struct request + * structures that were allocated to the queue and the queue itself. + * + * Caveat: + * Hopefully the low level driver will have finished any + * outstanding requests first... + **/ +static void blk_release_queue(struct kobject *kobj) +{ + struct request_queue *q = + container_of(kobj, struct request_queue, kobj); + struct request_list *rl = &q->rq; + + blk_sync_queue(q); + + if (rl->rq_pool) + mempool_destroy(rl->rq_pool); + + if (q->queue_tags) + __blk_queue_free_tags(q); + + blk_trace_shutdown(q); + + bdi_destroy(&q->backing_dev_info); + kmem_cache_free(blk_requestq_cachep, q); +} + +static const struct sysfs_ops queue_sysfs_ops = { + .show = queue_attr_show, + .store = queue_attr_store, +}; + +struct kobj_type blk_queue_ktype = { + .sysfs_ops = &queue_sysfs_ops, + .default_attrs = default_attrs, + .release = blk_release_queue, +}; + +int blk_register_queue(struct gendisk *disk) +{ + int ret; + struct device *dev = disk_to_dev(disk); + + struct request_queue *q = disk->queue; + + if (WARN_ON(!q)) + return -ENXIO; + + ret = blk_trace_init_sysfs(dev); + if (ret) + return ret; + + ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue"); + if (ret < 0) + return ret; + + kobject_uevent(&q->kobj, KOBJ_ADD); + + if (!q->request_fn) + return 0; + + ret = elv_register_queue(q); + if (ret) { + kobject_uevent(&q->kobj, KOBJ_REMOVE); + kobject_del(&q->kobj); + blk_trace_remove_sysfs(disk_to_dev(disk)); + kobject_put(&dev->kobj); + return ret; + } + + return 0; +} + +void blk_unregister_queue(struct gendisk *disk) +{ + struct request_queue *q = disk->queue; + + if (WARN_ON(!q)) + return; + + if (q->request_fn) + elv_unregister_queue(q); + + kobject_uevent(&q->kobj, KOBJ_REMOVE); + kobject_del(&q->kobj); + blk_trace_remove_sysfs(disk_to_dev(disk)); + kobject_put(&disk_to_dev(disk)->kobj); +} diff --git a/block/blk-tag.c b/block/blk-tag.c new file mode 100644 index 00000000..ece65fc4 --- /dev/null +++ b/block/blk-tag.c @@ -0,0 +1,406 @@ +/* + * Functions related to tagged command queuing + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/bio.h> +#include <linux/blkdev.h> +#include <linux/slab.h> + +#include "blk.h" + +/** + * blk_queue_find_tag - find a request by its tag and queue + * @q: The request queue for the device + * @tag: The tag of the request + * + * Notes: + * Should be used when a device returns a tag and you want to match + * it with a request. + * + * no locks need be held. + **/ +struct request *blk_queue_find_tag(struct request_queue *q, int tag) +{ + return blk_map_queue_find_tag(q->queue_tags, tag); +} +EXPORT_SYMBOL(blk_queue_find_tag); + +/** + * __blk_free_tags - release a given set of tag maintenance info + * @bqt: the tag map to free + * + * Tries to free the specified @bqt. 
Returns true if it was + * actually freed and false if there are still references using it + */ +static int __blk_free_tags(struct blk_queue_tag *bqt) +{ + int retval; + + retval = atomic_dec_and_test(&bqt->refcnt); + if (retval) { + BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) < + bqt->max_depth); + + kfree(bqt->tag_index); + bqt->tag_index = NULL; + + kfree(bqt->tag_map); + bqt->tag_map = NULL; + + kfree(bqt); + } + + return retval; +} + +/** + * __blk_queue_free_tags - release tag maintenance info + * @q: the request queue for the device + * + * Notes: + * blk_cleanup_queue() will take care of calling this function, if tagging + * has been used. So there's no need to call this directly. + **/ +void __blk_queue_free_tags(struct request_queue *q) +{ + struct blk_queue_tag *bqt = q->queue_tags; + + if (!bqt) + return; + + __blk_free_tags(bqt); + + q->queue_tags = NULL; + queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q); +} + +/** + * blk_free_tags - release a given set of tag maintenance info + * @bqt: the tag map to free + * + * For externally managed @bqt frees the map. Callers of this + * function must guarantee to have released all the queues that + * might have been using this tag map. + */ +void blk_free_tags(struct blk_queue_tag *bqt) +{ + if (unlikely(!__blk_free_tags(bqt))) + BUG(); +} +EXPORT_SYMBOL(blk_free_tags); + +/** + * blk_queue_free_tags - release tag maintenance info + * @q: the request queue for the device + * + * Notes: + * This is used to disable tagged queuing to a device, yet leave + * queue in function. + **/ +void blk_queue_free_tags(struct request_queue *q) +{ + queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q); +} +EXPORT_SYMBOL(blk_queue_free_tags); + +static int +init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth) +{ + struct request **tag_index; + unsigned long *tag_map; + int nr_ulongs; + + if (q && depth > q->nr_requests * 2) { + depth = q->nr_requests * 2; + printk(KERN_ERR "%s: adjusted depth to %d\n", + __func__, depth); + } + + tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC); + if (!tag_index) + goto fail; + + nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG; + tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC); + if (!tag_map) + goto fail; + + tags->real_max_depth = depth; + tags->max_depth = depth; + tags->tag_index = tag_index; + tags->tag_map = tag_map; + + return 0; +fail: + kfree(tag_index); + return -ENOMEM; +} + +static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q, + int depth) +{ + struct blk_queue_tag *tags; + + tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC); + if (!tags) + goto fail; + + if (init_tag_map(q, tags, depth)) + goto fail; + + atomic_set(&tags->refcnt, 1); + return tags; +fail: + kfree(tags); + return NULL; +} + +/** + * blk_init_tags - initialize the tag info for an external tag map + * @depth: the maximum queue depth supported + **/ +struct blk_queue_tag *blk_init_tags(int depth) +{ + return __blk_queue_init_tags(NULL, depth); +} +EXPORT_SYMBOL(blk_init_tags); + +/** + * blk_queue_init_tags - initialize the queue tag info + * @q: the request queue for the device + * @depth: the maximum queue depth supported + * @tags: the tag to use + * + * Queue lock must be held here if the function is called to resize an + * existing map. 
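
Editorial aside, not part of this commit: a driver whose hardware queues commands typically sizes the tag map once and then brackets every command with blk_queue_start_tag() and blk_queue_end_tag(), both under the queue lock. Fragments with hypothetical mydrv_* helpers:

	/* one-time setup: depth matches what the hardware can keep in flight */
	if (blk_queue_init_tags(q, MYDRV_QUEUE_DEPTH, NULL))
		goto fail;

	/* request_fn, entered with q->queue_lock held */
	static void mydrv_request_fn(struct request_queue *q)
	{
		struct request *rq;

		while ((rq = blk_peek_request(q)) != NULL) {
			/* blk_queue_start_tag() dequeues and starts rq itself */
			if (blk_queue_start_tag(q, rq))
				break;		/* out of tags until something completes */
			mydrv_issue_to_hw(rq);	/* the hardware consumes rq->tag */
		}
	}

	/* completion path, also under q->queue_lock */
	blk_queue_end_tag(q, rq);
	__blk_end_request_all(rq, 0);
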
+ **/ +int blk_queue_init_tags(struct request_queue *q, int depth, + struct blk_queue_tag *tags) +{ + int rc; + + BUG_ON(tags && q->queue_tags && tags != q->queue_tags); + + if (!tags && !q->queue_tags) { + tags = __blk_queue_init_tags(q, depth); + + if (!tags) + goto fail; + } else if (q->queue_tags) { + rc = blk_queue_resize_tags(q, depth); + if (rc) + return rc; + queue_flag_set(QUEUE_FLAG_QUEUED, q); + return 0; + } else + atomic_inc(&tags->refcnt); + + /* + * assign it, all done + */ + q->queue_tags = tags; + queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q); + INIT_LIST_HEAD(&q->tag_busy_list); + return 0; +fail: + kfree(tags); + return -ENOMEM; +} +EXPORT_SYMBOL(blk_queue_init_tags); + +/** + * blk_queue_resize_tags - change the queueing depth + * @q: the request queue for the device + * @new_depth: the new max command queueing depth + * + * Notes: + * Must be called with the queue lock held. + **/ +int blk_queue_resize_tags(struct request_queue *q, int new_depth) +{ + struct blk_queue_tag *bqt = q->queue_tags; + struct request **tag_index; + unsigned long *tag_map; + int max_depth, nr_ulongs; + + if (!bqt) + return -ENXIO; + + /* + * if we already have large enough real_max_depth. just + * adjust max_depth. *NOTE* as requests with tag value + * between new_depth and real_max_depth can be in-flight, tag + * map can not be shrunk blindly here. + */ + if (new_depth <= bqt->real_max_depth) { + bqt->max_depth = new_depth; + return 0; + } + + /* + * Currently cannot replace a shared tag map with a new + * one, so error out if this is the case + */ + if (atomic_read(&bqt->refcnt) != 1) + return -EBUSY; + + /* + * save the old state info, so we can copy it back + */ + tag_index = bqt->tag_index; + tag_map = bqt->tag_map; + max_depth = bqt->real_max_depth; + + if (init_tag_map(q, bqt, new_depth)) + return -ENOMEM; + + memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *)); + nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG; + memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long)); + + kfree(tag_index); + kfree(tag_map); + return 0; +} +EXPORT_SYMBOL(blk_queue_resize_tags); + +/** + * blk_queue_end_tag - end tag operations for a request + * @q: the request queue for the device + * @rq: the request that has completed + * + * Description: + * Typically called when end_that_request_first() returns %0, meaning + * all transfers have been done for a request. It's important to call + * this function before end_that_request_last(), as that will put the + * request back on the free list thus corrupting the internal tag list. + * + * Notes: + * queue lock must be held. + **/ +void blk_queue_end_tag(struct request_queue *q, struct request *rq) +{ + struct blk_queue_tag *bqt = q->queue_tags; + int tag = rq->tag; + + BUG_ON(tag == -1); + + if (unlikely(tag >= bqt->real_max_depth)) + /* + * This can happen after tag depth has been reduced. + * FIXME: how about a warning or info message here? + */ + return; + + list_del_init(&rq->queuelist); + rq->cmd_flags &= ~REQ_QUEUED; + rq->tag = -1; + + if (unlikely(bqt->tag_index[tag] == NULL)) + printk(KERN_ERR "%s: tag %d is missing\n", + __func__, tag); + + bqt->tag_index[tag] = NULL; + + if (unlikely(!test_bit(tag, bqt->tag_map))) { + printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n", + __func__, tag); + return; + } + /* + * The tag_map bit acts as a lock for tag_index[bit], so we need + * unlock memory barrier semantics. 
+ */ + clear_bit_unlock(tag, bqt->tag_map); +} +EXPORT_SYMBOL(blk_queue_end_tag); + +/** + * blk_queue_start_tag - find a free tag and assign it + * @q: the request queue for the device + * @rq: the block request that needs tagging + * + * Description: + * This can either be used as a stand-alone helper, or possibly be + * assigned as the queue &prep_rq_fn (in which case &struct request + * automagically gets a tag assigned). Note that this function + * assumes that any type of request can be queued! if this is not + * true for your device, you must check the request type before + * calling this function. The request will also be removed from + * the request queue, so it's the drivers responsibility to readd + * it if it should need to be restarted for some reason. + * + * Notes: + * queue lock must be held. + **/ +int blk_queue_start_tag(struct request_queue *q, struct request *rq) +{ + struct blk_queue_tag *bqt = q->queue_tags; + unsigned max_depth; + int tag; + + if (unlikely((rq->cmd_flags & REQ_QUEUED))) { + printk(KERN_ERR + "%s: request %p for device [%s] already tagged %d", + __func__, rq, + rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag); + BUG(); + } + + /* + * Protect against shared tag maps, as we may not have exclusive + * access to the tag map. + * + * We reserve a few tags just for sync IO, since we don't want + * to starve sync IO on behalf of flooding async IO. + */ + max_depth = bqt->max_depth; + if (!rq_is_sync(rq) && max_depth > 1) { + max_depth -= 2; + if (!max_depth) + max_depth = 1; + if (q->in_flight[BLK_RW_ASYNC] > max_depth) + return 1; + } + + do { + tag = find_first_zero_bit(bqt->tag_map, max_depth); + if (tag >= max_depth) + return 1; + + } while (test_and_set_bit_lock(tag, bqt->tag_map)); + /* + * We need lock ordering semantics given by test_and_set_bit_lock. + * See blk_queue_end_tag for details. + */ + + rq->cmd_flags |= REQ_QUEUED; + rq->tag = tag; + bqt->tag_index[tag] = rq; + blk_start_request(rq); + list_add(&rq->queuelist, &q->tag_busy_list); + return 0; +} +EXPORT_SYMBOL(blk_queue_start_tag); + +/** + * blk_queue_invalidate_tags - invalidate all pending tags + * @q: the request queue for the device + * + * Description: + * Hardware conditions may dictate a need to stop all pending requests. + * In this case, we will safely clear the block side of the tag queue and + * readd all requests to the request queue in the right order. + * + * Notes: + * queue lock must be held. + **/ +void blk_queue_invalidate_tags(struct request_queue *q) +{ + struct list_head *tmp, *n; + + list_for_each_safe(tmp, n, &q->tag_busy_list) + blk_requeue_request(q, list_entry_rq(tmp)); +} +EXPORT_SYMBOL(blk_queue_invalidate_tags); diff --git a/block/blk-timeout.c b/block/blk-timeout.c new file mode 100644 index 00000000..4f0c06c7 --- /dev/null +++ b/block/blk-timeout.c @@ -0,0 +1,237 @@ +/* + * Functions related to generic timeout handling of requests. 
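
Editorial aside, not part of this commit: the timeout machinery below only runs for drivers that opted in through blk_queue_rq_timed_out() and, optionally, blk_queue_rq_timeout() from blk-settings.c. A sketch of a driver-side handler; the mydrv_* helpers are assumptions:

	static enum blk_eh_timer_return mydrv_timed_out(struct request *rq)
	{
		if (mydrv_still_in_flight(rq))
			return BLK_EH_RESET_TIMER;	/* grant another interval */

		if (mydrv_abort_cmd(rq) == 0)
			return BLK_EH_HANDLED;		/* block layer completes rq */

		return BLK_EH_NOT_HANDLED;		/* fall back to generic recovery */
	}

	/* during queue setup */
	blk_queue_rq_timed_out(q, mydrv_timed_out);
	blk_queue_rq_timeout(q, 30 * HZ);
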
+ */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/blkdev.h> +#include <linux/fault-inject.h> + +#include "blk.h" + +#ifdef CONFIG_FAIL_IO_TIMEOUT + +static DECLARE_FAULT_ATTR(fail_io_timeout); + +static int __init setup_fail_io_timeout(char *str) +{ + return setup_fault_attr(&fail_io_timeout, str); +} +__setup("fail_io_timeout=", setup_fail_io_timeout); + +int blk_should_fake_timeout(struct request_queue *q) +{ + if (!test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags)) + return 0; + + return should_fail(&fail_io_timeout, 1); +} + +static int __init fail_io_timeout_debugfs(void) +{ + return init_fault_attr_dentries(&fail_io_timeout, "fail_io_timeout"); +} + +late_initcall(fail_io_timeout_debugfs); + +ssize_t part_timeout_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct gendisk *disk = dev_to_disk(dev); + int set = test_bit(QUEUE_FLAG_FAIL_IO, &disk->queue->queue_flags); + + return sprintf(buf, "%d\n", set != 0); +} + +ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct gendisk *disk = dev_to_disk(dev); + int val; + + if (count) { + struct request_queue *q = disk->queue; + char *p = (char *) buf; + + val = simple_strtoul(p, &p, 10); + spin_lock_irq(q->queue_lock); + if (val) + queue_flag_set(QUEUE_FLAG_FAIL_IO, q); + else + queue_flag_clear(QUEUE_FLAG_FAIL_IO, q); + spin_unlock_irq(q->queue_lock); + } + + return count; +} + +#endif /* CONFIG_FAIL_IO_TIMEOUT */ + +/* + * blk_delete_timer - Delete/cancel timer for a given function. + * @req: request that we are canceling timer for + * + */ +void blk_delete_timer(struct request *req) +{ + list_del_init(&req->timeout_list); +} + +static void blk_rq_timed_out(struct request *req) +{ + struct request_queue *q = req->q; + enum blk_eh_timer_return ret; + + ret = q->rq_timed_out_fn(req); + switch (ret) { + case BLK_EH_HANDLED: + __blk_complete_request(req); + break; + case BLK_EH_RESET_TIMER: + blk_clear_rq_complete(req); + blk_add_timer(req); + break; + case BLK_EH_NOT_HANDLED: + /* + * LLD handles this for now but in the future + * we can send a request msg to abort the command + * and we can move more of the generic scsi eh code to + * the blk layer. + */ + break; + default: + printk(KERN_ERR "block: bad eh return: %d\n", ret); + break; + } +} + +void blk_rq_timed_out_timer(unsigned long data) +{ + struct request_queue *q = (struct request_queue *) data; + unsigned long flags, next = 0; + struct request *rq, *tmp; + int next_set = 0; + + spin_lock_irqsave(q->queue_lock, flags); + + list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) { + if (time_after_eq(jiffies, rq->deadline)) { + list_del_init(&rq->timeout_list); + + /* + * Check if we raced with end io completion + */ + if (blk_mark_rq_complete(rq)) + continue; + blk_rq_timed_out(rq); + } else if (!next_set || time_after(next, rq->deadline)) { + next = rq->deadline; + next_set = 1; + } + } + + if (next_set) + mod_timer(&q->timeout, round_jiffies_up(next)); + + spin_unlock_irqrestore(q->queue_lock, flags); +} + +/** + * blk_abort_request -- Request request recovery for the specified command + * @req: pointer to the request of interest + * + * This function requests that the block layer start recovery for the + * request by deleting the timer and calling the q's timeout function. + * LLDDs who implement their own error recovery MAY ignore the timeout + * event if they generated blk_abort_req. Must hold queue lock. 
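
Editorial aside, not part of this commit: blk_abort_request() suits transports that learn out of band that a command can never complete, for example after a dropped link; per the note above it must be called with the queue lock held, while blk_abort_queue() takes the lock itself. Sketch:

	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_abort_request(rq);		/* runs the rq_timed_out handling immediately */
	spin_unlock_irqrestore(q->queue_lock, flags);

	/* or, when the whole device has gone away */
	blk_abort_queue(q);
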
+ */ +void blk_abort_request(struct request *req) +{ + if (blk_mark_rq_complete(req)) + return; + blk_delete_timer(req); + blk_rq_timed_out(req); +} +EXPORT_SYMBOL_GPL(blk_abort_request); + +/** + * blk_add_timer - Start timeout timer for a single request + * @req: request that is about to start running. + * + * Notes: + * Each request has its own timer, and as it is added to the queue, we + * set up the timer. When the request completes, we cancel the timer. + */ +void blk_add_timer(struct request *req) +{ + struct request_queue *q = req->q; + unsigned long expiry; + + if (!q->rq_timed_out_fn) + return; + + BUG_ON(!list_empty(&req->timeout_list)); + BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags)); + + /* + * Some LLDs, like scsi, peek at the timeout to prevent a + * command from being retried forever. + */ + if (!req->timeout) + req->timeout = q->rq_timeout; + + req->deadline = jiffies + req->timeout; + list_add_tail(&req->timeout_list, &q->timeout_list); + + /* + * If the timer isn't already pending or this timeout is earlier + * than an existing one, modify the timer. Round up to next nearest + * second. + */ + expiry = round_jiffies_up(req->deadline); + + if (!timer_pending(&q->timeout) || + time_before(expiry, q->timeout.expires)) + mod_timer(&q->timeout, expiry); +} + +/** + * blk_abort_queue -- Abort all request on given queue + * @queue: pointer to queue + * + */ +void blk_abort_queue(struct request_queue *q) +{ + unsigned long flags; + struct request *rq, *tmp; + LIST_HEAD(list); + + /* + * Not a request based block device, nothing to abort + */ + if (!q->request_fn) + return; + + spin_lock_irqsave(q->queue_lock, flags); + + elv_abort_queue(q); + + /* + * Splice entries to local list, to avoid deadlocking if entries + * get readded to the timeout list by error handling + */ + list_splice_init(&q->timeout_list, &list); + + list_for_each_entry_safe(rq, tmp, &list, timeout_list) + blk_abort_request(rq); + + /* + * Occasionally, blk_abort_request() will return without + * deleting the element from the list. Make sure we add those back + * instead of leaving them on the local stack list. 
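blk_abort_queue() above splices the timeout list onto a private list before walking it, because the abort handlers it calls may requeue entries while it runs; whatever survives is pushed back afterwards, as the comment just above notes. A small sketch of the same idea with a plain singly linked list; abort_all() and struct node are illustrative, and the kernel version additionally holds the queue lock around the whole operation.

#include <stdbool.h>

struct node {
	struct node *next;
	int id;
};

/*
 * Detach the whole pending list up front, then process the private copy.
 * Handlers are free to push new work onto *pending while we run, and any
 * entry that was not consumed is pushed back, so nothing is lost and the
 * walk never chases links a handler is rewriting.
 */
static void abort_all(struct node **pending, bool (*abort_one)(struct node *))
{
	struct node *local = *pending;          /* "list_splice_init" */
	struct node *n, *next;

	*pending = NULL;

	for (n = local; n; n = next) {
		next = n->next;
		if (abort_one(n))
			continue;               /* handled and consumed */
		/* not handled: put it back on the shared pending list */
		n->next = *pending;
		*pending = n;
	}
}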
+ */ + list_splice(&list, &q->timeout_list); + + spin_unlock_irqrestore(q->queue_lock, flags); + +} +EXPORT_SYMBOL_GPL(blk_abort_queue); diff --git a/block/blk.h b/block/blk.h new file mode 100644 index 00000000..d6b911ac --- /dev/null +++ b/block/blk.h @@ -0,0 +1,174 @@ +#ifndef BLK_INTERNAL_H +#define BLK_INTERNAL_H + +/* Amount of time in which a process may batch requests */ +#define BLK_BATCH_TIME (HZ/50UL) + +/* Number of requests a "batching" process may submit */ +#define BLK_BATCH_REQ 32 + +extern struct kmem_cache *blk_requestq_cachep; +extern struct kobj_type blk_queue_ktype; + +void init_request_from_bio(struct request *req, struct bio *bio); +void blk_rq_bio_prep(struct request_queue *q, struct request *rq, + struct bio *bio); +int blk_rq_append_bio(struct request_queue *q, struct request *rq, + struct bio *bio); +void blk_dequeue_request(struct request *rq); +void __blk_queue_free_tags(struct request_queue *q); + +void blk_unplug_work(struct work_struct *work); +void blk_unplug_timeout(unsigned long data); +void blk_rq_timed_out_timer(unsigned long data); +void blk_delete_timer(struct request *); +void blk_add_timer(struct request *); +void __generic_unplug_device(struct request_queue *); + +/* + * Internal atomic flags for request handling + */ +enum rq_atomic_flags { + REQ_ATOM_COMPLETE = 0, +}; + +/* + * EH timer and IO completion will both attempt to 'grab' the request, make + * sure that only one of them suceeds + */ +static inline int blk_mark_rq_complete(struct request *rq) +{ + return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags); +} + +static inline void blk_clear_rq_complete(struct request *rq) +{ + clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags); +} + +/* + * Internal elevator interface + */ +#define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash)) + +static inline struct request *__elv_next_request(struct request_queue *q) +{ + struct request *rq; + + while (1) { + while (!list_empty(&q->queue_head)) { + rq = list_entry_rq(q->queue_head.next); + if (blk_do_ordered(q, &rq)) + return rq; + } + + if (!q->elevator->ops->elevator_dispatch_fn(q, 0)) + return NULL; + } +} + +static inline void elv_activate_rq(struct request_queue *q, struct request *rq) +{ + struct elevator_queue *e = q->elevator; + + if (e->ops->elevator_activate_req_fn) + e->ops->elevator_activate_req_fn(q, rq); +} + +static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq) +{ + struct elevator_queue *e = q->elevator; + + if (e->ops->elevator_deactivate_req_fn) + e->ops->elevator_deactivate_req_fn(q, rq); +} + +#ifdef CONFIG_FAIL_IO_TIMEOUT +int blk_should_fake_timeout(struct request_queue *); +ssize_t part_timeout_show(struct device *, struct device_attribute *, char *); +ssize_t part_timeout_store(struct device *, struct device_attribute *, + const char *, size_t); +#else +static inline int blk_should_fake_timeout(struct request_queue *q) +{ + return 0; +} +#endif + +struct io_context *current_io_context(gfp_t gfp_flags, int node); + +int ll_back_merge_fn(struct request_queue *q, struct request *req, + struct bio *bio); +int ll_front_merge_fn(struct request_queue *q, struct request *req, + struct bio *bio); +int attempt_back_merge(struct request_queue *q, struct request *rq); +int attempt_front_merge(struct request_queue *q, struct request *rq); +void blk_recalc_rq_segments(struct request *rq); +void blk_rq_set_mixed_merge(struct request *rq); + +void blk_queue_congestion_threshold(struct request_queue *q); + +int blk_dev_init(void); + +void elv_quiesce_start(struct 
request_queue *q); +void elv_quiesce_end(struct request_queue *q); + + +/* + * Return the threshold (number of used requests) at which the queue is + * considered to be congested. It include a little hysteresis to keep the + * context switch rate down. + */ +static inline int queue_congestion_on_threshold(struct request_queue *q) +{ + return q->nr_congestion_on; +} + +/* + * The threshold at which a queue is considered to be uncongested + */ +static inline int queue_congestion_off_threshold(struct request_queue *q) +{ + return q->nr_congestion_off; +} + +#if defined(CONFIG_BLK_DEV_INTEGRITY) + +#define rq_for_each_integrity_segment(bvl, _rq, _iter) \ + __rq_for_each_bio(_iter.bio, _rq) \ + bip_for_each_vec(bvl, _iter.bio->bi_integrity, _iter.i) + +#endif /* BLK_DEV_INTEGRITY */ + +static inline int blk_cpu_to_group(int cpu) +{ + int group = NR_CPUS; +#ifdef CONFIG_SCHED_MC + const struct cpumask *mask = cpu_coregroup_mask(cpu); + group = cpumask_first(mask); +#elif defined(CONFIG_SCHED_SMT) + group = cpumask_first(topology_thread_cpumask(cpu)); +#else + return cpu; +#endif + if (likely(group < NR_CPUS)) + return group; + return cpu; +} + +/* + * Contribute to IO statistics IFF: + * + * a) it's attached to a gendisk, and + * b) the queue had IO stats enabled when this request was started, and + * c) it's a file system request or a discard request + */ +static inline int blk_do_io_stat(struct request *rq) +{ + return rq->rq_disk && + (rq->cmd_flags & REQ_IO_STAT) && + (rq->cmd_type == REQ_TYPE_FS || + (rq->cmd_flags & REQ_DISCARD)); +} + +#endif diff --git a/block/bsg.c b/block/bsg.c new file mode 100644 index 00000000..0c008705 --- /dev/null +++ b/block/bsg.c @@ -0,0 +1,1121 @@ +/* + * bsg.c - block layer implementation of the sg v4 interface + * + * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs + * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com> + * + * This file is subject to the terms and conditions of the GNU General Public + * License version 2. See the file "COPYING" in the main directory of this + * archive for more details. + * + */ +#include <linux/module.h> +#include <linux/init.h> +#include <linux/file.h> +#include <linux/blkdev.h> +#include <linux/poll.h> +#include <linux/cdev.h> +#include <linux/jiffies.h> +#include <linux/percpu.h> +#include <linux/uio.h> +#include <linux/idr.h> +#include <linux/bsg.h> +#include <linux/smp_lock.h> +#include <linux/slab.h> + +#include <scsi/scsi.h> +#include <scsi/scsi_ioctl.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_driver.h> +#include <scsi/sg.h> + +#define BSG_DESCRIPTION "Block layer SCSI generic (bsg) driver" +#define BSG_VERSION "0.4" + +struct bsg_device { + struct request_queue *queue; + spinlock_t lock; + struct list_head busy_list; + struct list_head done_list; + struct hlist_node dev_list; + atomic_t ref_count; + int queued_cmds; + int done_cmds; + wait_queue_head_t wq_done; + wait_queue_head_t wq_free; + char name[20]; + int max_queue; + unsigned long flags; +}; + +enum { + BSG_F_BLOCK = 1, +}; + +#define BSG_DEFAULT_CMDS 64 +#define BSG_MAX_DEVS 32768 + +#undef BSG_DEBUG + +#ifdef BSG_DEBUG +#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args) +#else +#define dprintk(fmt, args...) 
+#endif + +static DEFINE_MUTEX(bsg_mutex); +static DEFINE_IDR(bsg_minor_idr); + +#define BSG_LIST_ARRAY_SIZE 8 +static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE]; + +static struct class *bsg_class; +static int bsg_major; + +static struct kmem_cache *bsg_cmd_cachep; + +/* + * our internal command type + */ +struct bsg_command { + struct bsg_device *bd; + struct list_head list; + struct request *rq; + struct bio *bio; + struct bio *bidi_bio; + int err; + struct sg_io_v4 hdr; + char sense[SCSI_SENSE_BUFFERSIZE]; +}; + +static void bsg_free_command(struct bsg_command *bc) +{ + struct bsg_device *bd = bc->bd; + unsigned long flags; + + kmem_cache_free(bsg_cmd_cachep, bc); + + spin_lock_irqsave(&bd->lock, flags); + bd->queued_cmds--; + spin_unlock_irqrestore(&bd->lock, flags); + + wake_up(&bd->wq_free); +} + +static struct bsg_command *bsg_alloc_command(struct bsg_device *bd) +{ + struct bsg_command *bc = ERR_PTR(-EINVAL); + + spin_lock_irq(&bd->lock); + + if (bd->queued_cmds >= bd->max_queue) + goto out; + + bd->queued_cmds++; + spin_unlock_irq(&bd->lock); + + bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL); + if (unlikely(!bc)) { + spin_lock_irq(&bd->lock); + bd->queued_cmds--; + bc = ERR_PTR(-ENOMEM); + goto out; + } + + bc->bd = bd; + INIT_LIST_HEAD(&bc->list); + dprintk("%s: returning free cmd %p\n", bd->name, bc); + return bc; +out: + spin_unlock_irq(&bd->lock); + return bc; +} + +static inline struct hlist_head *bsg_dev_idx_hash(int index) +{ + return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)]; +} + +static int bsg_io_schedule(struct bsg_device *bd) +{ + DEFINE_WAIT(wait); + int ret = 0; + + spin_lock_irq(&bd->lock); + + BUG_ON(bd->done_cmds > bd->queued_cmds); + + /* + * -ENOSPC or -ENODATA? I'm going for -ENODATA, meaning "I have no + * work to do", even though we return -ENOSPC after this same test + * during bsg_write() -- there, it means our buffer can't have more + * bsg_commands added to it, thus has no space left. 
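bsg_alloc_command()/bsg_free_command() above bound the number of in-flight commands with a plain counter under bd->lock plus a wait queue (wq_free) that callers can sleep on. A userspace sketch of the same bookkeeping with pthreads; pool_get()/pool_put() are illustrative names, and note that bsg itself never sleeps inside the allocation (it fails and leaves the blocking decision to the read/write paths), so the blocking branch below only stands in for the wq_free wait.

#include <pthread.h>
#include <stdbool.h>

struct cmd_pool {
	pthread_mutex_t lock;
	pthread_cond_t  free_slot;
	int queued;
	int max_queue;
};

static struct cmd_pool pool = {
	.lock      = PTHREAD_MUTEX_INITIALIZER,
	.free_slot = PTHREAD_COND_INITIALIZER,
	.queued    = 0,
	.max_queue = 64,                /* mirrors BSG_DEFAULT_CMDS */
};

/* Reserve one command slot; block=false mirrors the O_NONBLOCK behaviour. */
static bool pool_get(struct cmd_pool *p, bool block)
{
	pthread_mutex_lock(&p->lock);
	while (p->queued >= p->max_queue) {
		if (!block) {
			pthread_mutex_unlock(&p->lock);
			return false;           /* caller sees "queue full" */
		}
		pthread_cond_wait(&p->free_slot, &p->lock);
	}
	p->queued++;
	pthread_mutex_unlock(&p->lock);
	return true;
}

/* Release a slot and wake one waiter, like bsg_free_command() waking wq_free. */
static void pool_put(struct cmd_pool *p)
{
	pthread_mutex_lock(&p->lock);
	p->queued--;
	pthread_cond_signal(&p->free_slot);
	pthread_mutex_unlock(&p->lock);
}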
+ */ + if (bd->done_cmds == bd->queued_cmds) { + ret = -ENODATA; + goto unlock; + } + + if (!test_bit(BSG_F_BLOCK, &bd->flags)) { + ret = -EAGAIN; + goto unlock; + } + + prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE); + spin_unlock_irq(&bd->lock); + io_schedule(); + finish_wait(&bd->wq_done, &wait); + + return ret; +unlock: + spin_unlock_irq(&bd->lock); + return ret; +} + +static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq, + struct sg_io_v4 *hdr, struct bsg_device *bd, + fmode_t has_write_perm) +{ + if (hdr->request_len > BLK_MAX_CDB) { + rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL); + if (!rq->cmd) + return -ENOMEM; + } + + if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request, + hdr->request_len)) + return -EFAULT; + + if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) { + if (blk_verify_command(rq->cmd, has_write_perm)) + return -EPERM; + } else if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + + /* + * fill in request structure + */ + rq->cmd_len = hdr->request_len; + rq->cmd_type = REQ_TYPE_BLOCK_PC; + + rq->timeout = msecs_to_jiffies(hdr->timeout); + if (!rq->timeout) + rq->timeout = q->sg_timeout; + if (!rq->timeout) + rq->timeout = BLK_DEFAULT_SG_TIMEOUT; + if (rq->timeout < BLK_MIN_SG_TIMEOUT) + rq->timeout = BLK_MIN_SG_TIMEOUT; + + return 0; +} + +/* + * Check if sg_io_v4 from user is allowed and valid + */ +static int +bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw) +{ + int ret = 0; + + if (hdr->guard != 'Q') + return -EINVAL; + + switch (hdr->protocol) { + case BSG_PROTOCOL_SCSI: + switch (hdr->subprotocol) { + case BSG_SUB_PROTOCOL_SCSI_CMD: + case BSG_SUB_PROTOCOL_SCSI_TRANSPORT: + break; + default: + ret = -EINVAL; + } + break; + default: + ret = -EINVAL; + } + + *rw = hdr->dout_xfer_len ? WRITE : READ; + return ret; +} + +/* + * map sg_io_v4 to a request. 
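For reference, this is roughly how a client fills in the sg_io_v4 header that bsg_validate_sgv4_hdr() and blk_fill_sgv4_hdr_rq() above check, here issuing a 6-byte INQUIRY through the SG_IO ioctl; the /dev/bsg/0:0:0:0 path is only an example and error handling is trimmed to the essentials.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>            /* SG_IO */
#include <linux/bsg.h>          /* struct sg_io_v4, BSG_PROTOCOL_SCSI, ... */

int main(void)
{
	unsigned char cdb[6]    = { 0x12, 0, 0, 0, 96, 0 };  /* INQUIRY, 96 bytes */
	unsigned char data[96]  = { 0 };
	unsigned char sense[32] = { 0 };
	struct sg_io_v4 hdr;
	int fd;

	fd = open("/dev/bsg/0:0:0:0", O_RDWR);   /* example node name */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&hdr, 0, sizeof(hdr));
	hdr.guard            = 'Q';              /* checked by bsg_validate_sgv4_hdr() */
	hdr.protocol         = BSG_PROTOCOL_SCSI;
	hdr.subprotocol      = BSG_SUB_PROTOCOL_SCSI_CMD;
	hdr.request_len      = sizeof(cdb);
	hdr.request          = (unsigned long)cdb;
	hdr.din_xfer_len     = sizeof(data);     /* data flows in from the device */
	hdr.din_xferp        = (unsigned long)data;
	hdr.max_response_len = sizeof(sense);
	hdr.response         = (unsigned long)sense;
	hdr.timeout          = 30000;            /* milliseconds */

	if (ioctl(fd, SG_IO, &hdr) < 0) {
		perror("SG_IO");
		close(fd);
		return 1;
	}

	printf("device_status=%u transport_status=%u driver_status=%u resid=%d\n",
	       hdr.device_status, hdr.transport_status, hdr.driver_status,
	       hdr.din_resid);
	close(fd);
	return 0;
}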
+ */ +static struct request * +bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm, + u8 *sense) +{ + struct request_queue *q = bd->queue; + struct request *rq, *next_rq = NULL; + int ret, rw; + unsigned int dxfer_len; + void *dxferp = NULL; + + dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp, + hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp, + hdr->din_xfer_len); + + ret = bsg_validate_sgv4_hdr(q, hdr, &rw); + if (ret) + return ERR_PTR(ret); + + /* + * map scatter-gather elements separately and string them to request + */ + rq = blk_get_request(q, rw, GFP_KERNEL); + if (!rq) + return ERR_PTR(-ENOMEM); + ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm); + if (ret) + goto out; + + if (rw == WRITE && hdr->din_xfer_len) { + if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) { + ret = -EOPNOTSUPP; + goto out; + } + + next_rq = blk_get_request(q, READ, GFP_KERNEL); + if (!next_rq) { + ret = -ENOMEM; + goto out; + } + rq->next_rq = next_rq; + next_rq->cmd_type = rq->cmd_type; + + dxferp = (void*)(unsigned long)hdr->din_xferp; + ret = blk_rq_map_user(q, next_rq, NULL, dxferp, + hdr->din_xfer_len, GFP_KERNEL); + if (ret) + goto out; + } + + if (hdr->dout_xfer_len) { + dxfer_len = hdr->dout_xfer_len; + dxferp = (void*)(unsigned long)hdr->dout_xferp; + } else if (hdr->din_xfer_len) { + dxfer_len = hdr->din_xfer_len; + dxferp = (void*)(unsigned long)hdr->din_xferp; + } else + dxfer_len = 0; + + if (dxfer_len) { + ret = blk_rq_map_user(q, rq, NULL, dxferp, dxfer_len, + GFP_KERNEL); + if (ret) + goto out; + } + + rq->sense = sense; + rq->sense_len = 0; + + return rq; +out: + if (rq->cmd != rq->__cmd) + kfree(rq->cmd); + blk_put_request(rq); + if (next_rq) { + blk_rq_unmap_user(next_rq->bio); + blk_put_request(next_rq); + } + return ERR_PTR(ret); +} + +/* + * async completion call-back from the block layer, when scsi/ide/whatever + * calls end_that_request_last() on a request + */ +static void bsg_rq_end_io(struct request *rq, int uptodate) +{ + struct bsg_command *bc = rq->end_io_data; + struct bsg_device *bd = bc->bd; + unsigned long flags; + + dprintk("%s: finished rq %p bc %p, bio %p stat %d\n", + bd->name, rq, bc, bc->bio, uptodate); + + bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration); + + spin_lock_irqsave(&bd->lock, flags); + list_move_tail(&bc->list, &bd->done_list); + bd->done_cmds++; + spin_unlock_irqrestore(&bd->lock, flags); + + wake_up(&bd->wq_done); +} + +/* + * do final setup of a 'bc' and submit the matching 'rq' to the block + * layer for io + */ +static void bsg_add_command(struct bsg_device *bd, struct request_queue *q, + struct bsg_command *bc, struct request *rq) +{ + int at_head = (0 == (bc->hdr.flags & BSG_FLAG_Q_AT_TAIL)); + + /* + * add bc command to busy queue and submit rq for io + */ + bc->rq = rq; + bc->bio = rq->bio; + if (rq->next_rq) + bc->bidi_bio = rq->next_rq->bio; + bc->hdr.duration = jiffies; + spin_lock_irq(&bd->lock); + list_add_tail(&bc->list, &bd->busy_list); + spin_unlock_irq(&bd->lock); + + dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc); + + rq->end_io_data = bc; + blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io); +} + +static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd) +{ + struct bsg_command *bc = NULL; + + spin_lock_irq(&bd->lock); + if (bd->done_cmds) { + bc = list_first_entry(&bd->done_list, struct bsg_command, list); + list_del(&bc->list); + bd->done_cmds--; + } + spin_unlock_irq(&bd->lock); + + return bc; +} + +/* + * Get a finished 
command from the done list + */ +static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd) +{ + struct bsg_command *bc; + int ret; + + do { + bc = bsg_next_done_cmd(bd); + if (bc) + break; + + if (!test_bit(BSG_F_BLOCK, &bd->flags)) { + bc = ERR_PTR(-EAGAIN); + break; + } + + ret = wait_event_interruptible(bd->wq_done, bd->done_cmds); + if (ret) { + bc = ERR_PTR(-ERESTARTSYS); + break; + } + } while (1); + + dprintk("%s: returning done %p\n", bd->name, bc); + + return bc; +} + +static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr, + struct bio *bio, struct bio *bidi_bio) +{ + int ret = 0; + + dprintk("rq %p bio %p 0x%x\n", rq, bio, rq->errors); + /* + * fill in all the output members + */ + hdr->device_status = rq->errors & 0xff; + hdr->transport_status = host_byte(rq->errors); + hdr->driver_status = driver_byte(rq->errors); + hdr->info = 0; + if (hdr->device_status || hdr->transport_status || hdr->driver_status) + hdr->info |= SG_INFO_CHECK; + hdr->response_len = 0; + + if (rq->sense_len && hdr->response) { + int len = min_t(unsigned int, hdr->max_response_len, + rq->sense_len); + + ret = copy_to_user((void*)(unsigned long)hdr->response, + rq->sense, len); + if (!ret) + hdr->response_len = len; + else + ret = -EFAULT; + } + + if (rq->next_rq) { + hdr->dout_resid = rq->resid_len; + hdr->din_resid = rq->next_rq->resid_len; + blk_rq_unmap_user(bidi_bio); + blk_put_request(rq->next_rq); + } else if (rq_data_dir(rq) == READ) + hdr->din_resid = rq->resid_len; + else + hdr->dout_resid = rq->resid_len; + + /* + * If the request generated a negative error number, return it + * (providing we aren't already returning an error); if it's + * just a protocol response (i.e. non negative), that gets + * processed above. + */ + if (!ret && rq->errors < 0) + ret = rq->errors; + + blk_rq_unmap_user(bio); + if (rq->cmd != rq->__cmd) + kfree(rq->cmd); + blk_put_request(rq); + + return ret; +} + +static int bsg_complete_all_commands(struct bsg_device *bd) +{ + struct bsg_command *bc; + int ret, tret; + + dprintk("%s: entered\n", bd->name); + + /* + * wait for all commands to complete + */ + ret = 0; + do { + ret = bsg_io_schedule(bd); + /* + * look for -ENODATA specifically -- we'll sometimes get + * -ERESTARTSYS when we've taken a signal, but we can't + * return until we're done freeing the queue, so ignore + * it. The signal will get handled when we're done freeing + * the bsg_device. + */ + } while (ret != -ENODATA); + + /* + * discard done commands + */ + ret = 0; + do { + spin_lock_irq(&bd->lock); + if (!bd->queued_cmds) { + spin_unlock_irq(&bd->lock); + break; + } + spin_unlock_irq(&bd->lock); + + bc = bsg_get_done_cmd(bd); + if (IS_ERR(bc)) + break; + + tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio, + bc->bidi_bio); + if (!ret) + ret = tret; + + bsg_free_command(bc); + } while (1); + + return ret; +} + +static int +__bsg_read(char __user *buf, size_t count, struct bsg_device *bd, + const struct iovec *iov, ssize_t *bytes_read) +{ + struct bsg_command *bc; + int nr_commands, ret; + + if (count % sizeof(struct sg_io_v4)) + return -EINVAL; + + ret = 0; + nr_commands = count / sizeof(struct sg_io_v4); + while (nr_commands) { + bc = bsg_get_done_cmd(bd); + if (IS_ERR(bc)) { + ret = PTR_ERR(bc); + break; + } + + /* + * this is the only case where we need to copy data back + * after completing the request. 
so do that here, + * bsg_complete_work() cannot do that for us + */ + ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio, + bc->bidi_bio); + + if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr))) + ret = -EFAULT; + + bsg_free_command(bc); + + if (ret) + break; + + buf += sizeof(struct sg_io_v4); + *bytes_read += sizeof(struct sg_io_v4); + nr_commands--; + } + + return ret; +} + +static inline void bsg_set_block(struct bsg_device *bd, struct file *file) +{ + if (file->f_flags & O_NONBLOCK) + clear_bit(BSG_F_BLOCK, &bd->flags); + else + set_bit(BSG_F_BLOCK, &bd->flags); +} + +/* + * Check if the error is a "real" error that we should return. + */ +static inline int err_block_err(int ret) +{ + if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN) + return 1; + + return 0; +} + +static ssize_t +bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) +{ + struct bsg_device *bd = file->private_data; + int ret; + ssize_t bytes_read; + + dprintk("%s: read %Zd bytes\n", bd->name, count); + + bsg_set_block(bd, file); + + bytes_read = 0; + ret = __bsg_read(buf, count, bd, NULL, &bytes_read); + *ppos = bytes_read; + + if (!bytes_read || (bytes_read && err_block_err(ret))) + bytes_read = ret; + + return bytes_read; +} + +static int __bsg_write(struct bsg_device *bd, const char __user *buf, + size_t count, ssize_t *bytes_written, + fmode_t has_write_perm) +{ + struct bsg_command *bc; + struct request *rq; + int ret, nr_commands; + + if (count % sizeof(struct sg_io_v4)) + return -EINVAL; + + nr_commands = count / sizeof(struct sg_io_v4); + rq = NULL; + bc = NULL; + ret = 0; + while (nr_commands) { + struct request_queue *q = bd->queue; + + bc = bsg_alloc_command(bd); + if (IS_ERR(bc)) { + ret = PTR_ERR(bc); + bc = NULL; + break; + } + + if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) { + ret = -EFAULT; + break; + } + + /* + * get a request, fill in the blanks, and add to request queue + */ + rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm, bc->sense); + if (IS_ERR(rq)) { + ret = PTR_ERR(rq); + rq = NULL; + break; + } + + bsg_add_command(bd, q, bc, rq); + bc = NULL; + rq = NULL; + nr_commands--; + buf += sizeof(struct sg_io_v4); + *bytes_written += sizeof(struct sg_io_v4); + } + + if (bc) + bsg_free_command(bc); + + return ret; +} + +static ssize_t +bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) +{ + struct bsg_device *bd = file->private_data; + ssize_t bytes_written; + int ret; + + dprintk("%s: write %Zd bytes\n", bd->name, count); + + bsg_set_block(bd, file); + + bytes_written = 0; + ret = __bsg_write(bd, buf, count, &bytes_written, + file->f_mode & FMODE_WRITE); + + *ppos = bytes_written; + + /* + * return bytes written on non-fatal errors + */ + if (!bytes_written || (bytes_written && err_block_err(ret))) + bytes_written = ret; + + dprintk("%s: returning %Zd\n", bd->name, bytes_written); + return bytes_written; +} + +static struct bsg_device *bsg_alloc_device(void) +{ + struct bsg_device *bd; + + bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL); + if (unlikely(!bd)) + return NULL; + + spin_lock_init(&bd->lock); + + bd->max_queue = BSG_DEFAULT_CMDS; + + INIT_LIST_HEAD(&bd->busy_list); + INIT_LIST_HEAD(&bd->done_list); + INIT_HLIST_NODE(&bd->dev_list); + + init_waitqueue_head(&bd->wq_free); + init_waitqueue_head(&bd->wq_done); + return bd; +} + +static void bsg_kref_release_function(struct kref *kref) +{ + struct bsg_class_device *bcd = + container_of(kref, struct bsg_class_device, ref); + struct device *parent = bcd->parent; + + if 
(bcd->release) + bcd->release(bcd->parent); + + put_device(parent); +} + +static int bsg_put_device(struct bsg_device *bd) +{ + int ret = 0, do_free; + struct request_queue *q = bd->queue; + + mutex_lock(&bsg_mutex); + + do_free = atomic_dec_and_test(&bd->ref_count); + if (!do_free) { + mutex_unlock(&bsg_mutex); + goto out; + } + + hlist_del(&bd->dev_list); + mutex_unlock(&bsg_mutex); + + dprintk("%s: tearing down\n", bd->name); + + /* + * close can always block + */ + set_bit(BSG_F_BLOCK, &bd->flags); + + /* + * correct error detection baddies here again. it's the responsibility + * of the app to properly reap commands before close() if it wants + * fool-proof error detection + */ + ret = bsg_complete_all_commands(bd); + + kfree(bd); +out: + kref_put(&q->bsg_dev.ref, bsg_kref_release_function); + if (do_free) + blk_put_queue(q); + return ret; +} + +static struct bsg_device *bsg_add_device(struct inode *inode, + struct request_queue *rq, + struct file *file) +{ + struct bsg_device *bd; + int ret; +#ifdef BSG_DEBUG + unsigned char buf[32]; +#endif + ret = blk_get_queue(rq); + if (ret) + return ERR_PTR(-ENXIO); + + bd = bsg_alloc_device(); + if (!bd) { + blk_put_queue(rq); + return ERR_PTR(-ENOMEM); + } + + bd->queue = rq; + + bsg_set_block(bd, file); + + atomic_set(&bd->ref_count, 1); + mutex_lock(&bsg_mutex); + hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode))); + + strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1); + dprintk("bound to <%s>, max queue %d\n", + format_dev_t(buf, inode->i_rdev), bd->max_queue); + + mutex_unlock(&bsg_mutex); + return bd; +} + +static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q) +{ + struct bsg_device *bd; + struct hlist_node *entry; + + mutex_lock(&bsg_mutex); + + hlist_for_each_entry(bd, entry, bsg_dev_idx_hash(minor), dev_list) { + if (bd->queue == q) { + atomic_inc(&bd->ref_count); + goto found; + } + } + bd = NULL; +found: + mutex_unlock(&bsg_mutex); + return bd; +} + +static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file) +{ + struct bsg_device *bd; + struct bsg_class_device *bcd; + + /* + * find the class device + */ + mutex_lock(&bsg_mutex); + bcd = idr_find(&bsg_minor_idr, iminor(inode)); + if (bcd) + kref_get(&bcd->ref); + mutex_unlock(&bsg_mutex); + + if (!bcd) + return ERR_PTR(-ENODEV); + + bd = __bsg_get_device(iminor(inode), bcd->queue); + if (bd) + return bd; + + bd = bsg_add_device(inode, bcd->queue, file); + if (IS_ERR(bd)) + kref_put(&bcd->ref, bsg_kref_release_function); + + return bd; +} + +static int bsg_open(struct inode *inode, struct file *file) +{ + struct bsg_device *bd; + + lock_kernel(); + bd = bsg_get_device(inode, file); + unlock_kernel(); + + if (IS_ERR(bd)) + return PTR_ERR(bd); + + file->private_data = bd; + return 0; +} + +static int bsg_release(struct inode *inode, struct file *file) +{ + struct bsg_device *bd = file->private_data; + + file->private_data = NULL; + return bsg_put_device(bd); +} + +static unsigned int bsg_poll(struct file *file, poll_table *wait) +{ + struct bsg_device *bd = file->private_data; + unsigned int mask = 0; + + poll_wait(file, &bd->wq_done, wait); + poll_wait(file, &bd->wq_free, wait); + + spin_lock_irq(&bd->lock); + if (!list_empty(&bd->done_list)) + mask |= POLLIN | POLLRDNORM; + if (bd->queued_cmds >= bd->max_queue) + mask |= POLLOUT; + spin_unlock_irq(&bd->lock); + + return mask; +} + +static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct bsg_device *bd = 
file->private_data; + int __user *uarg = (int __user *) arg; + int ret; + + switch (cmd) { + /* + * our own ioctls + */ + case SG_GET_COMMAND_Q: + return put_user(bd->max_queue, uarg); + case SG_SET_COMMAND_Q: { + int queue; + + if (get_user(queue, uarg)) + return -EFAULT; + if (queue < 1) + return -EINVAL; + + spin_lock_irq(&bd->lock); + bd->max_queue = queue; + spin_unlock_irq(&bd->lock); + return 0; + } + + /* + * SCSI/sg ioctls + */ + case SG_GET_VERSION_NUM: + case SCSI_IOCTL_GET_IDLUN: + case SCSI_IOCTL_GET_BUS_NUMBER: + case SG_SET_TIMEOUT: + case SG_GET_TIMEOUT: + case SG_GET_RESERVED_SIZE: + case SG_SET_RESERVED_SIZE: + case SG_EMULATED_HOST: + case SCSI_IOCTL_SEND_COMMAND: { + void __user *uarg = (void __user *) arg; + return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg); + } + case SG_IO: { + struct request *rq; + struct bio *bio, *bidi_bio = NULL; + struct sg_io_v4 hdr; + int at_head; + u8 sense[SCSI_SENSE_BUFFERSIZE]; + + if (copy_from_user(&hdr, uarg, sizeof(hdr))) + return -EFAULT; + + rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE, sense); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + bio = rq->bio; + if (rq->next_rq) + bidi_bio = rq->next_rq->bio; + + at_head = (0 == (hdr.flags & BSG_FLAG_Q_AT_TAIL)); + blk_execute_rq(bd->queue, NULL, rq, at_head); + ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio); + + if (copy_to_user(uarg, &hdr, sizeof(hdr))) + return -EFAULT; + + return ret; + } + /* + * block device ioctls + */ + default: +#if 0 + return ioctl_by_bdev(bd->bdev, cmd, arg); +#else + return -ENOTTY; +#endif + } +} + +static const struct file_operations bsg_fops = { + .read = bsg_read, + .write = bsg_write, + .poll = bsg_poll, + .open = bsg_open, + .release = bsg_release, + .unlocked_ioctl = bsg_ioctl, + .owner = THIS_MODULE, +}; + +void bsg_unregister_queue(struct request_queue *q) +{ + struct bsg_class_device *bcd = &q->bsg_dev; + + if (!bcd->class_dev) + return; + + mutex_lock(&bsg_mutex); + idr_remove(&bsg_minor_idr, bcd->minor); + sysfs_remove_link(&q->kobj, "bsg"); + device_unregister(bcd->class_dev); + bcd->class_dev = NULL; + kref_put(&bcd->ref, bsg_kref_release_function); + mutex_unlock(&bsg_mutex); +} +EXPORT_SYMBOL_GPL(bsg_unregister_queue); + +int bsg_register_queue(struct request_queue *q, struct device *parent, + const char *name, void (*release)(struct device *)) +{ + struct bsg_class_device *bcd; + dev_t dev; + int ret, minor; + struct device *class_dev = NULL; + const char *devname; + + if (name) + devname = name; + else + devname = dev_name(parent); + + /* + * we need a proper transport to send commands, not a stacked device + */ + if (!q->request_fn) + return 0; + + bcd = &q->bsg_dev; + memset(bcd, 0, sizeof(*bcd)); + + mutex_lock(&bsg_mutex); + + ret = idr_pre_get(&bsg_minor_idr, GFP_KERNEL); + if (!ret) { + ret = -ENOMEM; + goto unlock; + } + + ret = idr_get_new(&bsg_minor_idr, bcd, &minor); + if (ret < 0) + goto unlock; + + if (minor >= BSG_MAX_DEVS) { + printk(KERN_ERR "bsg: too many bsg devices\n"); + ret = -EINVAL; + goto remove_idr; + } + + bcd->minor = minor; + bcd->queue = q; + bcd->parent = get_device(parent); + bcd->release = release; + kref_init(&bcd->ref); + dev = MKDEV(bsg_major, bcd->minor); + class_dev = device_create(bsg_class, parent, dev, NULL, "%s", devname); + if (IS_ERR(class_dev)) { + ret = PTR_ERR(class_dev); + goto put_dev; + } + bcd->class_dev = class_dev; + + if (q->kobj.sd) { + ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg"); + if (ret) + goto unregister_class_dev; + } + + 
mutex_unlock(&bsg_mutex); + return 0; + +unregister_class_dev: + device_unregister(class_dev); +put_dev: + put_device(parent); +remove_idr: + idr_remove(&bsg_minor_idr, minor); +unlock: + mutex_unlock(&bsg_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(bsg_register_queue); + +static struct cdev bsg_cdev; + +static char *bsg_devnode(struct device *dev, mode_t *mode) +{ + return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev)); +} + +static int __init bsg_init(void) +{ + int ret, i; + dev_t devid; + + bsg_cmd_cachep = kmem_cache_create("bsg_cmd", + sizeof(struct bsg_command), 0, 0, NULL); + if (!bsg_cmd_cachep) { + printk(KERN_ERR "bsg: failed creating slab cache\n"); + return -ENOMEM; + } + + for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++) + INIT_HLIST_HEAD(&bsg_device_list[i]); + + bsg_class = class_create(THIS_MODULE, "bsg"); + if (IS_ERR(bsg_class)) { + ret = PTR_ERR(bsg_class); + goto destroy_kmemcache; + } + bsg_class->devnode = bsg_devnode; + + ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg"); + if (ret) + goto destroy_bsg_class; + + bsg_major = MAJOR(devid); + + cdev_init(&bsg_cdev, &bsg_fops); + ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS); + if (ret) + goto unregister_chrdev; + + printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION + " loaded (major %d)\n", bsg_major); + return 0; +unregister_chrdev: + unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS); +destroy_bsg_class: + class_destroy(bsg_class); +destroy_kmemcache: + kmem_cache_destroy(bsg_cmd_cachep); + return ret; +} + +MODULE_AUTHOR("Jens Axboe"); +MODULE_DESCRIPTION(BSG_DESCRIPTION); +MODULE_LICENSE("GPL"); + +device_initcall(bsg_init); diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c new file mode 100644 index 00000000..31d33787 --- /dev/null +++ b/block/cfq-iosched.c @@ -0,0 +1,4155 @@ +/* + * CFQ, or complete fairness queueing, disk scheduler. + * + * Based on ideas from a previously unfinished io + * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli. 
+ * + * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk> + */ +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/blkdev.h> +#include <linux/elevator.h> +#include <linux/jiffies.h> +#include <linux/rbtree.h> +#include <linux/ioprio.h> +#include <linux/blktrace_api.h> +#include "cfq.h" + +/* + * tunables + */ +/* max queue in one round of service */ +static const int cfq_quantum = 8; +static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 }; +/* maximum backwards seek, in KiB */ +static const int cfq_back_max = 16 * 1024; +/* penalty of a backwards seek */ +static const int cfq_back_penalty = 2; +static const int cfq_slice_sync = HZ / 10; +static int cfq_slice_async = HZ / 25; +static const int cfq_slice_async_rq = 2; +static int cfq_slice_idle = HZ / 125; +static int cfq_group_idle = HZ / 125; +static const int cfq_target_latency = HZ * 3/10; /* 300 ms */ +static const int cfq_hist_divisor = 4; + +/* + * offset from end of service tree + */ +#define CFQ_IDLE_DELAY (HZ / 5) + +/* + * below this threshold, we consider thinktime immediate + */ +#define CFQ_MIN_TT (2) + +#define CFQ_SLICE_SCALE (5) +#define CFQ_HW_QUEUE_MIN (5) +#define CFQ_SERVICE_SHIFT 12 + +#define CFQQ_SEEK_THR (sector_t)(8 * 100) +#define CFQQ_CLOSE_THR (sector_t)(8 * 1024) +#define CFQQ_SECT_THR_NONROT (sector_t)(2 * 32) +#define CFQQ_SEEKY(cfqq) (hweight32(cfqq->seek_history) > 32/8) + +#define RQ_CIC(rq) \ + ((struct cfq_io_context *) (rq)->elevator_private) +#define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elevator_private2) +#define RQ_CFQG(rq) (struct cfq_group *) ((rq)->elevator_private3) + +static struct kmem_cache *cfq_pool; +static struct kmem_cache *cfq_ioc_pool; + +static DEFINE_PER_CPU(unsigned long, cfq_ioc_count); +static struct completion *ioc_gone; +static DEFINE_SPINLOCK(ioc_gone_lock); + +static DEFINE_SPINLOCK(cic_index_lock); +static DEFINE_IDA(cic_index_ida); + +#define CFQ_PRIO_LISTS IOPRIO_BE_NR +#define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE) +#define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT) + +#define sample_valid(samples) ((samples) > 80) +#define rb_entry_cfqg(node) rb_entry((node), struct cfq_group, rb_node) + +/* + * Most of our rbtree usage is for sorting with min extraction, so + * if we cache the leftmost node we don't have to walk down the tree + * to find it. Idea borrowed from Ingo Molnars CFS scheduler. We should + * move this into the elevator for the rq sorting as well. 
+ */ +struct cfq_rb_root { + struct rb_root rb; + struct rb_node *left; + unsigned count; + unsigned total_weight; + u64 min_vdisktime; + struct rb_node *active; +}; +#define CFQ_RB_ROOT (struct cfq_rb_root) { .rb = RB_ROOT, .left = NULL, \ + .count = 0, .min_vdisktime = 0, } + +/* + * Per process-grouping structure + */ +struct cfq_queue { + /* reference count */ + atomic_t ref; + /* various state flags, see below */ + unsigned int flags; + /* parent cfq_data */ + struct cfq_data *cfqd; + /* service_tree member */ + struct rb_node rb_node; + /* service_tree key */ + unsigned long rb_key; + /* prio tree member */ + struct rb_node p_node; + /* prio tree root we belong to, if any */ + struct rb_root *p_root; + /* sorted list of pending requests */ + struct rb_root sort_list; + /* if fifo isn't expired, next request to serve */ + struct request *next_rq; + /* requests queued in sort_list */ + int queued[2]; + /* currently allocated requests */ + int allocated[2]; + /* fifo list of requests in sort_list */ + struct list_head fifo; + + /* time when queue got scheduled in to dispatch first request. */ + unsigned long dispatch_start; + unsigned int allocated_slice; + unsigned int slice_dispatch; + /* time when first request from queue completed and slice started. */ + unsigned long slice_start; + unsigned long slice_end; + long slice_resid; + + /* pending metadata requests */ + int meta_pending; + /* number of requests that are on the dispatch list or inside driver */ + int dispatched; + + /* io prio of this group */ + unsigned short ioprio, org_ioprio; + unsigned short ioprio_class, org_ioprio_class; + + pid_t pid; + + u32 seek_history; + sector_t last_request_pos; + + struct cfq_rb_root *service_tree; + struct cfq_queue *new_cfqq; + struct cfq_group *cfqg; + struct cfq_group *orig_cfqg; + /* Number of sectors dispatched from queue in single dispatch round */ + unsigned long nr_sectors; +}; + +/* + * First index in the service_trees. + * IDLE is handled separately, so it has negative index + */ +enum wl_prio_t { + BE_WORKLOAD = 0, + RT_WORKLOAD = 1, + IDLE_WORKLOAD = 2, +}; + +/* + * Second index in the service_trees. + */ +enum wl_type_t { + ASYNC_WORKLOAD = 0, + SYNC_NOIDLE_WORKLOAD = 1, + SYNC_WORKLOAD = 2 +}; + +/* This is per cgroup per device grouping structure */ +struct cfq_group { + /* group service_tree member */ + struct rb_node rb_node; + + /* group service_tree key */ + u64 vdisktime; + unsigned int weight; + bool on_st; + + /* number of cfqq currently on this group */ + int nr_cfqq; + + /* Per group busy queus average. Useful for workload slice calc. */ + unsigned int busy_queues_avg[2]; + /* + * rr lists of queues with requests, onle rr for each priority class. 
+ * Counts are embedded in the cfq_rb_root + */ + struct cfq_rb_root service_trees[2][3]; + struct cfq_rb_root service_tree_idle; + + unsigned long saved_workload_slice; + enum wl_type_t saved_workload; + enum wl_prio_t saved_serving_prio; + struct blkio_group blkg; +#ifdef CONFIG_CFQ_GROUP_IOSCHED + struct hlist_node cfqd_node; + atomic_t ref; +#endif + /* number of requests that are on the dispatch list or inside driver */ + int dispatched; +}; + +/* + * Per block device queue structure + */ +struct cfq_data { + struct request_queue *queue; + /* Root service tree for cfq_groups */ + struct cfq_rb_root grp_service_tree; + struct cfq_group root_group; + + /* + * The priority currently being served + */ + enum wl_prio_t serving_prio; + enum wl_type_t serving_type; + unsigned long workload_expires; + struct cfq_group *serving_group; + bool noidle_tree_requires_idle; + + /* + * Each priority tree is sorted by next_request position. These + * trees are used when determining if two or more queues are + * interleaving requests (see cfq_close_cooperator). + */ + struct rb_root prio_trees[CFQ_PRIO_LISTS]; + + unsigned int busy_queues; + + int rq_in_driver; + int rq_in_flight[2]; + + /* + * queue-depth detection + */ + int rq_queued; + int hw_tag; + /* + * hw_tag can be + * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection) + * 1 => NCQ is present (hw_tag_est_depth is the estimated max depth) + * 0 => no NCQ + */ + int hw_tag_est_depth; + unsigned int hw_tag_samples; + + /* + * idle window management + */ + struct timer_list idle_slice_timer; + struct work_struct unplug_work; + + struct cfq_queue *active_queue; + struct cfq_io_context *active_cic; + + /* + * async queue for each priority case + */ + struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR]; + struct cfq_queue *async_idle_cfqq; + + sector_t last_position; + + /* + * tunables, see top of file + */ + unsigned int cfq_quantum; + unsigned int cfq_fifo_expire[2]; + unsigned int cfq_back_penalty; + unsigned int cfq_back_max; + unsigned int cfq_slice[2]; + unsigned int cfq_slice_async_rq; + unsigned int cfq_slice_idle; + unsigned int cfq_group_idle; + unsigned int cfq_latency; + unsigned int cfq_group_isolation; + + unsigned int cic_index; + struct list_head cic_list; + + /* + * Fallback dummy cfqq for extreme OOM conditions + */ + struct cfq_queue oom_cfqq; + + unsigned long last_delayed_sync; + + /* List of cfq groups being managed on this device*/ + struct hlist_head cfqg_list; + struct rcu_head rcu; +}; + +static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd); + +static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg, + enum wl_prio_t prio, + enum wl_type_t type) +{ + if (!cfqg) + return NULL; + + if (prio == IDLE_WORKLOAD) + return &cfqg->service_tree_idle; + + return &cfqg->service_trees[prio][type]; +} + +enum cfqq_state_flags { + CFQ_CFQQ_FLAG_on_rr = 0, /* on round-robin busy list */ + CFQ_CFQQ_FLAG_wait_request, /* waiting for a request */ + CFQ_CFQQ_FLAG_must_dispatch, /* must be allowed a dispatch */ + CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */ + CFQ_CFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */ + CFQ_CFQQ_FLAG_idle_window, /* slice idling enabled */ + CFQ_CFQQ_FLAG_prio_changed, /* task priority has changed */ + CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */ + CFQ_CFQQ_FLAG_sync, /* synchronous queue */ + CFQ_CFQQ_FLAG_coop, /* cfqq is shared */ + CFQ_CFQQ_FLAG_split_coop, /* shared cfqq will be splitted */ + CFQ_CFQQ_FLAG_deep, /* 
sync cfqq experienced large depth */ + CFQ_CFQQ_FLAG_wait_busy, /* Waiting for next request */ +}; + +#define CFQ_CFQQ_FNS(name) \ +static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \ +{ \ + (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name); \ +} \ +static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \ +{ \ + (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \ +} \ +static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \ +{ \ + return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \ +} + +CFQ_CFQQ_FNS(on_rr); +CFQ_CFQQ_FNS(wait_request); +CFQ_CFQQ_FNS(must_dispatch); +CFQ_CFQQ_FNS(must_alloc_slice); +CFQ_CFQQ_FNS(fifo_expire); +CFQ_CFQQ_FNS(idle_window); +CFQ_CFQQ_FNS(prio_changed); +CFQ_CFQQ_FNS(slice_new); +CFQ_CFQQ_FNS(sync); +CFQ_CFQQ_FNS(coop); +CFQ_CFQQ_FNS(split_coop); +CFQ_CFQQ_FNS(deep); +CFQ_CFQQ_FNS(wait_busy); +#undef CFQ_CFQQ_FNS + +#ifdef CONFIG_CFQ_GROUP_IOSCHED +#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \ + blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \ + cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \ + blkg_path(&(cfqq)->cfqg->blkg), ##args); + +#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) \ + blk_add_trace_msg((cfqd)->queue, "%s " fmt, \ + blkg_path(&(cfqg)->blkg), ##args); \ + +#else +#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \ + blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args) +#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0); +#endif +#define cfq_log(cfqd, fmt, args...) \ + blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args) + +/* Traverses through cfq group service trees */ +#define for_each_cfqg_st(cfqg, i, j, st) \ + for (i = 0; i <= IDLE_WORKLOAD; i++) \ + for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\ + : &cfqg->service_tree_idle; \ + (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \ + (i == IDLE_WORKLOAD && j == 0); \ + j++, st = i < IDLE_WORKLOAD ? \ + &cfqg->service_trees[i][j]: NULL) \ + + +static inline bool iops_mode(struct cfq_data *cfqd) +{ + /* + * If we are not idling on queues and it is a NCQ drive, parallel + * execution of requests is on and measuring time is not possible + * in most of the cases until and unless we drive shallower queue + * depths and that becomes a performance bottleneck. In such cases + * switch to start providing fairness in terms of number of IOs. 
+ */ + if (!cfqd->cfq_slice_idle && cfqd->hw_tag) + return true; + else + return false; +} + +static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq) +{ + if (cfq_class_idle(cfqq)) + return IDLE_WORKLOAD; + if (cfq_class_rt(cfqq)) + return RT_WORKLOAD; + return BE_WORKLOAD; +} + + +static enum wl_type_t cfqq_type(struct cfq_queue *cfqq) +{ + if (!cfq_cfqq_sync(cfqq)) + return ASYNC_WORKLOAD; + if (!cfq_cfqq_idle_window(cfqq)) + return SYNC_NOIDLE_WORKLOAD; + return SYNC_WORKLOAD; +} + +static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl, + struct cfq_data *cfqd, + struct cfq_group *cfqg) +{ + if (wl == IDLE_WORKLOAD) + return cfqg->service_tree_idle.count; + + return cfqg->service_trees[wl][ASYNC_WORKLOAD].count + + cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count + + cfqg->service_trees[wl][SYNC_WORKLOAD].count; +} + +static inline int cfqg_busy_async_queues(struct cfq_data *cfqd, + struct cfq_group *cfqg) +{ + return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count + + cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count; +} + +static void cfq_dispatch_insert(struct request_queue *, struct request *); +static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool, + struct io_context *, gfp_t); +static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *, + struct io_context *); + +static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic, + bool is_sync) +{ + return cic->cfqq[is_sync]; +} + +static inline void cic_set_cfqq(struct cfq_io_context *cic, + struct cfq_queue *cfqq, bool is_sync) +{ + cic->cfqq[is_sync] = cfqq; +} + +#define CIC_DEAD_KEY 1ul +#define CIC_DEAD_INDEX_SHIFT 1 + +static inline void *cfqd_dead_key(struct cfq_data *cfqd) +{ + return (void *)(cfqd->cic_index << CIC_DEAD_INDEX_SHIFT | CIC_DEAD_KEY); +} + +static inline struct cfq_data *cic_to_cfqd(struct cfq_io_context *cic) +{ + struct cfq_data *cfqd = cic->key; + + if (unlikely((unsigned long) cfqd & CIC_DEAD_KEY)) + return NULL; + + return cfqd; +} + +/* + * We regard a request as SYNC, if it's either a read or has the SYNC bit + * set (in which case it could also be direct WRITE). + */ +static inline bool cfq_bio_sync(struct bio *bio) +{ + return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC); +} + +/* + * scheduler run of queue, if there are requests pending and no one in the + * driver that will restart queueing + */ +static inline void cfq_schedule_dispatch(struct cfq_data *cfqd) +{ + if (cfqd->busy_queues) { + cfq_log(cfqd, "schedule dispatch"); + kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work); + } +} + +static int cfq_queue_empty(struct request_queue *q) +{ + struct cfq_data *cfqd = q->elevator->elevator_data; + + return !cfqd->rq_queued; +} + +/* + * Scale schedule slice based on io priority. Use the sync time slice only + * if a queue is marked sync and has sync io queued. A sync queue with async + * io only, should not get full sync slice length. 
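The scaling this comment describes is implemented by cfq_prio_slice() just below: base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio)). A small standalone program working the numbers, assuming HZ=1000 so that cfq_slice_sync = HZ/10 comes to 100 jiffies.

#include <stdio.h>

#define HZ              1000            /* assumed for the example */
#define CFQ_SLICE_SCALE 5
#define IOPRIO_BE_NR    8

static const int cfq_slice_sync = HZ / 10;      /* 100 jiffies */

/* Same formula as cfq_prio_slice(): lower prio number => longer slice. */
static int prio_slice(int base_slice, int prio)
{
	return base_slice + (base_slice / CFQ_SLICE_SCALE * (4 - prio));
}

int main(void)
{
	int prio;

	/* prio 0 -> 180, prio 4 -> 100, prio 7 -> 40 jiffies at HZ=1000 */
	for (prio = 0; prio < IOPRIO_BE_NR; prio++)
		printf("prio %d: sync slice = %d jiffies\n",
		       prio, prio_slice(cfq_slice_sync, prio));
	return 0;
}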
+ */ +static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync, + unsigned short prio) +{ + const int base_slice = cfqd->cfq_slice[sync]; + + WARN_ON(prio >= IOPRIO_BE_NR); + + return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio)); +} + +static inline int +cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio); +} + +static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg) +{ + u64 d = delta << CFQ_SERVICE_SHIFT; + + d = d * BLKIO_WEIGHT_DEFAULT; + do_div(d, cfqg->weight); + return d; +} + +static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime) +{ + s64 delta = (s64)(vdisktime - min_vdisktime); + if (delta > 0) + min_vdisktime = vdisktime; + + return min_vdisktime; +} + +static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime) +{ + s64 delta = (s64)(vdisktime - min_vdisktime); + if (delta < 0) + min_vdisktime = vdisktime; + + return min_vdisktime; +} + +static void update_min_vdisktime(struct cfq_rb_root *st) +{ + u64 vdisktime = st->min_vdisktime; + struct cfq_group *cfqg; + + if (st->active) { + cfqg = rb_entry_cfqg(st->active); + vdisktime = cfqg->vdisktime; + } + + if (st->left) { + cfqg = rb_entry_cfqg(st->left); + vdisktime = min_vdisktime(vdisktime, cfqg->vdisktime); + } + + st->min_vdisktime = max_vdisktime(st->min_vdisktime, vdisktime); +} + +/* + * get averaged number of queues of RT/BE priority. + * average is updated, with a formula that gives more weight to higher numbers, + * to quickly follows sudden increases and decrease slowly + */ + +static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd, + struct cfq_group *cfqg, bool rt) +{ + unsigned min_q, max_q; + unsigned mult = cfq_hist_divisor - 1; + unsigned round = cfq_hist_divisor / 2; + unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg); + + min_q = min(cfqg->busy_queues_avg[rt], busy); + max_q = max(cfqg->busy_queues_avg[rt], busy); + cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) / + cfq_hist_divisor; + return cfqg->busy_queues_avg[rt]; +} + +static inline unsigned +cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg) +{ + struct cfq_rb_root *st = &cfqd->grp_service_tree; + + return cfq_target_latency * cfqg->weight / st->total_weight; +} + +static inline void +cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + unsigned slice = cfq_prio_to_slice(cfqd, cfqq); + if (cfqd->cfq_latency) { + /* + * interested queues (we consider only the ones with the same + * priority class in the cfq group) + */ + unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg, + cfq_class_rt(cfqq)); + unsigned sync_slice = cfqd->cfq_slice[1]; + unsigned expect_latency = sync_slice * iq; + unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg); + + if (expect_latency > group_slice) { + unsigned base_low_slice = 2 * cfqd->cfq_slice_idle; + /* scale low_slice according to IO priority + * and sync vs async */ + unsigned low_slice = + min(slice, base_low_slice * slice / sync_slice); + /* the adapted slice value is scaled to fit all iqs + * into the target latency */ + slice = max(slice * group_slice / expect_latency, + low_slice); + } + } + cfqq->slice_start = jiffies; + cfqq->slice_end = jiffies + slice; + cfqq->allocated_slice = slice; + cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies); +} + +/* + * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end + * isn't valid until the first request from the dispatch is 
activated + * and the slice time set. + */ +static inline bool cfq_slice_used(struct cfq_queue *cfqq) +{ + if (cfq_cfqq_slice_new(cfqq)) + return 0; + if (time_before(jiffies, cfqq->slice_end)) + return 0; + + return 1; +} + +/* + * Lifted from AS - choose which of rq1 and rq2 that is best served now. + * We choose the request that is closest to the head right now. Distance + * behind the head is penalized and only allowed to a certain extent. + */ +static struct request * +cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last) +{ + sector_t s1, s2, d1 = 0, d2 = 0; + unsigned long back_max; +#define CFQ_RQ1_WRAP 0x01 /* request 1 wraps */ +#define CFQ_RQ2_WRAP 0x02 /* request 2 wraps */ + unsigned wrap = 0; /* bit mask: requests behind the disk head? */ + + if (rq1 == NULL || rq1 == rq2) + return rq2; + if (rq2 == NULL) + return rq1; + + if (rq_is_sync(rq1) && !rq_is_sync(rq2)) + return rq1; + else if (rq_is_sync(rq2) && !rq_is_sync(rq1)) + return rq2; + if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META)) + return rq1; + else if ((rq2->cmd_flags & REQ_META) && + !(rq1->cmd_flags & REQ_META)) + return rq2; + + s1 = blk_rq_pos(rq1); + s2 = blk_rq_pos(rq2); + + /* + * by definition, 1KiB is 2 sectors + */ + back_max = cfqd->cfq_back_max * 2; + + /* + * Strict one way elevator _except_ in the case where we allow + * short backward seeks which are biased as twice the cost of a + * similar forward seek. + */ + if (s1 >= last) + d1 = s1 - last; + else if (s1 + back_max >= last) + d1 = (last - s1) * cfqd->cfq_back_penalty; + else + wrap |= CFQ_RQ1_WRAP; + + if (s2 >= last) + d2 = s2 - last; + else if (s2 + back_max >= last) + d2 = (last - s2) * cfqd->cfq_back_penalty; + else + wrap |= CFQ_RQ2_WRAP; + + /* Found required data */ + + /* + * By doing switch() on the bit mask "wrap" we avoid having to + * check two variables for all permutations: --> faster! + */ + switch (wrap) { + case 0: /* common case for CFQ: rq1 and rq2 not wrapped */ + if (d1 < d2) + return rq1; + else if (d2 < d1) + return rq2; + else { + if (s1 >= s2) + return rq1; + else + return rq2; + } + + case CFQ_RQ2_WRAP: + return rq1; + case CFQ_RQ1_WRAP: + return rq2; + case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */ + default: + /* + * Since both rqs are wrapped, + * start with the one that's further behind head + * (--> only *one* back seek required), + * since back seek takes more time than forward. 
+ */ + if (s1 <= s2) + return rq1; + else + return rq2; + } +} + +/* + * The below is leftmost cache rbtree addon + */ +static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root) +{ + /* Service tree is empty */ + if (!root->count) + return NULL; + + if (!root->left) + root->left = rb_first(&root->rb); + + if (root->left) + return rb_entry(root->left, struct cfq_queue, rb_node); + + return NULL; +} + +static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root) +{ + if (!root->left) + root->left = rb_first(&root->rb); + + if (root->left) + return rb_entry_cfqg(root->left); + + return NULL; +} + +static void rb_erase_init(struct rb_node *n, struct rb_root *root) +{ + rb_erase(n, root); + RB_CLEAR_NODE(n); +} + +static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root) +{ + if (root->left == n) + root->left = NULL; + rb_erase_init(n, &root->rb); + --root->count; +} + +/* + * would be nice to take fifo expire time into account as well + */ +static struct request * +cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq, + struct request *last) +{ + struct rb_node *rbnext = rb_next(&last->rb_node); + struct rb_node *rbprev = rb_prev(&last->rb_node); + struct request *next = NULL, *prev = NULL; + + BUG_ON(RB_EMPTY_NODE(&last->rb_node)); + + if (rbprev) + prev = rb_entry_rq(rbprev); + + if (rbnext) + next = rb_entry_rq(rbnext); + else { + rbnext = rb_first(&cfqq->sort_list); + if (rbnext && rbnext != &last->rb_node) + next = rb_entry_rq(rbnext); + } + + return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last)); +} + +static unsigned long cfq_slice_offset(struct cfq_data *cfqd, + struct cfq_queue *cfqq) +{ + /* + * just an approximation, should be ok. + */ + return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) - + cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio)); +} + +static inline s64 +cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg) +{ + return cfqg->vdisktime - st->min_vdisktime; +} + +static void +__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg) +{ + struct rb_node **node = &st->rb.rb_node; + struct rb_node *parent = NULL; + struct cfq_group *__cfqg; + s64 key = cfqg_key(st, cfqg); + int left = 1; + + while (*node != NULL) { + parent = *node; + __cfqg = rb_entry_cfqg(parent); + + if (key < cfqg_key(st, __cfqg)) + node = &parent->rb_left; + else { + node = &parent->rb_right; + left = 0; + } + } + + if (left) + st->left = &cfqg->rb_node; + + rb_link_node(&cfqg->rb_node, parent, node); + rb_insert_color(&cfqg->rb_node, &st->rb); +} + +static void +cfq_group_service_tree_add(struct cfq_data *cfqd, struct cfq_group *cfqg) +{ + struct cfq_rb_root *st = &cfqd->grp_service_tree; + struct cfq_group *__cfqg; + struct rb_node *n; + + cfqg->nr_cfqq++; + if (cfqg->on_st) + return; + + /* + * Currently put the group at the end. Later implement something + * so that groups get lesser vtime based on their weights, so that + * if group does not loose all if it was not continously backlogged. 
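The vdisktime that orders groups on this service tree is charged in cfq_group_served() further down through cfq_scale_slice(), so a heavier group accrues vdisktime more slowly and keeps returning to the left (soonest-served) end of the tree. A worked example of that scaling, assuming the default blkio weight of 500; scale_slice() is an illustrative stand-in for cfq_scale_slice().

#include <stdio.h>

#define CFQ_SERVICE_SHIFT    12
#define BLKIO_WEIGHT_DEFAULT 500        /* assumed blk-cgroup default weight */

/* Same scaling as cfq_scale_slice(): vdisktime grows slower for heavier groups. */
static unsigned long long scale_slice(unsigned long charge, unsigned int weight)
{
	unsigned long long d = (unsigned long long)charge << CFQ_SERVICE_SHIFT;

	d *= BLKIO_WEIGHT_DEFAULT;
	return d / weight;
}

int main(void)
{
	/* a 100-jiffy charge advances vdisktime by ... */
	printf("weight  250: +%llu\n", scale_slice(100,  250));  /* 819200 */
	printf("weight  500: +%llu\n", scale_slice(100,  500));  /* 409600 */
	printf("weight 1000: +%llu\n", scale_slice(100, 1000));  /* 204800 */
	return 0;
}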
+ */ + n = rb_last(&st->rb); + if (n) { + __cfqg = rb_entry_cfqg(n); + cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY; + } else + cfqg->vdisktime = st->min_vdisktime; + + __cfq_group_service_tree_add(st, cfqg); + cfqg->on_st = true; + st->total_weight += cfqg->weight; +} + +static void +cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg) +{ + struct cfq_rb_root *st = &cfqd->grp_service_tree; + + if (st->active == &cfqg->rb_node) + st->active = NULL; + + BUG_ON(cfqg->nr_cfqq < 1); + cfqg->nr_cfqq--; + + /* If there are other cfq queues under this group, don't delete it */ + if (cfqg->nr_cfqq) + return; + + cfq_log_cfqg(cfqd, cfqg, "del_from_rr group"); + cfqg->on_st = false; + st->total_weight -= cfqg->weight; + if (!RB_EMPTY_NODE(&cfqg->rb_node)) + cfq_rb_erase(&cfqg->rb_node, st); + cfqg->saved_workload_slice = 0; + cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1); +} + +static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq) +{ + unsigned int slice_used; + + /* + * Queue got expired before even a single request completed or + * got expired immediately after first request completion. + */ + if (!cfqq->slice_start || cfqq->slice_start == jiffies) { + /* + * Also charge the seek time incurred to the group, otherwise + * if there are mutiple queues in the group, each can dispatch + * a single request on seeky media and cause lots of seek time + * and group will never know it. + */ + slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start), + 1); + } else { + slice_used = jiffies - cfqq->slice_start; + if (slice_used > cfqq->allocated_slice) + slice_used = cfqq->allocated_slice; + } + + return slice_used; +} + +static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg, + struct cfq_queue *cfqq) +{ + struct cfq_rb_root *st = &cfqd->grp_service_tree; + unsigned int used_sl, charge; + int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg) + - cfqg->service_tree_idle.count; + + BUG_ON(nr_sync < 0); + used_sl = charge = cfq_cfqq_slice_usage(cfqq); + + if (iops_mode(cfqd)) + charge = cfqq->slice_dispatch; + else if (!cfq_cfqq_sync(cfqq) && !nr_sync) + charge = cfqq->allocated_slice; + + /* Can't update vdisktime while group is on service tree */ + cfq_rb_erase(&cfqg->rb_node, st); + cfqg->vdisktime += cfq_scale_slice(charge, cfqg); + __cfq_group_service_tree_add(st, cfqg); + + /* This group is being expired. 
Save the context */ + if (time_after(cfqd->workload_expires, jiffies)) { + cfqg->saved_workload_slice = cfqd->workload_expires + - jiffies; + cfqg->saved_workload = cfqd->serving_type; + cfqg->saved_serving_prio = cfqd->serving_prio; + } else + cfqg->saved_workload_slice = 0; + + cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime, + st->min_vdisktime); + cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u" + " sect=%u", used_sl, cfqq->slice_dispatch, charge, + iops_mode(cfqd), cfqq->nr_sectors); + cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl); + cfq_blkiocg_set_start_empty_time(&cfqg->blkg); +} + +#ifdef CONFIG_CFQ_GROUP_IOSCHED +static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg) +{ + if (blkg) + return container_of(blkg, struct cfq_group, blkg); + return NULL; +} + +void +cfq_update_blkio_group_weight(struct blkio_group *blkg, unsigned int weight) +{ + cfqg_of_blkg(blkg)->weight = weight; +} + +static struct cfq_group * +cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create) +{ + struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup); + struct cfq_group *cfqg = NULL; + void *key = cfqd; + int i, j; + struct cfq_rb_root *st; + struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info; + unsigned int major, minor; + + cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key)); + if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) { + sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor); + cfqg->blkg.dev = MKDEV(major, minor); + goto done; + } + if (cfqg || !create) + goto done; + + cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node); + if (!cfqg) + goto done; + + for_each_cfqg_st(cfqg, i, j, st) + *st = CFQ_RB_ROOT; + RB_CLEAR_NODE(&cfqg->rb_node); + + /* + * Take the initial reference that will be released on destroy + * This can be thought of a joint reference by cgroup and + * elevator which will be dropped by either elevator exit + * or cgroup deletion path depending on who is exiting first. + */ + atomic_set(&cfqg->ref, 1); + + /* + * Add group onto cgroup list. It might happen that bdi->dev is + * not initiliazed yet. Initialize this new group without major + * and minor info and this info will be filled in once a new thread + * comes for IO. See code above. + */ + if (bdi->dev) { + sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor); + cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd, + MKDEV(major, minor)); + } else + cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd, + 0); + + cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev); + + /* Add group on cfqd list */ + hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list); + +done: + return cfqg; +} + +/* + * Search for the cfq group current task belongs to. If create = 1, then also + * create the cfq group if it does not exist. request_queue lock must be held. 
+ */ +static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create) +{ + struct cgroup *cgroup; + struct cfq_group *cfqg = NULL; + + rcu_read_lock(); + cgroup = task_cgroup(current, blkio_subsys_id); + cfqg = cfq_find_alloc_cfqg(cfqd, cgroup, create); + if (!cfqg && create) + cfqg = &cfqd->root_group; + rcu_read_unlock(); + return cfqg; +} + +static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg) +{ + atomic_inc(&cfqg->ref); + return cfqg; +} + +static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) +{ + /* Currently, all async queues are mapped to root group */ + if (!cfq_cfqq_sync(cfqq)) + cfqg = &cfqq->cfqd->root_group; + + cfqq->cfqg = cfqg; + /* cfqq reference on cfqg */ + atomic_inc(&cfqq->cfqg->ref); +} + +static void cfq_put_cfqg(struct cfq_group *cfqg) +{ + struct cfq_rb_root *st; + int i, j; + + BUG_ON(atomic_read(&cfqg->ref) <= 0); + if (!atomic_dec_and_test(&cfqg->ref)) + return; + for_each_cfqg_st(cfqg, i, j, st) + BUG_ON(!RB_EMPTY_ROOT(&st->rb) || st->active != NULL); + kfree(cfqg); +} + +static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg) +{ + /* Something wrong if we are trying to remove same group twice */ + BUG_ON(hlist_unhashed(&cfqg->cfqd_node)); + + hlist_del_init(&cfqg->cfqd_node); + + /* + * Put the reference taken at the time of creation so that when all + * queues are gone, group can be destroyed. + */ + cfq_put_cfqg(cfqg); +} + +static void cfq_release_cfq_groups(struct cfq_data *cfqd) +{ + struct hlist_node *pos, *n; + struct cfq_group *cfqg; + + hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) { + /* + * If cgroup removal path got to blk_group first and removed + * it from cgroup list, then it will take care of destroying + * cfqg also. + */ + if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg)) + cfq_destroy_cfqg(cfqd, cfqg); + } +} + +/* + * Blk cgroup controller notification saying that blkio_group object is being + * delinked as associated cgroup object is going away. That also means that + * no new IO will come in this group. So get rid of this group as soon as + * any pending IO in the group is finished. + * + * This function is called under rcu_read_lock(). key is the rcu protected + * pointer. That means "key" is a valid cfq_data pointer as long as we are rcu + * read lock. + * + * "key" was fetched from blkio_group under blkio_cgroup->lock. That means + * it should not be NULL as even if elevator was exiting, cgroup deltion + * path got to it first. + */ +void cfq_unlink_blkio_group(void *key, struct blkio_group *blkg) +{ + unsigned long flags; + struct cfq_data *cfqd = key; + + spin_lock_irqsave(cfqd->queue->queue_lock, flags); + cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg)); + spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); +} + +#else /* GROUP_IOSCHED */ +static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create) +{ + return &cfqd->root_group; +} + +static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg) +{ + return cfqg; +} + +static inline void +cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) { + cfqq->cfqg = cfqg; +} + +static void cfq_release_cfq_groups(struct cfq_data *cfqd) {} +static inline void cfq_put_cfqg(struct cfq_group *cfqg) {} + +#endif /* GROUP_IOSCHED */ + +/* + * The cfqd->service_trees holds all pending cfq_queue's that have + * requests waiting to be processed. It is sorted in the order that + * we will service the queues. 
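+ *
+ * Roughly: each group keeps one tree per (priority class, workload
+ * type) pair, picked via service_tree_for(). Within a tree the queues
+ * are ordered by rb_key, an approximate service time in jiffies (see
+ * cfq_service_tree_add() below), and the leftmost node is cached so
+ * that cfq_rb_first() can hand out the next queue in O(1).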
+ */ +static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq, + bool add_front) +{ + struct rb_node **p, *parent; + struct cfq_queue *__cfqq; + unsigned long rb_key; + struct cfq_rb_root *service_tree; + int left; + int new_cfqq = 1; + int group_changed = 0; + +#ifdef CONFIG_CFQ_GROUP_IOSCHED + if (!cfqd->cfq_group_isolation + && cfqq_type(cfqq) == SYNC_NOIDLE_WORKLOAD + && cfqq->cfqg && cfqq->cfqg != &cfqd->root_group) { + /* Move this cfq to root group */ + cfq_log_cfqq(cfqd, cfqq, "moving to root group"); + if (!RB_EMPTY_NODE(&cfqq->rb_node)) + cfq_group_service_tree_del(cfqd, cfqq->cfqg); + cfqq->orig_cfqg = cfqq->cfqg; + cfqq->cfqg = &cfqd->root_group; + atomic_inc(&cfqd->root_group.ref); + group_changed = 1; + } else if (!cfqd->cfq_group_isolation + && cfqq_type(cfqq) == SYNC_WORKLOAD && cfqq->orig_cfqg) { + /* cfqq is sequential now needs to go to its original group */ + BUG_ON(cfqq->cfqg != &cfqd->root_group); + if (!RB_EMPTY_NODE(&cfqq->rb_node)) + cfq_group_service_tree_del(cfqd, cfqq->cfqg); + cfq_put_cfqg(cfqq->cfqg); + cfqq->cfqg = cfqq->orig_cfqg; + cfqq->orig_cfqg = NULL; + group_changed = 1; + cfq_log_cfqq(cfqd, cfqq, "moved to origin group"); + } +#endif + + service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq), + cfqq_type(cfqq)); + if (cfq_class_idle(cfqq)) { + rb_key = CFQ_IDLE_DELAY; + parent = rb_last(&service_tree->rb); + if (parent && parent != &cfqq->rb_node) { + __cfqq = rb_entry(parent, struct cfq_queue, rb_node); + rb_key += __cfqq->rb_key; + } else + rb_key += jiffies; + } else if (!add_front) { + /* + * Get our rb key offset. Subtract any residual slice + * value carried from last service. A negative resid + * count indicates slice overrun, and this should position + * the next service time further away in the tree. + */ + rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies; + rb_key -= cfqq->slice_resid; + cfqq->slice_resid = 0; + } else { + rb_key = -HZ; + __cfqq = cfq_rb_first(service_tree); + rb_key += __cfqq ? __cfqq->rb_key : jiffies; + } + + if (!RB_EMPTY_NODE(&cfqq->rb_node)) { + new_cfqq = 0; + /* + * same position, nothing more to do + */ + if (rb_key == cfqq->rb_key && + cfqq->service_tree == service_tree) + return; + + cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree); + cfqq->service_tree = NULL; + } + + left = 1; + parent = NULL; + cfqq->service_tree = service_tree; + p = &service_tree->rb.rb_node; + while (*p) { + struct rb_node **n; + + parent = *p; + __cfqq = rb_entry(parent, struct cfq_queue, rb_node); + + /* + * sort by key, that represents service time. + */ + if (time_before(rb_key, __cfqq->rb_key)) + n = &(*p)->rb_left; + else { + n = &(*p)->rb_right; + left = 0; + } + + p = n; + } + + if (left) + service_tree->left = &cfqq->rb_node; + + cfqq->rb_key = rb_key; + rb_link_node(&cfqq->rb_node, parent, p); + rb_insert_color(&cfqq->rb_node, &service_tree->rb); + service_tree->count++; + if ((add_front || !new_cfqq) && !group_changed) + return; + cfq_group_service_tree_add(cfqd, cfqq->cfqg); +} + +static struct cfq_queue * +cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root, + sector_t sector, struct rb_node **ret_parent, + struct rb_node ***rb_link) +{ + struct rb_node **p, *parent; + struct cfq_queue *cfqq = NULL; + + parent = NULL; + p = &root->rb_node; + while (*p) { + struct rb_node **n; + + parent = *p; + cfqq = rb_entry(parent, struct cfq_queue, p_node); + + /* + * Sort strictly based on sector. Smallest to the left, + * largest to the right. 
+ */ + if (sector > blk_rq_pos(cfqq->next_rq)) + n = &(*p)->rb_right; + else if (sector < blk_rq_pos(cfqq->next_rq)) + n = &(*p)->rb_left; + else + break; + p = n; + cfqq = NULL; + } + + *ret_parent = parent; + if (rb_link) + *rb_link = p; + return cfqq; +} + +static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + struct rb_node **p, *parent; + struct cfq_queue *__cfqq; + + if (cfqq->p_root) { + rb_erase(&cfqq->p_node, cfqq->p_root); + cfqq->p_root = NULL; + } + + if (cfq_class_idle(cfqq)) + return; + if (!cfqq->next_rq) + return; + + cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio]; + __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root, + blk_rq_pos(cfqq->next_rq), &parent, &p); + if (!__cfqq) { + rb_link_node(&cfqq->p_node, parent, p); + rb_insert_color(&cfqq->p_node, cfqq->p_root); + } else + cfqq->p_root = NULL; +} + +/* + * Update cfqq's position in the service tree. + */ +static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + /* + * Resorting requires the cfqq to be on the RR list already. + */ + if (cfq_cfqq_on_rr(cfqq)) { + cfq_service_tree_add(cfqd, cfqq, 0); + cfq_prio_tree_add(cfqd, cfqq); + } +} + +/* + * add to busy list of queues for service, trying to be fair in ordering + * the pending list according to last request service + */ +static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + cfq_log_cfqq(cfqd, cfqq, "add_to_rr"); + BUG_ON(cfq_cfqq_on_rr(cfqq)); + cfq_mark_cfqq_on_rr(cfqq); + cfqd->busy_queues++; + + cfq_resort_rr_list(cfqd, cfqq); +} + +/* + * Called when the cfqq no longer has requests pending, remove it from + * the service tree. + */ +static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + cfq_log_cfqq(cfqd, cfqq, "del_from_rr"); + BUG_ON(!cfq_cfqq_on_rr(cfqq)); + cfq_clear_cfqq_on_rr(cfqq); + + if (!RB_EMPTY_NODE(&cfqq->rb_node)) { + cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree); + cfqq->service_tree = NULL; + } + if (cfqq->p_root) { + rb_erase(&cfqq->p_node, cfqq->p_root); + cfqq->p_root = NULL; + } + + cfq_group_service_tree_del(cfqd, cfqq->cfqg); + BUG_ON(!cfqd->busy_queues); + cfqd->busy_queues--; +} + +/* + * rb tree support functions + */ +static void cfq_del_rq_rb(struct request *rq) +{ + struct cfq_queue *cfqq = RQ_CFQQ(rq); + const int sync = rq_is_sync(rq); + + BUG_ON(!cfqq->queued[sync]); + cfqq->queued[sync]--; + + elv_rb_del(&cfqq->sort_list, rq); + + if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) { + /* + * Queue will be deleted from service tree when we actually + * expire it later. Right now just remove it from prio tree + * as it is empty. + */ + if (cfqq->p_root) { + rb_erase(&cfqq->p_node, cfqq->p_root); + cfqq->p_root = NULL; + } + } +} + +static void cfq_add_rq_rb(struct request *rq) +{ + struct cfq_queue *cfqq = RQ_CFQQ(rq); + struct cfq_data *cfqd = cfqq->cfqd; + struct request *__alias, *prev; + + cfqq->queued[rq_is_sync(rq)]++; + + /* + * looks a little odd, but the first insert might return an alias. 
+ * if that happens, put the alias on the dispatch list + */ + while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL) + cfq_dispatch_insert(cfqd->queue, __alias); + + if (!cfq_cfqq_on_rr(cfqq)) + cfq_add_cfqq_rr(cfqd, cfqq); + + /* + * check if this request is a better next-serve candidate + */ + prev = cfqq->next_rq; + cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position); + + /* + * adjust priority tree position, if ->next_rq changes + */ + if (prev != cfqq->next_rq) + cfq_prio_tree_add(cfqd, cfqq); + + BUG_ON(!cfqq->next_rq); +} + +static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq) +{ + elv_rb_del(&cfqq->sort_list, rq); + cfqq->queued[rq_is_sync(rq)]--; + cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, + rq_data_dir(rq), rq_is_sync(rq)); + cfq_add_rq_rb(rq); + cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg, + &cfqq->cfqd->serving_group->blkg, rq_data_dir(rq), + rq_is_sync(rq)); +} + +static struct request * +cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio) +{ + struct task_struct *tsk = current; + struct cfq_io_context *cic; + struct cfq_queue *cfqq; + + cic = cfq_cic_lookup(cfqd, tsk->io_context); + if (!cic) + return NULL; + + cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio)); + if (cfqq) { + sector_t sector = bio->bi_sector + bio_sectors(bio); + + return elv_rb_find(&cfqq->sort_list, sector); + } + + return NULL; +} + +static void cfq_activate_request(struct request_queue *q, struct request *rq) +{ + struct cfq_data *cfqd = q->elevator->elevator_data; + + cfqd->rq_in_driver++; + cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d", + cfqd->rq_in_driver); + + cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); +} + +static void cfq_deactivate_request(struct request_queue *q, struct request *rq) +{ + struct cfq_data *cfqd = q->elevator->elevator_data; + + WARN_ON(!cfqd->rq_in_driver); + cfqd->rq_in_driver--; + cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d", + cfqd->rq_in_driver); +} + +static void cfq_remove_request(struct request *rq) +{ + struct cfq_queue *cfqq = RQ_CFQQ(rq); + + if (cfqq->next_rq == rq) + cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq); + + list_del_init(&rq->queuelist); + cfq_del_rq_rb(rq); + + cfqq->cfqd->rq_queued--; + cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, + rq_data_dir(rq), rq_is_sync(rq)); + if (rq->cmd_flags & REQ_META) { + WARN_ON(!cfqq->meta_pending); + cfqq->meta_pending--; + } +} + +static int cfq_merge(struct request_queue *q, struct request **req, + struct bio *bio) +{ + struct cfq_data *cfqd = q->elevator->elevator_data; + struct request *__rq; + + __rq = cfq_find_rq_fmerge(cfqd, bio); + if (__rq && elv_rq_merge_ok(__rq, bio)) { + *req = __rq; + return ELEVATOR_FRONT_MERGE; + } + + return ELEVATOR_NO_MERGE; +} + +static void cfq_merged_request(struct request_queue *q, struct request *req, + int type) +{ + if (type == ELEVATOR_FRONT_MERGE) { + struct cfq_queue *cfqq = RQ_CFQQ(req); + + cfq_reposition_rq_rb(cfqq, req); + } +} + +static void cfq_bio_merged(struct request_queue *q, struct request *req, + struct bio *bio) +{ + cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg, + bio_data_dir(bio), cfq_bio_sync(bio)); +} + +static void +cfq_merged_requests(struct request_queue *q, struct request *rq, + struct request *next) +{ + struct cfq_queue *cfqq = RQ_CFQQ(rq); + /* + * reposition in fifo if next is older than rq + */ + if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && + time_before(rq_fifo_time(next), 
rq_fifo_time(rq))) { + list_move(&rq->queuelist, &next->queuelist); + rq_set_fifo_time(rq, rq_fifo_time(next)); + } + + if (cfqq->next_rq == next) + cfqq->next_rq = rq; + cfq_remove_request(next); + cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg, + rq_data_dir(next), rq_is_sync(next)); +} + +static int cfq_allow_merge(struct request_queue *q, struct request *rq, + struct bio *bio) +{ + struct cfq_data *cfqd = q->elevator->elevator_data; + struct cfq_io_context *cic; + struct cfq_queue *cfqq; + + /* + * Disallow merge of a sync bio into an async request. + */ + if (cfq_bio_sync(bio) && !rq_is_sync(rq)) + return false; + + /* + * Lookup the cfqq that this bio will be queued with. Allow + * merge only if rq is queued there. + */ + cic = cfq_cic_lookup(cfqd, current->io_context); + if (!cic) + return false; + + cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio)); + return cfqq == RQ_CFQQ(rq); +} + +static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + del_timer(&cfqd->idle_slice_timer); + cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg); +} + +static void __cfq_set_active_queue(struct cfq_data *cfqd, + struct cfq_queue *cfqq) +{ + if (cfqq) { + cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d", + cfqd->serving_prio, cfqd->serving_type); + cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg); + cfqq->slice_start = 0; + cfqq->dispatch_start = jiffies; + cfqq->allocated_slice = 0; + cfqq->slice_end = 0; + cfqq->slice_dispatch = 0; + cfqq->nr_sectors = 0; + + cfq_clear_cfqq_wait_request(cfqq); + cfq_clear_cfqq_must_dispatch(cfqq); + cfq_clear_cfqq_must_alloc_slice(cfqq); + cfq_clear_cfqq_fifo_expire(cfqq); + cfq_mark_cfqq_slice_new(cfqq); + + cfq_del_timer(cfqd, cfqq); + } + + cfqd->active_queue = cfqq; +} + +/* + * current cfqq expired its slice (or was too idle), select new one + */ +static void +__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, + bool timed_out) +{ + cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out); + + if (cfq_cfqq_wait_request(cfqq)) + cfq_del_timer(cfqd, cfqq); + + cfq_clear_cfqq_wait_request(cfqq); + cfq_clear_cfqq_wait_busy(cfqq); + + /* + * If this cfqq is shared between multiple processes, check to + * make sure that those processes are still issuing I/Os within + * the mean seek distance. If not, it may be time to break the + * queues apart again. + */ + if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq)) + cfq_mark_cfqq_split_coop(cfqq); + + /* + * store what was left of this slice, if the queue idled/timed out + */ + if (timed_out && !cfq_cfqq_slice_new(cfqq)) { + cfqq->slice_resid = cfqq->slice_end - jiffies; + cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid); + } + + cfq_group_served(cfqd, cfqq->cfqg, cfqq); + + if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) + cfq_del_cfqq_rr(cfqd, cfqq); + + cfq_resort_rr_list(cfqd, cfqq); + + if (cfqq == cfqd->active_queue) + cfqd->active_queue = NULL; + + if (&cfqq->cfqg->rb_node == cfqd->grp_service_tree.active) + cfqd->grp_service_tree.active = NULL; + + if (cfqd->active_cic) { + put_io_context(cfqd->active_cic->ioc); + cfqd->active_cic = NULL; + } +} + +static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out) +{ + struct cfq_queue *cfqq = cfqd->active_queue; + + if (cfqq) + __cfq_slice_expired(cfqd, cfqq, timed_out); +} + +/* + * Get next queue for service. Unless we have a queue preemption, + * we'll simply select the first cfqq in the service tree. 
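+ *
+ * "First" means the cached leftmost node of the tree currently being
+ * served, i.e. the (serving_group, serving_prio, serving_type) tree
+ * picked by cfq_choose_cfqg()/choose_service_tree() further down;
+ * cfq_rb_first() returns it without walking the tree.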
+ */ +static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd) +{ + struct cfq_rb_root *service_tree = + service_tree_for(cfqd->serving_group, cfqd->serving_prio, + cfqd->serving_type); + + if (!cfqd->rq_queued) + return NULL; + + /* There is nothing to dispatch */ + if (!service_tree) + return NULL; + if (RB_EMPTY_ROOT(&service_tree->rb)) + return NULL; + return cfq_rb_first(service_tree); +} + +static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd) +{ + struct cfq_group *cfqg; + struct cfq_queue *cfqq; + int i, j; + struct cfq_rb_root *st; + + if (!cfqd->rq_queued) + return NULL; + + cfqg = cfq_get_next_cfqg(cfqd); + if (!cfqg) + return NULL; + + for_each_cfqg_st(cfqg, i, j, st) + if ((cfqq = cfq_rb_first(st)) != NULL) + return cfqq; + return NULL; +} + +/* + * Get and set a new active queue for service. + */ +static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd, + struct cfq_queue *cfqq) +{ + if (!cfqq) + cfqq = cfq_get_next_queue(cfqd); + + __cfq_set_active_queue(cfqd, cfqq); + return cfqq; +} + +static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd, + struct request *rq) +{ + if (blk_rq_pos(rq) >= cfqd->last_position) + return blk_rq_pos(rq) - cfqd->last_position; + else + return cfqd->last_position - blk_rq_pos(rq); +} + +static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq, + struct request *rq) +{ + return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR; +} + +static struct cfq_queue *cfqq_close(struct cfq_data *cfqd, + struct cfq_queue *cur_cfqq) +{ + struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio]; + struct rb_node *parent, *node; + struct cfq_queue *__cfqq; + sector_t sector = cfqd->last_position; + + if (RB_EMPTY_ROOT(root)) + return NULL; + + /* + * First, if we find a request starting at the end of the last + * request, choose it. + */ + __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL); + if (__cfqq) + return __cfqq; + + /* + * If the exact sector wasn't found, the parent of the NULL leaf + * will contain the closest sector. + */ + __cfqq = rb_entry(parent, struct cfq_queue, p_node); + if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq)) + return __cfqq; + + if (blk_rq_pos(__cfqq->next_rq) < sector) + node = rb_next(&__cfqq->p_node); + else + node = rb_prev(&__cfqq->p_node); + if (!node) + return NULL; + + __cfqq = rb_entry(node, struct cfq_queue, p_node); + if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq)) + return __cfqq; + + return NULL; +} + +/* + * cfqd - obvious + * cur_cfqq - passed in so that we don't decide that the current queue is + * closely cooperating with itself. + * + * So, basically we're assuming that that cur_cfqq has dispatched at least + * one request, and that cfqd->last_position reflects a position on the disk + * associated with the I/O issued by cur_cfqq. I'm not sure this is a valid + * assumption. + */ +static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd, + struct cfq_queue *cur_cfqq) +{ + struct cfq_queue *cfqq; + + if (cfq_class_idle(cur_cfqq)) + return NULL; + if (!cfq_cfqq_sync(cur_cfqq)) + return NULL; + if (CFQQ_SEEKY(cur_cfqq)) + return NULL; + + /* + * Don't search priority tree if it's the only queue in the group. + */ + if (cur_cfqq->cfqg->nr_cfqq == 1) + return NULL; + + /* + * We should notice if some of the queues are cooperating, eg + * working closely on the same area of the disk. In that case, + * we can group them together and don't waste time idling. 
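+ *
+ * "Close" is decided by cfqq_close(): the per-priority prio tree is
+ * searched around cfqd->last_position, and a candidate only qualifies
+ * if its next_rq starts within CFQQ_CLOSE_THR sectors of that
+ * position (see cfq_rq_close()).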
+ */ + cfqq = cfqq_close(cfqd, cur_cfqq); + if (!cfqq) + return NULL; + + /* If new queue belongs to different cfq_group, don't choose it */ + if (cur_cfqq->cfqg != cfqq->cfqg) + return NULL; + + /* + * It only makes sense to merge sync queues. + */ + if (!cfq_cfqq_sync(cfqq)) + return NULL; + if (CFQQ_SEEKY(cfqq)) + return NULL; + + /* + * Do not merge queues of different priority classes + */ + if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq)) + return NULL; + + return cfqq; +} + +/* + * Determine whether we should enforce idle window for this queue. + */ + +static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + enum wl_prio_t prio = cfqq_prio(cfqq); + struct cfq_rb_root *service_tree = cfqq->service_tree; + + BUG_ON(!service_tree); + BUG_ON(!service_tree->count); + + if (!cfqd->cfq_slice_idle) + return false; + + /* We never do for idle class queues. */ + if (prio == IDLE_WORKLOAD) + return false; + + /* We do for queues that were marked with idle window flag. */ + if (cfq_cfqq_idle_window(cfqq) && + !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)) + return true; + + /* + * Otherwise, we do only if they are the last ones + * in their service tree. + */ + if (service_tree->count == 1 && cfq_cfqq_sync(cfqq)) + return 1; + cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", + service_tree->count); + return 0; +} + +static void cfq_arm_slice_timer(struct cfq_data *cfqd) +{ + struct cfq_queue *cfqq = cfqd->active_queue; + struct cfq_io_context *cic; + unsigned long sl, group_idle = 0; + + /* + * SSD device without seek penalty, disable idling. But only do so + * for devices that support queuing, otherwise we still have a problem + * with sync vs async workloads. + */ + if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag) + return; + + WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list)); + WARN_ON(cfq_cfqq_slice_new(cfqq)); + + /* + * idle is disabled, either manually or by past process history + */ + if (!cfq_should_idle(cfqd, cfqq)) { + /* no queue idling. Check for group idling */ + if (cfqd->cfq_group_idle) + group_idle = cfqd->cfq_group_idle; + else + return; + } + + /* + * still active requests from this queue, don't idle + */ + if (cfqq->dispatched) + return; + + /* + * task has exited, don't wait + */ + cic = cfqd->active_cic; + if (!cic || !atomic_read(&cic->ioc->nr_tasks)) + return; + + /* + * If our average think time is larger than the remaining time + * slice, then don't idle. This avoids overrunning the allotted + * time slice. + */ + if (sample_valid(cic->ttime_samples) && + (cfqq->slice_end - jiffies < cic->ttime_mean)) { + cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%d", + cic->ttime_mean); + return; + } + + /* There are other queues in the group, don't do group idle */ + if (group_idle && cfqq->cfqg->nr_cfqq > 1) + return; + + cfq_mark_cfqq_wait_request(cfqq); + + if (group_idle) + sl = cfqd->cfq_group_idle; + else + sl = cfqd->cfq_slice_idle; + + mod_timer(&cfqd->idle_slice_timer, jiffies + sl); + cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg); + cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl, + group_idle ? 1 : 0); +} + +/* + * Move request from internal lists to the request queue dispatch list. 
+ */ +static void cfq_dispatch_insert(struct request_queue *q, struct request *rq) +{ + struct cfq_data *cfqd = q->elevator->elevator_data; + struct cfq_queue *cfqq = RQ_CFQQ(rq); + + cfq_log_cfqq(cfqd, cfqq, "dispatch_insert"); + + cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq); + cfq_remove_request(rq); + cfqq->dispatched++; + (RQ_CFQG(rq))->dispatched++; + elv_dispatch_sort(q, rq); + + cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++; + cfqq->nr_sectors += blk_rq_sectors(rq); + cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq), + rq_data_dir(rq), rq_is_sync(rq)); +} + +/* + * return expired entry, or NULL to just start from scratch in rbtree + */ +static struct request *cfq_check_fifo(struct cfq_queue *cfqq) +{ + struct request *rq = NULL; + + if (cfq_cfqq_fifo_expire(cfqq)) + return NULL; + + cfq_mark_cfqq_fifo_expire(cfqq); + + if (list_empty(&cfqq->fifo)) + return NULL; + + rq = rq_entry_fifo(cfqq->fifo.next); + if (time_before(jiffies, rq_fifo_time(rq))) + rq = NULL; + + cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq); + return rq; +} + +static inline int +cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + const int base_rq = cfqd->cfq_slice_async_rq; + + WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR); + + return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio)); +} + +/* + * Must be called with the queue_lock held. + */ +static int cfqq_process_refs(struct cfq_queue *cfqq) +{ + int process_refs, io_refs; + + io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE]; + process_refs = atomic_read(&cfqq->ref) - io_refs; + BUG_ON(process_refs < 0); + return process_refs; +} + +static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq) +{ + int process_refs, new_process_refs; + struct cfq_queue *__cfqq; + + /* + * If there are no process references on the new_cfqq, then it is + * unsafe to follow the ->new_cfqq chain as other cfqq's in the + * chain may have dropped their last reference (not just their + * last process reference). + */ + if (!cfqq_process_refs(new_cfqq)) + return; + + /* Avoid a circular list and skip interim queue merges */ + while ((__cfqq = new_cfqq->new_cfqq)) { + if (__cfqq == cfqq) + return; + new_cfqq = __cfqq; + } + + process_refs = cfqq_process_refs(cfqq); + new_process_refs = cfqq_process_refs(new_cfqq); + /* + * If the process for the cfqq has gone away, there is no + * sense in merging the queues. + */ + if (process_refs == 0 || new_process_refs == 0) + return; + + /* + * Merge in the direction of the lesser amount of work. 
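+ *
+ * For example (illustrative counts): if cfqq is referenced by one
+ * process and new_cfqq by three, cfqq is the one redirected
+ * (cfqq->new_cfqq = new_cfqq) and new_cfqq absorbs its process
+ * references; with the counts reversed the merge goes the other way,
+ * so the queue with fewer owning processes is always the one folded in.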
+ */ + if (new_process_refs >= process_refs) { + cfqq->new_cfqq = new_cfqq; + atomic_add(process_refs, &new_cfqq->ref); + } else { + new_cfqq->new_cfqq = cfqq; + atomic_add(new_process_refs, &cfqq->ref); + } +} + +static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd, + struct cfq_group *cfqg, enum wl_prio_t prio) +{ + struct cfq_queue *queue; + int i; + bool key_valid = false; + unsigned long lowest_key = 0; + enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD; + + for (i = 0; i <= SYNC_WORKLOAD; ++i) { + /* select the one with lowest rb_key */ + queue = cfq_rb_first(service_tree_for(cfqg, prio, i)); + if (queue && + (!key_valid || time_before(queue->rb_key, lowest_key))) { + lowest_key = queue->rb_key; + cur_best = i; + key_valid = true; + } + } + + return cur_best; +} + +static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg) +{ + unsigned slice; + unsigned count; + struct cfq_rb_root *st; + unsigned group_slice; + + if (!cfqg) { + cfqd->serving_prio = IDLE_WORKLOAD; + cfqd->workload_expires = jiffies + 1; + return; + } + + /* Choose next priority. RT > BE > IDLE */ + if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg)) + cfqd->serving_prio = RT_WORKLOAD; + else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg)) + cfqd->serving_prio = BE_WORKLOAD; + else { + cfqd->serving_prio = IDLE_WORKLOAD; + cfqd->workload_expires = jiffies + 1; + return; + } + + /* + * For RT and BE, we have to choose also the type + * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload + * expiration time + */ + st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type); + count = st->count; + + /* + * check workload expiration, and that we still have other queues ready + */ + if (count && !time_after(jiffies, cfqd->workload_expires)) + return; + + /* otherwise select new workload type */ + cfqd->serving_type = + cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio); + st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type); + count = st->count; + + /* + * the workload slice is computed as a fraction of target latency + * proportional to the number of queues in that workload, over + * all the queues in the same priority class + */ + group_slice = cfq_group_slice(cfqd, cfqg); + + slice = group_slice * count / + max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio], + cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg)); + + if (cfqd->serving_type == ASYNC_WORKLOAD) { + unsigned int tmp; + + /* + * Async queues are currently system wide. Just taking + * proportion of queues with-in same group will lead to higher + * async ratio system wide as generally root group is going + * to have higher weight. A more accurate thing would be to + * calculate system wide asnc/sync ratio. + */ + tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg); + tmp = tmp/cfqd->busy_queues; + slice = min_t(unsigned, slice, tmp); + + /* async workload slice is scaled down according to + * the sync/async slice ratio. 
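+ * For example, assuming the usual defaults of roughly a 100ms sync
+ * slice and a 40ms async slice, the async workload slice is cut to
+ * 40/100 of its proportional share here; CFQ_MIN_TT below still acts
+ * as a floor.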
*/ + slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1]; + } else + /* sync workload slice is at least 2 * cfq_slice_idle */ + slice = max(slice, 2 * cfqd->cfq_slice_idle); + + slice = max_t(unsigned, slice, CFQ_MIN_TT); + cfq_log(cfqd, "workload slice:%d", slice); + cfqd->workload_expires = jiffies + slice; + cfqd->noidle_tree_requires_idle = false; +} + +static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd) +{ + struct cfq_rb_root *st = &cfqd->grp_service_tree; + struct cfq_group *cfqg; + + if (RB_EMPTY_ROOT(&st->rb)) + return NULL; + cfqg = cfq_rb_first_group(st); + st->active = &cfqg->rb_node; + update_min_vdisktime(st); + return cfqg; +} + +static void cfq_choose_cfqg(struct cfq_data *cfqd) +{ + struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd); + + cfqd->serving_group = cfqg; + + /* Restore the workload type data */ + if (cfqg->saved_workload_slice) { + cfqd->workload_expires = jiffies + cfqg->saved_workload_slice; + cfqd->serving_type = cfqg->saved_workload; + cfqd->serving_prio = cfqg->saved_serving_prio; + } else + cfqd->workload_expires = jiffies - 1; + + choose_service_tree(cfqd, cfqg); +} + +/* + * Select a queue for service. If we have a current active queue, + * check whether to continue servicing it, or retrieve and set a new one. + */ +static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) +{ + struct cfq_queue *cfqq, *new_cfqq = NULL; + + cfqq = cfqd->active_queue; + if (!cfqq) + goto new_queue; + + if (!cfqd->rq_queued) + return NULL; + + /* + * We were waiting for group to get backlogged. Expire the queue + */ + if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list)) + goto expire; + + /* + * The active queue has run out of time, expire it and select new. + */ + if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) { + /* + * If slice had not expired at the completion of last request + * we might not have turned on wait_busy flag. Don't expire + * the queue yet. Allow the group to get backlogged. + * + * The very fact that we have used the slice, that means we + * have been idling all along on this queue and it should be + * ok to wait for this request to complete. + */ + if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list) + && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) { + cfqq = NULL; + goto keep_queue; + } else + goto check_group_idle; + } + + /* + * The active queue has requests and isn't expired, allow it to + * dispatch. + */ + if (!RB_EMPTY_ROOT(&cfqq->sort_list)) + goto keep_queue; + + /* + * If another queue has a request waiting within our mean seek + * distance, let it run. The expire code will check for close + * cooperators and put the close queue at the front of the service + * tree. If possible, merge the expiring queue with the new cfqq. + */ + new_cfqq = cfq_close_cooperator(cfqd, cfqq); + if (new_cfqq) { + if (!cfqq->new_cfqq) + cfq_setup_merge(cfqq, new_cfqq); + goto expire; + } + + /* + * No requests pending. If the active queue still has requests in + * flight or is idling for a new request, allow either of these + * conditions to happen (or time out) before selecting a new queue. + */ + if (timer_pending(&cfqd->idle_slice_timer)) { + cfqq = NULL; + goto keep_queue; + } + + if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) { + cfqq = NULL; + goto keep_queue; + } + + /* + * If group idle is enabled and there are requests dispatched from + * this group, wait for requests to complete. 
+ */ +check_group_idle: + if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 + && cfqq->cfqg->dispatched) { + cfqq = NULL; + goto keep_queue; + } + +expire: + cfq_slice_expired(cfqd, 0); +new_queue: + /* + * Current queue expired. Check if we have to switch to a new + * service tree + */ + if (!new_cfqq) + cfq_choose_cfqg(cfqd); + + cfqq = cfq_set_active_queue(cfqd, new_cfqq); +keep_queue: + return cfqq; +} + +static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq) +{ + int dispatched = 0; + + while (cfqq->next_rq) { + cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq); + dispatched++; + } + + BUG_ON(!list_empty(&cfqq->fifo)); + + /* By default cfqq is not expired if it is empty. Do it explicitly */ + __cfq_slice_expired(cfqq->cfqd, cfqq, 0); + return dispatched; +} + +/* + * Drain our current requests. Used for barriers and when switching + * io schedulers on-the-fly. + */ +static int cfq_forced_dispatch(struct cfq_data *cfqd) +{ + struct cfq_queue *cfqq; + int dispatched = 0; + + /* Expire the timeslice of the current active queue first */ + cfq_slice_expired(cfqd, 0); + while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) { + __cfq_set_active_queue(cfqd, cfqq); + dispatched += __cfq_forced_dispatch_cfqq(cfqq); + } + + BUG_ON(cfqd->busy_queues); + + cfq_log(cfqd, "forced_dispatch=%d", dispatched); + return dispatched; +} + +static inline bool cfq_slice_used_soon(struct cfq_data *cfqd, + struct cfq_queue *cfqq) +{ + /* the queue hasn't finished any request, can't estimate */ + if (cfq_cfqq_slice_new(cfqq)) + return 1; + if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched, + cfqq->slice_end)) + return 1; + + return 0; +} + +static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + unsigned int max_dispatch; + + /* + * Drain async requests before we start sync IO + */ + if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC]) + return false; + + /* + * If this is an async queue and we have sync IO in flight, let it wait + */ + if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq)) + return false; + + max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1); + if (cfq_class_idle(cfqq)) + max_dispatch = 1; + + /* + * Does this cfqq already have too much IO in flight? + */ + if (cfqq->dispatched >= max_dispatch) { + /* + * idle queue must always only have a single IO in flight + */ + if (cfq_class_idle(cfqq)) + return false; + + /* + * We have other queues, don't allow more IO from this one + */ + if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq)) + return false; + + /* + * Sole queue user, no limit + */ + if (cfqd->busy_queues == 1) + max_dispatch = -1; + else + /* + * Normally we start throttling cfqq when cfq_quantum/2 + * requests have been dispatched. But we can drive + * deeper queue depths at the beginning of slice + * subjected to upper limit of cfq_quantum. + * */ + max_dispatch = cfqd->cfq_quantum; + } + + /* + * Async queues must wait a bit before being allowed dispatch. 
+ * We also ramp up the dispatch depth gradually for async IO, + * based on the last sync IO we serviced + */ + if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) { + unsigned long last_sync = jiffies - cfqd->last_delayed_sync; + unsigned int depth; + + depth = last_sync / cfqd->cfq_slice[1]; + if (!depth && !cfqq->dispatched) + depth = 1; + if (depth < max_dispatch) + max_dispatch = depth; + } + + /* + * If we're below the current max, allow a dispatch + */ + return cfqq->dispatched < max_dispatch; +} + +/* + * Dispatch a request from cfqq, moving them to the request queue + * dispatch list. + */ +static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + struct request *rq; + + BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list)); + + if (!cfq_may_dispatch(cfqd, cfqq)) + return false; + + /* + * follow expired path, else get first next available + */ + rq = cfq_check_fifo(cfqq); + if (!rq) + rq = cfqq->next_rq; + + /* + * insert request into driver dispatch list + */ + cfq_dispatch_insert(cfqd->queue, rq); + + if (!cfqd->active_cic) { + struct cfq_io_context *cic = RQ_CIC(rq); + + atomic_long_inc(&cic->ioc->refcount); + cfqd->active_cic = cic; + } + + return true; +} + +/* + * Find the cfqq that we need to service and move a request from that to the + * dispatch list + */ +static int cfq_dispatch_requests(struct request_queue *q, int force) +{ + struct cfq_data *cfqd = q->elevator->elevator_data; + struct cfq_queue *cfqq; + + if (!cfqd->busy_queues) + return 0; + + if (unlikely(force)) + return cfq_forced_dispatch(cfqd); + + cfqq = cfq_select_queue(cfqd); + if (!cfqq) + return 0; + + /* + * Dispatch a request from this cfqq, if it is allowed + */ + if (!cfq_dispatch_request(cfqd, cfqq)) + return 0; + + cfqq->slice_dispatch++; + cfq_clear_cfqq_must_dispatch(cfqq); + + /* + * expire an async queue immediately if it has used up its slice. idle + * queue always expire after 1 dispatch round. + */ + if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) && + cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) || + cfq_class_idle(cfqq))) { + cfqq->slice_end = jiffies + 1; + cfq_slice_expired(cfqd, 0); + } + + cfq_log_cfqq(cfqd, cfqq, "dispatched a request"); + return 1; +} + +/* + * task holds one reference to the queue, dropped when task exits. each rq + * in-flight on this queue also holds a reference, dropped when rq is freed. + * + * Each cfq queue took a reference on the parent group. Drop it now. + * queue lock must be held here. + */ +static void cfq_put_queue(struct cfq_queue *cfqq) +{ + struct cfq_data *cfqd = cfqq->cfqd; + struct cfq_group *cfqg, *orig_cfqg; + + BUG_ON(atomic_read(&cfqq->ref) <= 0); + + if (!atomic_dec_and_test(&cfqq->ref)) + return; + + cfq_log_cfqq(cfqd, cfqq, "put_queue"); + BUG_ON(rb_first(&cfqq->sort_list)); + BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]); + cfqg = cfqq->cfqg; + orig_cfqg = cfqq->orig_cfqg; + + if (unlikely(cfqd->active_queue == cfqq)) { + __cfq_slice_expired(cfqd, cfqq, 0); + cfq_schedule_dispatch(cfqd); + } + + BUG_ON(cfq_cfqq_on_rr(cfqq)); + kmem_cache_free(cfq_pool, cfqq); + cfq_put_cfqg(cfqg); + if (orig_cfqg) + cfq_put_cfqg(orig_cfqg); +} + +/* + * Must always be called with the rcu_read_lock() held + */ +static void +__call_for_each_cic(struct io_context *ioc, + void (*func)(struct io_context *, struct cfq_io_context *)) +{ + struct cfq_io_context *cic; + struct hlist_node *n; + + hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list) + func(ioc, cic); +} + +/* + * Call func for each cic attached to this ioc. 
+ */ +static void +call_for_each_cic(struct io_context *ioc, + void (*func)(struct io_context *, struct cfq_io_context *)) +{ + rcu_read_lock(); + __call_for_each_cic(ioc, func); + rcu_read_unlock(); +} + +static void cfq_cic_free_rcu(struct rcu_head *head) +{ + struct cfq_io_context *cic; + + cic = container_of(head, struct cfq_io_context, rcu_head); + + kmem_cache_free(cfq_ioc_pool, cic); + elv_ioc_count_dec(cfq_ioc_count); + + if (ioc_gone) { + /* + * CFQ scheduler is exiting, grab exit lock and check + * the pending io context count. If it hits zero, + * complete ioc_gone and set it back to NULL + */ + spin_lock(&ioc_gone_lock); + if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) { + complete(ioc_gone); + ioc_gone = NULL; + } + spin_unlock(&ioc_gone_lock); + } +} + +static void cfq_cic_free(struct cfq_io_context *cic) +{ + call_rcu(&cic->rcu_head, cfq_cic_free_rcu); +} + +static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic) +{ + unsigned long flags; + unsigned long dead_key = (unsigned long) cic->key; + + BUG_ON(!(dead_key & CIC_DEAD_KEY)); + + spin_lock_irqsave(&ioc->lock, flags); + radix_tree_delete(&ioc->radix_root, dead_key >> CIC_DEAD_INDEX_SHIFT); + hlist_del_rcu(&cic->cic_list); + spin_unlock_irqrestore(&ioc->lock, flags); + + cfq_cic_free(cic); +} + +/* + * Must be called with rcu_read_lock() held or preemption otherwise disabled. + * Only two callers of this - ->dtor() which is called with the rcu_read_lock(), + * and ->trim() which is called with the task lock held + */ +static void cfq_free_io_context(struct io_context *ioc) +{ + /* + * ioc->refcount is zero here, or we are called from elv_unregister(), + * so no more cic's are allowed to be linked into this ioc. So it + * should be ok to iterate over the known list, we will see all cic's + * since no new ones are added. + */ + __call_for_each_cic(ioc, cic_free_func); +} + +static void cfq_put_cooperator(struct cfq_queue *cfqq) +{ + struct cfq_queue *__cfqq, *next; + + /* + * If this queue was scheduled to merge with another queue, be + * sure to drop the reference taken on that queue (and others in + * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs. 
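+ *
+ * The loop below walks the new_cfqq links one hop at a time, dropping
+ * one queue reference per hop, and bails out with a warning if the
+ * chain ever points back at cfqq itself.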
+ */ + __cfqq = cfqq->new_cfqq; + while (__cfqq) { + if (__cfqq == cfqq) { + WARN(1, "cfqq->new_cfqq loop detected\n"); + break; + } + next = __cfqq->new_cfqq; + cfq_put_queue(__cfqq); + __cfqq = next; + } +} + +static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + if (unlikely(cfqq == cfqd->active_queue)) { + __cfq_slice_expired(cfqd, cfqq, 0); + cfq_schedule_dispatch(cfqd); + } + + cfq_put_cooperator(cfqq); + + cfq_put_queue(cfqq); +} + +static void __cfq_exit_single_io_context(struct cfq_data *cfqd, + struct cfq_io_context *cic) +{ + struct io_context *ioc = cic->ioc; + + list_del_init(&cic->queue_list); + + /* + * Make sure dead mark is seen for dead queues + */ + smp_wmb(); + cic->key = cfqd_dead_key(cfqd); + + if (ioc->ioc_data == cic) + rcu_assign_pointer(ioc->ioc_data, NULL); + + if (cic->cfqq[BLK_RW_ASYNC]) { + cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]); + cic->cfqq[BLK_RW_ASYNC] = NULL; + } + + if (cic->cfqq[BLK_RW_SYNC]) { + cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]); + cic->cfqq[BLK_RW_SYNC] = NULL; + } +} + +static void cfq_exit_single_io_context(struct io_context *ioc, + struct cfq_io_context *cic) +{ + struct cfq_data *cfqd = cic_to_cfqd(cic); + + if (cfqd) { + struct request_queue *q = cfqd->queue; + unsigned long flags; + + spin_lock_irqsave(q->queue_lock, flags); + + /* + * Ensure we get a fresh copy of the ->key to prevent + * race between exiting task and queue + */ + smp_read_barrier_depends(); + if (cic->key == cfqd) + __cfq_exit_single_io_context(cfqd, cic); + + spin_unlock_irqrestore(q->queue_lock, flags); + } +} + +/* + * The process that ioc belongs to has exited, we need to clean up + * and put the internal structures we have that belongs to that process. + */ +static void cfq_exit_io_context(struct io_context *ioc) +{ + call_for_each_cic(ioc, cfq_exit_single_io_context); +} + +static struct cfq_io_context * +cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) +{ + struct cfq_io_context *cic; + + cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO, + cfqd->queue->node); + if (cic) { + cic->last_end_request = jiffies; + INIT_LIST_HEAD(&cic->queue_list); + INIT_HLIST_NODE(&cic->cic_list); + cic->dtor = cfq_free_io_context; + cic->exit = cfq_exit_io_context; + elv_ioc_count_inc(cfq_ioc_count); + } + + return cic; +} + +static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc) +{ + struct task_struct *tsk = current; + int ioprio_class; + + if (!cfq_cfqq_prio_changed(cfqq)) + return; + + ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio); + switch (ioprio_class) { + default: + printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class); + case IOPRIO_CLASS_NONE: + /* + * no prio set, inherit CPU scheduling settings + */ + cfqq->ioprio = task_nice_ioprio(tsk); + cfqq->ioprio_class = task_nice_ioclass(tsk); + break; + case IOPRIO_CLASS_RT: + cfqq->ioprio = task_ioprio(ioc); + cfqq->ioprio_class = IOPRIO_CLASS_RT; + break; + case IOPRIO_CLASS_BE: + cfqq->ioprio = task_ioprio(ioc); + cfqq->ioprio_class = IOPRIO_CLASS_BE; + break; + case IOPRIO_CLASS_IDLE: + cfqq->ioprio_class = IOPRIO_CLASS_IDLE; + cfqq->ioprio = 7; + cfq_clear_cfqq_idle_window(cfqq); + break; + } + + /* + * keep track of original prio settings in case we have to temporarily + * elevate the priority of this queue + */ + cfqq->org_ioprio = cfqq->ioprio; + cfqq->org_ioprio_class = cfqq->ioprio_class; + cfq_clear_cfqq_prio_changed(cfqq); +} + +static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic) +{ + struct cfq_data *cfqd = 
cic_to_cfqd(cic); + struct cfq_queue *cfqq; + unsigned long flags; + + if (unlikely(!cfqd)) + return; + + spin_lock_irqsave(cfqd->queue->queue_lock, flags); + + cfqq = cic->cfqq[BLK_RW_ASYNC]; + if (cfqq) { + struct cfq_queue *new_cfqq; + new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->ioc, + GFP_ATOMIC); + if (new_cfqq) { + cic->cfqq[BLK_RW_ASYNC] = new_cfqq; + cfq_put_queue(cfqq); + } + } + + cfqq = cic->cfqq[BLK_RW_SYNC]; + if (cfqq) + cfq_mark_cfqq_prio_changed(cfqq); + + spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); +} + +static void cfq_ioc_set_ioprio(struct io_context *ioc) +{ + call_for_each_cic(ioc, changed_ioprio); + ioc->ioprio_changed = 0; +} + +static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq, + pid_t pid, bool is_sync) +{ + RB_CLEAR_NODE(&cfqq->rb_node); + RB_CLEAR_NODE(&cfqq->p_node); + INIT_LIST_HEAD(&cfqq->fifo); + + atomic_set(&cfqq->ref, 0); + cfqq->cfqd = cfqd; + + cfq_mark_cfqq_prio_changed(cfqq); + + if (is_sync) { + if (!cfq_class_idle(cfqq)) + cfq_mark_cfqq_idle_window(cfqq); + cfq_mark_cfqq_sync(cfqq); + } + cfqq->pid = pid; +} + +#ifdef CONFIG_CFQ_GROUP_IOSCHED +static void changed_cgroup(struct io_context *ioc, struct cfq_io_context *cic) +{ + struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1); + struct cfq_data *cfqd = cic_to_cfqd(cic); + unsigned long flags; + struct request_queue *q; + + if (unlikely(!cfqd)) + return; + + q = cfqd->queue; + + spin_lock_irqsave(q->queue_lock, flags); + + if (sync_cfqq) { + /* + * Drop reference to sync queue. A new sync queue will be + * assigned in new group upon arrival of a fresh request. + */ + cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup"); + cic_set_cfqq(cic, NULL, 1); + cfq_put_queue(sync_cfqq); + } + + spin_unlock_irqrestore(q->queue_lock, flags); +} + +static void cfq_ioc_set_cgroup(struct io_context *ioc) +{ + call_for_each_cic(ioc, changed_cgroup); + ioc->cgroup_changed = 0; +} +#endif /* CONFIG_CFQ_GROUP_IOSCHED */ + +static struct cfq_queue * +cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, + struct io_context *ioc, gfp_t gfp_mask) +{ + struct cfq_queue *cfqq, *new_cfqq = NULL; + struct cfq_io_context *cic; + struct cfq_group *cfqg; + +retry: + cfqg = cfq_get_cfqg(cfqd, 1); + cic = cfq_cic_lookup(cfqd, ioc); + /* cic always exists here */ + cfqq = cic_to_cfqq(cic, is_sync); + + /* + * Always try a new alloc if we fell back to the OOM cfqq + * originally, since it should just be a temporary situation. 
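+ *
+ * Sketch of the retry below: when __GFP_WAIT is allowed, the
+ * queue_lock is dropped around the slab allocation and the lookup is
+ * restarted at "retry:" (the cic/cfqq mapping may have changed while
+ * sleeping); without __GFP_WAIT a one-shot atomic allocation is tried
+ * instead, and if everything fails the shared oom_cfqq stands in.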
+ */ + if (!cfqq || cfqq == &cfqd->oom_cfqq) { + cfqq = NULL; + if (new_cfqq) { + cfqq = new_cfqq; + new_cfqq = NULL; + } else if (gfp_mask & __GFP_WAIT) { + spin_unlock_irq(cfqd->queue->queue_lock); + new_cfqq = kmem_cache_alloc_node(cfq_pool, + gfp_mask | __GFP_ZERO, + cfqd->queue->node); + spin_lock_irq(cfqd->queue->queue_lock); + if (new_cfqq) + goto retry; + } else { + cfqq = kmem_cache_alloc_node(cfq_pool, + gfp_mask | __GFP_ZERO, + cfqd->queue->node); + } + + if (cfqq) { + cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync); + cfq_init_prio_data(cfqq, ioc); + cfq_link_cfqq_cfqg(cfqq, cfqg); + cfq_log_cfqq(cfqd, cfqq, "alloced"); + } else + cfqq = &cfqd->oom_cfqq; + } + + if (new_cfqq) + kmem_cache_free(cfq_pool, new_cfqq); + + return cfqq; +} + +static struct cfq_queue ** +cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio) +{ + switch (ioprio_class) { + case IOPRIO_CLASS_RT: + return &cfqd->async_cfqq[0][ioprio]; + case IOPRIO_CLASS_BE: + return &cfqd->async_cfqq[1][ioprio]; + case IOPRIO_CLASS_IDLE: + return &cfqd->async_idle_cfqq; + default: + BUG(); + } +} + +static struct cfq_queue * +cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc, + gfp_t gfp_mask) +{ + const int ioprio = task_ioprio(ioc); + const int ioprio_class = task_ioprio_class(ioc); + struct cfq_queue **async_cfqq = NULL; + struct cfq_queue *cfqq = NULL; + + if (!is_sync) { + async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio); + cfqq = *async_cfqq; + } + + if (!cfqq) + cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask); + + /* + * pin the queue now that it's allocated, scheduler exit will prune it + */ + if (!is_sync && !(*async_cfqq)) { + atomic_inc(&cfqq->ref); + *async_cfqq = cfqq; + } + + atomic_inc(&cfqq->ref); + return cfqq; +} + +/* + * We drop cfq io contexts lazily, so we may find a dead one. + */ +static void +cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc, + struct cfq_io_context *cic) +{ + unsigned long flags; + + WARN_ON(!list_empty(&cic->queue_list)); + BUG_ON(cic->key != cfqd_dead_key(cfqd)); + + spin_lock_irqsave(&ioc->lock, flags); + + BUG_ON(ioc->ioc_data == cic); + + radix_tree_delete(&ioc->radix_root, cfqd->cic_index); + hlist_del_rcu(&cic->cic_list); + spin_unlock_irqrestore(&ioc->lock, flags); + + cfq_cic_free(cic); +} + +static struct cfq_io_context * +cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc) +{ + struct cfq_io_context *cic; + unsigned long flags; + + if (unlikely(!ioc)) + return NULL; + + rcu_read_lock(); + + /* + * we maintain a last-hit cache, to avoid browsing over the tree + */ + cic = rcu_dereference(ioc->ioc_data); + if (cic && cic->key == cfqd) { + rcu_read_unlock(); + return cic; + } + + do { + cic = radix_tree_lookup(&ioc->radix_root, cfqd->cic_index); + rcu_read_unlock(); + if (!cic) + break; + if (unlikely(cic->key != cfqd)) { + cfq_drop_dead_cic(cfqd, ioc, cic); + rcu_read_lock(); + continue; + } + + spin_lock_irqsave(&ioc->lock, flags); + rcu_assign_pointer(ioc->ioc_data, cic); + spin_unlock_irqrestore(&ioc->lock, flags); + break; + } while (1); + + return cic; +} + +/* + * Add cic into ioc, using cfqd as the search key. This enables us to lookup + * the process specific cfq io context when entered from the block layer. + * Also adds the cic to a per-cfqd list, used when this queue is removed. 
+ */ +static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc, + struct cfq_io_context *cic, gfp_t gfp_mask) +{ + unsigned long flags; + int ret; + + ret = radix_tree_preload(gfp_mask); + if (!ret) { + cic->ioc = ioc; + cic->key = cfqd; + + spin_lock_irqsave(&ioc->lock, flags); + ret = radix_tree_insert(&ioc->radix_root, + cfqd->cic_index, cic); + if (!ret) + hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list); + spin_unlock_irqrestore(&ioc->lock, flags); + + radix_tree_preload_end(); + + if (!ret) { + spin_lock_irqsave(cfqd->queue->queue_lock, flags); + list_add(&cic->queue_list, &cfqd->cic_list); + spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); + } + } + + if (ret) + printk(KERN_ERR "cfq: cic link failed!\n"); + + return ret; +} + +/* + * Setup general io context and cfq io context. There can be several cfq + * io contexts per general io context, if this process is doing io to more + * than one device managed by cfq. + */ +static struct cfq_io_context * +cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) +{ + struct io_context *ioc = NULL; + struct cfq_io_context *cic; + + might_sleep_if(gfp_mask & __GFP_WAIT); + + ioc = get_io_context(gfp_mask, cfqd->queue->node); + if (!ioc) + return NULL; + + cic = cfq_cic_lookup(cfqd, ioc); + if (cic) + goto out; + + cic = cfq_alloc_io_context(cfqd, gfp_mask); + if (cic == NULL) + goto err; + + if (cfq_cic_link(cfqd, ioc, cic, gfp_mask)) + goto err_free; + +out: + smp_read_barrier_depends(); + if (unlikely(ioc->ioprio_changed)) + cfq_ioc_set_ioprio(ioc); + +#ifdef CONFIG_CFQ_GROUP_IOSCHED + if (unlikely(ioc->cgroup_changed)) + cfq_ioc_set_cgroup(ioc); +#endif + return cic; +err_free: + cfq_cic_free(cic); +err: + put_io_context(ioc); + return NULL; +} + +static void +cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic) +{ + unsigned long elapsed = jiffies - cic->last_end_request; + unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle); + + cic->ttime_samples = (7*cic->ttime_samples + 256) / 8; + cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8; + cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples; +} + +static void +cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq, + struct request *rq) +{ + sector_t sdist = 0; + sector_t n_sec = blk_rq_sectors(rq); + if (cfqq->last_request_pos) { + if (cfqq->last_request_pos < blk_rq_pos(rq)) + sdist = blk_rq_pos(rq) - cfqq->last_request_pos; + else + sdist = cfqq->last_request_pos - blk_rq_pos(rq); + } + + cfqq->seek_history <<= 1; + if (blk_queue_nonrot(cfqd->queue)) + cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT); + else + cfqq->seek_history |= (sdist > CFQQ_SEEK_THR); +} + +/* + * Disable idle window if the process thinks too long or seeks so much that + * it doesn't matter + */ +static void +cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq, + struct cfq_io_context *cic) +{ + int old_idle, enable_idle; + + /* + * Don't idle for async or idle io prio class + */ + if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq)) + return; + + enable_idle = old_idle = cfq_cfqq_idle_window(cfqq); + + if (cfqq->queued[0] + cfqq->queued[1] >= 4) + cfq_mark_cfqq_deep(cfqq); + + if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle || + (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq))) + enable_idle = 0; + else if (sample_valid(cic->ttime_samples)) { + if (cic->ttime_mean > cfqd->cfq_slice_idle) + enable_idle = 0; + else + enable_idle = 1; + } + + if (old_idle != enable_idle) { + cfq_log_cfqq(cfqd, cfqq, 
"idle=%d", enable_idle); + if (enable_idle) + cfq_mark_cfqq_idle_window(cfqq); + else + cfq_clear_cfqq_idle_window(cfqq); + } +} + +/* + * Check if new_cfqq should preempt the currently active queue. Return 0 for + * no or if we aren't sure, a 1 will cause a preempt. + */ +static bool +cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, + struct request *rq) +{ + struct cfq_queue *cfqq; + + cfqq = cfqd->active_queue; + if (!cfqq) + return false; + + if (cfq_class_idle(new_cfqq)) + return false; + + if (cfq_class_idle(cfqq)) + return true; + + /* + * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice. + */ + if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq)) + return false; + + /* + * if the new request is sync, but the currently running queue is + * not, let the sync request have priority. + */ + if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq)) + return true; + + if (new_cfqq->cfqg != cfqq->cfqg) + return false; + + if (cfq_slice_used(cfqq)) + return true; + + /* Allow preemption only if we are idling on sync-noidle tree */ + if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD && + cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD && + new_cfqq->service_tree->count == 2 && + RB_EMPTY_ROOT(&cfqq->sort_list)) + return true; + + /* + * So both queues are sync. Let the new request get disk time if + * it's a metadata request and the current queue is doing regular IO. + */ + if ((rq->cmd_flags & REQ_META) && !cfqq->meta_pending) + return true; + + /* + * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice. + */ + if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq)) + return true; + + if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq)) + return false; + + /* + * if this request is as-good as one we would expect from the + * current cfqq, let it preempt + */ + if (cfq_rq_close(cfqd, cfqq, rq)) + return true; + + return false; +} + +/* + * cfqq preempts the active queue. if we allowed preempt with no slice left, + * let it have half of its nominal slice. + */ +static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + cfq_log_cfqq(cfqd, cfqq, "preempt"); + cfq_slice_expired(cfqd, 1); + + /* + * Put the new queue at the front of the of the current list, + * so we know that it will be selected next. + */ + BUG_ON(!cfq_cfqq_on_rr(cfqq)); + + cfq_service_tree_add(cfqd, cfqq, 1); + + cfqq->slice_end = 0; + cfq_mark_cfqq_slice_new(cfqq); +} + +/* + * Called when a new fs request (rq) is added (to cfqq). Check if there's + * something we should do about it + */ +static void +cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, + struct request *rq) +{ + struct cfq_io_context *cic = RQ_CIC(rq); + + cfqd->rq_queued++; + if (rq->cmd_flags & REQ_META) + cfqq->meta_pending++; + + cfq_update_io_thinktime(cfqd, cic); + cfq_update_io_seektime(cfqd, cfqq, rq); + cfq_update_idle_window(cfqd, cfqq, cic); + + cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); + + if (cfqq == cfqd->active_queue) { + /* + * Remember that we saw a request from this process, but + * don't start queuing just yet. Otherwise we risk seeing lots + * of tiny requests, because we disrupt the normal plugging + * and merging. If the request is already larger than a single + * page, let it rip immediately. For that case we assume that + * merging is already done. Ditto for a busy system that + * has other work pending, don't risk delaying until the + * idle timer unplug to continue working. 
+ */
+ if (cfq_cfqq_wait_request(cfqq)) {
+ if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
+ cfqd->busy_queues > 1) {
+ cfq_del_timer(cfqd, cfqq);
+ cfq_clear_cfqq_wait_request(cfqq);
+ __blk_run_queue(cfqd->queue);
+ } else {
+ cfq_blkiocg_update_idle_time_stats(
+ &cfqq->cfqg->blkg);
+ cfq_mark_cfqq_must_dispatch(cfqq);
+ }
+ }
+ } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
+ /*
+ * not the active queue - expire current slice if it is
+ * idle and has expired its mean thinktime or this new queue
+ * has some old slice time left and is of higher priority or
+ * this new queue is RT and the current one is BE
+ */
+ cfq_preempt_queue(cfqd, cfqq);
+ __blk_run_queue(cfqd->queue);
+ }
+}
+
+static void cfq_insert_request(struct request_queue *q, struct request *rq)
+{
+ struct cfq_data *cfqd = q->elevator->elevator_data;
+ struct cfq_queue *cfqq = RQ_CFQQ(rq);
+
+ cfq_log_cfqq(cfqd, cfqq, "insert_request");
+ cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);
+
+ rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
+ list_add_tail(&rq->queuelist, &cfqq->fifo);
+ cfq_add_rq_rb(rq);
+ cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
+ &cfqd->serving_group->blkg, rq_data_dir(rq),
+ rq_is_sync(rq));
+ cfq_rq_enqueued(cfqd, cfqq, rq);
+}
+
+/*
+ * Update hw_tag based on peak queue depth over 50 samples under
+ * sufficient load.
+ */
+static void cfq_update_hw_tag(struct cfq_data *cfqd)
+{
+ struct cfq_queue *cfqq = cfqd->active_queue;
+
+ if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
+ cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
+
+ if (cfqd->hw_tag == 1)
+ return;
+
+ if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
+ cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
+ return;
+
+ /*
+ * If active queue hasn't enough requests and can idle, cfq might not
+ * dispatch sufficient requests to hardware. Don't zero hw_tag in this
+ * case
+ */
+ if (cfqq && cfq_cfqq_idle_window(cfqq) &&
+ cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
+ CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
+ return;
+
+ if (cfqd->hw_tag_samples++ < 50)
+ return;
+
+ if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
+ cfqd->hw_tag = 1;
+ else
+ cfqd->hw_tag = 0;
+}
+
+static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+ struct cfq_io_context *cic = cfqd->active_cic;
+
+ /* If the queue already has requests, don't wait */
+ if (!RB_EMPTY_ROOT(&cfqq->sort_list))
+ return false;
+
+ /* If there are other queues in the group, don't wait */
+ if (cfqq->cfqg->nr_cfqq > 1)
+ return false;
+
+ if (cfq_slice_used(cfqq))
+ return true;
+
+ /* if slice left is less than think time, wait busy */
+ if (cic && sample_valid(cic->ttime_samples)
+ && (cfqq->slice_end - jiffies < cic->ttime_mean))
+ return true;
+
+ /*
+ * If think time is less than a jiffy then ttime_mean=0 and above
+ * will not be true. It might happen that slice has not expired yet
+ * but will expire soon (4-5 ns) during select_queue(). To cover the
+ * case where think time is less than a jiffy, mark the queue wait
+ * busy if only 1 jiffy is left in the slice. 
+ */ + if (cfqq->slice_end - jiffies == 1) + return true; + + return false; +} + +static void cfq_completed_request(struct request_queue *q, struct request *rq) +{ + struct cfq_queue *cfqq = RQ_CFQQ(rq); + struct cfq_data *cfqd = cfqq->cfqd; + const int sync = rq_is_sync(rq); + unsigned long now; + + now = jiffies; + cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", + !!(rq->cmd_flags & REQ_NOIDLE)); + + cfq_update_hw_tag(cfqd); + + WARN_ON(!cfqd->rq_in_driver); + WARN_ON(!cfqq->dispatched); + cfqd->rq_in_driver--; + cfqq->dispatched--; + (RQ_CFQG(rq))->dispatched--; + cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg, + rq_start_time_ns(rq), rq_io_start_time_ns(rq), + rq_data_dir(rq), rq_is_sync(rq)); + + cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--; + + if (sync) { + RQ_CIC(rq)->last_end_request = now; + if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now)) + cfqd->last_delayed_sync = now; + } + + /* + * If this is the active queue, check if it needs to be expired, + * or if we want to idle in case it has no pending requests. + */ + if (cfqd->active_queue == cfqq) { + const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list); + + if (cfq_cfqq_slice_new(cfqq)) { + cfq_set_prio_slice(cfqd, cfqq); + cfq_clear_cfqq_slice_new(cfqq); + } + + /* + * Should we wait for next request to come in before we expire + * the queue. + */ + if (cfq_should_wait_busy(cfqd, cfqq)) { + unsigned long extend_sl = cfqd->cfq_slice_idle; + if (!cfqd->cfq_slice_idle) + extend_sl = cfqd->cfq_group_idle; + cfqq->slice_end = jiffies + extend_sl; + cfq_mark_cfqq_wait_busy(cfqq); + cfq_log_cfqq(cfqd, cfqq, "will busy wait"); + } + + /* + * Idling is not enabled on: + * - expired queues + * - idle-priority queues + * - async queues + * - queues with still some requests queued + * - when there is a close cooperator + */ + if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq)) + cfq_slice_expired(cfqd, 1); + else if (sync && cfqq_empty && + !cfq_close_cooperator(cfqd, cfqq)) { + cfqd->noidle_tree_requires_idle |= + !(rq->cmd_flags & REQ_NOIDLE); + /* + * Idling is enabled for SYNC_WORKLOAD. + * SYNC_NOIDLE_WORKLOAD idles at the end of the tree + * only if we processed at least one !REQ_NOIDLE request + */ + if (cfqd->serving_type == SYNC_WORKLOAD + || cfqd->noidle_tree_requires_idle + || cfqq->cfqg->nr_cfqq == 1) + cfq_arm_slice_timer(cfqd); + } + } + + if (!cfqd->rq_in_driver) + cfq_schedule_dispatch(cfqd); +} + +/* + * we temporarily boost lower priority queues if they are holding fs exclusive + * resources. 
they are boosted to normal prio (CLASS_BE/4) + */ +static void cfq_prio_boost(struct cfq_queue *cfqq) +{ + if (has_fs_excl()) { + /* + * boost idle prio on transactions that would lock out other + * users of the filesystem + */ + if (cfq_class_idle(cfqq)) + cfqq->ioprio_class = IOPRIO_CLASS_BE; + if (cfqq->ioprio > IOPRIO_NORM) + cfqq->ioprio = IOPRIO_NORM; + } else { + /* + * unboost the queue (if needed) + */ + cfqq->ioprio_class = cfqq->org_ioprio_class; + cfqq->ioprio = cfqq->org_ioprio; + } +} + +static inline int __cfq_may_queue(struct cfq_queue *cfqq) +{ + if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) { + cfq_mark_cfqq_must_alloc_slice(cfqq); + return ELV_MQUEUE_MUST; + } + + return ELV_MQUEUE_MAY; +} + +static int cfq_may_queue(struct request_queue *q, int rw) +{ + struct cfq_data *cfqd = q->elevator->elevator_data; + struct task_struct *tsk = current; + struct cfq_io_context *cic; + struct cfq_queue *cfqq; + + /* + * don't force setup of a queue from here, as a call to may_queue + * does not necessarily imply that a request actually will be queued. + * so just lookup a possibly existing queue, or return 'may queue' + * if that fails + */ + cic = cfq_cic_lookup(cfqd, tsk->io_context); + if (!cic) + return ELV_MQUEUE_MAY; + + cfqq = cic_to_cfqq(cic, rw_is_sync(rw)); + if (cfqq) { + cfq_init_prio_data(cfqq, cic->ioc); + cfq_prio_boost(cfqq); + + return __cfq_may_queue(cfqq); + } + + return ELV_MQUEUE_MAY; +} + +/* + * queue lock held here + */ +static void cfq_put_request(struct request *rq) +{ + struct cfq_queue *cfqq = RQ_CFQQ(rq); + + if (cfqq) { + const int rw = rq_data_dir(rq); + + BUG_ON(!cfqq->allocated[rw]); + cfqq->allocated[rw]--; + + put_io_context(RQ_CIC(rq)->ioc); + + rq->elevator_private = NULL; + rq->elevator_private2 = NULL; + + /* Put down rq reference on cfqg */ + cfq_put_cfqg(RQ_CFQG(rq)); + rq->elevator_private3 = NULL; + + cfq_put_queue(cfqq); + } +} + +static struct cfq_queue * +cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic, + struct cfq_queue *cfqq) +{ + cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq); + cic_set_cfqq(cic, cfqq->new_cfqq, 1); + cfq_mark_cfqq_coop(cfqq->new_cfqq); + cfq_put_queue(cfqq); + return cic_to_cfqq(cic, 1); +} + +/* + * Returns NULL if a new cfqq should be allocated, or the old cfqq if this + * was the last process referring to said cfqq. + */ +static struct cfq_queue * +split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq) +{ + if (cfqq_process_refs(cfqq) == 1) { + cfqq->pid = current->pid; + cfq_clear_cfqq_coop(cfqq); + cfq_clear_cfqq_split_coop(cfqq); + return cfqq; + } + + cic_set_cfqq(cic, NULL, 1); + + cfq_put_cooperator(cfqq); + + cfq_put_queue(cfqq); + return NULL; +} +/* + * Allocate cfq data structures associated with this request. + */ +static int +cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) +{ + struct cfq_data *cfqd = q->elevator->elevator_data; + struct cfq_io_context *cic; + const int rw = rq_data_dir(rq); + const bool is_sync = rq_is_sync(rq); + struct cfq_queue *cfqq; + unsigned long flags; + + might_sleep_if(gfp_mask & __GFP_WAIT); + + cic = cfq_get_io_context(cfqd, gfp_mask); + + spin_lock_irqsave(q->queue_lock, flags); + + if (!cic) + goto queue_fail; + +new_queue: + cfqq = cic_to_cfqq(cic, is_sync); + if (!cfqq || cfqq == &cfqd->oom_cfqq) { + cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask); + cic_set_cfqq(cic, cfqq, is_sync); + } else { + /* + * If the queue was seeky for too long, break it apart. 
+ */ + if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) { + cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq"); + cfqq = split_cfqq(cic, cfqq); + if (!cfqq) + goto new_queue; + } + + /* + * Check to see if this queue is scheduled to merge with + * another, closely cooperating queue. The merging of + * queues happens here as it must be done in process context. + * The reference on new_cfqq was taken in merge_cfqqs. + */ + if (cfqq->new_cfqq) + cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq); + } + + cfqq->allocated[rw]++; + atomic_inc(&cfqq->ref); + + spin_unlock_irqrestore(q->queue_lock, flags); + + rq->elevator_private = cic; + rq->elevator_private2 = cfqq; + rq->elevator_private3 = cfq_ref_get_cfqg(cfqq->cfqg); + return 0; + +queue_fail: + if (cic) + put_io_context(cic->ioc); + + cfq_schedule_dispatch(cfqd); + spin_unlock_irqrestore(q->queue_lock, flags); + cfq_log(cfqd, "set_request fail"); + return 1; +} + +static void cfq_kick_queue(struct work_struct *work) +{ + struct cfq_data *cfqd = + container_of(work, struct cfq_data, unplug_work); + struct request_queue *q = cfqd->queue; + + spin_lock_irq(q->queue_lock); + __blk_run_queue(cfqd->queue); + spin_unlock_irq(q->queue_lock); +} + +/* + * Timer running if the active_queue is currently idling inside its time slice + */ +static void cfq_idle_slice_timer(unsigned long data) +{ + struct cfq_data *cfqd = (struct cfq_data *) data; + struct cfq_queue *cfqq; + unsigned long flags; + int timed_out = 1; + + cfq_log(cfqd, "idle timer fired"); + + spin_lock_irqsave(cfqd->queue->queue_lock, flags); + + cfqq = cfqd->active_queue; + if (cfqq) { + timed_out = 0; + + /* + * We saw a request before the queue expired, let it through + */ + if (cfq_cfqq_must_dispatch(cfqq)) + goto out_kick; + + /* + * expired + */ + if (cfq_slice_used(cfqq)) + goto expire; + + /* + * only expire and reinvoke request handler, if there are + * other queues with pending requests + */ + if (!cfqd->busy_queues) + goto out_cont; + + /* + * not expired and it has a request pending, let it dispatch + */ + if (!RB_EMPTY_ROOT(&cfqq->sort_list)) + goto out_kick; + + /* + * Queue depth flag is reset only when the idle didn't succeed + */ + cfq_clear_cfqq_deep(cfqq); + } +expire: + cfq_slice_expired(cfqd, timed_out); +out_kick: + cfq_schedule_dispatch(cfqd); +out_cont: + spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); +} + +static void cfq_shutdown_timer_wq(struct cfq_data *cfqd) +{ + del_timer_sync(&cfqd->idle_slice_timer); + cancel_work_sync(&cfqd->unplug_work); +} + +static void cfq_put_async_queues(struct cfq_data *cfqd) +{ + int i; + + for (i = 0; i < IOPRIO_BE_NR; i++) { + if (cfqd->async_cfqq[0][i]) + cfq_put_queue(cfqd->async_cfqq[0][i]); + if (cfqd->async_cfqq[1][i]) + cfq_put_queue(cfqd->async_cfqq[1][i]); + } + + if (cfqd->async_idle_cfqq) + cfq_put_queue(cfqd->async_idle_cfqq); +} + +static void cfq_cfqd_free(struct rcu_head *head) +{ + kfree(container_of(head, struct cfq_data, rcu)); +} + +static void cfq_exit_queue(struct elevator_queue *e) +{ + struct cfq_data *cfqd = e->elevator_data; + struct request_queue *q = cfqd->queue; + + cfq_shutdown_timer_wq(cfqd); + + spin_lock_irq(q->queue_lock); + + if (cfqd->active_queue) + __cfq_slice_expired(cfqd, cfqd->active_queue, 0); + + while (!list_empty(&cfqd->cic_list)) { + struct cfq_io_context *cic = list_entry(cfqd->cic_list.next, + struct cfq_io_context, + queue_list); + + __cfq_exit_single_io_context(cfqd, cic); + } + + cfq_put_async_queues(cfqd); + cfq_release_cfq_groups(cfqd); + 
cfq_blkiocg_del_blkio_group(&cfqd->root_group.blkg); + + spin_unlock_irq(q->queue_lock); + + cfq_shutdown_timer_wq(cfqd); + + spin_lock(&cic_index_lock); + ida_remove(&cic_index_ida, cfqd->cic_index); + spin_unlock(&cic_index_lock); + + /* Wait for cfqg->blkg->key accessors to exit their grace periods. */ + call_rcu(&cfqd->rcu, cfq_cfqd_free); +} + +static int cfq_alloc_cic_index(void) +{ + int index, error; + + do { + if (!ida_pre_get(&cic_index_ida, GFP_KERNEL)) + return -ENOMEM; + + spin_lock(&cic_index_lock); + error = ida_get_new(&cic_index_ida, &index); + spin_unlock(&cic_index_lock); + if (error && error != -EAGAIN) + return error; + } while (error); + + return index; +} + +static void *cfq_init_queue(struct request_queue *q) +{ + struct cfq_data *cfqd; + int i, j; + struct cfq_group *cfqg; + struct cfq_rb_root *st; + + i = cfq_alloc_cic_index(); + if (i < 0) + return NULL; + + cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node); + if (!cfqd) + return NULL; + + cfqd->cic_index = i; + + /* Init root service tree */ + cfqd->grp_service_tree = CFQ_RB_ROOT; + + /* Init root group */ + cfqg = &cfqd->root_group; + for_each_cfqg_st(cfqg, i, j, st) + *st = CFQ_RB_ROOT; + RB_CLEAR_NODE(&cfqg->rb_node); + + /* Give preference to root group over other groups */ + cfqg->weight = 2*BLKIO_WEIGHT_DEFAULT; + +#ifdef CONFIG_CFQ_GROUP_IOSCHED + /* + * Take a reference to root group which we never drop. This is just + * to make sure that cfq_put_cfqg() does not try to kfree root group + */ + atomic_set(&cfqg->ref, 1); + rcu_read_lock(); + cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg, + (void *)cfqd, 0); + rcu_read_unlock(); +#endif + /* + * Not strictly needed (since RB_ROOT just clears the node and we + * zeroed cfqd on alloc), but better be safe in case someone decides + * to add magic to the rb code + */ + for (i = 0; i < CFQ_PRIO_LISTS; i++) + cfqd->prio_trees[i] = RB_ROOT; + + /* + * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues. + * Grab a permanent reference to it, so that the normal code flow + * will not attempt to free it. + */ + cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0); + atomic_inc(&cfqd->oom_cfqq.ref); + cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group); + + INIT_LIST_HEAD(&cfqd->cic_list); + + cfqd->queue = q; + + init_timer(&cfqd->idle_slice_timer); + cfqd->idle_slice_timer.function = cfq_idle_slice_timer; + cfqd->idle_slice_timer.data = (unsigned long) cfqd; + + INIT_WORK(&cfqd->unplug_work, cfq_kick_queue); + + cfqd->cfq_quantum = cfq_quantum; + cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; + cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1]; + cfqd->cfq_back_max = cfq_back_max; + cfqd->cfq_back_penalty = cfq_back_penalty; + cfqd->cfq_slice[0] = cfq_slice_async; + cfqd->cfq_slice[1] = cfq_slice_sync; + cfqd->cfq_slice_async_rq = cfq_slice_async_rq; + cfqd->cfq_slice_idle = cfq_slice_idle; + cfqd->cfq_group_idle = cfq_group_idle; + cfqd->cfq_latency = 1; + cfqd->cfq_group_isolation = 0; + cfqd->hw_tag = -1; + /* + * we optimistically start assuming sync ops weren't delayed in last + * second, in order to have larger depth for async operations. + */ + cfqd->last_delayed_sync = jiffies - HZ; + return cfqd; +} + +static void cfq_slab_kill(void) +{ + /* + * Caller already ensured that pending RCU callbacks are completed, + * so we should have no busy allocations at this point. 
+ */ + if (cfq_pool) + kmem_cache_destroy(cfq_pool); + if (cfq_ioc_pool) + kmem_cache_destroy(cfq_ioc_pool); +} + +static int __init cfq_slab_setup(void) +{ + cfq_pool = KMEM_CACHE(cfq_queue, 0); + if (!cfq_pool) + goto fail; + + cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0); + if (!cfq_ioc_pool) + goto fail; + + return 0; +fail: + cfq_slab_kill(); + return -ENOMEM; +} + +/* + * sysfs parts below --> + */ +static ssize_t +cfq_var_show(unsigned int var, char *page) +{ + return sprintf(page, "%d\n", var); +} + +static ssize_t +cfq_var_store(unsigned int *var, const char *page, size_t count) +{ + char *p = (char *) page; + + *var = simple_strtoul(p, &p, 10); + return count; +} + +#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, char *page) \ +{ \ + struct cfq_data *cfqd = e->elevator_data; \ + unsigned int __data = __VAR; \ + if (__CONV) \ + __data = jiffies_to_msecs(__data); \ + return cfq_var_show(__data, (page)); \ +} +SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0); +SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1); +SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1); +SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0); +SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0); +SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1); +SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1); +SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); +SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); +SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); +SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0); +SHOW_FUNCTION(cfq_group_isolation_show, cfqd->cfq_group_isolation, 0); +#undef SHOW_FUNCTION + +#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ +{ \ + struct cfq_data *cfqd = e->elevator_data; \ + unsigned int __data; \ + int ret = cfq_var_store(&__data, (page), count); \ + if (__data < (MIN)) \ + __data = (MIN); \ + else if (__data > (MAX)) \ + __data = (MAX); \ + if (__CONV) \ + *(__PTR) = msecs_to_jiffies(__data); \ + else \ + *(__PTR) = __data; \ + return ret; \ +} +STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0); +STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, + UINT_MAX, 1); +STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, + UINT_MAX, 1); +STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0); +STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, + UINT_MAX, 0); +STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1); +STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1); +STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); +STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); +STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, + UINT_MAX, 0); +STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0); +STORE_FUNCTION(cfq_group_isolation_store, &cfqd->cfq_group_isolation, 0, 1, 0); +#undef STORE_FUNCTION + +#define CFQ_ATTR(name) \ + __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store) + +static struct elv_fs_entry cfq_attrs[] = { + CFQ_ATTR(quantum), + CFQ_ATTR(fifo_expire_sync), + CFQ_ATTR(fifo_expire_async), + CFQ_ATTR(back_seek_max), + CFQ_ATTR(back_seek_penalty), + 
CFQ_ATTR(slice_sync), + CFQ_ATTR(slice_async), + CFQ_ATTR(slice_async_rq), + CFQ_ATTR(slice_idle), + CFQ_ATTR(group_idle), + CFQ_ATTR(low_latency), + CFQ_ATTR(group_isolation), + __ATTR_NULL +}; + +static struct elevator_type iosched_cfq = { + .ops = { + .elevator_merge_fn = cfq_merge, + .elevator_merged_fn = cfq_merged_request, + .elevator_merge_req_fn = cfq_merged_requests, + .elevator_allow_merge_fn = cfq_allow_merge, + .elevator_bio_merged_fn = cfq_bio_merged, + .elevator_dispatch_fn = cfq_dispatch_requests, + .elevator_add_req_fn = cfq_insert_request, + .elevator_activate_req_fn = cfq_activate_request, + .elevator_deactivate_req_fn = cfq_deactivate_request, + .elevator_queue_empty_fn = cfq_queue_empty, + .elevator_completed_req_fn = cfq_completed_request, + .elevator_former_req_fn = elv_rb_former_request, + .elevator_latter_req_fn = elv_rb_latter_request, + .elevator_set_req_fn = cfq_set_request, + .elevator_put_req_fn = cfq_put_request, + .elevator_may_queue_fn = cfq_may_queue, + .elevator_init_fn = cfq_init_queue, + .elevator_exit_fn = cfq_exit_queue, + .trim = cfq_free_io_context, + }, + .elevator_attrs = cfq_attrs, + .elevator_name = "cfq", + .elevator_owner = THIS_MODULE, +}; + +#ifdef CONFIG_CFQ_GROUP_IOSCHED +static struct blkio_policy_type blkio_policy_cfq = { + .ops = { + .blkio_unlink_group_fn = cfq_unlink_blkio_group, + .blkio_update_group_weight_fn = cfq_update_blkio_group_weight, + }, +}; +#else +static struct blkio_policy_type blkio_policy_cfq; +#endif + +static int __init cfq_init(void) +{ + /* + * could be 0 on HZ < 1000 setups + */ + if (!cfq_slice_async) + cfq_slice_async = 1; + if (!cfq_slice_idle) + cfq_slice_idle = 1; + +#ifdef CONFIG_CFQ_GROUP_IOSCHED + if (!cfq_group_idle) + cfq_group_idle = 1; +#else + cfq_group_idle = 0; +#endif + if (cfq_slab_setup()) + return -ENOMEM; + + elv_register(&iosched_cfq); + blkio_policy_register(&blkio_policy_cfq); + + return 0; +} + +static void __exit cfq_exit(void) +{ + DECLARE_COMPLETION_ONSTACK(all_gone); + blkio_policy_unregister(&blkio_policy_cfq); + elv_unregister(&iosched_cfq); + ioc_gone = &all_gone; + /* ioc_gone's update must be visible before reading ioc_count */ + smp_wmb(); + + /* + * this also protects us from entering cfq_slab_kill() with + * pending RCU callbacks + */ + if (elv_ioc_count_read(cfq_ioc_count)) + wait_for_completion(&all_gone); + ida_destroy(&cic_index_ida); + cfq_slab_kill(); +} + +#ifdef CONFIG_FAST_RESUME +beforeresume_initcall(cfq_init); +#else +module_init(cfq_init); +#endif +module_exit(cfq_exit); + +MODULE_AUTHOR("Jens Axboe"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler"); diff --git a/block/cfq.h b/block/cfq.h new file mode 100644 index 00000000..93448e5a --- /dev/null +++ b/block/cfq.h @@ -0,0 +1,115 @@ +#ifndef _CFQ_H +#define _CFQ_H +#include "blk-cgroup.h" + +#ifdef CONFIG_CFQ_GROUP_IOSCHED +static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg, + struct blkio_group *curr_blkg, bool direction, bool sync) +{ + blkiocg_update_io_add_stats(blkg, curr_blkg, direction, sync); +} + +static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg, + unsigned long dequeue) +{ + blkiocg_update_dequeue_stats(blkg, dequeue); +} + +static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg, + unsigned long time) +{ + blkiocg_update_timeslice_used(blkg, time); +} + +static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg) +{ + blkiocg_set_start_empty_time(blkg); +} + +static inline void 
cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg, + bool direction, bool sync) +{ + blkiocg_update_io_remove_stats(blkg, direction, sync); +} + +static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg, + bool direction, bool sync) +{ + blkiocg_update_io_merged_stats(blkg, direction, sync); +} + +static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg) +{ + blkiocg_update_idle_time_stats(blkg); +} + +static inline void +cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg) +{ + blkiocg_update_avg_queue_size_stats(blkg); +} + +static inline void +cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg) +{ + blkiocg_update_set_idle_time_stats(blkg); +} + +static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg, + uint64_t bytes, bool direction, bool sync) +{ + blkiocg_update_dispatch_stats(blkg, bytes, direction, sync); +} + +static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync) +{ + blkiocg_update_completion_stats(blkg, start_time, io_start_time, + direction, sync); +} + +static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, + struct blkio_group *blkg, void *key, dev_t dev) { + blkiocg_add_blkio_group(blkcg, blkg, key, dev); +} + +static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg) +{ + return blkiocg_del_blkio_group(blkg); +} + +#else /* CFQ_GROUP_IOSCHED */ +static inline void cfq_blkiocg_update_io_add_stats(struct blkio_group *blkg, + struct blkio_group *curr_blkg, bool direction, bool sync) {} + +static inline void cfq_blkiocg_update_dequeue_stats(struct blkio_group *blkg, + unsigned long dequeue) {} + +static inline void cfq_blkiocg_update_timeslice_used(struct blkio_group *blkg, + unsigned long time) {} +static inline void cfq_blkiocg_set_start_empty_time(struct blkio_group *blkg) {} +static inline void cfq_blkiocg_update_io_remove_stats(struct blkio_group *blkg, + bool direction, bool sync) {} +static inline void cfq_blkiocg_update_io_merged_stats(struct blkio_group *blkg, + bool direction, bool sync) {} +static inline void cfq_blkiocg_update_idle_time_stats(struct blkio_group *blkg) +{ +} +static inline void +cfq_blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg) {} + +static inline void +cfq_blkiocg_update_set_idle_time_stats(struct blkio_group *blkg) {} + +static inline void cfq_blkiocg_update_dispatch_stats(struct blkio_group *blkg, + uint64_t bytes, bool direction, bool sync) {} +static inline void cfq_blkiocg_update_completion_stats(struct blkio_group *blkg, uint64_t start_time, uint64_t io_start_time, bool direction, bool sync) {} + +static inline void cfq_blkiocg_add_blkio_group(struct blkio_cgroup *blkcg, + struct blkio_group *blkg, void *key, dev_t dev) {} +static inline int cfq_blkiocg_del_blkio_group(struct blkio_group *blkg) +{ + return 0; +} + +#endif /* CFQ_GROUP_IOSCHED */ +#endif diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c new file mode 100644 index 00000000..119f07b7 --- /dev/null +++ b/block/compat_ioctl.c @@ -0,0 +1,768 @@ +#include <linux/blkdev.h> +#include <linux/blkpg.h> +#include <linux/blktrace_api.h> +#include <linux/cdrom.h> +#include <linux/compat.h> +#include <linux/elevator.h> +#include <linux/fd.h> +#include <linux/hdreg.h> +#include <linux/slab.h> +#include <linux/syscalls.h> +#include <linux/smp_lock.h> +#include <linux/types.h> +#include <linux/uaccess.h> + +static int 
compat_put_ushort(unsigned long arg, unsigned short val) +{ + return put_user(val, (unsigned short __user *)compat_ptr(arg)); +} + +static int compat_put_int(unsigned long arg, int val) +{ + return put_user(val, (compat_int_t __user *)compat_ptr(arg)); +} + +static int compat_put_uint(unsigned long arg, unsigned int val) +{ + return put_user(val, (compat_uint_t __user *)compat_ptr(arg)); +} + +static int compat_put_long(unsigned long arg, long val) +{ + return put_user(val, (compat_long_t __user *)compat_ptr(arg)); +} + +static int compat_put_ulong(unsigned long arg, compat_ulong_t val) +{ + return put_user(val, (compat_ulong_t __user *)compat_ptr(arg)); +} + +static int compat_put_u64(unsigned long arg, u64 val) +{ + return put_user(val, (compat_u64 __user *)compat_ptr(arg)); +} + +struct compat_hd_geometry { + unsigned char heads; + unsigned char sectors; + unsigned short cylinders; + u32 start; +}; + +static int compat_hdio_getgeo(struct gendisk *disk, struct block_device *bdev, + struct compat_hd_geometry __user *ugeo) +{ + struct hd_geometry geo; + int ret; + + if (!ugeo) + return -EINVAL; + if (!disk->fops->getgeo) + return -ENOTTY; + + /* + * We need to set the startsect first, the driver may + * want to override it. + */ + geo.start = get_start_sect(bdev); + ret = disk->fops->getgeo(bdev, &geo); + if (ret) + return ret; + + ret = copy_to_user(ugeo, &geo, 4); + ret |= __put_user(geo.start, &ugeo->start); + if (ret) + ret = -EFAULT; + + return ret; +} + +static int compat_hdio_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) +{ + mm_segment_t old_fs = get_fs(); + unsigned long kval; + unsigned int __user *uvp; + int error; + + set_fs(KERNEL_DS); + error = __blkdev_driver_ioctl(bdev, mode, + cmd, (unsigned long)(&kval)); + set_fs(old_fs); + + if (error == 0) { + uvp = compat_ptr(arg); + if (put_user(kval, uvp)) + error = -EFAULT; + } + return error; +} + +struct compat_cdrom_read_audio { + union cdrom_addr addr; + u8 addr_format; + compat_int_t nframes; + compat_caddr_t buf; +}; + +struct compat_cdrom_generic_command { + unsigned char cmd[CDROM_PACKET_SIZE]; + compat_caddr_t buffer; + compat_uint_t buflen; + compat_int_t stat; + compat_caddr_t sense; + unsigned char data_direction; + compat_int_t quiet; + compat_int_t timeout; + compat_caddr_t reserved[1]; +}; + +static int compat_cdrom_read_audio(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) +{ + struct cdrom_read_audio __user *cdread_audio; + struct compat_cdrom_read_audio __user *cdread_audio32; + __u32 data; + void __user *datap; + + cdread_audio = compat_alloc_user_space(sizeof(*cdread_audio)); + cdread_audio32 = compat_ptr(arg); + + if (copy_in_user(&cdread_audio->addr, + &cdread_audio32->addr, + (sizeof(*cdread_audio32) - + sizeof(compat_caddr_t)))) + return -EFAULT; + + if (get_user(data, &cdread_audio32->buf)) + return -EFAULT; + datap = compat_ptr(data); + if (put_user(datap, &cdread_audio->buf)) + return -EFAULT; + + return __blkdev_driver_ioctl(bdev, mode, cmd, + (unsigned long)cdread_audio); +} + +static int compat_cdrom_generic_command(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) +{ + struct cdrom_generic_command __user *cgc; + struct compat_cdrom_generic_command __user *cgc32; + u32 data; + unsigned char dir; + int itmp; + + cgc = compat_alloc_user_space(sizeof(*cgc)); + cgc32 = compat_ptr(arg); + + if (copy_in_user(&cgc->cmd, &cgc32->cmd, sizeof(cgc->cmd)) || + get_user(data, &cgc32->buffer) || + 
put_user(compat_ptr(data), &cgc->buffer) || + copy_in_user(&cgc->buflen, &cgc32->buflen, + (sizeof(unsigned int) + sizeof(int))) || + get_user(data, &cgc32->sense) || + put_user(compat_ptr(data), &cgc->sense) || + get_user(dir, &cgc32->data_direction) || + put_user(dir, &cgc->data_direction) || + get_user(itmp, &cgc32->quiet) || + put_user(itmp, &cgc->quiet) || + get_user(itmp, &cgc32->timeout) || + put_user(itmp, &cgc->timeout) || + get_user(data, &cgc32->reserved[0]) || + put_user(compat_ptr(data), &cgc->reserved[0])) + return -EFAULT; + + return __blkdev_driver_ioctl(bdev, mode, cmd, (unsigned long)cgc); +} + +struct compat_blkpg_ioctl_arg { + compat_int_t op; + compat_int_t flags; + compat_int_t datalen; + compat_caddr_t data; +}; + +static int compat_blkpg_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, struct compat_blkpg_ioctl_arg __user *ua32) +{ + struct blkpg_ioctl_arg __user *a = compat_alloc_user_space(sizeof(*a)); + compat_caddr_t udata; + compat_int_t n; + int err; + + err = get_user(n, &ua32->op); + err |= put_user(n, &a->op); + err |= get_user(n, &ua32->flags); + err |= put_user(n, &a->flags); + err |= get_user(n, &ua32->datalen); + err |= put_user(n, &a->datalen); + err |= get_user(udata, &ua32->data); + err |= put_user(compat_ptr(udata), &a->data); + if (err) + return err; + + return blkdev_ioctl(bdev, mode, cmd, (unsigned long)a); +} + +#define BLKBSZGET_32 _IOR(0x12, 112, int) +#define BLKBSZSET_32 _IOW(0x12, 113, int) +#define BLKGETSIZE64_32 _IOR(0x12, 114, int) + +struct compat_floppy_struct { + compat_uint_t size; + compat_uint_t sect; + compat_uint_t head; + compat_uint_t track; + compat_uint_t stretch; + unsigned char gap; + unsigned char rate; + unsigned char spec1; + unsigned char fmt_gap; + const compat_caddr_t name; +}; + +struct compat_floppy_drive_params { + char cmos; + compat_ulong_t max_dtr; + compat_ulong_t hlt; + compat_ulong_t hut; + compat_ulong_t srt; + compat_ulong_t spinup; + compat_ulong_t spindown; + unsigned char spindown_offset; + unsigned char select_delay; + unsigned char rps; + unsigned char tracks; + compat_ulong_t timeout; + unsigned char interleave_sect; + struct floppy_max_errors max_errors; + char flags; + char read_track; + short autodetect[8]; + compat_int_t checkfreq; + compat_int_t native_format; +}; + +struct compat_floppy_drive_struct { + signed char flags; + compat_ulong_t spinup_date; + compat_ulong_t select_date; + compat_ulong_t first_read_date; + short probed_format; + short track; + short maxblock; + short maxtrack; + compat_int_t generation; + compat_int_t keep_data; + compat_int_t fd_ref; + compat_int_t fd_device; + compat_int_t last_checked; + compat_caddr_t dmabuf; + compat_int_t bufblocks; +}; + +struct compat_floppy_fdc_state { + compat_int_t spec1; + compat_int_t spec2; + compat_int_t dtr; + unsigned char version; + unsigned char dor; + compat_ulong_t address; + unsigned int rawcmd:2; + unsigned int reset:1; + unsigned int need_configure:1; + unsigned int perp_mode:2; + unsigned int has_fifo:1; + unsigned int driver_version; + unsigned char track[4]; +}; + +struct compat_floppy_write_errors { + unsigned int write_errors; + compat_ulong_t first_error_sector; + compat_int_t first_error_generation; + compat_ulong_t last_error_sector; + compat_int_t last_error_generation; + compat_uint_t badness; +}; + +#define FDSETPRM32 _IOW(2, 0x42, struct compat_floppy_struct) +#define FDDEFPRM32 _IOW(2, 0x43, struct compat_floppy_struct) +#define FDGETPRM32 _IOR(2, 0x04, struct compat_floppy_struct) +#define 
FDSETDRVPRM32 _IOW(2, 0x90, struct compat_floppy_drive_params) +#define FDGETDRVPRM32 _IOR(2, 0x11, struct compat_floppy_drive_params) +#define FDGETDRVSTAT32 _IOR(2, 0x12, struct compat_floppy_drive_struct) +#define FDPOLLDRVSTAT32 _IOR(2, 0x13, struct compat_floppy_drive_struct) +#define FDGETFDCSTAT32 _IOR(2, 0x15, struct compat_floppy_fdc_state) +#define FDWERRORGET32 _IOR(2, 0x17, struct compat_floppy_write_errors) + +static struct { + unsigned int cmd32; + unsigned int cmd; +} fd_ioctl_trans_table[] = { + { FDSETPRM32, FDSETPRM }, + { FDDEFPRM32, FDDEFPRM }, + { FDGETPRM32, FDGETPRM }, + { FDSETDRVPRM32, FDSETDRVPRM }, + { FDGETDRVPRM32, FDGETDRVPRM }, + { FDGETDRVSTAT32, FDGETDRVSTAT }, + { FDPOLLDRVSTAT32, FDPOLLDRVSTAT }, + { FDGETFDCSTAT32, FDGETFDCSTAT }, + { FDWERRORGET32, FDWERRORGET } +}; + +#define NR_FD_IOCTL_TRANS ARRAY_SIZE(fd_ioctl_trans_table) + +static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) +{ + mm_segment_t old_fs = get_fs(); + void *karg = NULL; + unsigned int kcmd = 0; + int i, err; + + for (i = 0; i < NR_FD_IOCTL_TRANS; i++) + if (cmd == fd_ioctl_trans_table[i].cmd32) { + kcmd = fd_ioctl_trans_table[i].cmd; + break; + } + if (!kcmd) + return -EINVAL; + + switch (cmd) { + case FDSETPRM32: + case FDDEFPRM32: + case FDGETPRM32: + { + compat_uptr_t name; + struct compat_floppy_struct __user *uf; + struct floppy_struct *f; + + uf = compat_ptr(arg); + f = karg = kmalloc(sizeof(struct floppy_struct), GFP_KERNEL); + if (!karg) + return -ENOMEM; + if (cmd == FDGETPRM32) + break; + err = __get_user(f->size, &uf->size); + err |= __get_user(f->sect, &uf->sect); + err |= __get_user(f->head, &uf->head); + err |= __get_user(f->track, &uf->track); + err |= __get_user(f->stretch, &uf->stretch); + err |= __get_user(f->gap, &uf->gap); + err |= __get_user(f->rate, &uf->rate); + err |= __get_user(f->spec1, &uf->spec1); + err |= __get_user(f->fmt_gap, &uf->fmt_gap); + err |= __get_user(name, &uf->name); + f->name = compat_ptr(name); + if (err) { + err = -EFAULT; + goto out; + } + break; + } + case FDSETDRVPRM32: + case FDGETDRVPRM32: + { + struct compat_floppy_drive_params __user *uf; + struct floppy_drive_params *f; + + uf = compat_ptr(arg); + f = karg = kmalloc(sizeof(struct floppy_drive_params), GFP_KERNEL); + if (!karg) + return -ENOMEM; + if (cmd == FDGETDRVPRM32) + break; + err = __get_user(f->cmos, &uf->cmos); + err |= __get_user(f->max_dtr, &uf->max_dtr); + err |= __get_user(f->hlt, &uf->hlt); + err |= __get_user(f->hut, &uf->hut); + err |= __get_user(f->srt, &uf->srt); + err |= __get_user(f->spinup, &uf->spinup); + err |= __get_user(f->spindown, &uf->spindown); + err |= __get_user(f->spindown_offset, &uf->spindown_offset); + err |= __get_user(f->select_delay, &uf->select_delay); + err |= __get_user(f->rps, &uf->rps); + err |= __get_user(f->tracks, &uf->tracks); + err |= __get_user(f->timeout, &uf->timeout); + err |= __get_user(f->interleave_sect, &uf->interleave_sect); + err |= __copy_from_user(&f->max_errors, &uf->max_errors, sizeof(f->max_errors)); + err |= __get_user(f->flags, &uf->flags); + err |= __get_user(f->read_track, &uf->read_track); + err |= __copy_from_user(f->autodetect, uf->autodetect, sizeof(f->autodetect)); + err |= __get_user(f->checkfreq, &uf->checkfreq); + err |= __get_user(f->native_format, &uf->native_format); + if (err) { + err = -EFAULT; + goto out; + } + break; + } + case FDGETDRVSTAT32: + case FDPOLLDRVSTAT32: + karg = kmalloc(sizeof(struct floppy_drive_struct), GFP_KERNEL); + if (!karg) + 
return -ENOMEM; + break; + case FDGETFDCSTAT32: + karg = kmalloc(sizeof(struct floppy_fdc_state), GFP_KERNEL); + if (!karg) + return -ENOMEM; + break; + case FDWERRORGET32: + karg = kmalloc(sizeof(struct floppy_write_errors), GFP_KERNEL); + if (!karg) + return -ENOMEM; + break; + default: + return -EINVAL; + } + set_fs(KERNEL_DS); + err = __blkdev_driver_ioctl(bdev, mode, kcmd, (unsigned long)karg); + set_fs(old_fs); + if (err) + goto out; + switch (cmd) { + case FDGETPRM32: + { + struct floppy_struct *f = karg; + struct compat_floppy_struct __user *uf = compat_ptr(arg); + + err = __put_user(f->size, &uf->size); + err |= __put_user(f->sect, &uf->sect); + err |= __put_user(f->head, &uf->head); + err |= __put_user(f->track, &uf->track); + err |= __put_user(f->stretch, &uf->stretch); + err |= __put_user(f->gap, &uf->gap); + err |= __put_user(f->rate, &uf->rate); + err |= __put_user(f->spec1, &uf->spec1); + err |= __put_user(f->fmt_gap, &uf->fmt_gap); + err |= __put_user((u64)f->name, (compat_caddr_t __user *)&uf->name); + break; + } + case FDGETDRVPRM32: + { + struct compat_floppy_drive_params __user *uf; + struct floppy_drive_params *f = karg; + + uf = compat_ptr(arg); + err = __put_user(f->cmos, &uf->cmos); + err |= __put_user(f->max_dtr, &uf->max_dtr); + err |= __put_user(f->hlt, &uf->hlt); + err |= __put_user(f->hut, &uf->hut); + err |= __put_user(f->srt, &uf->srt); + err |= __put_user(f->spinup, &uf->spinup); + err |= __put_user(f->spindown, &uf->spindown); + err |= __put_user(f->spindown_offset, &uf->spindown_offset); + err |= __put_user(f->select_delay, &uf->select_delay); + err |= __put_user(f->rps, &uf->rps); + err |= __put_user(f->tracks, &uf->tracks); + err |= __put_user(f->timeout, &uf->timeout); + err |= __put_user(f->interleave_sect, &uf->interleave_sect); + err |= __copy_to_user(&uf->max_errors, &f->max_errors, sizeof(f->max_errors)); + err |= __put_user(f->flags, &uf->flags); + err |= __put_user(f->read_track, &uf->read_track); + err |= __copy_to_user(uf->autodetect, f->autodetect, sizeof(f->autodetect)); + err |= __put_user(f->checkfreq, &uf->checkfreq); + err |= __put_user(f->native_format, &uf->native_format); + break; + } + case FDGETDRVSTAT32: + case FDPOLLDRVSTAT32: + { + struct compat_floppy_drive_struct __user *uf; + struct floppy_drive_struct *f = karg; + + uf = compat_ptr(arg); + err = __put_user(f->flags, &uf->flags); + err |= __put_user(f->spinup_date, &uf->spinup_date); + err |= __put_user(f->select_date, &uf->select_date); + err |= __put_user(f->first_read_date, &uf->first_read_date); + err |= __put_user(f->probed_format, &uf->probed_format); + err |= __put_user(f->track, &uf->track); + err |= __put_user(f->maxblock, &uf->maxblock); + err |= __put_user(f->maxtrack, &uf->maxtrack); + err |= __put_user(f->generation, &uf->generation); + err |= __put_user(f->keep_data, &uf->keep_data); + err |= __put_user(f->fd_ref, &uf->fd_ref); + err |= __put_user(f->fd_device, &uf->fd_device); + err |= __put_user(f->last_checked, &uf->last_checked); + err |= __put_user((u64)f->dmabuf, &uf->dmabuf); + err |= __put_user((u64)f->bufblocks, &uf->bufblocks); + break; + } + case FDGETFDCSTAT32: + { + struct compat_floppy_fdc_state __user *uf; + struct floppy_fdc_state *f = karg; + + uf = compat_ptr(arg); + err = __put_user(f->spec1, &uf->spec1); + err |= __put_user(f->spec2, &uf->spec2); + err |= __put_user(f->dtr, &uf->dtr); + err |= __put_user(f->version, &uf->version); + err |= __put_user(f->dor, &uf->dor); + err |= __put_user(f->address, &uf->address); + err |= __copy_to_user((char 
__user *)&uf->address + sizeof(uf->address), + (char *)&f->address + sizeof(f->address), sizeof(int)); + err |= __put_user(f->driver_version, &uf->driver_version); + err |= __copy_to_user(uf->track, f->track, sizeof(f->track)); + break; + } + case FDWERRORGET32: + { + struct compat_floppy_write_errors __user *uf; + struct floppy_write_errors *f = karg; + + uf = compat_ptr(arg); + err = __put_user(f->write_errors, &uf->write_errors); + err |= __put_user(f->first_error_sector, &uf->first_error_sector); + err |= __put_user(f->first_error_generation, &uf->first_error_generation); + err |= __put_user(f->last_error_sector, &uf->last_error_sector); + err |= __put_user(f->last_error_generation, &uf->last_error_generation); + err |= __put_user(f->badness, &uf->badness); + break; + } + default: + break; + } + if (err) + err = -EFAULT; + +out: + kfree(karg); + return err; +} + +static int compat_blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode, + unsigned cmd, unsigned long arg) +{ + switch (cmd) { + case HDIO_GET_UNMASKINTR: + case HDIO_GET_MULTCOUNT: + case HDIO_GET_KEEPSETTINGS: + case HDIO_GET_32BIT: + case HDIO_GET_NOWERR: + case HDIO_GET_DMA: + case HDIO_GET_NICE: + case HDIO_GET_WCACHE: + case HDIO_GET_ACOUSTIC: + case HDIO_GET_ADDRESS: + case HDIO_GET_BUSSTATE: + return compat_hdio_ioctl(bdev, mode, cmd, arg); + case FDSETPRM32: + case FDDEFPRM32: + case FDGETPRM32: + case FDSETDRVPRM32: + case FDGETDRVPRM32: + case FDGETDRVSTAT32: + case FDPOLLDRVSTAT32: + case FDGETFDCSTAT32: + case FDWERRORGET32: + return compat_fd_ioctl(bdev, mode, cmd, arg); + case CDROMREADAUDIO: + return compat_cdrom_read_audio(bdev, mode, cmd, arg); + case CDROM_SEND_PACKET: + return compat_cdrom_generic_command(bdev, mode, cmd, arg); + + /* + * No handler required for the ones below, we just need to + * convert arg to a 64 bit pointer. + */ + case BLKSECTSET: + /* + * 0x03 -- HD/IDE ioctl's used by hdparm and friends. + * Some need translations, these do not. + */ + case HDIO_GET_IDENTITY: + case HDIO_DRIVE_TASK: + case HDIO_DRIVE_CMD: + /* 0x330 is reserved -- it used to be HDIO_GETGEO_BIG */ + case 0x330: + /* 0x02 -- Floppy ioctls */ + case FDMSGON: + case FDMSGOFF: + case FDSETEMSGTRESH: + case FDFLUSH: + case FDWERRORCLR: + case FDSETMAXERRS: + case FDGETMAXERRS: + case FDGETDRVTYP: + case FDEJECT: + case FDCLRPRM: + case FDFMTBEG: + case FDFMTEND: + case FDRESET: + case FDTWADDLE: + case FDFMTTRK: + case FDRAWCMD: + /* CDROM stuff */ + case CDROMPAUSE: + case CDROMRESUME: + case CDROMPLAYMSF: + case CDROMPLAYTRKIND: + case CDROMREADTOCHDR: + case CDROMREADTOCENTRY: + case CDROMSTOP: + case CDROMSTART: + case CDROMEJECT: + case CDROMVOLCTRL: + case CDROMSUBCHNL: + case CDROMMULTISESSION: + case CDROM_GET_MCN: + case CDROMRESET: + case CDROMVOLREAD: + case CDROMSEEK: + case CDROMPLAYBLK: + case CDROMCLOSETRAY: + case CDROM_DISC_STATUS: + case CDROM_CHANGER_NSLOTS: + case CDROM_GET_CAPABILITY: + /* Ignore cdrom.h about these next 5 ioctls, they absolutely do + * not take a struct cdrom_read, instead they take a struct cdrom_msf + * which is compatible. + */ + case CDROMREADMODE2: + case CDROMREADMODE1: + case CDROMREADRAW: + case CDROMREADCOOKED: + case CDROMREADALL: + /* DVD ioctls */ + case DVD_READ_STRUCT: + case DVD_WRITE_STRUCT: + case DVD_AUTH: + arg = (unsigned long)compat_ptr(arg); + /* These intepret arg as an unsigned long, not as a pointer, + * so we must not do compat_ptr() conversion. 
*/ + case HDIO_SET_MULTCOUNT: + case HDIO_SET_UNMASKINTR: + case HDIO_SET_KEEPSETTINGS: + case HDIO_SET_32BIT: + case HDIO_SET_NOWERR: + case HDIO_SET_DMA: + case HDIO_SET_PIO_MODE: + case HDIO_SET_NICE: + case HDIO_SET_WCACHE: + case HDIO_SET_ACOUSTIC: + case HDIO_SET_BUSSTATE: + case HDIO_SET_ADDRESS: + case CDROMEJECT_SW: + case CDROM_SET_OPTIONS: + case CDROM_CLEAR_OPTIONS: + case CDROM_SELECT_SPEED: + case CDROM_SELECT_DISC: + case CDROM_MEDIA_CHANGED: + case CDROM_DRIVE_STATUS: + case CDROM_LOCKDOOR: + case CDROM_DEBUG: + break; + default: + /* unknown ioctl number */ + return -ENOIOCTLCMD; + } + + return __blkdev_driver_ioctl(bdev, mode, cmd, arg); +} + +/* Most of the generic ioctls are handled in the normal fallback path. + This assumes the blkdev's low level compat_ioctl always returns + ENOIOCTLCMD for unknown ioctls. */ +long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) +{ + int ret = -ENOIOCTLCMD; + struct inode *inode = file->f_mapping->host; + struct block_device *bdev = inode->i_bdev; + struct gendisk *disk = bdev->bd_disk; + fmode_t mode = file->f_mode; + struct backing_dev_info *bdi; + loff_t size; + + /* + * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have + * to updated it before every ioctl. + */ + if (file->f_flags & O_NDELAY) + mode |= FMODE_NDELAY; + else + mode &= ~FMODE_NDELAY; + + switch (cmd) { + case HDIO_GETGEO: + return compat_hdio_getgeo(disk, bdev, compat_ptr(arg)); + case BLKPBSZGET: + return compat_put_uint(arg, bdev_physical_block_size(bdev)); + case BLKIOMIN: + return compat_put_uint(arg, bdev_io_min(bdev)); + case BLKIOOPT: + return compat_put_uint(arg, bdev_io_opt(bdev)); + case BLKALIGNOFF: + return compat_put_int(arg, bdev_alignment_offset(bdev)); + case BLKDISCARDZEROES: + return compat_put_uint(arg, bdev_discard_zeroes_data(bdev)); + case BLKFLSBUF: + case BLKROSET: + case BLKDISCARD: + case BLKSECDISCARD: + /* + * the ones below are implemented in blkdev_locked_ioctl, + * but we call blkdev_ioctl, which gets the lock for us + */ + case BLKRRPART: + return blkdev_ioctl(bdev, mode, cmd, + (unsigned long)compat_ptr(arg)); + case BLKBSZSET_32: + return blkdev_ioctl(bdev, mode, BLKBSZSET, + (unsigned long)compat_ptr(arg)); + case BLKPG: + return compat_blkpg_ioctl(bdev, mode, cmd, compat_ptr(arg)); + case BLKRAGET: + case BLKFRAGET: + if (!arg) + return -EINVAL; + bdi = blk_get_backing_dev_info(bdev); + if (bdi == NULL) + return -ENOTTY; + return compat_put_long(arg, + (bdi->ra_pages * PAGE_CACHE_SIZE) / 512); + case BLKROGET: /* compatible */ + return compat_put_int(arg, bdev_read_only(bdev) != 0); + case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */ + return compat_put_int(arg, block_size(bdev)); + case BLKSSZGET: /* get block device hardware sector size */ + return compat_put_int(arg, bdev_logical_block_size(bdev)); + case BLKSECTGET: + return compat_put_ushort(arg, + queue_max_sectors(bdev_get_queue(bdev))); + case BLKRASET: /* compatible, but no compat_ptr (!) 
*/ + case BLKFRASET: + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + bdi = blk_get_backing_dev_info(bdev); + if (bdi == NULL) + return -ENOTTY; + bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE; + return 0; + case BLKGETSIZE: + size = bdev->bd_inode->i_size; + if ((size >> 9) > ~0UL) + return -EFBIG; + return compat_put_ulong(arg, size >> 9); + + case BLKGETSIZE64_32: + return compat_put_u64(arg, bdev->bd_inode->i_size); + + case BLKTRACESETUP32: + case BLKTRACESTART: /* compatible */ + case BLKTRACESTOP: /* compatible */ + case BLKTRACETEARDOWN: /* compatible */ + ret = blk_trace_ioctl(bdev, cmd, compat_ptr(arg)); + return ret; + default: + if (disk->fops->compat_ioctl) + ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg); + if (ret == -ENOIOCTLCMD) + ret = compat_blkdev_driver_ioctl(bdev, mode, cmd, arg); + return ret; + } +} diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c new file mode 100644 index 00000000..f446430b --- /dev/null +++ b/block/deadline-iosched.c @@ -0,0 +1,481 @@ +/* + * Deadline i/o scheduler. + * + * Copyright (C) 2002 Jens Axboe <axboe@kernel.dk> + */ +#include <linux/kernel.h> +#include <linux/fs.h> +#include <linux/blkdev.h> +#include <linux/elevator.h> +#include <linux/bio.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/compiler.h> +#include <linux/rbtree.h> + +/* + * See Documentation/block/deadline-iosched.txt + */ +static const int read_expire = HZ / 2; /* max time before a read is submitted. */ +static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */ +static const int writes_starved = 2; /* max times reads can starve a write */ +static const int fifo_batch = 16; /* # of sequential requests treated as one + by the above parameters. For throughput. */ + +struct deadline_data { + /* + * run time data + */ + + /* + * requests (deadline_rq s) are present on both sort_list and fifo_list + */ + struct rb_root sort_list[2]; + struct list_head fifo_list[2]; + + /* + * next in sort order. 
read, write or both are NULL + */ + struct request *next_rq[2]; + unsigned int batching; /* number of sequential requests made */ + sector_t last_sector; /* head position */ + unsigned int starved; /* times reads have starved writes */ + + /* + * settings that change how the i/o scheduler behaves + */ + int fifo_expire[2]; + int fifo_batch; + int writes_starved; + int front_merges; +}; + +static void deadline_move_request(struct deadline_data *, struct request *); + +static inline struct rb_root * +deadline_rb_root(struct deadline_data *dd, struct request *rq) +{ + return &dd->sort_list[rq_data_dir(rq)]; +} + +/* + * get the request after `rq' in sector-sorted order + */ +static inline struct request * +deadline_latter_request(struct request *rq) +{ + struct rb_node *node = rb_next(&rq->rb_node); + + if (node) + return rb_entry_rq(node); + + return NULL; +} + +static void +deadline_add_rq_rb(struct deadline_data *dd, struct request *rq) +{ + struct rb_root *root = deadline_rb_root(dd, rq); + struct request *__alias; + + while (unlikely(__alias = elv_rb_add(root, rq))) + deadline_move_request(dd, __alias); +} + +static inline void +deadline_del_rq_rb(struct deadline_data *dd, struct request *rq) +{ + const int data_dir = rq_data_dir(rq); + + if (dd->next_rq[data_dir] == rq) + dd->next_rq[data_dir] = deadline_latter_request(rq); + + elv_rb_del(deadline_rb_root(dd, rq), rq); +} + +/* + * add rq to rbtree and fifo + */ +static void +deadline_add_request(struct request_queue *q, struct request *rq) +{ + struct deadline_data *dd = q->elevator->elevator_data; + const int data_dir = rq_data_dir(rq); + + deadline_add_rq_rb(dd, rq); + + /* + * set expire time and add to fifo list + */ + rq_set_fifo_time(rq, jiffies + dd->fifo_expire[data_dir]); + list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]); +} + +/* + * remove rq from rbtree and fifo. 
+ */ +static void deadline_remove_request(struct request_queue *q, struct request *rq) +{ + struct deadline_data *dd = q->elevator->elevator_data; + + rq_fifo_clear(rq); + deadline_del_rq_rb(dd, rq); +} + +static int +deadline_merge(struct request_queue *q, struct request **req, struct bio *bio) +{ + struct deadline_data *dd = q->elevator->elevator_data; + struct request *__rq; + int ret; + + /* + * check for front merge + */ + if (dd->front_merges) { + sector_t sector = bio->bi_sector + bio_sectors(bio); + + __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector); + if (__rq) { + BUG_ON(sector != blk_rq_pos(__rq)); + + if (elv_rq_merge_ok(__rq, bio)) { + ret = ELEVATOR_FRONT_MERGE; + goto out; + } + } + } + + return ELEVATOR_NO_MERGE; +out: + *req = __rq; + return ret; +} + +static void deadline_merged_request(struct request_queue *q, + struct request *req, int type) +{ + struct deadline_data *dd = q->elevator->elevator_data; + + /* + * if the merge was a front merge, we need to reposition request + */ + if (type == ELEVATOR_FRONT_MERGE) { + elv_rb_del(deadline_rb_root(dd, req), req); + deadline_add_rq_rb(dd, req); + } +} + +static void +deadline_merged_requests(struct request_queue *q, struct request *req, + struct request *next) +{ + /* + * if next expires before rq, assign its expire time to rq + * and move into next position (next will be deleted) in fifo + */ + if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) { + if (time_before(rq_fifo_time(next), rq_fifo_time(req))) { + list_move(&req->queuelist, &next->queuelist); + rq_set_fifo_time(req, rq_fifo_time(next)); + } + } + + /* + * kill knowledge of next, this one is a goner + */ + deadline_remove_request(q, next); +} + +/* + * move request from sort list to dispatch queue. + */ +static inline void +deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq) +{ + struct request_queue *q = rq->q; + + deadline_remove_request(q, rq); + elv_dispatch_add_tail(q, rq); +} + +/* + * move an entry to dispatch queue + */ +static void +deadline_move_request(struct deadline_data *dd, struct request *rq) +{ + const int data_dir = rq_data_dir(rq); + + dd->next_rq[READ] = NULL; + dd->next_rq[WRITE] = NULL; + dd->next_rq[data_dir] = deadline_latter_request(rq); + + dd->last_sector = rq_end_sector(rq); + + /* + * take it off the sort and fifo list, move + * to dispatch queue + */ + deadline_move_to_dispatch(dd, rq); +} + +/* + * deadline_check_fifo returns 0 if there are no expired requests on the fifo, + * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir]) + */ +static inline int deadline_check_fifo(struct deadline_data *dd, int ddir) +{ + struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next); + + /* + * rq is expired! 
+ */ + if (time_after(jiffies, rq_fifo_time(rq))) + return 1; + + return 0; +} + +/* + * deadline_dispatch_requests selects the best request according to + * read/write expire, fifo_batch, etc + */ +static int deadline_dispatch_requests(struct request_queue *q, int force) +{ + struct deadline_data *dd = q->elevator->elevator_data; + const int reads = !list_empty(&dd->fifo_list[READ]); + const int writes = !list_empty(&dd->fifo_list[WRITE]); + struct request *rq; + int data_dir; + + /* + * batches are currently reads XOR writes + */ + if (dd->next_rq[WRITE]) + rq = dd->next_rq[WRITE]; + else + rq = dd->next_rq[READ]; + + if (rq && dd->batching < dd->fifo_batch) + /* we have a next request are still entitled to batch */ + goto dispatch_request; + + /* + * at this point we are not running a batch. select the appropriate + * data direction (read / write) + */ + + if (reads) { + BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ])); + + if (writes && (dd->starved++ >= dd->writes_starved)) + goto dispatch_writes; + + data_dir = READ; + + goto dispatch_find_request; + } + + /* + * there are either no reads or writes have been starved + */ + + if (writes) { +dispatch_writes: + BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE])); + + dd->starved = 0; + + data_dir = WRITE; + + goto dispatch_find_request; + } + + return 0; + +dispatch_find_request: + /* + * we are not running a batch, find best request for selected data_dir + */ + if (deadline_check_fifo(dd, data_dir) || !dd->next_rq[data_dir]) { + /* + * A deadline has expired, the last request was in the other + * direction, or we have run out of higher-sectored requests. + * Start again from the request with the earliest expiry time. + */ + rq = rq_entry_fifo(dd->fifo_list[data_dir].next); + } else { + /* + * The last req was the same dir and we have a next request in + * sort order. No expired requests so continue on from here. + */ + rq = dd->next_rq[data_dir]; + } + + dd->batching = 0; + +dispatch_request: + /* + * rq is the selected appropriate request. + */ + dd->batching++; + deadline_move_request(dd, rq); + + return 1; +} + +static int deadline_queue_empty(struct request_queue *q) +{ + struct deadline_data *dd = q->elevator->elevator_data; + + return list_empty(&dd->fifo_list[WRITE]) + && list_empty(&dd->fifo_list[READ]); +} + +static void deadline_exit_queue(struct elevator_queue *e) +{ + struct deadline_data *dd = e->elevator_data; + + BUG_ON(!list_empty(&dd->fifo_list[READ])); + BUG_ON(!list_empty(&dd->fifo_list[WRITE])); + + kfree(dd); +} + +/* + * initialize elevator private data (deadline_data). 
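deadline_dispatch_requests() is easier to read as three questions: may the current batch continue (batching < fifo_batch), which direction gets served next (reads, unless writes have already been passed over writes_starved times), and where does the new batch start (the FIFO head if a deadline expired or there is no cached next request, otherwise the next request in sector order). A simplified stand-alone model of that decision, under the assumption that per-request state can be reduced to a few booleans; the kernel additionally increments batching for every request it actually dispatches:

#include <stdbool.h>
#include <stdio.h>

enum { READ = 0, WRITE = 1 };

struct sched_state {
	bool have[2];		/* any queued requests in that direction? */
	bool expired[2];	/* FIFO head past its deadline? */
	bool have_next[2];	/* cached next request in sector order? */
	int batching, fifo_batch;
	int starved, writes_starved;
};

/* returns the direction to dispatch from, or -1 if idle */
static int pick_direction(struct sched_state *s, bool *from_fifo)
{
	int dir;

	/* continue the current batch while we are still entitled to */
	if ((s->have_next[READ] || s->have_next[WRITE]) &&
	    s->batching < s->fifo_batch) {
		*from_fifo = false;
		return s->have_next[WRITE] ? WRITE : READ;
	}

	if (s->have[READ] && !(s->have[WRITE] && s->starved++ >= s->writes_starved))
		dir = READ;
	else if (s->have[WRITE]) {
		s->starved = 0;
		dir = WRITE;
	} else
		return -1;

	/* new batch: restart from the FIFO head if a deadline expired or
	 * there is no cached next request, else continue in sector order */
	*from_fifo = s->expired[dir] || !s->have_next[dir];
	s->batching = 0;
	return dir;
}

int main(void)
{
	struct sched_state s = { .have = { true, true }, .fifo_batch = 16,
				 .writes_starved = 2 };
	bool from_fifo;

	printf("dir=%d from_fifo=%d\n", pick_direction(&s, &from_fifo), from_fifo);
	return 0;
}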
+ */ +static void *deadline_init_queue(struct request_queue *q) +{ + struct deadline_data *dd; + + dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node); + if (!dd) + return NULL; + + INIT_LIST_HEAD(&dd->fifo_list[READ]); + INIT_LIST_HEAD(&dd->fifo_list[WRITE]); + dd->sort_list[READ] = RB_ROOT; + dd->sort_list[WRITE] = RB_ROOT; + dd->fifo_expire[READ] = read_expire; + dd->fifo_expire[WRITE] = write_expire; + dd->writes_starved = writes_starved; + dd->front_merges = 1; + dd->fifo_batch = fifo_batch; + return dd; +} + +/* + * sysfs parts below + */ + +static ssize_t +deadline_var_show(int var, char *page) +{ + return sprintf(page, "%d\n", var); +} + +static ssize_t +deadline_var_store(int *var, const char *page, size_t count) +{ + char *p = (char *) page; + + *var = simple_strtol(p, &p, 10); + return count; +} + +#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, char *page) \ +{ \ + struct deadline_data *dd = e->elevator_data; \ + int __data = __VAR; \ + if (__CONV) \ + __data = jiffies_to_msecs(__data); \ + return deadline_var_show(__data, (page)); \ +} +SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1); +SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1); +SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0); +SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0); +SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0); +#undef SHOW_FUNCTION + +#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ +static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ +{ \ + struct deadline_data *dd = e->elevator_data; \ + int __data; \ + int ret = deadline_var_store(&__data, (page), count); \ + if (__data < (MIN)) \ + __data = (MIN); \ + else if (__data > (MAX)) \ + __data = (MAX); \ + if (__CONV) \ + *(__PTR) = msecs_to_jiffies(__data); \ + else \ + *(__PTR) = __data; \ + return ret; \ +} +STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1); +STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1); +STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0); +STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0); +STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0); +#undef STORE_FUNCTION + +#define DD_ATTR(name) \ + __ATTR(name, S_IRUGO|S_IWUSR, deadline_##name##_show, \ + deadline_##name##_store) + +static struct elv_fs_entry deadline_attrs[] = { + DD_ATTR(read_expire), + DD_ATTR(write_expire), + DD_ATTR(writes_starved), + DD_ATTR(front_merges), + DD_ATTR(fifo_batch), + __ATTR_NULL +}; + +static struct elevator_type iosched_deadline = { + .ops = { + .elevator_merge_fn = deadline_merge, + .elevator_merged_fn = deadline_merged_request, + .elevator_merge_req_fn = deadline_merged_requests, + .elevator_dispatch_fn = deadline_dispatch_requests, + .elevator_add_req_fn = deadline_add_request, + .elevator_queue_empty_fn = deadline_queue_empty, + .elevator_former_req_fn = elv_rb_former_request, + .elevator_latter_req_fn = elv_rb_latter_request, + .elevator_init_fn = deadline_init_queue, + .elevator_exit_fn = deadline_exit_queue, + }, + + .elevator_attrs = deadline_attrs, + .elevator_name = "deadline", + .elevator_owner = THIS_MODULE, +}; + +static int __init deadline_init(void) +{ + elv_register(&iosched_deadline); + + return 0; +} + +static void __exit deadline_exit(void) +{ + elv_unregister(&iosched_deadline); +} + +#ifdef 
CONFIG_FAST_RESUME +beforeresume_initcall(deadline_init); +#else +module_init(deadline_init); +#endif +module_exit(deadline_exit); + +MODULE_AUTHOR("Jens Axboe"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("deadline IO scheduler"); diff --git a/block/elevator.c b/block/elevator.c new file mode 100644 index 00000000..4e11559a --- /dev/null +++ b/block/elevator.c @@ -0,0 +1,1173 @@ +/* + * Block device elevator/IO-scheduler. + * + * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE + * + * 30042000 Jens Axboe <axboe@kernel.dk> : + * + * Split the elevator a bit so that it is possible to choose a different + * one or even write a new "plug in". There are three pieces: + * - elevator_fn, inserts a new request in the queue list + * - elevator_merge_fn, decides whether a new buffer can be merged with + * an existing request + * - elevator_dequeue_fn, called when a request is taken off the active list + * + * 20082000 Dave Jones <davej@suse.de> : + * Removed tests for max-bomb-segments, which was breaking elvtune + * when run without -bN + * + * Jens: + * - Rework again to work with bio instead of buffer_heads + * - loose bi_dev comparisons, partition handling is right now + * - completely modularize elevator setup and teardown + * + */ +#include <linux/kernel.h> +#include <linux/fs.h> +#include <linux/blkdev.h> +#include <linux/elevator.h> +#include <linux/bio.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/compiler.h> +#include <linux/delay.h> +#include <linux/blktrace_api.h> +#include <linux/hash.h> +#include <linux/uaccess.h> + +#include <trace/events/block.h> + +#include "blk.h" + +static DEFINE_SPINLOCK(elv_list_lock); +static LIST_HEAD(elv_list); + +/* + * Merge hash stuff. + */ +static const int elv_hash_shift = 6; +#define ELV_HASH_BLOCK(sec) ((sec) >> 3) +#define ELV_HASH_FN(sec) \ + (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift)) +#define ELV_HASH_ENTRIES (1 << elv_hash_shift) +#define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq)) + +/* + * Query io scheduler to see if the current process issuing bio may be + * merged with rq. + */ +static int elv_iosched_allow_merge(struct request *rq, struct bio *bio) +{ + struct request_queue *q = rq->q; + struct elevator_queue *e = q->elevator; + + if (e->ops->elevator_allow_merge_fn) + return e->ops->elevator_allow_merge_fn(q, rq, bio); + + return 1; +} + +/* + * can we safely merge with this request? 
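The hash defined above indexes requests by rq_hash_key(), the sector just past each request's end, bucketed by hash_long() of that key shifted down by three bits; a lookup on a bio's start sector (done by elv_rqhash_find() later in this file) therefore finds back-merge candidates in one step. A toy user-space version with a plain modulo substituted for hash_long() and purely illustrative names:

#include <stdio.h>

#define NR_BUCKETS 64

struct toy_rq {
	unsigned long long pos;		/* start sector */
	unsigned int sectors;		/* length in sectors */
	struct toy_rq *hash_next;
};

static struct toy_rq *buckets[NR_BUCKETS];

static unsigned long long hash_key(const struct toy_rq *rq)
{
	return rq->pos + rq->sectors;	/* sector right after the request */
}

static unsigned int bucket(unsigned long long sec)
{
	return (unsigned int)((sec >> 3) % NR_BUCKETS);	/* stand-in for ELV_HASH_FN */
}

static void hash_add(struct toy_rq *rq)
{
	unsigned int b = bucket(hash_key(rq));

	rq->hash_next = buckets[b];
	buckets[b] = rq;
}

/* find a request that a bio starting at 'sector' could back-merge into */
static struct toy_rq *find_backmerge(unsigned long long sector)
{
	struct toy_rq *rq;

	for (rq = buckets[bucket(sector)]; rq; rq = rq->hash_next)
		if (hash_key(rq) == sector)
			return rq;
	return NULL;
}

int main(void)
{
	struct toy_rq rq = { .pos = 1000, .sectors = 24 };

	hash_add(&rq);
	printf("backmerge candidate at 1024: %s\n",
	       find_backmerge(1024) ? "yes" : "no");
	return 0;
}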
+ */ +int elv_rq_merge_ok(struct request *rq, struct bio *bio) +{ + if (!rq_mergeable(rq)) + return 0; + + /* + * Don't merge file system requests and discard requests + */ + if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD)) + return 0; + + /* + * Don't merge discard requests and secure discard requests + */ + if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE)) + return 0; + + /* + * different data direction or already started, don't merge + */ + if (bio_data_dir(bio) != rq_data_dir(rq)) + return 0; + + /* + * must be same device and not a special request + */ + if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special) + return 0; + + /* + * only merge integrity protected bio into ditto rq + */ + if (bio_integrity(bio) != blk_integrity_rq(rq)) + return 0; + + if (!elv_iosched_allow_merge(rq, bio)) + return 0; + + return 1; +} +EXPORT_SYMBOL(elv_rq_merge_ok); + +static inline int elv_try_merge(struct request *__rq, struct bio *bio) +{ + int ret = ELEVATOR_NO_MERGE; + + /* + * we can merge and sequence is ok, check if it's possible + */ + if (elv_rq_merge_ok(__rq, bio)) { + if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector) + ret = ELEVATOR_BACK_MERGE; + else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector) + ret = ELEVATOR_FRONT_MERGE; + } + + return ret; +} + +static struct elevator_type *elevator_find(const char *name) +{ + struct elevator_type *e; + + list_for_each_entry(e, &elv_list, list) { + if (!strcmp(e->elevator_name, name)) + return e; + } + + return NULL; +} + +static void elevator_put(struct elevator_type *e) +{ + module_put(e->elevator_owner); +} + +static struct elevator_type *elevator_get(const char *name) +{ + struct elevator_type *e; + + spin_lock(&elv_list_lock); + + e = elevator_find(name); + if (!e) { + char elv[ELV_NAME_MAX + strlen("-iosched")]; + + spin_unlock(&elv_list_lock); + + snprintf(elv, sizeof(elv), "%s-iosched", name); + + request_module("%s", elv); + spin_lock(&elv_list_lock); + e = elevator_find(name); + } + + if (e && !try_module_get(e->elevator_owner)) + e = NULL; + + spin_unlock(&elv_list_lock); + + return e; +} + +static void *elevator_init_queue(struct request_queue *q, + struct elevator_queue *eq) +{ + return eq->ops->elevator_init_fn(q); +} + +static void elevator_attach(struct request_queue *q, struct elevator_queue *eq, + void *data) +{ + q->elevator = eq; + eq->elevator_data = data; +} + +static char chosen_elevator[16]; + +static int __init elevator_setup(char *str) +{ + /* + * Be backwards-compatible with previous kernels, so users + * won't get the wrong elevator. 
+ */ + strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1); + return 1; +} + +__setup("elevator=", elevator_setup); + +static struct kobj_type elv_ktype; + +static struct elevator_queue *elevator_alloc(struct request_queue *q, + struct elevator_type *e) +{ + struct elevator_queue *eq; + int i; + + eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node); + if (unlikely(!eq)) + goto err; + + eq->ops = &e->ops; + eq->elevator_type = e; + kobject_init(&eq->kobj, &elv_ktype); + mutex_init(&eq->sysfs_lock); + + eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES, + GFP_KERNEL, q->node); + if (!eq->hash) + goto err; + + for (i = 0; i < ELV_HASH_ENTRIES; i++) + INIT_HLIST_HEAD(&eq->hash[i]); + + return eq; +err: + kfree(eq); + elevator_put(e); + return NULL; +} + +static void elevator_release(struct kobject *kobj) +{ + struct elevator_queue *e; + + e = container_of(kobj, struct elevator_queue, kobj); + elevator_put(e->elevator_type); + kfree(e->hash); + kfree(e); +} + +int elevator_init(struct request_queue *q, char *name) +{ + struct elevator_type *e = NULL; + struct elevator_queue *eq; + void *data; + + if (unlikely(q->elevator)) + return 0; + + INIT_LIST_HEAD(&q->queue_head); + q->last_merge = NULL; + q->end_sector = 0; + q->boundary_rq = NULL; + + if (name) { + e = elevator_get(name); + if (!e) + return -EINVAL; + } + + if (!e && *chosen_elevator) { + e = elevator_get(chosen_elevator); + if (!e) + printk(KERN_ERR "I/O scheduler %s not found\n", + chosen_elevator); + } + + if (!e) { + e = elevator_get(CONFIG_DEFAULT_IOSCHED); + if (!e) { + printk(KERN_ERR + "Default I/O scheduler not found. " \ + "Using noop.\n"); + e = elevator_get("noop"); + } + } + + eq = elevator_alloc(q, e); + if (!eq) + return -ENOMEM; + + data = elevator_init_queue(q, eq); + if (!data) { + kobject_put(&eq->kobj); + return -ENOMEM; + } + + elevator_attach(q, eq, data); + return 0; +} +EXPORT_SYMBOL(elevator_init); + +void elevator_exit(struct elevator_queue *e) +{ + mutex_lock(&e->sysfs_lock); + if (e->ops->elevator_exit_fn) + e->ops->elevator_exit_fn(e); + e->ops = NULL; + mutex_unlock(&e->sysfs_lock); + + kobject_put(&e->kobj); +} +EXPORT_SYMBOL(elevator_exit); + +static inline void __elv_rqhash_del(struct request *rq) +{ + hlist_del_init(&rq->hash); +} + +static void elv_rqhash_del(struct request_queue *q, struct request *rq) +{ + if (ELV_ON_HASH(rq)) + __elv_rqhash_del(rq); +} + +static void elv_rqhash_add(struct request_queue *q, struct request *rq) +{ + struct elevator_queue *e = q->elevator; + + BUG_ON(ELV_ON_HASH(rq)); + hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]); +} + +static void elv_rqhash_reposition(struct request_queue *q, struct request *rq) +{ + __elv_rqhash_del(rq); + elv_rqhash_add(q, rq); +} + +static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset) +{ + struct elevator_queue *e = q->elevator; + struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)]; + struct hlist_node *entry, *next; + struct request *rq; + + hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) { + BUG_ON(!ELV_ON_HASH(rq)); + + if (unlikely(!rq_mergeable(rq))) { + __elv_rqhash_del(rq); + continue; + } + + if (rq_hash_key(rq) == offset) + return rq; + } + + return NULL; +} + +/* + * RB-tree support functions for inserting/lookup/removal of requests + * in a sorted RB tree. 
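elevator_init() above resolves the scheduler with a fixed precedence: an explicit name passed by the caller (an unknown name is an error), otherwise the "elevator=" boot parameter captured in chosen_elevator, otherwise the build-time CONFIG_DEFAULT_IOSCHED, and finally noop as a last resort. A hedged sketch of that precedence, with strings standing in for elevator_type lookups:

#include <stdio.h>
#include <string.h>

/* pretend these are the schedulers currently registered */
static const char *registered[] = { "noop", "deadline", NULL };

static int is_registered(const char *name)
{
	int i;

	for (i = 0; name && registered[i]; i++)
		if (!strcmp(name, registered[i]))
			return 1;
	return 0;
}

/* mirrors the precedence in elevator_init(): caller > boot param > Kconfig > noop */
static const char *pick_elevator(const char *requested, const char *boot_param,
				 const char *config_default)
{
	if (requested)				/* explicit name: NULL here means -EINVAL */
		return is_registered(requested) ? requested : NULL;
	if (is_registered(boot_param))		/* "elevator=" boot parameter */
		return boot_param;
	if (is_registered(config_default))	/* CONFIG_DEFAULT_IOSCHED */
		return config_default;
	return "noop";				/* last resort */
}

int main(void)
{
	/* cfq is not "registered" in this toy setup, so the boot parameter wins */
	printf("%s\n", pick_elevator(NULL, "deadline", "cfq"));
	return 0;
}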
+ */ +struct request *elv_rb_add(struct rb_root *root, struct request *rq) +{ + struct rb_node **p = &root->rb_node; + struct rb_node *parent = NULL; + struct request *__rq; + + while (*p) { + parent = *p; + __rq = rb_entry(parent, struct request, rb_node); + + if (blk_rq_pos(rq) < blk_rq_pos(__rq)) + p = &(*p)->rb_left; + else if (blk_rq_pos(rq) > blk_rq_pos(__rq)) + p = &(*p)->rb_right; + else + return __rq; + } + + rb_link_node(&rq->rb_node, parent, p); + rb_insert_color(&rq->rb_node, root); + return NULL; +} +EXPORT_SYMBOL(elv_rb_add); + +void elv_rb_del(struct rb_root *root, struct request *rq) +{ + BUG_ON(RB_EMPTY_NODE(&rq->rb_node)); + rb_erase(&rq->rb_node, root); + RB_CLEAR_NODE(&rq->rb_node); +} +EXPORT_SYMBOL(elv_rb_del); + +struct request *elv_rb_find(struct rb_root *root, sector_t sector) +{ + struct rb_node *n = root->rb_node; + struct request *rq; + + while (n) { + rq = rb_entry(n, struct request, rb_node); + + if (sector < blk_rq_pos(rq)) + n = n->rb_left; + else if (sector > blk_rq_pos(rq)) + n = n->rb_right; + else + return rq; + } + + return NULL; +} +EXPORT_SYMBOL(elv_rb_find); + +/* + * Insert rq into dispatch queue of q. Queue lock must be held on + * entry. rq is sort instead into the dispatch queue. To be used by + * specific elevators. + */ +void elv_dispatch_sort(struct request_queue *q, struct request *rq) +{ + sector_t boundary; + struct list_head *entry; + int stop_flags; + + if (q->last_merge == rq) + q->last_merge = NULL; + + elv_rqhash_del(q, rq); + + q->nr_sorted--; + + boundary = q->end_sector; + stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED; + list_for_each_prev(entry, &q->queue_head) { + struct request *pos = list_entry_rq(entry); + + if ((rq->cmd_flags & REQ_DISCARD) != + (pos->cmd_flags & REQ_DISCARD)) + break; + if (rq_data_dir(rq) != rq_data_dir(pos)) + break; + if (pos->cmd_flags & stop_flags) + break; + if (blk_rq_pos(rq) >= boundary) { + if (blk_rq_pos(pos) < boundary) + continue; + } else { + if (blk_rq_pos(pos) >= boundary) + break; + } + if (blk_rq_pos(rq) >= blk_rq_pos(pos)) + break; + } + + list_add(&rq->queuelist, entry); +} +EXPORT_SYMBOL(elv_dispatch_sort); + +/* + * Insert rq into dispatch queue of q. Queue lock must be held on + * entry. rq is added to the back of the dispatch queue. To be used by + * specific elevators. + */ +void elv_dispatch_add_tail(struct request_queue *q, struct request *rq) +{ + if (q->last_merge == rq) + q->last_merge = NULL; + + elv_rqhash_del(q, rq); + + q->nr_sorted--; + + q->end_sector = rq_end_sector(rq); + q->boundary_rq = rq; + list_add_tail(&rq->queuelist, &q->queue_head); +} +EXPORT_SYMBOL(elv_dispatch_add_tail); + +int elv_merge(struct request_queue *q, struct request **req, struct bio *bio) +{ + struct elevator_queue *e = q->elevator; + struct request *__rq; + int ret; + + /* + * Levels of merges: + * nomerges: No merges at all attempted + * noxmerges: Only simple one-hit cache try + * merges: All merge tries attempted + */ + if (blk_queue_nomerges(q)) + return ELEVATOR_NO_MERGE; + + /* + * First try one-hit cache. + */ + if (q->last_merge) { + ret = elv_try_merge(q->last_merge, bio); + if (ret != ELEVATOR_NO_MERGE) { + *req = q->last_merge; + return ret; + } + } + + if (blk_queue_noxmerges(q)) + return ELEVATOR_NO_MERGE; + + /* + * See if our hash lookup can find a potential backmerge. 
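The three merge levels spelled out at the top of elv_merge() correspond to the queue's "nomerges" sysfs attribute: 0 allows all merge attempts, 1 keeps only the one-hit last_merge cache (noxmerges), and 2 disables merging entirely. A small user-space example selecting the middle setting; the device name and write permission on sysfs are assumptions about the running system:

#include <stdio.h>

int main(void)
{
	/* 0 = full merging, 1 = one-hit cache only, 2 = no merging */
	const char *path = "/sys/block/sda/queue/nomerges";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fprintf(f, "1\n");
	fclose(f);
	return 0;
}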
+ */ + __rq = elv_rqhash_find(q, bio->bi_sector); + if (__rq && elv_rq_merge_ok(__rq, bio)) { + *req = __rq; + return ELEVATOR_BACK_MERGE; + } + + if (e->ops->elevator_merge_fn) + return e->ops->elevator_merge_fn(q, req, bio); + + return ELEVATOR_NO_MERGE; +} + +void elv_merged_request(struct request_queue *q, struct request *rq, int type) +{ + struct elevator_queue *e = q->elevator; + + if (e->ops->elevator_merged_fn) + e->ops->elevator_merged_fn(q, rq, type); + + if (type == ELEVATOR_BACK_MERGE) + elv_rqhash_reposition(q, rq); + + q->last_merge = rq; +} + +void elv_merge_requests(struct request_queue *q, struct request *rq, + struct request *next) +{ + struct elevator_queue *e = q->elevator; + + if (e->ops->elevator_merge_req_fn) + e->ops->elevator_merge_req_fn(q, rq, next); + + elv_rqhash_reposition(q, rq); + elv_rqhash_del(q, next); + + q->nr_sorted--; + q->last_merge = rq; +} + +void elv_bio_merged(struct request_queue *q, struct request *rq, + struct bio *bio) +{ + struct elevator_queue *e = q->elevator; + + if (e->ops->elevator_bio_merged_fn) + e->ops->elevator_bio_merged_fn(q, rq, bio); +} + +void elv_requeue_request(struct request_queue *q, struct request *rq) +{ + /* + * it already went through dequeue, we need to decrement the + * in_flight count again + */ + if (blk_account_rq(rq)) { + q->in_flight[rq_is_sync(rq)]--; + if (rq->cmd_flags & REQ_SORTED) + elv_deactivate_rq(q, rq); + } + + rq->cmd_flags &= ~REQ_STARTED; + + elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE); +} + +void elv_drain_elevator(struct request_queue *q) +{ + static int printed; + while (q->elevator->ops->elevator_dispatch_fn(q, 1)) + ; + if (q->nr_sorted == 0) + return; + if (printed++ < 10) { + printk(KERN_ERR "%s: forced dispatching is broken " + "(nr_sorted=%u), please report this\n", + q->elevator->elevator_type->elevator_name, q->nr_sorted); + } +} + +/* + * Call with queue lock held, interrupts disabled + */ +void elv_quiesce_start(struct request_queue *q) +{ + if (!q->elevator) + return; + + queue_flag_set(QUEUE_FLAG_ELVSWITCH, q); + + /* + * make sure we don't have any requests in flight + */ + elv_drain_elevator(q); + while (q->rq.elvpriv) { + __blk_run_queue(q); + spin_unlock_irq(q->queue_lock); + msleep(10); + spin_lock_irq(q->queue_lock); + elv_drain_elevator(q); + } +} + +void elv_quiesce_end(struct request_queue *q) +{ + queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q); +} + +void elv_insert(struct request_queue *q, struct request *rq, int where) +{ + struct list_head *pos; + unsigned ordseq; + int unplug_it = 1; + + trace_block_rq_insert(q, rq); + + rq->q = q; + + switch (where) { + case ELEVATOR_INSERT_FRONT: + rq->cmd_flags |= REQ_SOFTBARRIER; + + list_add(&rq->queuelist, &q->queue_head); + break; + + case ELEVATOR_INSERT_BACK: + rq->cmd_flags |= REQ_SOFTBARRIER; + elv_drain_elevator(q); + list_add_tail(&rq->queuelist, &q->queue_head); + /* + * We kick the queue here for the following reasons. + * - The elevator might have returned NULL previously + * to delay requests and returned them now. As the + * queue wasn't empty before this request, ll_rw_blk + * won't run the queue on return, resulting in hang. + * - Usually, back inserted requests won't be merged + * with anything. There's no point in delaying queue + * processing. 
+ */ + __blk_run_queue(q); + break; + + case ELEVATOR_INSERT_SORT: + BUG_ON(rq->cmd_type != REQ_TYPE_FS && + !(rq->cmd_flags & REQ_DISCARD)); + rq->cmd_flags |= REQ_SORTED; + q->nr_sorted++; + if (rq_mergeable(rq)) { + elv_rqhash_add(q, rq); + if (!q->last_merge) + q->last_merge = rq; + } + + /* + * Some ioscheds (cfq) run q->request_fn directly, so + * rq cannot be accessed after calling + * elevator_add_req_fn. + */ + q->elevator->ops->elevator_add_req_fn(q, rq); + break; + + case ELEVATOR_INSERT_REQUEUE: + /* + * If ordered flush isn't in progress, we do front + * insertion; otherwise, requests should be requeued + * in ordseq order. + */ + rq->cmd_flags |= REQ_SOFTBARRIER; + + /* + * Most requeues happen because of a busy condition, + * don't force unplug of the queue for that case. + */ + unplug_it = 0; + + if (q->ordseq == 0) { + list_add(&rq->queuelist, &q->queue_head); + break; + } + + ordseq = blk_ordered_req_seq(rq); + + list_for_each(pos, &q->queue_head) { + struct request *pos_rq = list_entry_rq(pos); + if (ordseq <= blk_ordered_req_seq(pos_rq)) + break; + } + + list_add_tail(&rq->queuelist, pos); + break; + + default: + printk(KERN_ERR "%s: bad insertion point %d\n", + __func__, where); + BUG(); + } + + if (unplug_it && blk_queue_plugged(q)) { + int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC] + - queue_in_flight(q); + + if (nrq >= q->unplug_thresh) + __generic_unplug_device(q); + } +} + +void __elv_add_request(struct request_queue *q, struct request *rq, int where, + int plug) +{ + if (q->ordcolor) + rq->cmd_flags |= REQ_ORDERED_COLOR; + + if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) { + /* + * toggle ordered color + */ + if (rq->cmd_flags & REQ_HARDBARRIER) + q->ordcolor ^= 1; + + /* + * barriers implicitly indicate back insertion + */ + if (where == ELEVATOR_INSERT_SORT) + where = ELEVATOR_INSERT_BACK; + + /* + * this request is scheduling boundary, update + * end_sector + */ + if (rq->cmd_type == REQ_TYPE_FS || + (rq->cmd_flags & REQ_DISCARD)) { + q->end_sector = rq_end_sector(rq); + q->boundary_rq = rq; + } + } else if (!(rq->cmd_flags & REQ_ELVPRIV) && + where == ELEVATOR_INSERT_SORT) + where = ELEVATOR_INSERT_BACK; + + if (plug) + blk_plug_device(q); + + elv_insert(q, rq, where); +} +EXPORT_SYMBOL(__elv_add_request); + +void elv_add_request(struct request_queue *q, struct request *rq, int where, + int plug) +{ + unsigned long flags; + + spin_lock_irqsave(q->queue_lock, flags); + __elv_add_request(q, rq, where, plug); + spin_unlock_irqrestore(q->queue_lock, flags); +} +EXPORT_SYMBOL(elv_add_request); + +int elv_queue_empty(struct request_queue *q) +{ + struct elevator_queue *e = q->elevator; + + if (!list_empty(&q->queue_head)) + return 0; + + if (e->ops->elevator_queue_empty_fn) + return e->ops->elevator_queue_empty_fn(q); + + return 1; +} +EXPORT_SYMBOL(elv_queue_empty); + +struct request *elv_latter_request(struct request_queue *q, struct request *rq) +{ + struct elevator_queue *e = q->elevator; + + if (e->ops->elevator_latter_req_fn) + return e->ops->elevator_latter_req_fn(q, rq); + return NULL; +} + +struct request *elv_former_request(struct request_queue *q, struct request *rq) +{ + struct elevator_queue *e = q->elevator; + + if (e->ops->elevator_former_req_fn) + return e->ops->elevator_former_req_fn(q, rq); + return NULL; +} + +int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) +{ + struct elevator_queue *e = q->elevator; + + if (e->ops->elevator_set_req_fn) + return e->ops->elevator_set_req_fn(q, rq, 
gfp_mask); + + rq->elevator_private = NULL; + return 0; +} + +void elv_put_request(struct request_queue *q, struct request *rq) +{ + struct elevator_queue *e = q->elevator; + + if (e->ops->elevator_put_req_fn) + e->ops->elevator_put_req_fn(rq); +} + +int elv_may_queue(struct request_queue *q, int rw) +{ + struct elevator_queue *e = q->elevator; + + if (e->ops->elevator_may_queue_fn) + return e->ops->elevator_may_queue_fn(q, rw); + + return ELV_MQUEUE_MAY; +} + +void elv_abort_queue(struct request_queue *q) +{ + struct request *rq; + + while (!list_empty(&q->queue_head)) { + rq = list_entry_rq(q->queue_head.next); + rq->cmd_flags |= REQ_QUIET; + trace_block_rq_abort(q, rq); + /* + * Mark this request as started so we don't trigger + * any debug logic in the end I/O path. + */ + blk_start_request(rq); + __blk_end_request_all(rq, -EIO); + } +} +EXPORT_SYMBOL(elv_abort_queue); + +void elv_completed_request(struct request_queue *q, struct request *rq) +{ + struct elevator_queue *e = q->elevator; + + /* + * request is released from the driver, io must be done + */ + if (blk_account_rq(rq)) { + q->in_flight[rq_is_sync(rq)]--; + if ((rq->cmd_flags & REQ_SORTED) && + e->ops->elevator_completed_req_fn) + e->ops->elevator_completed_req_fn(q, rq); + } + + /* + * Check if the queue is waiting for fs requests to be + * drained for flush sequence. + */ + if (unlikely(q->ordseq)) { + struct request *next = NULL; + + if (!list_empty(&q->queue_head)) + next = list_entry_rq(q->queue_head.next); + + if (!queue_in_flight(q) && + blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN && + (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) { + blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0); + __blk_run_queue(q); + } + } +} + +#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr) + +static ssize_t +elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page) +{ + struct elv_fs_entry *entry = to_elv(attr); + struct elevator_queue *e; + ssize_t error; + + if (!entry->show) + return -EIO; + + e = container_of(kobj, struct elevator_queue, kobj); + mutex_lock(&e->sysfs_lock); + error = e->ops ? entry->show(e, page) : -ENOENT; + mutex_unlock(&e->sysfs_lock); + return error; +} + +static ssize_t +elv_attr_store(struct kobject *kobj, struct attribute *attr, + const char *page, size_t length) +{ + struct elv_fs_entry *entry = to_elv(attr); + struct elevator_queue *e; + ssize_t error; + + if (!entry->store) + return -EIO; + + e = container_of(kobj, struct elevator_queue, kobj); + mutex_lock(&e->sysfs_lock); + error = e->ops ? 
entry->store(e, page, length) : -ENOENT; + mutex_unlock(&e->sysfs_lock); + return error; +} + +static const struct sysfs_ops elv_sysfs_ops = { + .show = elv_attr_show, + .store = elv_attr_store, +}; + +static struct kobj_type elv_ktype = { + .sysfs_ops = &elv_sysfs_ops, + .release = elevator_release, +}; + +int elv_register_queue(struct request_queue *q) +{ + struct elevator_queue *e = q->elevator; + int error; + + error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched"); + if (!error) { + struct elv_fs_entry *attr = e->elevator_type->elevator_attrs; + if (attr) { + while (attr->attr.name) { + if (sysfs_create_file(&e->kobj, &attr->attr)) + break; + attr++; + } + } + kobject_uevent(&e->kobj, KOBJ_ADD); + e->registered = 1; + } + return error; +} +EXPORT_SYMBOL(elv_register_queue); + +static void __elv_unregister_queue(struct elevator_queue *e) +{ + kobject_uevent(&e->kobj, KOBJ_REMOVE); + kobject_del(&e->kobj); + e->registered = 0; +} + +void elv_unregister_queue(struct request_queue *q) +{ + if (q) + __elv_unregister_queue(q->elevator); +} +EXPORT_SYMBOL(elv_unregister_queue); + +void elv_register(struct elevator_type *e) +{ + char *def = ""; + + spin_lock(&elv_list_lock); + BUG_ON(elevator_find(e->elevator_name)); + list_add_tail(&e->list, &elv_list); + spin_unlock(&elv_list_lock); + + if (!strcmp(e->elevator_name, chosen_elevator) || + (!*chosen_elevator && + !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED))) + def = " (default)"; + + printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name, + def); +} +EXPORT_SYMBOL_GPL(elv_register); + +void elv_unregister(struct elevator_type *e) +{ + struct task_struct *g, *p; + + /* + * Iterate every thread in the process to remove the io contexts. + */ + if (e->ops.trim) { + read_lock(&tasklist_lock); + do_each_thread(g, p) { + task_lock(p); + if (p->io_context) + e->ops.trim(p->io_context); + task_unlock(p); + } while_each_thread(g, p); + read_unlock(&tasklist_lock); + } + + spin_lock(&elv_list_lock); + list_del_init(&e->list); + spin_unlock(&elv_list_lock); +} +EXPORT_SYMBOL_GPL(elv_unregister); + +/* + * switch to new_e io scheduler. be careful not to introduce deadlocks - + * we don't free the old io scheduler, before we have allocated what we + * need for the new one. this way we have a chance of going back to the old + * one, if the new one fails init for some reason. + */ +static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) +{ + struct elevator_queue *old_elevator, *e; + void *data; + int err; + + /* + * Allocate new elevator + */ + e = elevator_alloc(q, new_e); + if (!e) + return -ENOMEM; + + data = elevator_init_queue(q, e); + if (!data) { + kobject_put(&e->kobj); + return -ENOMEM; + } + + /* + * Turn on BYPASS and drain all requests w/ elevator private data + */ + spin_lock_irq(q->queue_lock); + elv_quiesce_start(q); + + /* + * Remember old elevator. + */ + old_elevator = q->elevator; + + /* + * attach and start new elevator + */ + elevator_attach(q, e, data); + + spin_unlock_irq(q->queue_lock); + + if (old_elevator->registered) { + __elv_unregister_queue(old_elevator); + + err = elv_register_queue(q); + if (err) + goto fail_register; + } + + /* + * finally exit old elevator and turn off BYPASS. 
+ */ + elevator_exit(old_elevator); + spin_lock_irq(q->queue_lock); + elv_quiesce_end(q); + spin_unlock_irq(q->queue_lock); + + blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name); + + return 0; + +fail_register: + /* + * switch failed, exit the new io scheduler and reattach the old + * one again (along with re-adding the sysfs dir) + */ + elevator_exit(e); + q->elevator = old_elevator; + elv_register_queue(q); + + spin_lock_irq(q->queue_lock); + queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q); + spin_unlock_irq(q->queue_lock); + + return err; +} + +/* + * Switch this queue to the given IO scheduler. + */ +int elevator_change(struct request_queue *q, const char *name) +{ + char elevator_name[ELV_NAME_MAX]; + struct elevator_type *e; + + if (!q->elevator) + return -ENXIO; + + strlcpy(elevator_name, name, sizeof(elevator_name)); + e = elevator_get(strstrip(elevator_name)); + if (!e) { + printk(KERN_ERR "elevator: type %s not found\n", elevator_name); + return -EINVAL; + } + + if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) { + elevator_put(e); + return 0; + } + + return elevator_switch(q, e); +} +EXPORT_SYMBOL(elevator_change); + +ssize_t elv_iosched_store(struct request_queue *q, const char *name, + size_t count) +{ + int ret; + + if (!q->elevator) + return count; + + ret = elevator_change(q, name); + if (!ret) + return count; + + printk(KERN_ERR "elevator: switch to %s failed\n", name); + return ret; +} + +ssize_t elv_iosched_show(struct request_queue *q, char *name) +{ + struct elevator_queue *e = q->elevator; + struct elevator_type *elv; + struct elevator_type *__e; + int len = 0; + + if (!q->elevator || !blk_queue_stackable(q)) + return sprintf(name, "none\n"); + + elv = e->elevator_type; + + spin_lock(&elv_list_lock); + list_for_each_entry(__e, &elv_list, list) { + if (!strcmp(elv->elevator_name, __e->elevator_name)) + len += sprintf(name+len, "[%s] ", elv->elevator_name); + else + len += sprintf(name+len, "%s ", __e->elevator_name); + } + spin_unlock(&elv_list_lock); + + len += sprintf(len+name, "\n"); + return len; +} + +struct request *elv_rb_former_request(struct request_queue *q, + struct request *rq) +{ + struct rb_node *rbprev = rb_prev(&rq->rb_node); + + if (rbprev) + return rb_entry_rq(rbprev); + + return NULL; +} +EXPORT_SYMBOL(elv_rb_former_request); + +struct request *elv_rb_latter_request(struct request_queue *q, + struct request *rq) +{ + struct rb_node *rbnext = rb_next(&rq->rb_node); + + if (rbnext) + return rb_entry_rq(rbnext); + + return NULL; +} +EXPORT_SYMBOL(elv_rb_latter_request); diff --git a/block/genhd.c b/block/genhd.c new file mode 100644 index 00000000..f33c56c3 --- /dev/null +++ b/block/genhd.c @@ -0,0 +1,1296 @@ +/* + * gendisk handling + */ + +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/genhd.h> +#include <linux/kdev_t.h> +#include <linux/kernel.h> +#include <linux/blkdev.h> +#include <linux/init.h> +#include <linux/spinlock.h> +#include <linux/proc_fs.h> +#include <linux/seq_file.h> +#include <linux/slab.h> +#include <linux/kmod.h> +#include <linux/kobj_map.h> +#include <linux/buffer_head.h> +#include <linux/mutex.h> +#include <linux/idr.h> + +#include "blk.h" + +static DEFINE_MUTEX(block_class_lock); +#ifndef CONFIG_SYSFS_DEPRECATED +struct kobject *block_depr; +#endif + +/* for extended dynamic devt allocation, currently only one major is used */ +#define MAX_EXT_DEVT (1 << MINORBITS) + +/* For extended devt allocation. 
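elv_iosched_store() and elv_iosched_show() above back the per-queue "scheduler" attribute, so the whole switch path (elevator_change() plus the quiesce and re-register machinery) can be driven from user space; the show side prints the active scheduler in brackets, e.g. "noop [deadline] cfq". A minimal example, assuming the device name and root privileges:

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/block/sda/queue/scheduler";
	char line[256];
	FILE *f;

	/* request a switch to the deadline elevator */
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	fputs("deadline\n", f);
	fclose(f);

	/* read it back; the active scheduler is shown in brackets */
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		printf("%s", line);
	fclose(f);
	return 0;
}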
ext_devt_mutex prevents look up + * results from going away underneath its user. + */ +static DEFINE_MUTEX(ext_devt_mutex); +static DEFINE_IDR(ext_devt_idr); + +static struct device_type disk_type; + +/** + * disk_get_part - get partition + * @disk: disk to look partition from + * @partno: partition number + * + * Look for partition @partno from @disk. If found, increment + * reference count and return it. + * + * CONTEXT: + * Don't care. + * + * RETURNS: + * Pointer to the found partition on success, NULL if not found. + */ +struct hd_struct *disk_get_part(struct gendisk *disk, int partno) +{ + struct hd_struct *part = NULL; + struct disk_part_tbl *ptbl; + + if (unlikely(partno < 0)) + return NULL; + + rcu_read_lock(); + + ptbl = rcu_dereference(disk->part_tbl); + if (likely(partno < ptbl->len)) { + part = rcu_dereference(ptbl->part[partno]); + if (part) + get_device(part_to_dev(part)); + } + + rcu_read_unlock(); + + return part; +} +EXPORT_SYMBOL_GPL(disk_get_part); + +/** + * disk_part_iter_init - initialize partition iterator + * @piter: iterator to initialize + * @disk: disk to iterate over + * @flags: DISK_PITER_* flags + * + * Initialize @piter so that it iterates over partitions of @disk. + * + * CONTEXT: + * Don't care. + */ +void disk_part_iter_init(struct disk_part_iter *piter, struct gendisk *disk, + unsigned int flags) +{ + struct disk_part_tbl *ptbl; + + rcu_read_lock(); + ptbl = rcu_dereference(disk->part_tbl); + + piter->disk = disk; + piter->part = NULL; + + if (flags & DISK_PITER_REVERSE) + piter->idx = ptbl->len - 1; + else if (flags & (DISK_PITER_INCL_PART0 | DISK_PITER_INCL_EMPTY_PART0)) + piter->idx = 0; + else + piter->idx = 1; + + piter->flags = flags; + + rcu_read_unlock(); +} +EXPORT_SYMBOL_GPL(disk_part_iter_init); + +/** + * disk_part_iter_next - proceed iterator to the next partition and return it + * @piter: iterator of interest + * + * Proceed @piter to the next partition and return it. + * + * CONTEXT: + * Don't care. + */ +struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter) +{ + struct disk_part_tbl *ptbl; + int inc, end; + + /* put the last partition */ + disk_put_part(piter->part); + piter->part = NULL; + + /* get part_tbl */ + rcu_read_lock(); + ptbl = rcu_dereference(piter->disk->part_tbl); + + /* determine iteration parameters */ + if (piter->flags & DISK_PITER_REVERSE) { + inc = -1; + if (piter->flags & (DISK_PITER_INCL_PART0 | + DISK_PITER_INCL_EMPTY_PART0)) + end = -1; + else + end = 0; + } else { + inc = 1; + end = ptbl->len; + } + + /* iterate to the next partition */ + for (; piter->idx != end; piter->idx += inc) { + struct hd_struct *part; + + part = rcu_dereference(ptbl->part[piter->idx]); + if (!part) + continue; + if (!part->nr_sects && + !(piter->flags & DISK_PITER_INCL_EMPTY) && + !(piter->flags & DISK_PITER_INCL_EMPTY_PART0 && + piter->idx == 0)) + continue; + + get_device(part_to_dev(part)); + piter->part = part; + piter->idx += inc; + break; + } + + rcu_read_unlock(); + + return piter->part; +} +EXPORT_SYMBOL_GPL(disk_part_iter_next); + +/** + * disk_part_iter_exit - finish up partition iteration + * @piter: iter of interest + * + * Called when iteration is over. Cleans up @piter. + * + * CONTEXT: + * Don't care. 
+ */ +void disk_part_iter_exit(struct disk_part_iter *piter) +{ + disk_put_part(piter->part); + piter->part = NULL; +} +EXPORT_SYMBOL_GPL(disk_part_iter_exit); + +static inline int sector_in_part(struct hd_struct *part, sector_t sector) +{ + return part->start_sect <= sector && + sector < part->start_sect + part->nr_sects; +} + +/** + * disk_map_sector_rcu - map sector to partition + * @disk: gendisk of interest + * @sector: sector to map + * + * Find out which partition @sector maps to on @disk. This is + * primarily used for stats accounting. + * + * CONTEXT: + * RCU read locked. The returned partition pointer is valid only + * while preemption is disabled. + * + * RETURNS: + * Found partition on success, part0 is returned if no partition matches + */ +struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector) +{ + struct disk_part_tbl *ptbl; + struct hd_struct *part; + int i; + + ptbl = rcu_dereference(disk->part_tbl); + + part = rcu_dereference(ptbl->last_lookup); + if (part && sector_in_part(part, sector)) + return part; + + for (i = 1; i < ptbl->len; i++) { + part = rcu_dereference(ptbl->part[i]); + + if (part && sector_in_part(part, sector)) { + rcu_assign_pointer(ptbl->last_lookup, part); + return part; + } + } + return &disk->part0; +} +EXPORT_SYMBOL_GPL(disk_map_sector_rcu); + +/* + * Can be deleted altogether. Later. + * + */ +static struct blk_major_name { + struct blk_major_name *next; + int major; + char name[16]; +} *major_names[BLKDEV_MAJOR_HASH_SIZE]; + +/* index in the above - for now: assume no multimajor ranges */ +static inline int major_to_index(int major) +{ + return major % BLKDEV_MAJOR_HASH_SIZE; +} + +#ifdef CONFIG_PROC_FS +void blkdev_show(struct seq_file *seqf, off_t offset) +{ + struct blk_major_name *dp; + + if (offset < BLKDEV_MAJOR_HASH_SIZE) { + mutex_lock(&block_class_lock); + for (dp = major_names[offset]; dp; dp = dp->next) + seq_printf(seqf, "%3d %s\n", dp->major, dp->name); + mutex_unlock(&block_class_lock); + } +} +#endif /* CONFIG_PROC_FS */ + +/** + * register_blkdev - register a new block device + * + * @major: the requested major device number [1..255]. If @major=0, try to + * allocate any unused major number. + * @name: the name of the new block device as a zero terminated string + * + * The @name must be unique within the system. + * + * The return value depends on the @major input parameter. 
+ * - if a major device number was requested in range [1..255] then the + * function returns zero on success, or a negative error code + * - if any unused major number was requested with @major=0 parameter + * then the return value is the allocated major number in range + * [1..255] or a negative error code otherwise + */ +int register_blkdev(unsigned int major, const char *name) +{ + struct blk_major_name **n, *p; + int index, ret = 0; + + mutex_lock(&block_class_lock); + + /* temporary */ + if (major == 0) { + for (index = ARRAY_SIZE(major_names)-1; index > 0; index--) { + if (major_names[index] == NULL) + break; + } + + if (index == 0) { + printk("register_blkdev: failed to get major for %s\n", + name); + ret = -EBUSY; + goto out; + } + major = index; + ret = major; + } + + p = kmalloc(sizeof(struct blk_major_name), GFP_KERNEL); + if (p == NULL) { + ret = -ENOMEM; + goto out; + } + + p->major = major; + strlcpy(p->name, name, sizeof(p->name)); + p->next = NULL; + index = major_to_index(major); + + for (n = &major_names[index]; *n; n = &(*n)->next) { + if ((*n)->major == major) + break; + } + if (!*n) + *n = p; + else + ret = -EBUSY; + + if (ret < 0) { + printk("register_blkdev: cannot get major %d for %s\n", + major, name); + kfree(p); + } +out: + mutex_unlock(&block_class_lock); + return ret; +} + +EXPORT_SYMBOL(register_blkdev); + +void unregister_blkdev(unsigned int major, const char *name) +{ + struct blk_major_name **n; + struct blk_major_name *p = NULL; + int index = major_to_index(major); + + mutex_lock(&block_class_lock); + for (n = &major_names[index]; *n; n = &(*n)->next) + if ((*n)->major == major) + break; + if (!*n || strcmp((*n)->name, name)) { + WARN_ON(1); + } else { + p = *n; + *n = p->next; + } + mutex_unlock(&block_class_lock); + kfree(p); +} + +EXPORT_SYMBOL(unregister_blkdev); + +static struct kobj_map *bdev_map; + +/** + * blk_mangle_minor - scatter minor numbers apart + * @minor: minor number to mangle + * + * Scatter consecutively allocated @minor number apart if MANGLE_DEVT + * is enabled. Mangling twice gives the original value. + * + * RETURNS: + * Mangled value. + * + * CONTEXT: + * Don't care. + */ +static int blk_mangle_minor(int minor) +{ +#ifdef CONFIG_DEBUG_BLOCK_EXT_DEVT + int i; + + for (i = 0; i < MINORBITS / 2; i++) { + int low = minor & (1 << i); + int high = minor & (1 << (MINORBITS - 1 - i)); + int distance = MINORBITS - 1 - 2 * i; + + minor ^= low | high; /* clear both bits */ + low <<= distance; /* swap the positions */ + high >>= distance; + minor |= low | high; /* and set */ + } +#endif + return minor; +} + +/** + * blk_alloc_devt - allocate a dev_t for a partition + * @part: partition to allocate dev_t for + * @devt: out parameter for resulting dev_t + * + * Allocate a dev_t for block device. + * + * RETURNS: + * 0 on success, allocated dev_t is returned in *@devt. -errno on + * failure. + * + * CONTEXT: + * Might sleep. + */ +int blk_alloc_devt(struct hd_struct *part, dev_t *devt) +{ + struct gendisk *disk = part_to_disk(part); + int idx, rc; + + /* in consecutive minor range? 
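register_blkdev() keeps its long-standing contract: pass a major in [1..255] to claim it (0 is returned on success), or pass 0 to have a free major allocated and returned. A hedged sketch of how a driver's init and exit paths typically use the pair; the module and device name here are made up for illustration only:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/fs.h>

static int sketch_major;

static int __init sketch_init(void)
{
	/* 0 asks the block core to pick a free major for "sketchblk" */
	sketch_major = register_blkdev(0, "sketchblk");
	if (sketch_major < 0)
		return sketch_major;

	printk(KERN_INFO "sketchblk: got major %d\n", sketch_major);
	return 0;
}

static void __exit sketch_exit(void)
{
	unregister_blkdev(sketch_major, "sketchblk");
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");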
*/ + if (part->partno < disk->minors) { + *devt = MKDEV(disk->major, disk->first_minor + part->partno); + return 0; + } + + /* allocate ext devt */ + do { + if (!idr_pre_get(&ext_devt_idr, GFP_KERNEL)) + return -ENOMEM; + rc = idr_get_new(&ext_devt_idr, part, &idx); + } while (rc == -EAGAIN); + + if (rc) + return rc; + + if (idx > MAX_EXT_DEVT) { + idr_remove(&ext_devt_idr, idx); + return -EBUSY; + } + + *devt = MKDEV(BLOCK_EXT_MAJOR, blk_mangle_minor(idx)); + return 0; +} + +/** + * blk_free_devt - free a dev_t + * @devt: dev_t to free + * + * Free @devt which was allocated using blk_alloc_devt(). + * + * CONTEXT: + * Might sleep. + */ +void blk_free_devt(dev_t devt) +{ + might_sleep(); + + if (devt == MKDEV(0, 0)) + return; + + if (MAJOR(devt) == BLOCK_EXT_MAJOR) { + mutex_lock(&ext_devt_mutex); + idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt))); + mutex_unlock(&ext_devt_mutex); + } +} + +static char *bdevt_str(dev_t devt, char *buf) +{ + if (MAJOR(devt) <= 0xff && MINOR(devt) <= 0xff) { + char tbuf[BDEVT_SIZE]; + snprintf(tbuf, BDEVT_SIZE, "%02x%02x", MAJOR(devt), MINOR(devt)); + snprintf(buf, BDEVT_SIZE, "%-9s", tbuf); + } else + snprintf(buf, BDEVT_SIZE, "%03x:%05x", MAJOR(devt), MINOR(devt)); + + return buf; +} + +/* + * Register device numbers dev..(dev+range-1) + * range must be nonzero + * The hash chain is sorted on range, so that subranges can override. + */ +void blk_register_region(dev_t devt, unsigned long range, struct module *module, + struct kobject *(*probe)(dev_t, int *, void *), + int (*lock)(dev_t, void *), void *data) +{ + kobj_map(bdev_map, devt, range, module, probe, lock, data); +} + +EXPORT_SYMBOL(blk_register_region); + +void blk_unregister_region(dev_t devt, unsigned long range) +{ + kobj_unmap(bdev_map, devt, range); +} + +EXPORT_SYMBOL(blk_unregister_region); + +static struct kobject *exact_match(dev_t devt, int *partno, void *data) +{ + struct gendisk *p = data; + + return &disk_to_dev(p)->kobj; +} + +static int exact_lock(dev_t devt, void *data) +{ + struct gendisk *p = data; + + if (!get_disk(p)) + return -1; + return 0; +} + +/** + * add_disk - add partitioning information to kernel list + * @disk: per-device partitioning information + * + * This function registers the partitioning information in @disk + * with the kernel. + * + * FIXME: error handling + */ +void add_disk(struct gendisk *disk) +{ + struct backing_dev_info *bdi; + dev_t devt; + int retval; + + /* minors == 0 indicates to use ext devt from part0 and should + * be accompanied with EXT_DEVT flag. Make sure all + * parameters make sense. + */ + WARN_ON(disk->minors && !(disk->major || disk->first_minor)); + WARN_ON(!disk->minors && !(disk->flags & GENHD_FL_EXT_DEVT)); + + disk->flags |= GENHD_FL_UP; + + retval = blk_alloc_devt(&disk->part0, &devt); + if (retval) { + WARN_ON(1); + return; + } + disk_to_dev(disk)->devt = devt; + + /* ->major and ->first_minor aren't supposed to be + * dereferenced from here on, but set them just in case. 
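blk_alloc_devt() and blk_free_devt() above deal in the kernel-internal dev_t, where MKDEV() packs the major above MINORBITS (20) bits of minor; note this differs from the user-space encoding produced by glibc's makedev(). A stand-alone illustration of the packing, with the macros re-declared locally to match the kernel's kdev_t.h semantics:

#include <stdio.h>

#define MINORBITS	20
#define MINORMASK	((1U << MINORBITS) - 1)
#define MKDEV(ma, mi)	(((ma) << MINORBITS) | (mi))
#define MAJOR(dev)	((unsigned int)((dev) >> MINORBITS))
#define MINOR(dev)	((unsigned int)((dev) & MINORMASK))

int main(void)
{
	unsigned int dev = MKDEV(8, 1);	/* e.g. sda1 */

	printf("dev_t 0x%x -> major %u, minor %u\n", dev, MAJOR(dev), MINOR(dev));
	return 0;
}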
+ */ + disk->major = MAJOR(devt); + disk->first_minor = MINOR(devt); + + /* Register BDI before referencing it from bdev */ + bdi = &disk->queue->backing_dev_info; + bdi_register_dev(bdi, disk_devt(disk)); + + blk_register_region(disk_devt(disk), disk->minors, NULL, + exact_match, exact_lock, disk); + register_disk(disk); + blk_register_queue(disk); + + retval = sysfs_create_link(&disk_to_dev(disk)->kobj, &bdi->dev->kobj, + "bdi"); + WARN_ON(retval); +} + +EXPORT_SYMBOL(add_disk); +EXPORT_SYMBOL(del_gendisk); /* in partitions/check.c */ + +void unlink_gendisk(struct gendisk *disk) +{ + sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi"); + bdi_unregister(&disk->queue->backing_dev_info); + blk_unregister_queue(disk); + blk_unregister_region(disk_devt(disk), disk->minors); +} + +/** + * get_gendisk - get partitioning information for a given device + * @devt: device to get partitioning information for + * @partno: returned partition index + * + * This function gets the structure containing partitioning + * information for the given device @devt. + */ +struct gendisk *get_gendisk(dev_t devt, int *partno) +{ + struct gendisk *disk = NULL; + + if (MAJOR(devt) != BLOCK_EXT_MAJOR) { + struct kobject *kobj; + + kobj = kobj_lookup(bdev_map, devt, partno); + if (kobj) + disk = dev_to_disk(kobj_to_dev(kobj)); + } else { + struct hd_struct *part; + + mutex_lock(&ext_devt_mutex); + part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt))); + if (part && get_disk(part_to_disk(part))) { + *partno = part->partno; + disk = part_to_disk(part); + } + mutex_unlock(&ext_devt_mutex); + } + + return disk; +} +EXPORT_SYMBOL(get_gendisk); + +/** + * bdget_disk - do bdget() by gendisk and partition number + * @disk: gendisk of interest + * @partno: partition number + * + * Find partition @partno from @disk, do bdget() on it. + * + * CONTEXT: + * Don't care. + * + * RETURNS: + * Resulting block_device on success, NULL on failure. + */ +struct block_device *bdget_disk(struct gendisk *disk, int partno) +{ + struct hd_struct *part; + struct block_device *bdev = NULL; + + part = disk_get_part(disk, partno); + if (part) + bdev = bdget(part_devt(part)); + disk_put_part(part); + + return bdev; +} +EXPORT_SYMBOL(bdget_disk); + +/* + * print a full list of all partitions - intended for places where the root + * filesystem can't be mounted and thus to give the victim some idea of what + * went wrong + */ +void __init printk_all_partitions(void) +{ + struct class_dev_iter iter; + struct device *dev; + + class_dev_iter_init(&iter, &block_class, NULL, &disk_type); + while ((dev = class_dev_iter_next(&iter))) { + struct gendisk *disk = dev_to_disk(dev); + struct disk_part_iter piter; + struct hd_struct *part; + char name_buf[BDEVNAME_SIZE]; + char devt_buf[BDEVT_SIZE]; + + /* + * Don't show empty devices or things that have been + * surpressed + */ + if (get_capacity(disk) == 0 || + (disk->flags & GENHD_FL_SUPPRESS_PARTITION_INFO)) + continue; + + /* + * Note, unlike /proc/partitions, I am showing the + * numbers in hex - the same format as the root= + * option takes. + */ + disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0); + while ((part = disk_part_iter_next(&piter))) { + bool is_part0 = part == &disk->part0; + + printk("%s%s %10llu %s", is_part0 ? 
"" : " ", + bdevt_str(part_devt(part), devt_buf), + (unsigned long long)part->nr_sects >> 1, + disk_name(disk, part->partno, name_buf)); + if (is_part0) { + if (disk->driverfs_dev != NULL && + disk->driverfs_dev->driver != NULL) + printk(" driver: %s\n", + disk->driverfs_dev->driver->name); + else + printk(" (driver?)\n"); + } else + printk("\n"); + } + disk_part_iter_exit(&piter); + } + class_dev_iter_exit(&iter); +} + +#ifdef CONFIG_PROC_FS +/* iterator */ +static void *disk_seqf_start(struct seq_file *seqf, loff_t *pos) +{ + loff_t skip = *pos; + struct class_dev_iter *iter; + struct device *dev; + + iter = kmalloc(sizeof(*iter), GFP_KERNEL); + if (!iter) + return ERR_PTR(-ENOMEM); + + seqf->private = iter; + class_dev_iter_init(iter, &block_class, NULL, &disk_type); + do { + dev = class_dev_iter_next(iter); + if (!dev) + return NULL; + } while (skip--); + + return dev_to_disk(dev); +} + +static void *disk_seqf_next(struct seq_file *seqf, void *v, loff_t *pos) +{ + struct device *dev; + + (*pos)++; + dev = class_dev_iter_next(seqf->private); + if (dev) + return dev_to_disk(dev); + + return NULL; +} + +static void disk_seqf_stop(struct seq_file *seqf, void *v) +{ + struct class_dev_iter *iter = seqf->private; + + /* stop is called even after start failed :-( */ + if (iter) { + class_dev_iter_exit(iter); + kfree(iter); + } +} + +static void *show_partition_start(struct seq_file *seqf, loff_t *pos) +{ + static void *p; + + p = disk_seqf_start(seqf, pos); + if (!IS_ERR(p) && p && !*pos) + seq_puts(seqf, "major minor #blocks name\n\n"); + return p; +} + +static int show_partition(struct seq_file *seqf, void *v) +{ + struct gendisk *sgp = v; + struct disk_part_iter piter; + struct hd_struct *part; + char buf[BDEVNAME_SIZE]; + + /* Don't show non-partitionable removeable devices or empty devices */ + if (!get_capacity(sgp) || (!disk_partitionable(sgp) && + (sgp->flags & GENHD_FL_REMOVABLE))) + return 0; + if (sgp->flags & GENHD_FL_SUPPRESS_PARTITION_INFO) + return 0; + + /* show the full disk and all non-0 size partitions of it */ + disk_part_iter_init(&piter, sgp, DISK_PITER_INCL_PART0); + while ((part = disk_part_iter_next(&piter))) + seq_printf(seqf, "%4d %7d %10llu %s\n", + MAJOR(part_devt(part)), MINOR(part_devt(part)), + (unsigned long long)part->nr_sects >> 1, + disk_name(sgp, part->partno, buf)); + disk_part_iter_exit(&piter); + + return 0; +} + +static const struct seq_operations partitions_op = { + .start = show_partition_start, + .next = disk_seqf_next, + .stop = disk_seqf_stop, + .show = show_partition +}; + +static int partitions_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &partitions_op); +} + +static const struct file_operations proc_partitions_operations = { + .open = partitions_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; +#endif + + +static struct kobject *base_probe(dev_t devt, int *partno, void *data) +{ + if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0) + /* Make old-style 2.4 aliases work */ + request_module("block-major-%d", MAJOR(devt)); + return NULL; +} + +static int __init genhd_device_init(void) +{ + int error; + + block_class.dev_kobj = sysfs_dev_block_kobj; + error = class_register(&block_class); + if (unlikely(error)) { + printk("Error: class register\n"); + return error; + } + bdev_map = kobj_map_init(base_probe, &block_class_lock); + blk_dev_init(); + + register_blkdev(BLOCK_EXT_MAJOR, "blkext"); + +#ifndef CONFIG_SYSFS_DEPRECATED + /* create top-level block dir */ + block_depr 
= kobject_create_and_add("block", NULL); +#endif + return 0; +} + +#ifdef CONFIG_FAST_RESUME +beforeresume_initcall(genhd_device_init); +#else +subsys_initcall(genhd_device_init); +#endif + +static ssize_t disk_range_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct gendisk *disk = dev_to_disk(dev); + + return sprintf(buf, "%d\n", disk->minors); +} + +static ssize_t disk_ext_range_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct gendisk *disk = dev_to_disk(dev); + + return sprintf(buf, "%d\n", disk_max_parts(disk)); +} + +static ssize_t disk_removable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct gendisk *disk = dev_to_disk(dev); + + return sprintf(buf, "%d\n", + (disk->flags & GENHD_FL_REMOVABLE ? 1 : 0)); +} + +static ssize_t disk_ro_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct gendisk *disk = dev_to_disk(dev); + + return sprintf(buf, "%d\n", get_disk_ro(disk) ? 1 : 0); +} + +static ssize_t disk_capability_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct gendisk *disk = dev_to_disk(dev); + + return sprintf(buf, "%x\n", disk->flags); +} + +static ssize_t disk_alignment_offset_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct gendisk *disk = dev_to_disk(dev); + + return sprintf(buf, "%d\n", queue_alignment_offset(disk->queue)); +} + +static ssize_t disk_discard_alignment_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct gendisk *disk = dev_to_disk(dev); + + return sprintf(buf, "%d\n", queue_discard_alignment(disk->queue)); +} + +static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL); +static DEVICE_ATTR(ext_range, S_IRUGO, disk_ext_range_show, NULL); +static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL); +static DEVICE_ATTR(ro, S_IRUGO, disk_ro_show, NULL); +static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL); +static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL); +static DEVICE_ATTR(discard_alignment, S_IRUGO, disk_discard_alignment_show, + NULL); +static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL); +static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL); +static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL); +#ifdef CONFIG_FAIL_MAKE_REQUEST +static struct device_attribute dev_attr_fail = + __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store); +#endif +#ifdef CONFIG_FAIL_IO_TIMEOUT +static struct device_attribute dev_attr_fail_timeout = + __ATTR(io-timeout-fail, S_IRUGO|S_IWUSR, part_timeout_show, + part_timeout_store); +#endif + +static struct attribute *disk_attrs[] = { + &dev_attr_range.attr, + &dev_attr_ext_range.attr, + &dev_attr_removable.attr, + &dev_attr_ro.attr, + &dev_attr_size.attr, + &dev_attr_alignment_offset.attr, + &dev_attr_discard_alignment.attr, + &dev_attr_capability.attr, + &dev_attr_stat.attr, + &dev_attr_inflight.attr, +#ifdef CONFIG_FAIL_MAKE_REQUEST + &dev_attr_fail.attr, +#endif +#ifdef CONFIG_FAIL_IO_TIMEOUT + &dev_attr_fail_timeout.attr, +#endif + NULL +}; + +static struct attribute_group disk_attr_group = { + .attrs = disk_attrs, +}; + +static const struct attribute_group *disk_attr_groups[] = { + &disk_attr_group, + NULL +}; + +static void disk_free_ptbl_rcu_cb(struct rcu_head *head) +{ + struct disk_part_tbl *ptbl = + container_of(head, struct disk_part_tbl, rcu_head); + + kfree(ptbl); +} + +/** + * disk_replace_part_tbl - replace 
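The attribute table above is what populates /sys/block/<disk>/ with range, removable, ro, size, stat and the rest; "size" reports the capacity in 512-byte sectors regardless of the device's logical block size. A small user-space reader, with the device name as an assumption:

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/block/sda/size";
	unsigned long long sectors;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%llu", &sectors) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	/* the attribute is always in 512-byte units */
	printf("%llu sectors = %llu bytes\n", sectors, sectors * 512ULL);
	return 0;
}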
disk->part_tbl in RCU-safe way + * @disk: disk to replace part_tbl for + * @new_ptbl: new part_tbl to install + * + * Replace disk->part_tbl with @new_ptbl in RCU-safe way. The + * original ptbl is freed using RCU callback. + * + * LOCKING: + * Matching bd_mutx locked. + */ +static void disk_replace_part_tbl(struct gendisk *disk, + struct disk_part_tbl *new_ptbl) +{ + struct disk_part_tbl *old_ptbl = disk->part_tbl; + + rcu_assign_pointer(disk->part_tbl, new_ptbl); + + if (old_ptbl) { + rcu_assign_pointer(old_ptbl->last_lookup, NULL); + call_rcu(&old_ptbl->rcu_head, disk_free_ptbl_rcu_cb); + } +} + +/** + * disk_expand_part_tbl - expand disk->part_tbl + * @disk: disk to expand part_tbl for + * @partno: expand such that this partno can fit in + * + * Expand disk->part_tbl such that @partno can fit in. disk->part_tbl + * uses RCU to allow unlocked dereferencing for stats and other stuff. + * + * LOCKING: + * Matching bd_mutex locked, might sleep. + * + * RETURNS: + * 0 on success, -errno on failure. + */ +int disk_expand_part_tbl(struct gendisk *disk, int partno) +{ + struct disk_part_tbl *old_ptbl = disk->part_tbl; + struct disk_part_tbl *new_ptbl; + int len = old_ptbl ? old_ptbl->len : 0; + int target = partno + 1; + size_t size; + int i; + + /* disk_max_parts() is zero during initialization, ignore if so */ + if (disk_max_parts(disk) && target > disk_max_parts(disk)) + return -EINVAL; + + if (target <= len) + return 0; + + size = sizeof(*new_ptbl) + target * sizeof(new_ptbl->part[0]); + new_ptbl = kzalloc_node(size, GFP_KERNEL, disk->node_id); + if (!new_ptbl) + return -ENOMEM; + + new_ptbl->len = target; + + for (i = 0; i < len; i++) + rcu_assign_pointer(new_ptbl->part[i], old_ptbl->part[i]); + + disk_replace_part_tbl(disk, new_ptbl); + return 0; +} + +static void disk_release(struct device *dev) +{ + struct gendisk *disk = dev_to_disk(dev); + + kfree(disk->random); + disk_replace_part_tbl(disk, NULL); + free_part_stats(&disk->part0); + kfree(disk); +} +struct class block_class = { + .name = "block", +}; + +static char *block_devnode(struct device *dev, mode_t *mode) +{ + struct gendisk *disk = dev_to_disk(dev); + + if (disk->devnode) + return disk->devnode(disk, mode); + return NULL; +} + +static struct device_type disk_type = { + .name = "disk", + .groups = disk_attr_groups, + .release = disk_release, + .devnode = block_devnode, +}; + +#ifdef CONFIG_PROC_FS +/* + * aggregate disk stat collector. Uses the same stats that the sysfs + * entries do, above, but makes them available through one seq_file. + * + * The output looks suspiciously like /proc/partitions with a bunch of + * extra fields. 
+ */ +static int diskstats_show(struct seq_file *seqf, void *v) +{ + struct gendisk *gp = v; + struct disk_part_iter piter; + struct hd_struct *hd; + char buf[BDEVNAME_SIZE]; + int cpu; + + /* + if (&disk_to_dev(gp)->kobj.entry == block_class.devices.next) + seq_puts(seqf, "major minor name" + " rio rmerge rsect ruse wio wmerge " + "wsect wuse running use aveq" + "\n\n"); + */ + + disk_part_iter_init(&piter, gp, DISK_PITER_INCL_EMPTY_PART0); + while ((hd = disk_part_iter_next(&piter))) { + cpu = part_stat_lock(); + part_round_stats(cpu, hd); + part_stat_unlock(); + seq_printf(seqf, "%4d %7d %s %lu %lu %llu " + "%u %lu %lu %llu %u %u %u %u\n", + MAJOR(part_devt(hd)), MINOR(part_devt(hd)), + disk_name(gp, hd->partno, buf), + part_stat_read(hd, ios[0]), + part_stat_read(hd, merges[0]), + (unsigned long long)part_stat_read(hd, sectors[0]), + jiffies_to_msecs(part_stat_read(hd, ticks[0])), + part_stat_read(hd, ios[1]), + part_stat_read(hd, merges[1]), + (unsigned long long)part_stat_read(hd, sectors[1]), + jiffies_to_msecs(part_stat_read(hd, ticks[1])), + part_in_flight(hd), + jiffies_to_msecs(part_stat_read(hd, io_ticks)), + jiffies_to_msecs(part_stat_read(hd, time_in_queue)) + ); + } + disk_part_iter_exit(&piter); + + return 0; +} + +static const struct seq_operations diskstats_op = { + .start = disk_seqf_start, + .next = disk_seqf_next, + .stop = disk_seqf_stop, + .show = diskstats_show +}; + +static int diskstats_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &diskstats_op); +} + +static const struct file_operations proc_diskstats_operations = { + .open = diskstats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int __init proc_genhd_init(void) +{ + proc_create("diskstats", 0, NULL, &proc_diskstats_operations); + proc_create("partitions", 0, NULL, &proc_partitions_operations); + return 0; +} +module_init(proc_genhd_init); +#endif /* CONFIG_PROC_FS */ + +static void media_change_notify_thread(struct work_struct *work) +{ + struct gendisk *gd = container_of(work, struct gendisk, async_notify); + char event[] = "MEDIA_CHANGE=1"; + char *envp[] = { event, NULL }; + + /* + * set enviroment vars to indicate which event this is for + * so that user space will know to go check the media status. + */ + kobject_uevent_env(&disk_to_dev(gd)->kobj, KOBJ_CHANGE, envp); + put_device(gd->driverfs_dev); +} + +#if 0 +void genhd_media_change_notify(struct gendisk *disk) +{ + get_device(disk->driverfs_dev); + schedule_work(&disk->async_notify); +} +EXPORT_SYMBOL_GPL(genhd_media_change_notify); +#endif /* 0 */ + +dev_t blk_lookup_devt(const char *name, int partno) +{ + dev_t devt = MKDEV(0, 0); + struct class_dev_iter iter; + struct device *dev; + + class_dev_iter_init(&iter, &block_class, NULL, &disk_type); + while ((dev = class_dev_iter_next(&iter))) { + struct gendisk *disk = dev_to_disk(dev); + struct hd_struct *part; + + if (strcmp(dev_name(dev), name)) + continue; + + if (partno < disk->minors) { + /* We need to return the right devno, even + * if the partition doesn't exist yet. 
+ */ + devt = MKDEV(MAJOR(dev->devt), + MINOR(dev->devt) + partno); + break; + } + part = disk_get_part(disk, partno); + if (part) { + devt = part_devt(part); + disk_put_part(part); + break; + } + disk_put_part(part); + } + class_dev_iter_exit(&iter); + return devt; +} +EXPORT_SYMBOL(blk_lookup_devt); + +struct gendisk *alloc_disk(int minors) +{ + return alloc_disk_node(minors, -1); +} +EXPORT_SYMBOL(alloc_disk); + +struct gendisk *alloc_disk_node(int minors, int node_id) +{ + struct gendisk *disk; + + disk = kmalloc_node(sizeof(struct gendisk), + GFP_KERNEL | __GFP_ZERO, node_id); + if (disk) { + if (!init_part_stats(&disk->part0)) { + kfree(disk); + return NULL; + } + disk->node_id = node_id; + if (disk_expand_part_tbl(disk, 0)) { + free_part_stats(&disk->part0); + kfree(disk); + return NULL; + } + disk->part_tbl->part[0] = &disk->part0; + + disk->minors = minors; + rand_initialize_disk(disk); + disk_to_dev(disk)->class = &block_class; + disk_to_dev(disk)->type = &disk_type; + device_initialize(disk_to_dev(disk)); + INIT_WORK(&disk->async_notify, + media_change_notify_thread); + } + return disk; +} +EXPORT_SYMBOL(alloc_disk_node); + +struct kobject *get_disk(struct gendisk *disk) +{ + struct module *owner; + struct kobject *kobj; + + if (!disk->fops) + return NULL; + owner = disk->fops->owner; + if (owner && !try_module_get(owner)) + return NULL; + kobj = kobject_get(&disk_to_dev(disk)->kobj); + if (kobj == NULL) { + module_put(owner); + return NULL; + } + return kobj; + +} + +EXPORT_SYMBOL(get_disk); + +void put_disk(struct gendisk *disk) +{ + if (disk) + kobject_put(&disk_to_dev(disk)->kobj); +} + +EXPORT_SYMBOL(put_disk); + +static void set_disk_ro_uevent(struct gendisk *gd, int ro) +{ + char event[] = "DISK_RO=1"; + char *envp[] = { event, NULL }; + + if (!ro) + event[8] = '0'; + kobject_uevent_env(&disk_to_dev(gd)->kobj, KOBJ_CHANGE, envp); +} + +void set_device_ro(struct block_device *bdev, int flag) +{ + bdev->bd_part->policy = flag; +} + +EXPORT_SYMBOL(set_device_ro); + +void set_disk_ro(struct gendisk *disk, int flag) +{ + struct disk_part_iter piter; + struct hd_struct *part; + + if (disk->part0.policy != flag) { + set_disk_ro_uevent(disk, flag); + disk->part0.policy = flag; + } + + disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY); + while ((part = disk_part_iter_next(&piter))) + part->policy = flag; + disk_part_iter_exit(&piter); +} + +EXPORT_SYMBOL(set_disk_ro); + +int bdev_read_only(struct block_device *bdev) +{ + if (!bdev) + return 0; + return bdev->bd_part->policy; +} + +EXPORT_SYMBOL(bdev_read_only); + +int invalidate_partition(struct gendisk *disk, int partno) +{ + int res = 0; + struct block_device *bdev = bdget_disk(disk, partno); + if (bdev) { + fsync_bdev(bdev); + res = __invalidate_device(bdev); + bdput(bdev); + } + return res; +} + +EXPORT_SYMBOL(invalidate_partition); diff --git a/block/ioctl.c b/block/ioctl.c new file mode 100644 index 00000000..d8052f0d --- /dev/null +++ b/block/ioctl.c @@ -0,0 +1,327 @@ +#include <linux/capability.h> +#include <linux/blkdev.h> +#include <linux/gfp.h> +#include <linux/blkpg.h> +#include <linux/hdreg.h> +#include <linux/backing-dev.h> +#include <linux/buffer_head.h> +#include <linux/smp_lock.h> +#include <linux/blktrace_api.h> +#include <asm/uaccess.h> + +static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user *arg) +{ + struct block_device *bdevp; + struct gendisk *disk; + struct hd_struct *part; + struct blkpg_ioctl_arg a; + struct blkpg_partition p; + struct disk_part_iter piter; + long long 
start, length; + int partno; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + if (copy_from_user(&a, arg, sizeof(struct blkpg_ioctl_arg))) + return -EFAULT; + if (copy_from_user(&p, a.data, sizeof(struct blkpg_partition))) + return -EFAULT; + disk = bdev->bd_disk; + if (bdev != bdev->bd_contains) + return -EINVAL; + partno = p.pno; + if (partno <= 0) + return -EINVAL; + switch (a.op) { + case BLKPG_ADD_PARTITION: + start = p.start >> 9; + length = p.length >> 9; + /* check for fit in a hd_struct */ + if (sizeof(sector_t) == sizeof(long) && + sizeof(long long) > sizeof(long)) { + long pstart = start, plength = length; + if (pstart != start || plength != length + || pstart < 0 || plength < 0) + return -EINVAL; + } + + mutex_lock(&bdev->bd_mutex); + + /* overlap? */ + disk_part_iter_init(&piter, disk, + DISK_PITER_INCL_EMPTY); + while ((part = disk_part_iter_next(&piter))) { + if (!(start + length <= part->start_sect || + start >= part->start_sect + part->nr_sects)) { + disk_part_iter_exit(&piter); + mutex_unlock(&bdev->bd_mutex); + return -EBUSY; + } + } + disk_part_iter_exit(&piter); + + /* all seems OK */ + part = add_partition(disk, partno, start, length, + ADDPART_FLAG_NONE); + mutex_unlock(&bdev->bd_mutex); + return IS_ERR(part) ? PTR_ERR(part) : 0; + case BLKPG_DEL_PARTITION: + part = disk_get_part(disk, partno); + if (!part) + return -ENXIO; + + bdevp = bdget(part_devt(part)); + disk_put_part(part); + if (!bdevp) + return -ENOMEM; + + mutex_lock(&bdevp->bd_mutex); + if (bdevp->bd_openers) { + mutex_unlock(&bdevp->bd_mutex); + bdput(bdevp); + return -EBUSY; + } + /* all seems OK */ + fsync_bdev(bdevp); + invalidate_bdev(bdevp); + + mutex_lock_nested(&bdev->bd_mutex, 1); + delete_partition(disk, partno); + mutex_unlock(&bdev->bd_mutex); + mutex_unlock(&bdevp->bd_mutex); + bdput(bdevp); + + return 0; + default: + return -EINVAL; + } +} + +static int blkdev_reread_part(struct block_device *bdev) +{ + struct gendisk *disk = bdev->bd_disk; + int res; + + if (!disk_partitionable(disk) || bdev != bdev->bd_contains) + return -EINVAL; + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + if (!mutex_trylock(&bdev->bd_mutex)) + return -EBUSY; + res = rescan_partitions(disk, bdev); + mutex_unlock(&bdev->bd_mutex); + return res; +} + +static int blk_ioctl_discard(struct block_device *bdev, uint64_t start, + uint64_t len, int secure) +{ + unsigned long flags = BLKDEV_IFL_WAIT; + + if (start & 511) + return -EINVAL; + if (len & 511) + return -EINVAL; + start >>= 9; + len >>= 9; + + if (start + len > (bdev->bd_inode->i_size >> 9)) + return -EINVAL; + if (secure) + flags |= BLKDEV_IFL_SECURE; + return blkdev_issue_discard(bdev, start, len, GFP_KERNEL, flags); +} + +static int put_ushort(unsigned long arg, unsigned short val) +{ + return put_user(val, (unsigned short __user *)arg); +} + +static int put_int(unsigned long arg, int val) +{ + return put_user(val, (int __user *)arg); +} + +static int put_uint(unsigned long arg, unsigned int val) +{ + return put_user(val, (unsigned int __user *)arg); +} + +static int put_long(unsigned long arg, long val) +{ + return put_user(val, (long __user *)arg); +} + +static int put_ulong(unsigned long arg, unsigned long val) +{ + return put_user(val, (unsigned long __user *)arg); +} + +static int put_u64(unsigned long arg, u64 val) +{ + return put_user(val, (u64 __user *)arg); +} + +int __blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode, + unsigned cmd, unsigned long arg) +{ + struct gendisk *disk = bdev->bd_disk; + + if (disk->fops->ioctl) + return 
disk->fops->ioctl(bdev, mode, cmd, arg); + + return -ENOTTY; +} +/* + * For the record: _GPL here is only because somebody decided to slap it + * on the previous export. Sheer idiocy, since it wasn't copyrightable + * at all and could be open-coded without any exports by anybody who cares. + */ +EXPORT_SYMBOL_GPL(__blkdev_driver_ioctl); + +/* + * always keep this in sync with compat_blkdev_ioctl() + */ +int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, + unsigned long arg) +{ + struct gendisk *disk = bdev->bd_disk; + struct backing_dev_info *bdi; + loff_t size; + int ret, n; + + switch(cmd) { + case BLKFLSBUF: + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + + ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg); + /* -EINVAL to handle old uncorrected drivers */ + if (ret != -EINVAL && ret != -ENOTTY) + return ret; + + fsync_bdev(bdev); + invalidate_bdev(bdev); + return 0; + + case BLKROSET: + ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg); + /* -EINVAL to handle old uncorrected drivers */ + if (ret != -EINVAL && ret != -ENOTTY) + return ret; + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + if (get_user(n, (int __user *)(arg))) + return -EFAULT; + set_device_ro(bdev, n); + return 0; + + case BLKDISCARD: + case BLKSECDISCARD: { + uint64_t range[2]; + + if (!(mode & FMODE_WRITE)) + return -EBADF; + + if (copy_from_user(range, (void __user *)arg, sizeof(range))) + return -EFAULT; + + return blk_ioctl_discard(bdev, range[0], range[1], + cmd == BLKSECDISCARD); + } + + case HDIO_GETGEO: { + struct hd_geometry geo; + + if (!arg) + return -EINVAL; + if (!disk->fops->getgeo) + return -ENOTTY; + + /* + * We need to set the startsect first, the driver may + * want to override it. + */ + geo.start = get_start_sect(bdev); + ret = disk->fops->getgeo(bdev, &geo); + if (ret) + return ret; + if (copy_to_user((struct hd_geometry __user *)arg, &geo, + sizeof(geo))) + return -EFAULT; + return 0; + } + case BLKRAGET: + case BLKFRAGET: + if (!arg) + return -EINVAL; + bdi = blk_get_backing_dev_info(bdev); + if (bdi == NULL) + return -ENOTTY; + return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512); + case BLKROGET: + return put_int(arg, bdev_read_only(bdev) != 0); + case BLKBSZGET: /* get block device soft block size (cf. 
BLKSSZGET) */ + return put_int(arg, block_size(bdev)); + case BLKSSZGET: /* get block device logical block size */ + return put_int(arg, bdev_logical_block_size(bdev)); + case BLKPBSZGET: /* get block device physical block size */ + return put_uint(arg, bdev_physical_block_size(bdev)); + case BLKIOMIN: + return put_uint(arg, bdev_io_min(bdev)); + case BLKIOOPT: + return put_uint(arg, bdev_io_opt(bdev)); + case BLKALIGNOFF: + return put_int(arg, bdev_alignment_offset(bdev)); + case BLKDISCARDZEROES: + return put_uint(arg, bdev_discard_zeroes_data(bdev)); + case BLKSECTGET: + return put_ushort(arg, queue_max_sectors(bdev_get_queue(bdev))); + case BLKRASET: + case BLKFRASET: + if(!capable(CAP_SYS_ADMIN)) + return -EACCES; + bdi = blk_get_backing_dev_info(bdev); + if (bdi == NULL) + return -ENOTTY; + bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE; + return 0; + case BLKBSZSET: + /* set the logical block size */ + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + if (!arg) + return -EINVAL; + if (get_user(n, (int __user *) arg)) + return -EFAULT; + if (!(mode & FMODE_EXCL) && bd_claim(bdev, &bdev) < 0) + return -EBUSY; + ret = set_blocksize(bdev, n); + if (!(mode & FMODE_EXCL)) + bd_release(bdev); + return ret; + case BLKPG: + ret = blkpg_ioctl(bdev, (struct blkpg_ioctl_arg __user *) arg); + break; + case BLKRRPART: + ret = blkdev_reread_part(bdev); + break; + case BLKGETSIZE: + size = bdev->bd_inode->i_size; + if ((size >> 9) > ~0UL) + return -EFBIG; + return put_ulong(arg, size >> 9); + case BLKGETSIZE64: + return put_u64(arg, bdev->bd_inode->i_size); + case BLKTRACESTART: + case BLKTRACESTOP: + case BLKTRACESETUP: + case BLKTRACETEARDOWN: + ret = blk_trace_ioctl(bdev, cmd, (char __user *) arg); + break; + default: + ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg); + } + return ret; +} +EXPORT_SYMBOL_GPL(blkdev_ioctl); diff --git a/block/noop-iosched.c b/block/noop-iosched.c new file mode 100644 index 00000000..e624f36b --- /dev/null +++ b/block/noop-iosched.c @@ -0,0 +1,125 @@ +/* + * elevator noop + */ +#include <linux/blkdev.h> +#include <linux/elevator.h> +#include <linux/bio.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/init.h> + +struct noop_data { + struct list_head queue; +}; + +static void noop_merged_requests(struct request_queue *q, struct request *rq, + struct request *next) +{ + list_del_init(&next->queuelist); +} + +static int noop_dispatch(struct request_queue *q, int force) +{ + struct noop_data *nd = q->elevator->elevator_data; + + if (!list_empty(&nd->queue)) { + struct request *rq; + rq = list_entry(nd->queue.next, struct request, queuelist); + list_del_init(&rq->queuelist); + elv_dispatch_sort(q, rq); + return 1; + } + return 0; +} + +static void noop_add_request(struct request_queue *q, struct request *rq) +{ + struct noop_data *nd = q->elevator->elevator_data; + + list_add_tail(&rq->queuelist, &nd->queue); +} + +static int noop_queue_empty(struct request_queue *q) +{ + struct noop_data *nd = q->elevator->elevator_data; + + return list_empty(&nd->queue); +} + +static struct request * +noop_former_request(struct request_queue *q, struct request *rq) +{ + struct noop_data *nd = q->elevator->elevator_data; + + if (rq->queuelist.prev == &nd->queue) + return NULL; + return list_entry(rq->queuelist.prev, struct request, queuelist); +} + +static struct request * +noop_latter_request(struct request_queue *q, struct request *rq) +{ + struct noop_data *nd = q->elevator->elevator_data; + + if (rq->queuelist.next == &nd->queue) + return NULL; + return 
list_entry(rq->queuelist.next, struct request, queuelist); +} + +static void *noop_init_queue(struct request_queue *q) +{ + struct noop_data *nd; + + nd = kmalloc_node(sizeof(*nd), GFP_KERNEL, q->node); + if (!nd) + return NULL; + INIT_LIST_HEAD(&nd->queue); + return nd; +} + +static void noop_exit_queue(struct elevator_queue *e) +{ + struct noop_data *nd = e->elevator_data; + + BUG_ON(!list_empty(&nd->queue)); + kfree(nd); +} + +static struct elevator_type elevator_noop = { + .ops = { + .elevator_merge_req_fn = noop_merged_requests, + .elevator_dispatch_fn = noop_dispatch, + .elevator_add_req_fn = noop_add_request, + .elevator_queue_empty_fn = noop_queue_empty, + .elevator_former_req_fn = noop_former_request, + .elevator_latter_req_fn = noop_latter_request, + .elevator_init_fn = noop_init_queue, + .elevator_exit_fn = noop_exit_queue, + }, + .elevator_name = "noop", + .elevator_owner = THIS_MODULE, +}; + +static int __init noop_init(void) +{ + elv_register(&elevator_noop); + + return 0; +} + +static void __exit noop_exit(void) +{ + elv_unregister(&elevator_noop); +} + +#ifdef CONFIG_FAST_RESUME +beforeresume_initcall(noop_init); +#else +module_init(noop_init); +#endif +module_exit(noop_exit); + + +MODULE_AUTHOR("Jens Axboe"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("No-op IO scheduler"); diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c new file mode 100644 index 00000000..4f4230b7 --- /dev/null +++ b/block/scsi_ioctl.c @@ -0,0 +1,699 @@ +/* + * Copyright (C) 2001 Jens Axboe <axboe@suse.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public Licens + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111- + * + */ +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/string.h> +#include <linux/module.h> +#include <linux/blkdev.h> +#include <linux/capability.h> +#include <linux/completion.h> +#include <linux/cdrom.h> +#include <linux/slab.h> +#include <linux/times.h> +#include <asm/uaccess.h> + +#include <scsi/scsi.h> +#include <scsi/scsi_ioctl.h> +#include <scsi/scsi_cmnd.h> + +struct blk_cmd_filter { + unsigned long read_ok[BLK_SCSI_CMD_PER_LONG]; + unsigned long write_ok[BLK_SCSI_CMD_PER_LONG]; +}; + +static struct blk_cmd_filter blk_default_cmd_filter; + +/* Command group 3 is reserved and should never be used. 
*/ +const unsigned char scsi_command_size_tbl[8] = +{ + 6, 10, 10, 12, + 16, 12, 10, 10 +}; +EXPORT_SYMBOL(scsi_command_size_tbl); + +#include <scsi/sg.h> + +static int sg_get_version(int __user *p) +{ + static const int sg_version_num = 30527; + return put_user(sg_version_num, p); +} + +static int scsi_get_idlun(struct request_queue *q, int __user *p) +{ + return put_user(0, p); +} + +static int scsi_get_bus(struct request_queue *q, int __user *p) +{ + return put_user(0, p); +} + +static int sg_get_timeout(struct request_queue *q) +{ + return jiffies_to_clock_t(q->sg_timeout); +} + +static int sg_set_timeout(struct request_queue *q, int __user *p) +{ + int timeout, err = get_user(timeout, p); + + if (!err) + q->sg_timeout = clock_t_to_jiffies(timeout); + + return err; +} + +static int sg_get_reserved_size(struct request_queue *q, int __user *p) +{ + unsigned val = min(q->sg_reserved_size, queue_max_sectors(q) << 9); + + return put_user(val, p); +} + +static int sg_set_reserved_size(struct request_queue *q, int __user *p) +{ + int size, err = get_user(size, p); + + if (err) + return err; + + if (size < 0) + return -EINVAL; + if (size > (queue_max_sectors(q) << 9)) + size = queue_max_sectors(q) << 9; + + q->sg_reserved_size = size; + return 0; +} + +/* + * will always return that we are ATAPI even for a real SCSI drive, I'm not + * so sure this is worth doing anything about (why would you care??) + */ +static int sg_emulated_host(struct request_queue *q, int __user *p) +{ + return put_user(1, p); +} + +static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter) +{ + /* Basic read-only commands */ + __set_bit(TEST_UNIT_READY, filter->read_ok); + __set_bit(REQUEST_SENSE, filter->read_ok); + __set_bit(READ_6, filter->read_ok); + __set_bit(READ_10, filter->read_ok); + __set_bit(READ_12, filter->read_ok); + __set_bit(READ_16, filter->read_ok); + __set_bit(READ_BUFFER, filter->read_ok); + __set_bit(READ_DEFECT_DATA, filter->read_ok); + __set_bit(READ_CAPACITY, filter->read_ok); + __set_bit(READ_LONG, filter->read_ok); + __set_bit(INQUIRY, filter->read_ok); + __set_bit(MODE_SENSE, filter->read_ok); + __set_bit(MODE_SENSE_10, filter->read_ok); + __set_bit(LOG_SENSE, filter->read_ok); + __set_bit(START_STOP, filter->read_ok); + __set_bit(GPCMD_VERIFY_10, filter->read_ok); + __set_bit(VERIFY_16, filter->read_ok); + __set_bit(REPORT_LUNS, filter->read_ok); + __set_bit(SERVICE_ACTION_IN, filter->read_ok); + __set_bit(RECEIVE_DIAGNOSTIC, filter->read_ok); + __set_bit(MAINTENANCE_IN, filter->read_ok); + __set_bit(GPCMD_READ_BUFFER_CAPACITY, filter->read_ok); + + /* Audio CD commands */ + __set_bit(GPCMD_PLAY_CD, filter->read_ok); + __set_bit(GPCMD_PLAY_AUDIO_10, filter->read_ok); + __set_bit(GPCMD_PLAY_AUDIO_MSF, filter->read_ok); + __set_bit(GPCMD_PLAY_AUDIO_TI, filter->read_ok); + __set_bit(GPCMD_PAUSE_RESUME, filter->read_ok); + + /* CD/DVD data reading */ + __set_bit(GPCMD_READ_CD, filter->read_ok); + __set_bit(GPCMD_READ_CD_MSF, filter->read_ok); + __set_bit(GPCMD_READ_DISC_INFO, filter->read_ok); + __set_bit(GPCMD_READ_CDVD_CAPACITY, filter->read_ok); + __set_bit(GPCMD_READ_DVD_STRUCTURE, filter->read_ok); + __set_bit(GPCMD_READ_HEADER, filter->read_ok); + __set_bit(GPCMD_READ_TRACK_RZONE_INFO, filter->read_ok); + __set_bit(GPCMD_READ_SUBCHANNEL, filter->read_ok); + __set_bit(GPCMD_READ_TOC_PMA_ATIP, filter->read_ok); + __set_bit(GPCMD_REPORT_KEY, filter->read_ok); + __set_bit(GPCMD_SCAN, filter->read_ok); + __set_bit(GPCMD_GET_CONFIGURATION, filter->read_ok); + 
__set_bit(GPCMD_READ_FORMAT_CAPACITIES, filter->read_ok); + __set_bit(GPCMD_GET_EVENT_STATUS_NOTIFICATION, filter->read_ok); + __set_bit(GPCMD_GET_PERFORMANCE, filter->read_ok); + __set_bit(GPCMD_SEEK, filter->read_ok); + __set_bit(GPCMD_STOP_PLAY_SCAN, filter->read_ok); + + /* Basic writing commands */ + __set_bit(WRITE_6, filter->write_ok); + __set_bit(WRITE_10, filter->write_ok); + __set_bit(WRITE_VERIFY, filter->write_ok); + __set_bit(WRITE_12, filter->write_ok); + __set_bit(WRITE_VERIFY_12, filter->write_ok); + __set_bit(WRITE_16, filter->write_ok); + __set_bit(WRITE_LONG, filter->write_ok); + __set_bit(WRITE_LONG_2, filter->write_ok); + __set_bit(ERASE, filter->write_ok); + __set_bit(GPCMD_MODE_SELECT_10, filter->write_ok); + __set_bit(MODE_SELECT, filter->write_ok); + __set_bit(LOG_SELECT, filter->write_ok); + __set_bit(GPCMD_BLANK, filter->write_ok); + __set_bit(GPCMD_CLOSE_TRACK, filter->write_ok); + __set_bit(GPCMD_FLUSH_CACHE, filter->write_ok); + __set_bit(GPCMD_FORMAT_UNIT, filter->write_ok); + __set_bit(GPCMD_REPAIR_RZONE_TRACK, filter->write_ok); + __set_bit(GPCMD_RESERVE_RZONE_TRACK, filter->write_ok); + __set_bit(GPCMD_SEND_DVD_STRUCTURE, filter->write_ok); + __set_bit(GPCMD_SEND_EVENT, filter->write_ok); + __set_bit(GPCMD_SEND_KEY, filter->write_ok); + __set_bit(GPCMD_SEND_OPC, filter->write_ok); + __set_bit(GPCMD_SEND_CUE_SHEET, filter->write_ok); + __set_bit(GPCMD_SET_SPEED, filter->write_ok); + __set_bit(GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL, filter->write_ok); + __set_bit(GPCMD_LOAD_UNLOAD, filter->write_ok); + __set_bit(GPCMD_SET_STREAMING, filter->write_ok); + __set_bit(GPCMD_SET_READ_AHEAD, filter->write_ok); +} + +int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm) +{ + struct blk_cmd_filter *filter = &blk_default_cmd_filter; + + /* root can do any command. 
*/ + if (capable(CAP_SYS_RAWIO)) + return 0; + + /* if there's no filter set, assume we're filtering everything out */ + if (!filter) + return -EPERM; + + /* Anybody who can open the device can do a read-safe command */ + if (test_bit(cmd[0], filter->read_ok)) + return 0; + + /* Write-safe commands require a writable open */ + if (test_bit(cmd[0], filter->write_ok) && has_write_perm) + return 0; + + return -EPERM; +} +EXPORT_SYMBOL(blk_verify_command); + +static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq, + struct sg_io_hdr *hdr, fmode_t mode) +{ + if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len)) + return -EFAULT; + if (blk_verify_command(rq->cmd, mode & FMODE_WRITE)) + return -EPERM; + + /* + * fill in request structure + */ + rq->cmd_len = hdr->cmd_len; + rq->cmd_type = REQ_TYPE_BLOCK_PC; + + rq->timeout = msecs_to_jiffies(hdr->timeout); + if (!rq->timeout) + rq->timeout = q->sg_timeout; + if (!rq->timeout) + rq->timeout = BLK_DEFAULT_SG_TIMEOUT; + if (rq->timeout < BLK_MIN_SG_TIMEOUT) + rq->timeout = BLK_MIN_SG_TIMEOUT; + + return 0; +} + +static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr, + struct bio *bio) +{ + int r, ret = 0; + + /* + * fill in all the output members + */ + hdr->status = rq->errors & 0xff; + hdr->masked_status = status_byte(rq->errors); + hdr->msg_status = msg_byte(rq->errors); + hdr->host_status = host_byte(rq->errors); + hdr->driver_status = driver_byte(rq->errors); + hdr->info = 0; + if (hdr->masked_status || hdr->host_status || hdr->driver_status) + hdr->info |= SG_INFO_CHECK; + hdr->resid = rq->resid_len; + hdr->sb_len_wr = 0; + + if (rq->sense_len && hdr->sbp) { + int len = min((unsigned int) hdr->mx_sb_len, rq->sense_len); + + if (!copy_to_user(hdr->sbp, rq->sense, len)) + hdr->sb_len_wr = len; + else + ret = -EFAULT; + } + + r = blk_rq_unmap_user(bio); + if (!ret) + ret = r; + blk_put_request(rq); + + return ret; +} + +static int sg_io(struct request_queue *q, struct gendisk *bd_disk, + struct sg_io_hdr *hdr, fmode_t mode) +{ + unsigned long start_time; + int writing = 0, ret = 0; + struct request *rq; + char sense[SCSI_SENSE_BUFFERSIZE]; + struct bio *bio; + + if (hdr->interface_id != 'S') + return -EINVAL; + if (hdr->cmd_len > BLK_MAX_CDB) + return -EINVAL; + + if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9)) + return -EIO; + + if (hdr->dxfer_len) + switch (hdr->dxfer_direction) { + default: + return -EINVAL; + case SG_DXFER_TO_DEV: + writing = 1; + break; + case SG_DXFER_TO_FROM_DEV: + case SG_DXFER_FROM_DEV: + break; + } + + rq = blk_get_request(q, writing ? 
WRITE : READ, GFP_KERNEL); + if (!rq) + return -ENOMEM; + + if (blk_fill_sghdr_rq(q, rq, hdr, mode)) { + blk_put_request(rq); + return -EFAULT; + } + + if (hdr->iovec_count) { + const int size = sizeof(struct sg_iovec) * hdr->iovec_count; + size_t iov_data_len; + struct sg_iovec *sg_iov; + struct iovec *iov; + int i; + + sg_iov = kmalloc(size, GFP_KERNEL); + if (!sg_iov) { + ret = -ENOMEM; + goto out; + } + + if (copy_from_user(sg_iov, hdr->dxferp, size)) { + kfree(sg_iov); + ret = -EFAULT; + goto out; + } + + /* + * Sum up the vecs, making sure they don't overflow + */ + iov = (struct iovec *) sg_iov; + iov_data_len = 0; + for (i = 0; i < hdr->iovec_count; i++) { + if (iov_data_len + iov[i].iov_len < iov_data_len) { + kfree(sg_iov); + ret = -EINVAL; + goto out; + } + iov_data_len += iov[i].iov_len; + } + + /* SG_IO howto says that the shorter of the two wins */ + if (hdr->dxfer_len < iov_data_len) { + hdr->iovec_count = iov_shorten(iov, + hdr->iovec_count, + hdr->dxfer_len); + iov_data_len = hdr->dxfer_len; + } + + ret = blk_rq_map_user_iov(q, rq, NULL, sg_iov, hdr->iovec_count, + iov_data_len, GFP_KERNEL); + kfree(sg_iov); + } else if (hdr->dxfer_len) + ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len, + GFP_KERNEL); + + if (ret) + goto out; + + bio = rq->bio; + memset(sense, 0, sizeof(sense)); + rq->sense = sense; + rq->sense_len = 0; + rq->retries = 0; + + start_time = jiffies; + + /* ignore return value. All information is passed back to caller + * (if he doesn't check that is his problem). + * N.B. a non-zero SCSI status is _not_ necessarily an error. + */ + blk_execute_rq(q, bd_disk, rq, 0); + + hdr->duration = jiffies_to_msecs(jiffies - start_time); + + return blk_complete_sghdr_rq(rq, hdr, bio); +out: + blk_put_request(rq); + return ret; +} + +/** + * sg_scsi_ioctl -- handle deprecated SCSI_IOCTL_SEND_COMMAND ioctl + * @file: file this ioctl operates on (optional) + * @q: request queue to send scsi commands down + * @disk: gendisk to operate on (option) + * @sic: userspace structure describing the command to perform + * + * Send down the scsi command described by @sic to the device below + * the request queue @q. If @file is non-NULL it's used to perform + * fine-grained permission checks that allow users to send down + * non-destructive SCSI commands. If the caller has a struct gendisk + * available it should be passed in as @disk to allow the low level + * driver to use the information contained in it. A non-NULL @disk + * is only allowed if the caller knows that the low level driver doesn't + * need it (e.g. in the scsi subsystem). + * + * Notes: + * - This interface is deprecated - users should use the SG_IO + * interface instead, as this is a more flexible approach to + * performing SCSI commands on a device. + * - The SCSI command length is determined by examining the 1st byte + * of the given command. There is no way to override this. + * - Data transfers are limited to PAGE_SIZE + * - The length (x + y) must be at least OMAX_SB_LEN bytes long to + * accommodate the sense buffer when an error occurs. + * The sense buffer is truncated to OMAX_SB_LEN (16) bytes so that + * old code will not be surprised. + * - If a Unix error occurs (e.g. ENOMEM) then the user will receive + * a negative return and the Unix error code in 'errno'. + * If the SCSI command succeeds then 0 is returned. + * Positive numbers returned are the compacted SCSI error codes (4 + * bytes in one int) where the lowest byte is the SCSI status. 
+ */ +#define OMAX_SB_LEN 16 /* For backward compatibility */ +int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode, + struct scsi_ioctl_command __user *sic) +{ + struct request *rq; + int err; + unsigned int in_len, out_len, bytes, opcode, cmdlen; + char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE]; + + if (!sic) + return -EINVAL; + + /* + * get in an out lengths, verify they don't exceed a page worth of data + */ + if (get_user(in_len, &sic->inlen)) + return -EFAULT; + if (get_user(out_len, &sic->outlen)) + return -EFAULT; + if (in_len > PAGE_SIZE || out_len > PAGE_SIZE) + return -EINVAL; + if (get_user(opcode, sic->data)) + return -EFAULT; + + bytes = max(in_len, out_len); + if (bytes) { + buffer = kzalloc(bytes, q->bounce_gfp | GFP_USER| __GFP_NOWARN); + if (!buffer) + return -ENOMEM; + + } + + rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT); + + cmdlen = COMMAND_SIZE(opcode); + + /* + * get command and data to send to device, if any + */ + err = -EFAULT; + rq->cmd_len = cmdlen; + if (copy_from_user(rq->cmd, sic->data, cmdlen)) + goto error; + + if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len)) + goto error; + + err = blk_verify_command(rq->cmd, mode & FMODE_WRITE); + if (err) + goto error; + + /* default. possible overriden later */ + rq->retries = 5; + + switch (opcode) { + case SEND_DIAGNOSTIC: + case FORMAT_UNIT: + rq->timeout = FORMAT_UNIT_TIMEOUT; + rq->retries = 1; + break; + case START_STOP: + rq->timeout = START_STOP_TIMEOUT; + break; + case MOVE_MEDIUM: + rq->timeout = MOVE_MEDIUM_TIMEOUT; + break; + case READ_ELEMENT_STATUS: + rq->timeout = READ_ELEMENT_STATUS_TIMEOUT; + break; + case READ_DEFECT_DATA: + rq->timeout = READ_DEFECT_DATA_TIMEOUT; + rq->retries = 1; + break; + default: + rq->timeout = BLK_DEFAULT_SG_TIMEOUT; + break; + } + + if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) { + err = DRIVER_ERROR << 24; + goto out; + } + + memset(sense, 0, sizeof(sense)); + rq->sense = sense; + rq->sense_len = 0; + rq->cmd_type = REQ_TYPE_BLOCK_PC; + + blk_execute_rq(q, disk, rq, 0); + +out: + err = rq->errors & 0xff; /* only 8 bit SCSI status */ + if (err) { + if (rq->sense_len && rq->sense) { + bytes = (OMAX_SB_LEN > rq->sense_len) ? 
+ rq->sense_len : OMAX_SB_LEN; + if (copy_to_user(sic->data, rq->sense, bytes)) + err = -EFAULT; + } + } else { + if (copy_to_user(sic->data, buffer, out_len)) + err = -EFAULT; + } + +error: + kfree(buffer); + blk_put_request(rq); + return err; +} +EXPORT_SYMBOL_GPL(sg_scsi_ioctl); + +/* Send basic block requests */ +static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk, + int cmd, int data) +{ + struct request *rq; + int err; + + rq = blk_get_request(q, WRITE, __GFP_WAIT); + rq->cmd_type = REQ_TYPE_BLOCK_PC; + rq->timeout = BLK_DEFAULT_SG_TIMEOUT; + rq->cmd[0] = cmd; + rq->cmd[4] = data; + rq->cmd_len = 6; + err = blk_execute_rq(q, bd_disk, rq, 0); + blk_put_request(rq); + + return err; +} + +static inline int blk_send_start_stop(struct request_queue *q, + struct gendisk *bd_disk, int data) +{ + return __blk_send_generic(q, bd_disk, GPCMD_START_STOP_UNIT, data); +} + +int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mode, + unsigned int cmd, void __user *arg) +{ + int err; + + if (!q || blk_get_queue(q)) + return -ENXIO; + + switch (cmd) { + /* + * new sgv3 interface + */ + case SG_GET_VERSION_NUM: + err = sg_get_version(arg); + break; + case SCSI_IOCTL_GET_IDLUN: + err = scsi_get_idlun(q, arg); + break; + case SCSI_IOCTL_GET_BUS_NUMBER: + err = scsi_get_bus(q, arg); + break; + case SG_SET_TIMEOUT: + err = sg_set_timeout(q, arg); + break; + case SG_GET_TIMEOUT: + err = sg_get_timeout(q); + break; + case SG_GET_RESERVED_SIZE: + err = sg_get_reserved_size(q, arg); + break; + case SG_SET_RESERVED_SIZE: + err = sg_set_reserved_size(q, arg); + break; + case SG_EMULATED_HOST: + err = sg_emulated_host(q, arg); + break; + case SG_IO: { + struct sg_io_hdr hdr; + + err = -EFAULT; + if (copy_from_user(&hdr, arg, sizeof(hdr))) + break; + err = sg_io(q, bd_disk, &hdr, mode); + if (err == -EFAULT) + break; + + if (copy_to_user(arg, &hdr, sizeof(hdr))) + err = -EFAULT; + break; + } + case CDROM_SEND_PACKET: { + struct cdrom_generic_command cgc; + struct sg_io_hdr hdr; + + err = -EFAULT; + if (copy_from_user(&cgc, arg, sizeof(cgc))) + break; + cgc.timeout = clock_t_to_jiffies(cgc.timeout); + memset(&hdr, 0, sizeof(hdr)); + hdr.interface_id = 'S'; + hdr.cmd_len = sizeof(cgc.cmd); + hdr.dxfer_len = cgc.buflen; + err = 0; + switch (cgc.data_direction) { + case CGC_DATA_UNKNOWN: + hdr.dxfer_direction = SG_DXFER_UNKNOWN; + break; + case CGC_DATA_WRITE: + hdr.dxfer_direction = SG_DXFER_TO_DEV; + break; + case CGC_DATA_READ: + hdr.dxfer_direction = SG_DXFER_FROM_DEV; + break; + case CGC_DATA_NONE: + hdr.dxfer_direction = SG_DXFER_NONE; + break; + default: + err = -EINVAL; + } + if (err) + break; + + hdr.dxferp = cgc.buffer; + hdr.sbp = cgc.sense; + if (hdr.sbp) + hdr.mx_sb_len = sizeof(struct request_sense); + hdr.timeout = jiffies_to_msecs(cgc.timeout); + hdr.cmdp = ((struct cdrom_generic_command __user*) arg)->cmd; + hdr.cmd_len = sizeof(cgc.cmd); + + err = sg_io(q, bd_disk, &hdr, mode); + if (err == -EFAULT) + break; + + if (hdr.status) + err = -EIO; + + cgc.stat = err; + cgc.buflen = hdr.resid; + if (copy_to_user(arg, &cgc, sizeof(cgc))) + err = -EFAULT; + + break; + } + + /* + * old junk scsi send command ioctl + */ + case SCSI_IOCTL_SEND_COMMAND: + printk(KERN_WARNING "program %s is using a deprecated SCSI ioctl, please convert it to SG_IO\n", current->comm); + err = -EINVAL; + if (!arg) + break; + + err = sg_scsi_ioctl(q, bd_disk, mode, arg); + break; + case CDROMCLOSETRAY: + err = blk_send_start_stop(q, bd_disk, 0x03); + break; + case CDROMEJECT: + 
err = blk_send_start_stop(q, bd_disk, 0x02); + break; + default: + err = -ENOTTY; + } + + blk_put_queue(q); + return err; +} +EXPORT_SYMBOL(scsi_cmd_ioctl); + +static int __init blk_scsi_ioctl_init(void) +{ + blk_set_cmd_filter_defaults(&blk_default_cmd_filter); + return 0; +} +fs_initcall(blk_scsi_ioctl_init);
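
As a rough userspace companion to diskstats_show() above, the sketch below parses /proc/diskstats in the exact field order of its seq_printf() call: reads, read merges, read sectors, read ticks, the same four for writes, then in-flight, io_ticks and time_in_queue, matching the part_stat_read() arguments. It is illustrative only and not part of the patch.

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/diskstats", "r");
	char line[256], name[64];
	unsigned int major, minor, ruse, wuse, inflight, ioticks, timeq;
	unsigned long rio, rmerge, wio, wmerge;
	unsigned long long rsect, wsect;

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		/* field order of diskstats_show(): "%4d %7d %s" then 11 counters */
		if (sscanf(line, "%u %u %63s %lu %lu %llu %u %lu %lu %llu %u %u %u %u",
			   &major, &minor, name,
			   &rio, &rmerge, &rsect, &ruse,
			   &wio, &wmerge, &wsect, &wuse,
			   &inflight, &ioticks, &timeq) == 14)
			printf("%-10s reads=%lu writes=%lu in_flight=%u io_ms=%u\n",
			       name, rio, wio, inflight, ioticks);
	}
	fclose(f);
	return 0;
}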
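
blkpg_ioctl() above accepts BLKPG_ADD_PARTITION and BLKPG_DEL_PARTITION on the whole-disk node: it converts byte offsets to sectors with >> 9, rejects partno <= 0, and returns -EBUSY on overlap. Below is a minimal sketch of the add path from userspace, illustrative only and not part of the patch; the device node /dev/sdX, the offsets and the partition number are assumptions.

/*
 * Illustrative sketch: add a partition through the BLKPG path handled
 * by blkpg_ioctl(). start/length are byte values; the handler shifts
 * them right by 9 to get sectors. Requires CAP_SYS_ADMIN.
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/blkpg.h>

int main(void)
{
	struct blkpg_partition p;
	struct blkpg_ioctl_arg a;
	int fd = open("/dev/sdX", O_RDONLY);	/* assumed whole-disk node */

	if (fd < 0)
		return 1;

	memset(&p, 0, sizeof(p));
	p.start  = 1ULL << 20;			/* byte offset (assumption) */
	p.length = 100ULL << 20;		/* byte length (assumption) */
	p.pno    = 5;				/* must be > 0 */

	memset(&a, 0, sizeof(a));
	a.op      = BLKPG_ADD_PARTITION;
	a.datalen = sizeof(p);
	a.data    = &p;

	if (ioctl(fd, BLKPG, &a)) {		/* -EBUSY if it overlaps an existing partition */
		perror("BLKPG");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}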
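
Several arms of the blkdev_ioctl() switch above simply copy queue limits back to userspace through put_int()/put_uint()/put_u64(). The small sketch below queries three of them; it is illustrative only and not part of the patch, and "/dev/sda" is an assumed device node.

/*
 * Illustrative sketch: query capacity and block sizes through the
 * ioctls serviced by blkdev_ioctl().
 */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* BLKGETSIZE64, BLKSSZGET, BLKPBSZGET */

int main(void)
{
	uint64_t bytes;			/* filled via put_u64() */
	int lbs;			/* logical block size, put_int() */
	unsigned int pbs;		/* physical block size, put_uint() */
	int fd = open("/dev/sda", O_RDONLY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, BLKGETSIZE64, &bytes) ||
	    ioctl(fd, BLKSSZGET, &lbs) ||
	    ioctl(fd, BLKPBSZGET, &pbs)) {
		close(fd);
		return 1;
	}
	printf("%llu bytes, logical %d / physical %u byte blocks\n",
	       (unsigned long long)bytes, lbs, pbs);
	close(fd);
	return 0;
}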
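
The sg_scsi_ioctl() comment above steers users toward SG_IO, which scsi_cmd_ioctl() routes through sg_io(). The sketch below shows that path from userspace with a standard INQUIRY; it is illustrative only and not part of the patch, and "/dev/sda" plus the 96-byte allocation length are assumptions.

/*
 * Illustrative sketch: issue INQUIRY via the SG_IO path serviced by
 * scsi_cmd_ioctl()/sg_io().
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	/* INQUIRY, 96-byte allocation */
	unsigned char buf[96], sense[32];
	struct sg_io_hdr hdr;
	int fd = open("/dev/sda", O_RDONLY);

	if (fd < 0)
		return 1;

	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';			/* sg_io() rejects anything else */
	hdr.cmdp = cdb;
	hdr.cmd_len = sizeof(cdb);
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	hdr.dxferp = buf;
	hdr.dxfer_len = sizeof(buf);
	hdr.sbp = sense;
	hdr.mx_sb_len = sizeof(sense);
	hdr.timeout = 5000;			/* ms, copied into rq->timeout by blk_fill_sghdr_rq() */

	if (ioctl(fd, SG_IO, &hdr) < 0) {
		close(fd);
		return 1;
	}
	if (hdr.status == 0)
		printf("vendor: %.8s\n", (char *)buf + 8);	/* vendor id at byte 8 of INQUIRY data */
	close(fd);
	return 0;
}

INQUIRY sits on the read_ok list built by blk_set_cmd_filter_defaults(), so blk_verify_command() accepts it on a read-only open even without CAP_SYS_RAWIO.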