author     Steve French <sfrench@us.ibm.com>    2008-04-24 15:26:50 +0000
committer  Steve French <sfrench@us.ibm.com>    2008-04-24 15:26:50 +0000
commit     36d99df2fb474222ab47fbe8ae7385661033223b (patch)
tree       962e068491b752a944f61c454fad3f8619a1ea3f /block
parent     076d8423a98659a92837b07aa494cb74bfefe77c (diff)
parent     3dc5063786b273f1aee545844f6bd4e9651ebffe (diff)
Merge branch 'master' of /pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'block')
-rw-r--r--  block/Kconfig        | 65
-rw-r--r--  block/blk-map.c      | 46
-rw-r--r--  block/blk-merge.c    |  9
-rw-r--r--  block/blk-sysfs.c    | 10
-rw-r--r--  block/bsg.c          | 63
-rw-r--r--  block/compat_ioctl.c |  1
6 files changed, 115 insertions, 79 deletions
diff --git a/block/Kconfig b/block/Kconfig
index 7db9a411649..3e97f2bc446 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -5,14 +5,18 @@ menuconfig BLOCK
         bool "Enable the block layer" if EMBEDDED
         default y
         help
-          This permits the block layer to be removed from the kernel if it's not
-          needed (on some embedded devices for example). If this option is
-          disabled, then blockdev files will become unusable and some
-          filesystems (such as ext3) will become unavailable.
+          Provide block layer support for the kernel.
 
-          This option will also disable SCSI character devices and USB storage
-          since they make use of various block layer definitions and
-          facilities.
+          Disable this option to remove the block layer support from the
+          kernel. This may be useful for embedded devices.
+
+          If this option is disabled:
+
+            - block device files will become unusable
+            - some filesystems (such as ext3) will become unavailable.
+
+          Also, SCSI character devices and USB storage will be disabled since
+          they make use of various block layer definitions and facilities.
 
           Say Y here unless you know you really don't want to mount disks and
           suchlike.
@@ -23,9 +27,20 @@ config LBD
         bool "Support for Large Block Devices"
         depends on !64BIT
         help
-          Say Y here if you want to attach large (bigger than 2TB) discs to
-          your machine, or if you want to have a raid or loopback device
-          bigger than 2TB.  Otherwise say N.
+          Enable block devices of size 2TB and larger.
+
+          This option is required to support the full capacity of large
+          (2TB+) block devices, including RAID, disk, Network Block Device,
+          Logical Volume Manager (LVM) and loopback.
+
+          For example, RAID devices are frequently bigger than the capacity
+          of the largest individual hard drive.
+
+          This option is not required if you have individual disk drives
+          which total 2TB+ and you are not aggregating the capacity into
+          a large block device (e.g. using RAID or LVM).
+
+          If unsure, say N.
 
 config BLK_DEV_IO_TRACE
         bool "Support for tracing block io actions"
@@ -33,19 +48,21 @@
         select RELAY
         select DEBUG_FS
         help
-          Say Y here, if you want to be able to trace the block layer actions
+          Say Y here if you want to be able to trace the block layer actions
           on a given queue. Tracing allows you to see any traffic happening
-          on a block device queue. For more information (and the user space
-          support tools needed), fetch the blktrace app from:
+          on a block device queue. For more information (and the userspace
+          support tools needed), fetch the blktrace tools from:
 
           git://git.kernel.dk/blktrace.git
 
+          If unsure, say N.
+
 config LSF
         bool "Support for Large Single Files"
         depends on !64BIT
         help
-          Say Y here if you want to be able to handle very large files (bigger
-          than 2TB), otherwise say N.
+          Say Y here if you want to be able to handle very large files (2TB
+          and larger), otherwise say N.
 
           If unsure, say Y.
 
@@ -53,14 +70,16 @@ config BLK_DEV_BSG
         bool "Block layer SG support v4 (EXPERIMENTAL)"
         depends on EXPERIMENTAL
         ---help---
-        Saying Y here will enable generic SG (SCSI generic) v4 support
-        for any block device.
-
-        Unlike SG v3 (aka block/scsi_ioctl.c drivers/scsi/sg.c), SG v4
-        can handle complicated SCSI commands: tagged variable length cdbs
-        with bidirectional data transfers and generic request/response
-        protocols (e.g. Task Management Functions and SMP in Serial
-        Attached SCSI).
+        Saying Y here will enable generic SG (SCSI generic) v4 support
+        for any block device.
+
+        Unlike SG v3 (aka block/scsi_ioctl.c drivers/scsi/sg.c), SG v4
+        can handle complicated SCSI commands: tagged variable length cdbs
+        with bidirectional data transfers and generic request/response
+        protocols (e.g. Task Management Functions and SMP in Serial
+        Attached SCSI).
+
+        If unsure, say N.
 
 endif # BLOCK
diff --git a/block/blk-map.c b/block/blk-map.c
index c07d9c8317f..3c942bd6422 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -5,6 +5,7 @@
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <scsi/sg.h>            /* for struct sg_iovec */
 
 #include "blk.h"
 
@@ -140,25 +141,8 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
                 ubuf += ret;
         }
 
-        /*
-         * __blk_rq_map_user() copies the buffers if starting address
-         * or length isn't aligned to dma_pad_mask.  As the copied
-         * buffer is always page aligned, we know that there's enough
-         * room for padding.  Extend the last bio and update
-         * rq->data_len accordingly.
-         *
-         * On unmap, bio_uncopy_user() will use unmodified
-         * bio_map_data pointed to by bio->bi_private.
-         */
-        if (len & q->dma_pad_mask) {
-                unsigned int pad_len = (q->dma_pad_mask & ~len) + 1;
-                struct bio *tail = rq->biotail;
-
-                tail->bi_io_vec[tail->bi_vcnt - 1].bv_len += pad_len;
-                tail->bi_size += pad_len;
-
-                rq->extra_len += pad_len;
-        }
+        if (!bio_flagged(bio, BIO_USER_MAPPED))
+                rq->cmd_flags |= REQ_COPY_USER;
 
         rq->buffer = rq->data = NULL;
         return 0;
@@ -194,15 +178,26 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                         struct sg_iovec *iov, int iov_count, unsigned int len)
 {
         struct bio *bio;
+        int i, read = rq_data_dir(rq) == READ;
+        int unaligned = 0;
 
         if (!iov || iov_count <= 0)
                 return -EINVAL;
 
-        /* we don't allow misaligned data like bio_map_user() does.  If the
-         * user is using sg, they're expected to know the alignment constraints
-         * and respect them accordingly */
-        bio = bio_map_user_iov(q, NULL, iov, iov_count,
-                               rq_data_dir(rq) == READ);
+        for (i = 0; i < iov_count; i++) {
+                unsigned long uaddr = (unsigned long)iov[i].iov_base;
+
+                if (uaddr & queue_dma_alignment(q)) {
+                        unaligned = 1;
+                        break;
+                }
+        }
+
+        if (unaligned || (q->dma_pad_mask & len))
+                bio = bio_copy_user_iov(q, iov, iov_count, read);
+        else
+                bio = bio_map_user_iov(q, NULL, iov, iov_count, read);
+
         if (IS_ERR(bio))
                 return PTR_ERR(bio);
 
@@ -212,6 +207,9 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                 return -EINVAL;
         }
 
+        if (!bio_flagged(bio, BIO_USER_MAPPED))
+                rq->cmd_flags |= REQ_COPY_USER;
+
         bio_get(bio);
         blk_rq_bio_prep(q, rq, bio);
         rq->buffer = rq->data = NULL;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 0f58616bcd7..b5c5c4a9e3f 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -220,6 +220,15 @@ new_segment:
                 bvprv = bvec;
         } /* segments in rq */
 
+
+        if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
+            (rq->data_len & q->dma_pad_mask)) {
+                unsigned int pad_len = (q->dma_pad_mask & ~rq->data_len) + 1;
+
+                sg->length += pad_len;
+                rq->extra_len += pad_len;
+        }
+
         if (q->dma_drain_size && q->dma_drain_needed(rq)) {
                 if (rq->cmd_flags & REQ_RW)
                         memset(q->dma_drain_buffer, 0, q->dma_drain_size);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 54d0db11615..fc41d83be22 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -276,9 +276,12 @@ int blk_register_queue(struct gendisk *disk)
 
         struct request_queue *q = disk->queue;
 
-        if (!q || !q->request_fn)
+        if (WARN_ON(!q))
                 return -ENXIO;
 
+        if (!q->request_fn)
+                return 0;
+
         ret = kobject_add(&q->kobj, kobject_get(&disk->dev.kobj),
                           "%s", "queue");
         if (ret < 0)
@@ -300,7 +303,10 @@ void blk_unregister_queue(struct gendisk *disk)
 {
         struct request_queue *q = disk->queue;
 
-        if (q && q->request_fn) {
+        if (WARN_ON(!q))
+                return;
+
+        if (q->request_fn) {
                 elv_unregister_queue(q);
 
                 kobject_uevent(&q->kobj, KOBJ_REMOVE);
diff --git a/block/bsg.c b/block/bsg.c
index 8917c5174dc..f51172ed27c 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -37,7 +37,6 @@ struct bsg_device {
         struct list_head done_list;
         struct hlist_node dev_list;
         atomic_t ref_count;
-        int minor;
         int queued_cmds;
         int done_cmds;
         wait_queue_head_t wq_done;
@@ -368,7 +367,7 @@ static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
 
         spin_lock_irq(&bd->lock);
         if (bd->done_cmds) {
-                bc = list_entry(bd->done_list.next, struct bsg_command, list);
+                bc = list_first_entry(&bd->done_list, struct bsg_command, list);
                 list_del(&bc->list);
                 bd->done_cmds--;
         }
@@ -468,8 +467,6 @@ static int bsg_complete_all_commands(struct bsg_device *bd)
 
         dprintk("%s: entered\n", bd->name);
 
-        set_bit(BSG_F_BLOCK, &bd->flags);
-
         /*
          * wait for all commands to complete
          */
@@ -705,6 +702,7 @@ static struct bsg_device *bsg_alloc_device(void)
 static int bsg_put_device(struct bsg_device *bd)
 {
         int ret = 0;
+        struct device *dev = bd->queue->bsg_dev.dev;
 
         mutex_lock(&bsg_mutex);
 
@@ -730,6 +728,7 @@ static int bsg_put_device(struct bsg_device *bd)
         kfree(bd);
 out:
         mutex_unlock(&bsg_mutex);
+        put_device(dev);
         return ret;
 }
 
@@ -738,24 +737,28 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
                                          struct file *file)
 {
         struct bsg_device *bd;
+        int ret;
 #ifdef BSG_DEBUG
         unsigned char buf[32];
 #endif
+        ret = blk_get_queue(rq);
+        if (ret)
+                return ERR_PTR(-ENXIO);
 
         bd = bsg_alloc_device();
-        if (!bd)
+        if (!bd) {
+                blk_put_queue(rq);
                 return ERR_PTR(-ENOMEM);
+        }
 
         bd->queue = rq;
-        kobject_get(&rq->kobj);
         bsg_set_block(bd, file);
 
         atomic_set(&bd->ref_count, 1);
-        bd->minor = iminor(inode);
         mutex_lock(&bsg_mutex);
-        hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(bd->minor));
+        hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));
 
-        strncpy(bd->name, rq->bsg_dev.class_dev->class_id, sizeof(bd->name) - 1);
+        strncpy(bd->name, rq->bsg_dev.class_dev->bus_id, sizeof(bd->name) - 1);
         dprintk("bound to <%s>, max queue %d\n",
                 format_dev_t(buf, inode->i_rdev), bd->max_queue);
 
@@ -763,23 +766,21 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
         return bd;
 }
 
-static struct bsg_device *__bsg_get_device(int minor)
+static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
 {
-        struct bsg_device *bd = NULL;
+        struct bsg_device *bd;
         struct hlist_node *entry;
 
         mutex_lock(&bsg_mutex);
-        hlist_for_each(entry, bsg_dev_idx_hash(minor)) {
-                bd = hlist_entry(entry, struct bsg_device, dev_list);
-                if (bd->minor == minor) {
+        hlist_for_each_entry(bd, entry, bsg_dev_idx_hash(minor), dev_list) {
+                if (bd->queue == q) {
                         atomic_inc(&bd->ref_count);
-                        break;
+                        goto found;
                 }
-
-                bd = NULL;
         }
-
+        bd = NULL;
+found:
         mutex_unlock(&bsg_mutex);
         return bd;
 }
 
@@ -789,21 +790,27 @@ static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
         struct bsg_device *bd;
         struct bsg_class_device *bcd;
 
-        bd = __bsg_get_device(iminor(inode));
-        if (bd)
-                return bd;
-
         /*
          * find the class device
          */
         mutex_lock(&bsg_mutex);
         bcd = idr_find(&bsg_minor_idr, iminor(inode));
+        if (bcd)
+                get_device(bcd->dev);
         mutex_unlock(&bsg_mutex);
 
         if (!bcd)
                 return ERR_PTR(-ENODEV);
 
-        return bsg_add_device(inode, bcd->queue, file);
+        bd = __bsg_get_device(iminor(inode), bcd->queue);
+        if (bd)
+                return bd;
+
+        bd = bsg_add_device(inode, bcd->queue, file);
+        if (IS_ERR(bd))
+                put_device(bcd->dev);
+
+        return bd;
 }
 
 static int bsg_open(struct inode *inode, struct file *file)
@@ -939,10 +946,9 @@ void bsg_unregister_queue(struct request_queue *q)
         mutex_lock(&bsg_mutex);
         idr_remove(&bsg_minor_idr, bcd->minor);
         sysfs_remove_link(&q->kobj, "bsg");
-        class_device_unregister(bcd->class_dev);
+        device_unregister(bcd->class_dev);
         put_device(bcd->dev);
         bcd->class_dev = NULL;
-        bcd->dev = NULL;
         mutex_unlock(&bsg_mutex);
 }
 EXPORT_SYMBOL_GPL(bsg_unregister_queue);
@@ -953,7 +959,7 @@ int bsg_register_queue(struct request_queue *q, struct device *gdev,
         struct bsg_class_device *bcd;
         dev_t dev;
         int ret, minor;
-        struct class_device *class_dev = NULL;
+        struct device *class_dev = NULL;
         const char *devname;
 
         if (name)
@@ -992,8 +998,7 @@ int bsg_register_queue(struct request_queue *q, struct device *gdev,
         bcd->queue = q;
         bcd->dev = get_device(gdev);
         dev = MKDEV(bsg_major, bcd->minor);
-        class_dev = class_device_create(bsg_class, NULL, dev, gdev, "%s",
-                                        devname);
+        class_dev = device_create(bsg_class, gdev, dev, "%s", devname);
         if (IS_ERR(class_dev)) {
                 ret = PTR_ERR(class_dev);
                 goto put_dev;
@@ -1010,7 +1015,7 @@ int bsg_register_queue(struct request_queue *q, struct device *gdev,
         return 0;
 
 unregister_class_dev:
-        class_device_unregister(class_dev);
+        device_unregister(class_dev);
 put_dev:
         put_device(gdev);
 remove_idr:
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index b73373216b0..c70d0b6f666 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -624,7 +624,6 @@ static int compat_blkdev_driver_ioctl(struct inode *inode, struct file *file,
         case HDIO_GET_IDENTITY:
         case HDIO_DRIVE_TASK:
         case HDIO_DRIVE_CMD:
-        case HDIO_SCAN_HWIF:
         /* 0x330 is reserved -- it used to be HDIO_GETGEO_BIG */
         case 0x330:
         /* 0x02 -- Floppy ioctls */
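
The blk-map.c hunks above make blk_rq_map_user_iov() tolerant of misaligned user buffers: each iovec base address is tested against queue_dma_alignment(q), and if any segment is misaligned, or the total length trips q->dma_pad_mask, the data is bounce-copied via bio_copy_user_iov() instead of being mapped in place via bio_map_user_iov(). The following is a minimal userspace sketch of that decision only, not kernel code; struct iovec from <sys/uio.h> stands in for struct sg_iovec, and the mask values passed in main() are made-up examples.

/*
 * Sketch of the copy-vs-map decision taken by the new blk_rq_map_user_iov():
 * returns 1 when the request would have to be bounce-copied.
 */
#include <stdio.h>
#include <sys/uio.h>

static int needs_copy(const struct iovec *iov, int iov_count,
                      unsigned int len, unsigned long dma_align_mask,
                      unsigned long dma_pad_mask)
{
        int i;

        for (i = 0; i < iov_count; i++)
                if ((unsigned long)iov[i].iov_base & dma_align_mask)
                        return 1;               /* misaligned segment */

        return (len & dma_pad_mask) != 0;       /* unpadded total length */
}

int main(void)
{
        static long backing[256];               /* long-aligned buffer */
        char *p = (char *)backing;
        struct iovec ok[1]  = { { .iov_base = p,     .iov_len = 512 } };
        struct iovec bad[1] = { { .iov_base = p + 1, .iov_len = 512 } };

        /* example masks: word alignment (3) and word padding (3) */
        printf("aligned iovec:    copy = %d\n", needs_copy(ok, 1, 512, 3, 3));
        printf("misaligned iovec: copy = %d\n", needs_copy(bad, 1, 512, 3, 3));
        return 0;
}

The same change records REQ_COPY_USER on the request whenever the resulting bio is not directly user-mapped (!bio_flagged(bio, BIO_USER_MAPPED)), which is the flag the new padding block in blk-merge.c keys off.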
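
Both the code removed from blk_rq_map_user() and the block added to blk-merge.c rely on the same arithmetic: when len is not aligned, pad_len = (dma_pad_mask & ~len) + 1 is exactly the number of bytes needed to round len up to the next multiple of (dma_pad_mask + 1). A standalone check of that identity follows; the mask value is an arbitrary example, not a real queue setting.

#include <stdio.h>

int main(void)
{
        unsigned int dma_pad_mask = 3;          /* example: pad to 4 bytes */
        unsigned int lens[] = { 13, 14, 16, 510 };
        unsigned int i;

        for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
                unsigned int len = lens[i];

                if (len & dma_pad_mask) {
                        unsigned int pad_len = (dma_pad_mask & ~len) + 1;

                        printf("len %3u -> pad %u -> %3u\n",
                               len, pad_len, len + pad_len);
                } else {
                        printf("len %3u already padded\n", len);
                }
        }
        return 0;
}

Moving this adjustment out of blk_rq_map_user() and into the segment-mapping code in blk-merge.c means the padding is applied to the last scatterlist entry (sg->length += pad_len) rather than to the tail bio, with rq->extra_len still tracking the extra bytes.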