author    | Christoph Hellwig <hch@infradead.org> | 2011-10-14 07:29:58 -0400
committer | Nicholas Bellinger <nab@linux-iscsi.org> | 2011-10-24 03:21:29 +0000
commit    | df5fa691ce61aedd3e4dbcf960ee44f05b797d8b
tree      | 9acd9f28f37eb697247f6f7256650b03932ede8a /drivers/target
parent    | 6b20fa9aaf0c2f69ee6f9648e20ab2be0206705e
target: make iblock_emulate_sync_cache asynchronous
Do not block the submitting thread when handling a SYNCHRONIZE CACHE command,
but implement it asynchronously by sending the FLUSH command ourselves and
calling transport_complete_sync_cache from the completion handler.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Diffstat (limited to 'drivers/target')
-rw-r--r-- | drivers/target/target_core_iblock.c | 37
1 file changed, 21 insertions(+), 16 deletions(-)
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index bf074c4b7a3..d9ad2a216ea 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -313,37 +313,42 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
 	return blocks_long;
 }
 
+static void iblock_end_io_flush(struct bio *bio, int err)
+{
+	struct se_cmd *cmd = bio->bi_private;
+
+	if (err)
+		pr_err("IBLOCK: cache flush failed: %d\n", err);
+
+	if (cmd)
+		transport_complete_sync_cache(cmd, err == 0);
+	bio_put(bio);
+}
+
 /*
- * Emulate SYCHRONIZE_CACHE_*
+ * Implement SYCHRONIZE CACHE.  Note that we can't handle lba ranges and must
+ * always flush the whole cache.
  */
 static void iblock_emulate_sync_cache(struct se_task *task)
 {
 	struct se_cmd *cmd = task->task_se_cmd;
 	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
 	int immed = (cmd->t_task_cdb[1] & 0x2);
-	sector_t error_sector;
-	int ret;
+	struct bio *bio;
 
 	/*
 	 * If the Immediate bit is set, queue up the GOOD response
-	 * for this SYNCHRONIZE_CACHE op
+	 * for this SYNCHRONIZE_CACHE op.
 	 */
 	if (immed)
 		transport_complete_sync_cache(cmd, 1);
 
-	/*
-	 * blkdev_issue_flush() does not support a specifying a range, so
-	 * we have to flush the entire cache.
-	 */
-	ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector);
-	if (ret != 0) {
-		pr_err("IBLOCK: block_issue_flush() failed: %d "
-			" error_sector: %llu\n", ret,
-			(unsigned long long)error_sector);
-	}
-
+	bio = bio_alloc(GFP_KERNEL, 0);
+	bio->bi_end_io = iblock_end_io_flush;
+	bio->bi_bdev = ib_dev->ibd_bd;
 	if (!immed)
-		transport_complete_sync_cache(cmd, ret == 0);
+		bio->bi_private = cmd;
+	submit_bio(WRITE_FLUSH, bio);
 }
 
 /*
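
The change above is an instance of the standard asynchronous bio-completion idiom on the 3.x-era block layer: allocate an empty bio, point bi_end_io at a callback, stash a pointer in bi_private, and submit it with WRITE_FLUSH so the submitting thread returns immediately. The following is a minimal, self-contained sketch of that idiom only; the example_* names and the use of a struct completion are assumptions made for illustration, while the real patch stores the se_cmd itself in bi_private and completes it via transport_complete_sync_cache().

/*
 * Illustrative sketch only (not the driver code): the asynchronous flush
 * idiom from the patch, written against the 3.x-era block layer API
 * (two-argument bi_end_io callback, submit_bio(WRITE_FLUSH, ...)).
 */
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>

/* Completion callback: the block layer calls this when the flush finishes. */
static void example_end_io_flush(struct bio *bio, int err)
{
	struct completion *done = bio->bi_private;

	if (err)
		pr_err("example: cache flush failed: %d\n", err);

	if (done)
		complete(done);		/* wake anyone waiting on the flush */
	bio_put(bio);
}

/* Submit a full-device flush without blocking the submitting thread. */
static void example_async_flush(struct block_device *bdev, struct completion *done)
{
	struct bio *bio;

	bio = bio_alloc(GFP_KERNEL, 0);		/* empty bio: no data payload */
	bio->bi_end_io = example_end_io_flush;
	bio->bi_bdev = bdev;
	bio->bi_private = done;			/* NULL makes it fire-and-forget */
	submit_bio(WRITE_FLUSH, bio);		/* queue the flush and return */
}

Note the design choice visible in the patch: when the SYNCHRONIZE CACHE CDB has the Immediate bit set, the command is completed with GOOD status up front and bi_private is deliberately left NULL, so the completion handler only calls transport_complete_sync_cache() when the initiator actually asked to wait for the flush to finish.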