Diffstat (limited to 'drivers/target')
-rw-r--r--   drivers/target/iscsi/iscsi_target.c            | 32
-rw-r--r--   drivers/target/iscsi/iscsi_target_configfs.c   | 24
-rw-r--r--   drivers/target/target_core_device.c            | 20
-rw-r--r--   drivers/target/target_core_file.c              | 10
-rw-r--r--   drivers/target/target_core_iblock.c            | 17
-rw-r--r--   drivers/target/target_core_pscsi.c             | 46
6 files changed, 67 insertions(+), 82 deletions(-)
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 6fe6a6bab3f4..ddf6c2a7212b 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -3596,10 +3596,7 @@ static int iscsit_send_reject(
void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
{
        int ord, cpu;
-       cpumask_t conn_allowed_cpumask;
-
-       cpumask_and(&conn_allowed_cpumask, iscsit_global->allowed_cpumask,
-               cpu_online_mask);
+       cpumask_var_t conn_allowed_cpumask;

        /*
         * bitmap_id is assigned from iscsit_global->ts_bitmap from
@@ -3609,13 +3606,28 @@ void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
         * iSCSI connection's RX/TX threads will be scheduled to
         * execute upon.
         */
-       cpumask_clear(conn->conn_cpumask);
-       ord = conn->bitmap_id % cpumask_weight(&conn_allowed_cpumask);
-       for_each_cpu(cpu, &conn_allowed_cpumask) {
-               if (ord-- == 0) {
-                       cpumask_set_cpu(cpu, conn->conn_cpumask);
-                       return;
+       if (!zalloc_cpumask_var(&conn_allowed_cpumask, GFP_KERNEL)) {
+               ord = conn->bitmap_id % cpumask_weight(cpu_online_mask);
+               for_each_online_cpu(cpu) {
+                       if (ord-- == 0) {
+                               cpumask_set_cpu(cpu, conn->conn_cpumask);
+                               return;
+                       }
+               }
+       } else {
+               cpumask_and(conn_allowed_cpumask, iscsit_global->allowed_cpumask,
+                       cpu_online_mask);
+
+               cpumask_clear(conn->conn_cpumask);
+               ord = conn->bitmap_id % cpumask_weight(conn_allowed_cpumask);
+               for_each_cpu(cpu, conn_allowed_cpumask) {
+                       if (ord-- == 0) {
+                               cpumask_set_cpu(cpu, conn->conn_cpumask);
+                               free_cpumask_var(conn_allowed_cpumask);
+                               return;
+                       }
                }
+               free_cpumask_var(conn_allowed_cpumask);
        }
        /*
         * This should never be reached..
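For context, a standalone sketch of the allocate-with-fallback pattern the hunk above adopts. The helper name and its parameters are hypothetical; only the cpumask calls (zalloc_cpumask_var(), cpumask_and(), for_each_cpu(), free_cpumask_var()) mirror the patch. When the temporary cpumask_var_t cannot be allocated (possible with CONFIG_CPUMASK_OFFSTACK=y), selection falls back to the plain online mask, just as the first branch above does.

#include <linux/cpumask.h>
#include <linux/gfp.h>

/* Hypothetical helper: pick the n-th CPU from (allowed & online). */
static int pick_nth_allowed_cpu(const struct cpumask *allowed, unsigned int n)
{
        cpumask_var_t tmp;
        int ord, cpu;

        if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
                goto fallback;

        cpumask_and(tmp, allowed, cpu_online_mask);
        if (cpumask_empty(tmp)) {
                free_cpumask_var(tmp);
                goto fallback;
        }

        ord = n % cpumask_weight(tmp);
        for_each_cpu(cpu, tmp) {
                if (ord-- == 0)
                        break;
        }
        free_cpumask_var(tmp);
        return cpu;

fallback:
        /* No temporary mask (or empty intersection): use online CPUs only. */
        ord = n % cpumask_weight(cpu_online_mask);
        for_each_online_cpu(cpu) {
                if (ord-- == 0)
                        break;
        }
        return cpu;
}
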
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 0cedcfe207b5..57b4fd56d92a 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -1137,23 +1137,27 @@ static ssize_t lio_target_wwn_cpus_allowed_list_show(
static ssize_t lio_target_wwn_cpus_allowed_list_store(
struct config_item *item, const char *page, size_t count)
{
- int ret;
+ int ret = -ENOMEM;
char *orig;
- cpumask_t new_allowed_cpumask;
+ cpumask_var_t new_allowed_cpumask;
+
+ if (!zalloc_cpumask_var(&new_allowed_cpumask, GFP_KERNEL))
+ goto out;
orig = kstrdup(page, GFP_KERNEL);
if (!orig)
- return -ENOMEM;
+ goto out_free_cpumask;
- cpumask_clear(&new_allowed_cpumask);
- ret = cpulist_parse(orig, &new_allowed_cpumask);
+ ret = cpulist_parse(orig, new_allowed_cpumask);
+ if (!ret)
+ cpumask_copy(iscsit_global->allowed_cpumask,
+ new_allowed_cpumask);
kfree(orig);
- if (ret != 0)
- return ret;
-
- cpumask_copy(iscsit_global->allowed_cpumask, &new_allowed_cpumask);
- return count;
+out_free_cpumask:
+ free_cpumask_var(new_allowed_cpumask);
+out:
+ return ret ? ret : count;
}
CONFIGFS_ATTR(lio_target_wwn_, cpus_allowed_list);
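The configfs store handler above uses the same cpumask_var_t discipline. Below is a hedged sketch of that parse-and-commit pattern with hypothetical names (store_cpulist, live_mask) standing in for the LIO-specific ones; the new mask is copied into the live one only once cpulist_parse() succeeds, and the temporary allocation is freed on every path.

#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical store handler: parse "0-3,8"-style input into live_mask. */
static ssize_t store_cpulist(struct cpumask *live_mask, const char *page,
                             size_t count)
{
        cpumask_var_t new_mask;
        char *orig;
        int ret = -ENOMEM;

        if (!zalloc_cpumask_var(&new_mask, GFP_KERNEL))
                return -ENOMEM;

        orig = kstrdup(page, GFP_KERNEL);
        if (!orig)
                goto out_free_cpumask;

        ret = cpulist_parse(orig, new_mask);
        if (!ret)
                cpumask_copy(live_mask, new_mask);
        kfree(orig);

out_free_cpumask:
        free_cpumask_var(new_mask);
        return ret ? ret : count;
}
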
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 44bb380e7390..25f33eb25337 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -829,28 +829,26 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
}
/*
- * Check if the underlying struct block_device request_queue supports
- * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
- * in ATA and we need to set TPE=1
+ * Check if the underlying struct block_device supports discard and if yes
+ * configure the UNMAP parameters.
*/
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
- struct request_queue *q)
+ struct block_device *bdev)
{
- int block_size = queue_logical_block_size(q);
+ int block_size = bdev_logical_block_size(bdev);
- if (!blk_queue_discard(q))
+ if (!bdev_max_discard_sectors(bdev))
return false;
attrib->max_unmap_lba_count =
- q->limits.max_discard_sectors >> (ilog2(block_size) - 9);
+ bdev_max_discard_sectors(bdev) >> (ilog2(block_size) - 9);
/*
* Currently hardcoded to 1 in Linux/SCSI code..
*/
attrib->max_unmap_block_desc_count = 1;
- attrib->unmap_granularity = q->limits.discard_granularity / block_size;
- attrib->unmap_granularity_alignment = q->limits.discard_alignment /
- block_size;
- attrib->unmap_zeroes_data = !!(q->limits.max_write_zeroes_sectors);
+ attrib->unmap_granularity = bdev_discard_granularity(bdev) / block_size;
+ attrib->unmap_granularity_alignment =
+ bdev_discard_alignment(bdev) / block_size;
return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);
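A compact restatement of that conversion may help when auditing backends: the struct and helper below are hypothetical, but the bdev_* accessors are the ones the patch switches to, and the shift turns the block layer's 512-byte-sector discard limit into logical blocks.

#include <linux/blkdev.h>
#include <linux/log2.h>

/* Hypothetical container for the derived UNMAP parameters. */
struct unmap_params {
        unsigned int max_lba_count;
        unsigned int granularity;
        unsigned int alignment;
};

static bool fill_unmap_params(struct block_device *bdev, struct unmap_params *p)
{
        unsigned int lbs = bdev_logical_block_size(bdev);

        if (!bdev_max_discard_sectors(bdev))
                return false;   /* device does not support discard */

        /* Convert 512-byte sectors to logical blocks. */
        p->max_lba_count = bdev_max_discard_sectors(bdev) >> (ilog2(lbs) - 9);
        p->granularity   = bdev_discard_granularity(bdev) / lbs;
        p->alignment     = bdev_discard_alignment(bdev) / lbs;
        return true;
}
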
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 8190b840065f..e68f1cc8ef98 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -134,10 +134,10 @@ static int fd_configure_device(struct se_device *dev)
*/
inode = file->f_mapping->host;
if (S_ISBLK(inode->i_mode)) {
- struct request_queue *q = bdev_get_queue(I_BDEV(inode));
+ struct block_device *bdev = I_BDEV(inode);
unsigned long long dev_size;
- fd_dev->fd_block_size = bdev_logical_block_size(I_BDEV(inode));
+ fd_dev->fd_block_size = bdev_logical_block_size(bdev);
/*
* Determine the number of bytes from i_size_read() minus
* one (1) logical sector from underlying struct block_device
@@ -150,7 +150,7 @@ static int fd_configure_device(struct se_device *dev)
dev_size, div_u64(dev_size, fd_dev->fd_block_size),
fd_dev->fd_block_size);
- if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
+ if (target_configure_unmap_from_queue(&dev->dev_attrib, bdev))
pr_debug("IFILE: BLOCK Discard support available,"
" disabled by default\n");
/*
@@ -159,7 +159,7 @@ static int fd_configure_device(struct se_device *dev)
*/
dev->dev_attrib.max_write_same_len = 0xFFFF;
- if (blk_queue_nonrot(q))
+ if (bdev_nonrot(bdev))
dev->dev_attrib.is_nonrot = 1;
} else {
if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
@@ -558,7 +558,7 @@ fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
ret = blkdev_issue_discard(bdev,
target_to_linux_sector(dev, lba),
target_to_linux_sector(dev, nolb),
- GFP_KERNEL, 0);
+ GFP_KERNEL);
if (ret < 0) {
pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n",
ret);
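The blkdev_issue_discard() change above reflects the trimmed API: the flags argument is gone, so callers pass only the device, the 512-byte-sector range, and a GFP mask. A minimal sketch with a hypothetical wrapper name:

#include <linux/blkdev.h>
#include <linux/gfp.h>

/* Hypothetical wrapper: discard nr_sects 512-byte sectors starting at start. */
static int discard_range(struct block_device *bdev, sector_t start,
                         sector_t nr_sects)
{
        return blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL);
}
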
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 87ede165ddba..378c80313a0f 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -119,7 +119,7 @@ static int iblock_configure_device(struct se_device *dev)
dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
dev->dev_attrib.hw_queue_depth = q->nr_requests;
- if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
+ if (target_configure_unmap_from_queue(&dev->dev_attrib, bd))
pr_debug("IBLOCK: BLOCK Discard support available,"
" disabled by default\n");
@@ -133,7 +133,7 @@ static int iblock_configure_device(struct se_device *dev)
else
dev->dev_attrib.max_write_same_len = 0xFFFF;
- if (blk_queue_nonrot(q))
+ if (bdev_nonrot(bd))
dev->dev_attrib.is_nonrot = 1;
bi = bdev_get_integrity(bd);
@@ -434,7 +434,7 @@ iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
ret = blkdev_issue_discard(bdev,
target_to_linux_sector(dev, lba),
target_to_linux_sector(dev, nolb),
- GFP_KERNEL, 0);
+ GFP_KERNEL);
if (ret < 0) {
pr_err("blkdev_issue_discard() failed: %d\n", ret);
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -727,17 +727,16 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
if (data_direction == DMA_TO_DEVICE) {
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
- struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
/*
* Force writethrough using REQ_FUA if a volatile write cache
* is not enabled, or if initiator set the Force Unit Access bit.
*/
opf = REQ_OP_WRITE;
miter_dir = SG_MITER_TO_SG;
- if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
+ if (bdev_fua(ib_dev->ibd_bd)) {
if (cmd->se_cmd_flags & SCF_FUA)
opf |= REQ_FUA;
- else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
+ else if (!bdev_write_cache(ib_dev->ibd_bd))
opf |= REQ_FUA;
}
} else {
@@ -886,11 +885,7 @@ iblock_parse_cdb(struct se_cmd *cmd)
static bool iblock_get_write_cache(struct se_device *dev)
{
- struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
- struct block_device *bd = ib_dev->ibd_bd;
- struct request_queue *q = bdev_get_queue(bd);
-
- return test_bit(QUEUE_FLAG_WC, &q->queue_flags);
+ return bdev_write_cache(IBLOCK_DEV(dev)->ibd_bd);
}
static const struct target_backend_ops iblock_ops = {
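A sketch of the write-through decision from iblock_execute_rw() above, expressed with the bdev_fua()/bdev_write_cache() helpers instead of queue_flags; the function name and the want_fua parameter (standing in for the command's SCF_FUA bit) are hypothetical:

#include <linux/blkdev.h>
#include <linux/blk_types.h>

static unsigned int iblock_write_opf(struct block_device *bdev, bool want_fua)
{
        unsigned int opf = REQ_OP_WRITE;

        if (bdev_fua(bdev)) {
                /*
                 * Honour an explicit FUA request, or force write-through
                 * when there is no volatile write cache to rely on.
                 */
                if (want_fua || !bdev_write_cache(bdev))
                        opf |= REQ_FUA;
        }
        return opf;
}
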
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index ff292b75e23f..bb3fb18b2316 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -588,7 +588,7 @@ static void pscsi_destroy_device(struct se_device *dev)
}
static void pscsi_complete_cmd(struct se_cmd *cmd, u8 scsi_status,
- unsigned char *req_sense)
+ unsigned char *req_sense, int valid_data)
{
struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev);
struct scsi_device *sd = pdv->pdv_sd;
@@ -681,7 +681,7 @@ after_mode_select:
* back despite framework assumption that a
* check condition means there is no data
*/
- if (sd->type == TYPE_TAPE &&
+ if (sd->type == TYPE_TAPE && valid_data &&
cmd->data_direction == DMA_FROM_DEVICE) {
/*
* is sense data valid, fixed format,
@@ -818,24 +818,8 @@ static ssize_t pscsi_show_configfs_dev_params(struct se_device *dev, char *b)
static void pscsi_bi_endio(struct bio *bio)
{
- bio_put(bio);
-}
-
-static inline struct bio *pscsi_get_bio(int nr_vecs)
-{
- struct bio *bio;
- /*
- * Use bio_malloc() following the comment in for bio -> struct request
- * in block/blk-core.c:blk_make_request()
- */
- bio = bio_kmalloc(GFP_KERNEL, nr_vecs);
- if (!bio) {
- pr_err("PSCSI: bio_kmalloc() failed\n");
- return NULL;
- }
- bio->bi_end_io = pscsi_bi_endio;
-
- return bio;
+ bio_uninit(bio);
+ kfree(bio);
}
static sense_reason_t
@@ -878,15 +862,12 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
if (!bio) {
new_bio:
nr_vecs = bio_max_segs(nr_pages);
- /*
- * Calls bio_kmalloc() and sets bio->bi_end_io()
- */
- bio = pscsi_get_bio(nr_vecs);
+ bio = bio_kmalloc(nr_vecs, GFP_KERNEL);
if (!bio)
goto fail;
-
- if (rw)
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+ bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs,
+ rw ? REQ_OP_WRITE : REQ_OP_READ);
+ bio->bi_end_io = pscsi_bi_endio;
pr_debug("PSCSI: Allocated bio: %p,"
" dir: %s nr_vecs: %d\n", bio,
@@ -912,11 +893,6 @@ new_bio:
goto fail;
}
- /*
- * Clear the pointer so that another bio will
- * be allocated with pscsi_get_bio() above.
- */
- bio = NULL;
goto new_bio;
}
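The pscsi hunks above follow the reworked bio_kmalloc() contract: it now returns an uninitialized bio, so the caller performs bio_init() itself and the completion handler tears the bio down with bio_uninit() plus kfree() rather than bio_put(). A self-contained sketch (the example_* names are hypothetical):

#include <linux/bio.h>
#include <linux/slab.h>

static void example_bi_endio(struct bio *bio)
{
        bio_uninit(bio);
        kfree(bio);
}

static struct bio *example_alloc_bio(unsigned short nr_vecs, bool write)
{
        struct bio *bio;

        bio = bio_kmalloc(nr_vecs, GFP_KERNEL);         /* bio + inline bvecs */
        if (!bio)
                return NULL;

        bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs,
                 write ? REQ_OP_WRITE : REQ_OP_READ);
        bio->bi_end_io = example_bi_endio;
        return bio;
}
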
@@ -1032,6 +1008,7 @@ static void pscsi_req_done(struct request *req, blk_status_t status)
struct se_cmd *cmd = req->end_io_data;
struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(req);
enum sam_status scsi_status = scmd->result & 0xff;
+ int valid_data = cmd->data_length - scmd->resid_len;
u8 *cdb = cmd->priv;
if (scsi_status != SAM_STAT_GOOD) {
@@ -1039,12 +1016,11 @@ static void pscsi_req_done(struct request *req, blk_status_t status)
" 0x%02x Result: 0x%08x\n", cmd, cdb[0], scmd->result);
}
- pscsi_complete_cmd(cmd, scsi_status, scmd->sense_buffer);
+ pscsi_complete_cmd(cmd, scsi_status, scmd->sense_buffer, valid_data);
switch (host_byte(scmd->result)) {
case DID_OK:
- target_complete_cmd_with_length(cmd, scsi_status,
- cmd->data_length - scmd->resid_len);
+ target_complete_cmd_with_length(cmd, scsi_status, valid_data);
break;
default:
pr_debug("PSCSI Host Byte exception at cmd: %p CDB:"