author     Max Gurtovoy <maxg@mellanox.com>          2019-09-16 18:44:29 +0300
committer  Jens Axboe <axboe@kernel.dk>              2019-09-17 20:03:49 -0600
commit     54d4e6ab91eb24b47a58403d8561206e916f0242 (patch)
tree       3bd64c92b614284071d893012abcf01bfc55cabc /block
parent     5eaed68dd38c14907be2ce5a45c73a5f2acb75f4 (diff)
block: centralize PI remapping logic to the block layer
Currently the t10_pi_prepare/t10_pi_complete functions are called during
command preparation/completion in the NVMe and SCSI layers, but their
proper place is the block layer, since T10-PI is a general data integrity
feature used by block storage protocols. Introduce .prepare_fn and
.complete_fn callbacks within the integrity profile that each type can
implement according to its needs.

Suggested-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Suggested-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Signed-off-by: Max Gurtovoy <maxg@mellanox.com>

Fixed to not call the queue integrity functions if BLK_DEV_INTEGRITY isn't
enabled in the config.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
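For context, the remapping that the new .prepare_fn performs for Type 1/Type 2
can be illustrated outside the kernel. The sketch below is hypothetical (the
pi_tuple struct and remap_ref_tags helper are illustrative names, and the
big-endian conversion and BIP_MAPPED_INTEGRITY bookkeeping done by the real
t10_pi_type1_prepare() are omitted): each protection tuple carries a reference
tag seeded with the virtual start sector submitted by the upper layer, and
before the request reaches the device those tags are rewritten to the physical
start sector; .complete_fn reverses the mapping for READs.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical tuple layout, loosely mirroring a T10-PI tuple:
 * 16-bit guard tag, 16-bit application tag, 32-bit reference tag. */
struct pi_tuple {
	uint16_t guard_tag;
	uint16_t app_tag;
	uint32_t ref_tag;
};

/* Rewrite reference tags from the virtual seed (what the upper layer
 * submitted) to the physical seed (what the device will check), one
 * tag per protection interval. */
static void remap_ref_tags(struct pi_tuple *pi, unsigned int nr_intervals,
			   uint32_t virt_seed, uint32_t phys_seed)
{
	for (unsigned int i = 0; i < nr_intervals; i++) {
		/* Only remap tags that still carry the expected virtual value. */
		if (pi[i].ref_tag == virt_seed + i)
			pi[i].ref_tag = phys_seed + i;
	}
}

int main(void)
{
	struct pi_tuple pi[4] = { { 0 } };
	unsigned int i;

	for (i = 0; i < 4; i++)
		pi[i].ref_tag = 100 + i;	/* virtual seed: sector 100 */

	remap_ref_tags(pi, 4, 100, 2048);	/* partition starts at LBA 2048 */

	for (i = 0; i < 4; i++)
		printf("tuple %u: ref_tag %u\n", i, pi[i].ref_tag);
	return 0;
}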
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c        7
-rw-r--r--  block/blk-integrity.c  11
-rw-r--r--  block/blk-mq.c          6
-rw-r--r--  block/t10-pi.c        144
4 files changed, 100 insertions(+), 68 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 875e8d105067..d5e668ec751b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -34,6 +34,7 @@
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
+#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/psi.h>
@@ -1436,6 +1437,12 @@ bool blk_update_request(struct request *req, blk_status_t error,
if (!req->bio)
return false;
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+ if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
+ error == BLK_STS_OK)
+ req->q->integrity.profile->complete_fn(req, nr_bytes);
+#endif
+
if (unlikely(error && !blk_rq_is_passthrough(req) &&
!(req->rq_flags & RQF_QUIET)))
print_req_error(req, error, __func__);
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index ca39b4624cf8..ff1070edbb40 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -368,10 +368,21 @@ static blk_status_t blk_integrity_nop_fn(struct blk_integrity_iter *iter)
return BLK_STS_OK;
}
+static void blk_integrity_nop_prepare(struct request *rq)
+{
+}
+
+static void blk_integrity_nop_complete(struct request *rq,
+ unsigned int nr_bytes)
+{
+}
+
static const struct blk_integrity_profile nop_profile = {
.name = "nop",
.generate_fn = blk_integrity_nop_fn,
.verify_fn = blk_integrity_nop_fn,
+ .prepare_fn = blk_integrity_nop_prepare,
+ .complete_fn = blk_integrity_nop_complete,
};
/**
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 20a49be536b5..29275f5a996f 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -30,6 +30,7 @@
#include <trace/events/block.h>
#include <linux/blk-mq.h>
+#include <linux/t10-pi.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
@@ -700,6 +701,11 @@ void blk_mq_start_request(struct request *rq)
*/
rq->nr_phys_segments++;
}
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+ if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
+ q->integrity.profile->prepare_fn(rq);
+#endif
}
EXPORT_SYMBOL(blk_mq_start_request);
diff --git a/block/t10-pi.c b/block/t10-pi.c
index 7fed58705d1a..0c0120a672f9 100644
--- a/block/t10-pi.c
+++ b/block/t10-pi.c
@@ -120,76 +120,22 @@ static blk_status_t t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION);
}
-static blk_status_t t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
-{
- return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
-}
-
-static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
-{
- return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
-}
-
-static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
-{
- return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
-}
-
-static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
-{
- return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
-}
-
-const struct blk_integrity_profile t10_pi_type1_crc = {
- .name = "T10-DIF-TYPE1-CRC",
- .generate_fn = t10_pi_type1_generate_crc,
- .verify_fn = t10_pi_type1_verify_crc,
-};
-EXPORT_SYMBOL(t10_pi_type1_crc);
-
-const struct blk_integrity_profile t10_pi_type1_ip = {
- .name = "T10-DIF-TYPE1-IP",
- .generate_fn = t10_pi_type1_generate_ip,
- .verify_fn = t10_pi_type1_verify_ip,
-};
-EXPORT_SYMBOL(t10_pi_type1_ip);
-
-const struct blk_integrity_profile t10_pi_type3_crc = {
- .name = "T10-DIF-TYPE3-CRC",
- .generate_fn = t10_pi_type3_generate_crc,
- .verify_fn = t10_pi_type3_verify_crc,
-};
-EXPORT_SYMBOL(t10_pi_type3_crc);
-
-const struct blk_integrity_profile t10_pi_type3_ip = {
- .name = "T10-DIF-TYPE3-IP",
- .generate_fn = t10_pi_type3_generate_ip,
- .verify_fn = t10_pi_type3_verify_ip,
-};
-EXPORT_SYMBOL(t10_pi_type3_ip);
-
/**
- * t10_pi_prepare - prepare PI prior submitting request to device
+ * t10_pi_type1_prepare - prepare PI prior submitting request to device
* @rq: request with PI that should be prepared
- * @protection_type: PI type (Type 1/Type 2/Type 3)
*
* For Type 1/Type 2, the virtual start sector is the one that was
* originally submitted by the block layer for the ref_tag usage. Due to
* partitioning, MD/DM cloning, etc. the actual physical start sector is
* likely to be different. Remap protection information to match the
* physical LBA.
- *
- * Type 3 does not have a reference tag so no remapping is required.
*/
-void t10_pi_prepare(struct request *rq, u8 protection_type)
+static void t10_pi_type1_prepare(struct request *rq)
{
const int tuple_sz = rq->q->integrity.tuple_size;
u32 ref_tag = t10_pi_ref_tag(rq);
struct bio *bio;
- if (protection_type == T10_PI_TYPE3_PROTECTION)
- return;
-
__rq_for_each_bio(bio, rq) {
struct bio_integrity_payload *bip = bio_integrity(bio);
u32 virt = bip_get_seed(bip) & 0xffffffff;
@@ -222,13 +168,11 @@ void t10_pi_prepare(struct request *rq, u8 protection_type)
bip->bip_flags |= BIP_MAPPED_INTEGRITY;
}
}
-EXPORT_SYMBOL(t10_pi_prepare);
/**
- * t10_pi_complete - prepare PI prior returning request to the block layer
+ * t10_pi_type1_complete - prepare PI prior returning request to the blk layer
* @rq: request with PI that should be prepared
- * @protection_type: PI type (Type 1/Type 2/Type 3)
- * @intervals: total elements to prepare
+ * @nr_bytes: total bytes to prepare
*
* For Type 1/Type 2, the virtual start sector is the one that was
* originally submitted by the block layer for the ref_tag usage. Due to
@@ -236,19 +180,14 @@ EXPORT_SYMBOL(t10_pi_prepare);
* likely to be different. Since the physical start sector was submitted
* to the device, we should remap it back to virtual values expected by the
* block layer.
- *
- * Type 3 does not have a reference tag so no remapping is required.
*/
-void t10_pi_complete(struct request *rq, u8 protection_type,
- unsigned int intervals)
+static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
{
+ unsigned intervals = nr_bytes >> rq->q->integrity.interval_exp;
const int tuple_sz = rq->q->integrity.tuple_size;
u32 ref_tag = t10_pi_ref_tag(rq);
struct bio *bio;
- if (protection_type == T10_PI_TYPE3_PROTECTION)
- return;
-
__rq_for_each_bio(bio, rq) {
struct bio_integrity_payload *bip = bio_integrity(bio);
u32 virt = bip_get_seed(bip) & 0xffffffff;
@@ -276,4 +215,73 @@ void t10_pi_complete(struct request *rq, u8 protection_type,
}
}
}
-EXPORT_SYMBOL(t10_pi_complete);
+
+static blk_status_t t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
+{
+ return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
+}
+
+static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
+{
+ return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
+}
+
+static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
+{
+ return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
+}
+
+static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
+{
+ return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
+}
+
+/**
+ * Type 3 does not have a reference tag so no remapping is required.
+ */
+static void t10_pi_type3_prepare(struct request *rq)
+{
+}
+
+/**
+ * Type 3 does not have a reference tag so no remapping is required.
+ */
+static void t10_pi_type3_complete(struct request *rq, unsigned int nr_bytes)
+{
+}
+
+const struct blk_integrity_profile t10_pi_type1_crc = {
+ .name = "T10-DIF-TYPE1-CRC",
+ .generate_fn = t10_pi_type1_generate_crc,
+ .verify_fn = t10_pi_type1_verify_crc,
+ .prepare_fn = t10_pi_type1_prepare,
+ .complete_fn = t10_pi_type1_complete,
+};
+EXPORT_SYMBOL(t10_pi_type1_crc);
+
+const struct blk_integrity_profile t10_pi_type1_ip = {
+ .name = "T10-DIF-TYPE1-IP",
+ .generate_fn = t10_pi_type1_generate_ip,
+ .verify_fn = t10_pi_type1_verify_ip,
+ .prepare_fn = t10_pi_type1_prepare,
+ .complete_fn = t10_pi_type1_complete,
+};
+EXPORT_SYMBOL(t10_pi_type1_ip);
+
+const struct blk_integrity_profile t10_pi_type3_crc = {
+ .name = "T10-DIF-TYPE3-CRC",
+ .generate_fn = t10_pi_type3_generate_crc,
+ .verify_fn = t10_pi_type3_verify_crc,
+ .prepare_fn = t10_pi_type3_prepare,
+ .complete_fn = t10_pi_type3_complete,
+};
+EXPORT_SYMBOL(t10_pi_type3_crc);
+
+const struct blk_integrity_profile t10_pi_type3_ip = {
+ .name = "T10-DIF-TYPE3-IP",
+ .generate_fn = t10_pi_type3_generate_ip,
+ .verify_fn = t10_pi_type3_verify_ip,
+ .prepare_fn = t10_pi_type3_prepare,
+ .complete_fn = t10_pi_type3_complete,
+};
+EXPORT_SYMBOL(t10_pi_type3_ip);
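
One detail of the new interface worth noting: .complete_fn receives the
completed byte count rather than an interval count, and t10_pi_type1_complete()
derives the number of protection intervals itself from the queue's
interval_exp. A minimal standalone sketch of that conversion, assuming the
common 512-byte protection interval (interval_exp = 9):

#include <stdio.h>

int main(void)
{
	unsigned int interval_exp = 9;	/* 512-byte protection intervals (assumed) */
	unsigned int nr_bytes = 4096;	/* bytes completed for the request */
	unsigned int intervals = nr_bytes >> interval_exp;

	printf("%u completed bytes -> %u PI tuples to remap back\n",
	       nr_bytes, intervals);
	return 0;
}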