author     Linus Torvalds <torvalds@linux-foundation.org>  2019-09-24 16:31:50 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-09-24 16:31:50 -0700
commit     2e959dd87a9f58f1ad824d830e06388c9e328239 (patch)
tree       d0198a67d9bbf7522434db1e19bffdfa01c6bb91 /block
parent     9c9fa97a8edbc3668dfc7a25de516e80c146e86f (diff)
parent     d46fe2cb2dce7f5038473b5859e03f5e16b7428e (diff)
Merge tag 'for-5.4/post-2019-09-24' of git://git.kernel.dk/linux-block
Pull more block updates from Jens Axboe:
 "Some later additions that weren't quite done for the first pull
  request, and also a few fixes that have arrived since.

  This contains:

   - Kill silly pktcdvd warning on attempting to register a non-scsi
     passthrough device (me)

   - Use symbolic constants for the block t10 protection types, and
     switch to handling it in core rather than in the drivers (Max)

   - libahci platform missing node put fix (Nishka)

   - Small series of fixes for BFQ (Paolo)

   - Fix possible nbd crash (Xiubo)"

* tag 'for-5.4/post-2019-09-24' of git://git.kernel.dk/linux-block:
  block: drop device references in bsg_queue_rq()
  block: t10-pi: fix -Wswitch warning
  pktcdvd: remove warning on attempting to register non-passthrough dev
  ata: libahci_platform: Add of_node_put() before loop exit
  nbd: fix possible page fault for nbd disk
  nbd: rename the runtime flags as NBD_RT_ prefixed
  block, bfq: push up injection only after setting service time
  block, bfq: increase update frequency of inject limit
  block, bfq: reduce upper bound for inject limit to max_rq_in_driver+1
  block, bfq: update inject limit only after injection occurred
  block: centralize PI remapping logic to the block layer
  block: use symbolic constants for t10_pi type
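For context on the "symbolic constants" item above: the t10-pi helpers in the diff below switch from bare integer type codes (1, 2, 3) to the existing enum t10_dif_type from <linux/t10-pi.h>. A minimal user-space C sketch of the idea follows; the enum values mirror the kernel header, while the helper name is invented purely for illustration.

/* Sketch only: mirrors how the t10-pi helpers now take an enum instead of a
 * bare integer type code. The enum values match include/linux/t10-pi.h; the
 * helper pi_needs_ref_tag() is a made-up example, not a kernel function. */
#include <stdbool.h>
#include <stdio.h>

enum t10_dif_type {
	T10_PI_TYPE0_PROTECTION = 0x0,
	T10_PI_TYPE1_PROTECTION = 0x1,
	T10_PI_TYPE2_PROTECTION = 0x2,
	T10_PI_TYPE3_PROTECTION = 0x3,
};

/* Only Type 1 and Type 2 carry a meaningful reference tag. */
static bool pi_needs_ref_tag(enum t10_dif_type type)
{
	return type == T10_PI_TYPE1_PROTECTION ||
	       type == T10_PI_TYPE2_PROTECTION;
}

int main(void)
{
	printf("Type 3 needs ref tag remap: %d\n",
	       pi_needs_ref_tag(T10_PI_TYPE3_PROTECTION));
	return 0;
}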
Diffstat (limited to 'block')
-rw-r--r--  block/bfq-iosched.c    35
-rw-r--r--  block/blk-core.c        7
-rw-r--r--  block/blk-integrity.c  11
-rw-r--r--  block/blk-mq.c          6
-rw-r--r--  block/bsg-lib.c        10
-rw-r--r--  block/t10-pi.c        169
6 files changed, 144 insertions, 94 deletions
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index b33be928d164..0319d6339822 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -2016,7 +2016,7 @@ static void bfq_add_request(struct request *rq)
(bfqq->last_serv_time_ns > 0 &&
bfqd->rqs_injected && bfqd->rq_in_driver > 0)) &&
time_is_before_eq_jiffies(bfqq->decrease_time_jif +
- msecs_to_jiffies(100))) {
+ msecs_to_jiffies(10))) {
bfqd->last_empty_occupied_ns = ktime_get_ns();
/*
* Start the state machine for measuring the
@@ -2025,7 +2025,21 @@ static void bfq_add_request(struct request *rq)
* be set when rq will be dispatched.
*/
bfqd->wait_dispatch = true;
- bfqd->rqs_injected = false;
+ /*
+ * If there is no I/O in service in the drive,
+ * then possible injection occurred before the
+ * arrival of rq will not affect the total
+ * service time of rq. So the injection limit
+ * must not be updated as a function of such
+ * total service time, unless new injection
+ * occurs before rq is completed. To have the
+ * injection limit updated only in the latter
+ * case, reset rqs_injected here (rqs_injected
+ * will be set in case injection is performed
+ * on bfqq before rq is completed).
+ */
+ if (bfqd->rq_in_driver == 0)
+ bfqd->rqs_injected = false;
}
}
@@ -5784,14 +5798,14 @@ static void bfq_update_inject_limit(struct bfq_data *bfqd,
u64 tot_time_ns = ktime_get_ns() - bfqd->last_empty_occupied_ns;
unsigned int old_limit = bfqq->inject_limit;
- if (bfqq->last_serv_time_ns > 0) {
+ if (bfqq->last_serv_time_ns > 0 && bfqd->rqs_injected) {
u64 threshold = (bfqq->last_serv_time_ns * 3)>>1;
if (tot_time_ns >= threshold && old_limit > 0) {
bfqq->inject_limit--;
bfqq->decrease_time_jif = jiffies;
} else if (tot_time_ns < threshold &&
- old_limit < bfqd->max_rq_in_driver<<1)
+ old_limit <= bfqd->max_rq_in_driver)
bfqq->inject_limit++;
}
@@ -5809,12 +5823,14 @@ static void bfq_update_inject_limit(struct bfq_data *bfqd,
*/
if ((bfqq->last_serv_time_ns == 0 && bfqd->rq_in_driver == 1) ||
tot_time_ns < bfqq->last_serv_time_ns) {
+ if (bfqq->last_serv_time_ns == 0) {
+ /*
+ * Now we certainly have a base value: make sure we
+ * start trying injection.
+ */
+ bfqq->inject_limit = max_t(unsigned int, 1, old_limit);
+ }
bfqq->last_serv_time_ns = tot_time_ns;
- /*
- * Now we certainly have a base value: make sure we
- * start trying injection.
- */
- bfqq->inject_limit = max_t(unsigned int, 1, old_limit);
} else if (!bfqd->rqs_injected && bfqd->rq_in_driver == 1)
/*
* No I/O injected and no request still in service in
@@ -5830,6 +5846,7 @@ static void bfq_update_inject_limit(struct bfq_data *bfqd,
/* update complete, not waiting for any request completion any longer */
bfqd->waited_rq = NULL;
+ bfqd->rqs_injected = false;
}
/*
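As a reading aid for the bfq hunks above, here is a small user-space C sketch of the inject-limit update rule they change: the limit shrinks when the measured service time exceeds 1.5x the baseline, grows only up to max_rq_in_driver + 1 (the new upper bound), and is only updated when injection actually occurred. Field names are simplified stand-ins for the bfqd/bfqq state, not the real structures.

/* Sketch of the inject-limit heuristic from the bfq diff above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct inject_state {
	uint64_t last_serv_time_ns;	/* baseline service time */
	unsigned int inject_limit;	/* current injection limit */
	unsigned int max_rq_in_driver;	/* peak requests seen in driver */
	bool rqs_injected;		/* did injection occur for this sample? */
};

static void update_inject_limit(struct inject_state *s, uint64_t tot_time_ns)
{
	if (s->last_serv_time_ns > 0 && s->rqs_injected) {
		uint64_t threshold = (s->last_serv_time_ns * 3) >> 1;

		if (tot_time_ns >= threshold && s->inject_limit > 0)
			s->inject_limit--;		/* injection hurt: back off */
		else if (tot_time_ns < threshold &&
			 s->inject_limit <= s->max_rq_in_driver)
			s->inject_limit++;		/* room to inject more */
	}
	s->rqs_injected = false;			/* sample consumed */
}

int main(void)
{
	struct inject_state s = { .last_serv_time_ns = 1000000,
				  .inject_limit = 2, .max_rq_in_driver = 4,
				  .rqs_injected = true };

	update_inject_limit(&s, 2000000);	/* slow sample: limit drops to 1 */
	printf("inject_limit = %u\n", s.inject_limit);
	return 0;
}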
diff --git a/block/blk-core.c b/block/blk-core.c
index 875e8d105067..d5e668ec751b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -34,6 +34,7 @@
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
+#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/psi.h>
@@ -1436,6 +1437,12 @@ bool blk_update_request(struct request *req, blk_status_t error,
if (!req->bio)
return false;
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+ if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&
+ error == BLK_STS_OK)
+ req->q->integrity.profile->complete_fn(req, nr_bytes);
+#endif
+
if (unlikely(error && !blk_rq_is_passthrough(req) &&
!(req->rq_flags & RQF_QUIET)))
print_req_error(req, error, __func__);
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index ca39b4624cf8..ff1070edbb40 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -368,10 +368,21 @@ static blk_status_t blk_integrity_nop_fn(struct blk_integrity_iter *iter)
return BLK_STS_OK;
}
+static void blk_integrity_nop_prepare(struct request *rq)
+{
+}
+
+static void blk_integrity_nop_complete(struct request *rq,
+ unsigned int nr_bytes)
+{
+}
+
static const struct blk_integrity_profile nop_profile = {
.name = "nop",
.generate_fn = blk_integrity_nop_fn,
.verify_fn = blk_integrity_nop_fn,
+ .prepare_fn = blk_integrity_nop_prepare,
+ .complete_fn = blk_integrity_nop_complete,
};
/**
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 20a49be536b5..29275f5a996f 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -30,6 +30,7 @@
#include <trace/events/block.h>
#include <linux/blk-mq.h>
+#include <linux/t10-pi.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
@@ -700,6 +701,11 @@ void blk_mq_start_request(struct request *rq)
*/
rq->nr_phys_segments++;
}
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+ if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)
+ q->integrity.profile->prepare_fn(rq);
+#endif
}
EXPORT_SYMBOL(blk_mq_start_request);
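The blk-core and blk-mq hunks above are the "centralize PI remapping" half of the series: the core now calls the integrity profile's prepare_fn before a protected WRITE is started and its complete_fn after a protected READ finishes, so drivers no longer invoke the t10-pi remap helpers themselves. A rough user-space C sketch of that hook dispatch follows; the struct and function names are simplified stand-ins for blk_integrity_profile, not the kernel API.

/* Sketch of the profile-hook dispatch added in blk_mq_start_request() and
 * blk_update_request() above. */
#include <stdio.h>

struct request;

struct integrity_profile {
	void (*prepare_fn)(struct request *rq);
	void (*complete_fn)(struct request *rq, unsigned int nr_bytes);
};

struct request {
	int is_write;
	const struct integrity_profile *profile;
};

static void type1_prepare(struct request *rq)
{
	(void)rq;
	puts("remap ref tags virt -> phys before dispatch");
}

static void type1_complete(struct request *rq, unsigned int nr_bytes)
{
	(void)rq; (void)nr_bytes;
	puts("remap ref tags phys -> virt on read completion");
}

static const struct integrity_profile type1 = {
	.prepare_fn = type1_prepare,
	.complete_fn = type1_complete,
};

/* Roughly what blk_mq_start_request() now does for PI-capable writes. */
static void start_request(struct request *rq)
{
	if (rq->is_write)
		rq->profile->prepare_fn(rq);
}

/* Roughly what blk_update_request() now does for successful PI reads. */
static void update_request(struct request *rq, unsigned int nr_bytes)
{
	if (!rq->is_write)
		rq->profile->complete_fn(rq, nr_bytes);
}

int main(void)
{
	struct request w = { .is_write = 1, .profile = &type1 };
	struct request r = { .is_write = 0, .profile = &type1 };

	start_request(&w);
	update_request(&r, 4096);
	return 0;
}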
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index 785dd58947f1..347dda16c2f4 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -266,6 +266,7 @@ static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx,
struct request *req = bd->rq;
struct bsg_set *bset =
container_of(q->tag_set, struct bsg_set, tag_set);
+ int sts = BLK_STS_IOERR;
int ret;
blk_mq_start_request(req);
@@ -274,14 +275,15 @@ static blk_status_t bsg_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_IOERR;
if (!bsg_prepare_job(dev, req))
- return BLK_STS_IOERR;
+ goto out;
ret = bset->job_fn(blk_mq_rq_to_pdu(req));
- if (ret)
- return BLK_STS_IOERR;
+ if (!ret)
+ sts = BLK_STS_OK;
+out:
put_device(dev);
- return BLK_STS_OK;
+ return sts;
}
/* called right after the request is allocated for the request_queue */
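The bsg-lib hunk above fixes a device reference leak by funneling every failure path through a single exit label that drops the reference. A minimal user-space C sketch of that pattern follows; the types and function names are simplified stand-ins, not the real bsg API.

/* Sketch of the single-exit reference-drop pattern adopted above. */
#include <stdbool.h>
#include <stdio.h>

struct device { int refcount; };

static void get_device(struct device *dev) { dev->refcount++; }
static void put_device(struct device *dev) { dev->refcount--; }

static bool prepare_job(struct device *dev) { (void)dev; return false; }

static int queue_rq(struct device *dev)
{
	int sts = -1;			/* assume failure, like BLK_STS_IOERR */

	get_device(dev);

	if (!prepare_job(dev))
		goto out;		/* an early return here used to leak dev */

	sts = 0;			/* BLK_STS_OK */
out:
	put_device(dev);		/* reference dropped on every path */
	return sts;
}

int main(void)
{
	struct device dev = { .refcount = 0 };

	queue_rq(&dev);
	printf("refcount after queue_rq: %d\n", dev.refcount);	/* expect 0 */
	return 0;
}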
diff --git a/block/t10-pi.c b/block/t10-pi.c
index 0c0094609dd6..9803c7e0376e 100644
--- a/block/t10-pi.c
+++ b/block/t10-pi.c
@@ -27,7 +27,7 @@ static __be16 t10_pi_ip_fn(void *data, unsigned int len)
* tag.
*/
static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter,
- csum_fn *fn, unsigned int type)
+ csum_fn *fn, enum t10_dif_type type)
{
unsigned int i;
@@ -37,7 +37,7 @@ static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter,
pi->guard_tag = fn(iter->data_buf, iter->interval);
pi->app_tag = 0;
- if (type == 1)
+ if (type == T10_PI_TYPE1_PROTECTION)
pi->ref_tag = cpu_to_be32(lower_32_bits(iter->seed));
else
pi->ref_tag = 0;
@@ -51,17 +51,18 @@ static blk_status_t t10_pi_generate(struct blk_integrity_iter *iter,
}
static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
- csum_fn *fn, unsigned int type)
+ csum_fn *fn, enum t10_dif_type type)
{
unsigned int i;
+ BUG_ON(type == T10_PI_TYPE0_PROTECTION);
+
for (i = 0 ; i < iter->data_size ; i += iter->interval) {
struct t10_pi_tuple *pi = iter->prot_buf;
__be16 csum;
- switch (type) {
- case 1:
- case 2:
+ if (type == T10_PI_TYPE1_PROTECTION ||
+ type == T10_PI_TYPE2_PROTECTION) {
if (pi->app_tag == T10_PI_APP_ESCAPE)
goto next;
@@ -73,12 +74,10 @@ static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
iter->seed, be32_to_cpu(pi->ref_tag));
return BLK_STS_PROTECTION;
}
- break;
- case 3:
+ } else if (type == T10_PI_TYPE3_PROTECTION) {
if (pi->app_tag == T10_PI_APP_ESCAPE &&
pi->ref_tag == T10_PI_REF_ESCAPE)
goto next;
- break;
}
csum = fn(iter->data_buf, iter->interval);
@@ -102,94 +101,40 @@ next:
static blk_status_t t10_pi_type1_generate_crc(struct blk_integrity_iter *iter)
{
- return t10_pi_generate(iter, t10_pi_crc_fn, 1);
+ return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION);
}
static blk_status_t t10_pi_type1_generate_ip(struct blk_integrity_iter *iter)
{
- return t10_pi_generate(iter, t10_pi_ip_fn, 1);
+ return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION);
}
static blk_status_t t10_pi_type1_verify_crc(struct blk_integrity_iter *iter)
{
- return t10_pi_verify(iter, t10_pi_crc_fn, 1);
+ return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE1_PROTECTION);
}
static blk_status_t t10_pi_type1_verify_ip(struct blk_integrity_iter *iter)
{
- return t10_pi_verify(iter, t10_pi_ip_fn, 1);
-}
-
-static blk_status_t t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
-{
- return t10_pi_generate(iter, t10_pi_crc_fn, 3);
-}
-
-static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
-{
- return t10_pi_generate(iter, t10_pi_ip_fn, 3);
-}
-
-static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
-{
- return t10_pi_verify(iter, t10_pi_crc_fn, 3);
+ return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE1_PROTECTION);
}
-static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
-{
- return t10_pi_verify(iter, t10_pi_ip_fn, 3);
-}
-
-const struct blk_integrity_profile t10_pi_type1_crc = {
- .name = "T10-DIF-TYPE1-CRC",
- .generate_fn = t10_pi_type1_generate_crc,
- .verify_fn = t10_pi_type1_verify_crc,
-};
-EXPORT_SYMBOL(t10_pi_type1_crc);
-
-const struct blk_integrity_profile t10_pi_type1_ip = {
- .name = "T10-DIF-TYPE1-IP",
- .generate_fn = t10_pi_type1_generate_ip,
- .verify_fn = t10_pi_type1_verify_ip,
-};
-EXPORT_SYMBOL(t10_pi_type1_ip);
-
-const struct blk_integrity_profile t10_pi_type3_crc = {
- .name = "T10-DIF-TYPE3-CRC",
- .generate_fn = t10_pi_type3_generate_crc,
- .verify_fn = t10_pi_type3_verify_crc,
-};
-EXPORT_SYMBOL(t10_pi_type3_crc);
-
-const struct blk_integrity_profile t10_pi_type3_ip = {
- .name = "T10-DIF-TYPE3-IP",
- .generate_fn = t10_pi_type3_generate_ip,
- .verify_fn = t10_pi_type3_verify_ip,
-};
-EXPORT_SYMBOL(t10_pi_type3_ip);
-
/**
- * t10_pi_prepare - prepare PI prior submitting request to device
+ * t10_pi_type1_prepare - prepare PI prior submitting request to device
* @rq: request with PI that should be prepared
- * @protection_type: PI type (Type 1/Type 2/Type 3)
*
* For Type 1/Type 2, the virtual start sector is the one that was
* originally submitted by the block layer for the ref_tag usage. Due to
* partitioning, MD/DM cloning, etc. the actual physical start sector is
* likely to be different. Remap protection information to match the
* physical LBA.
- *
- * Type 3 does not have a reference tag so no remapping is required.
*/
-void t10_pi_prepare(struct request *rq, u8 protection_type)
+static void t10_pi_type1_prepare(struct request *rq)
{
const int tuple_sz = rq->q->integrity.tuple_size;
u32 ref_tag = t10_pi_ref_tag(rq);
struct bio *bio;
- if (protection_type == T10_PI_TYPE3_PROTECTION)
- return;
-
__rq_for_each_bio(bio, rq) {
struct bio_integrity_payload *bip = bio_integrity(bio);
u32 virt = bip_get_seed(bip) & 0xffffffff;
@@ -222,13 +167,11 @@ void t10_pi_prepare(struct request *rq, u8 protection_type)
bip->bip_flags |= BIP_MAPPED_INTEGRITY;
}
}
-EXPORT_SYMBOL(t10_pi_prepare);
/**
- * t10_pi_complete - prepare PI prior returning request to the block layer
+ * t10_pi_type1_complete - prepare PI prior returning request to the blk layer
* @rq: request with PI that should be prepared
- * @protection_type: PI type (Type 1/Type 2/Type 3)
- * @intervals: total elements to prepare
+ * @nr_bytes: total bytes to prepare
*
* For Type 1/Type 2, the virtual start sector is the one that was
* originally submitted by the block layer for the ref_tag usage. Due to
@@ -236,19 +179,14 @@ EXPORT_SYMBOL(t10_pi_prepare);
* likely to be different. Since the physical start sector was submitted
* to the device, we should remap it back to virtual values expected by the
* block layer.
- *
- * Type 3 does not have a reference tag so no remapping is required.
*/
-void t10_pi_complete(struct request *rq, u8 protection_type,
- unsigned int intervals)
+static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
{
+ unsigned intervals = nr_bytes >> rq->q->integrity.interval_exp;
const int tuple_sz = rq->q->integrity.tuple_size;
u32 ref_tag = t10_pi_ref_tag(rq);
struct bio *bio;
- if (protection_type == T10_PI_TYPE3_PROTECTION)
- return;
-
__rq_for_each_bio(bio, rq) {
struct bio_integrity_payload *bip = bio_integrity(bio);
u32 virt = bip_get_seed(bip) & 0xffffffff;
@@ -276,4 +214,73 @@ void t10_pi_complete(struct request *rq, u8 protection_type,
}
}
}
-EXPORT_SYMBOL(t10_pi_complete);
+
+static blk_status_t t10_pi_type3_generate_crc(struct blk_integrity_iter *iter)
+{
+ return t10_pi_generate(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
+}
+
+static blk_status_t t10_pi_type3_generate_ip(struct blk_integrity_iter *iter)
+{
+ return t10_pi_generate(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
+}
+
+static blk_status_t t10_pi_type3_verify_crc(struct blk_integrity_iter *iter)
+{
+ return t10_pi_verify(iter, t10_pi_crc_fn, T10_PI_TYPE3_PROTECTION);
+}
+
+static blk_status_t t10_pi_type3_verify_ip(struct blk_integrity_iter *iter)
+{
+ return t10_pi_verify(iter, t10_pi_ip_fn, T10_PI_TYPE3_PROTECTION);
+}
+
+/**
+ * Type 3 does not have a reference tag so no remapping is required.
+ */
+static void t10_pi_type3_prepare(struct request *rq)
+{
+}
+
+/**
+ * Type 3 does not have a reference tag so no remapping is required.
+ */
+static void t10_pi_type3_complete(struct request *rq, unsigned int nr_bytes)
+{
+}
+
+const struct blk_integrity_profile t10_pi_type1_crc = {
+ .name = "T10-DIF-TYPE1-CRC",
+ .generate_fn = t10_pi_type1_generate_crc,
+ .verify_fn = t10_pi_type1_verify_crc,
+ .prepare_fn = t10_pi_type1_prepare,
+ .complete_fn = t10_pi_type1_complete,
+};
+EXPORT_SYMBOL(t10_pi_type1_crc);
+
+const struct blk_integrity_profile t10_pi_type1_ip = {
+ .name = "T10-DIF-TYPE1-IP",
+ .generate_fn = t10_pi_type1_generate_ip,
+ .verify_fn = t10_pi_type1_verify_ip,
+ .prepare_fn = t10_pi_type1_prepare,
+ .complete_fn = t10_pi_type1_complete,
+};
+EXPORT_SYMBOL(t10_pi_type1_ip);
+
+const struct blk_integrity_profile t10_pi_type3_crc = {
+ .name = "T10-DIF-TYPE3-CRC",
+ .generate_fn = t10_pi_type3_generate_crc,
+ .verify_fn = t10_pi_type3_verify_crc,
+ .prepare_fn = t10_pi_type3_prepare,
+ .complete_fn = t10_pi_type3_complete,
+};
+EXPORT_SYMBOL(t10_pi_type3_crc);
+
+const struct blk_integrity_profile t10_pi_type3_ip = {
+ .name = "T10-DIF-TYPE3-IP",
+ .generate_fn = t10_pi_type3_generate_ip,
+ .verify_fn = t10_pi_type3_verify_ip,
+ .prepare_fn = t10_pi_type3_prepare,
+ .complete_fn = t10_pi_type3_complete,
+};
+EXPORT_SYMBOL(t10_pi_type3_ip);
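To summarize what the Type 1 prepare/complete hooks above do: each protection interval carries a 32-bit reference tag that counts up from the submitted start sector, so remapping amounts to rebasing that counter from the virtual start to the physical start before dispatch, and back again on read completion. The user-space C sketch below illustrates the idea only; the flat array stands in for the bio integrity payload and is not the kernel data structure.

/* Sketch of the Type 1/Type 2 ref-tag remap performed by the
 * prepare_fn/complete_fn hooks registered above. */
#include <stdint.h>
#include <stdio.h>

static void remap_ref_tags(uint32_t *ref_tags, unsigned int nr_intervals,
			   uint32_t virt_start, uint32_t phys_start)
{
	unsigned int i;

	for (i = 0; i < nr_intervals; i++) {
		/* Only rewrite tags that still match the old sequence, in the
		 * spirit of the kernel's check against the expected virtual
		 * value before touching a tuple. */
		if (ref_tags[i] == virt_start + i)
			ref_tags[i] = phys_start + i;
	}
}

int main(void)
{
	uint32_t tags[4] = { 100, 101, 102, 103 };	/* virtual seed = 100 */

	remap_ref_tags(tags, 4, 100, 2148);	/* physical start sector 2148 */
	printf("first remapped ref tag: %u\n", tags[0]);
	return 0;
}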