author     Tejun Heo <tj@kernel.org>      2019-11-07 11:18:00 -0800
committer  Jens Axboe <axboe@kernel.dk>   2019-11-07 12:28:13 -0700
commit     fd41e60331b13b8fb35cc5048185a46de98db77c
tree       3931df1952e9799ac7f8858932816c618304ffad /block/bfq-iosched.c
parent     a557f1c7fee2f2059234647fea32ed1f3c07dce2
bfq-iosched: stop using blkg->stat_bytes and ->stat_ios
When used on cgroup1, bfq uses the blkg->stat_bytes and ->stat_ios from blk-cgroup core to populate six stat knobs. blk-cgroup core is moving away from blkg_rwstat to improve scalability and won't be able to support this usage.

It isn't like the sharing gains all that much. Let's break it out to dedicated rwstat counters which are updated when on cgroup1.

This makes use of bfqg_*rwstat*() helpers outside of CONFIG_BFQ_CGROUP_DEBUG. Move them out.

v2: Compile fix when !CONFIG_BFQ_CGROUP_DEBUG.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Paolo Valente <paolo.valente@linaro.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
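For context, a minimal sketch of what the dedicated-counter side of this change could look like. The helper itself lands in block/bfq-cgroup.c and is outside this diffstat, so the field names bfqg->stats.bytes and bfqg->stats.ios below are assumptions; only the call site in bfq-iosched.c is shown in the diff.

	/*
	 * Sketch only: account a request's bytes and ios into blkg_rwstat
	 * counters owned by bfq itself, instead of blkg->stat_bytes/stat_ios.
	 * Counter names are assumed, not taken from this diff.
	 */
	void bfqg_stats_update_legacy_io(struct request_queue *q,
					 struct request *rq)
	{
		struct bfq_group *bfqg = blkg_to_bfqg(rq->bio->bi_blkg);

		if (!bfqg)
			return;

		blkg_rwstat_add(&bfqg->stats.bytes, rq->cmd_flags,
				blk_rq_bytes(rq));
		blkg_rwstat_add(&bfqg->stats.ios, rq->cmd_flags, 1);
	}

The call site added below only runs this on cgroup1 (where cgroup_subsys_on_dfl(io_cgrp_subsys) is false) and only for requests that carry a bio.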
Diffstat (limited to 'block/bfq-iosched.c')
-rw-r--r--  block/bfq-iosched.c  4
1 file changed, 4 insertions(+), 0 deletions(-)
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 0319d6339822..41d2d83c919b 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -5464,6 +5464,10 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 	bool idle_timer_disabled = false;
 	unsigned int cmd_flags;
 
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
+	if (!cgroup_subsys_on_dfl(io_cgrp_subsys) && rq->bio)
+		bfqg_stats_update_legacy_io(q, rq);
+#endif
 	spin_lock_irq(&bfqd->lock);
 	if (blk_mq_sched_try_insert_merge(q, rq)) {
 		spin_unlock_irq(&bfqd->lock);