From c1e44756fdb7b363fd22cb5514dced40752e36c5 Mon Sep 17 00:00:00 2001
From: Shaohua Li
Date: Mon, 8 Nov 2010 15:01:02 +0100
Subject: cfq-iosched: do cleanup

Some functions should return boolean.

Signed-off-by: Shaohua Li
Signed-off-by: Jens Axboe
---
 block/cfq-iosched.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

(limited to 'block/cfq-iosched.c')

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 9eba291eb6f..b8174bb4a6a 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -637,11 +637,11 @@ cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 static inline bool cfq_slice_used(struct cfq_queue *cfqq)
 {
 	if (cfq_cfqq_slice_new(cfqq))
-		return 0;
+		return false;
 	if (time_before(jiffies, cfqq->slice_end))
-		return 0;
+		return false;
 
-	return 1;
+	return true;
 }
 
 /*
@@ -1892,10 +1892,10 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	 * in their service tree.
 	 */
 	if (service_tree->count == 1 && cfq_cfqq_sync(cfqq))
-		return 1;
+		return true;
 	cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
 			service_tree->count);
-	return 0;
+	return false;
 }
 
 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
@@ -2359,12 +2359,12 @@ static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
 {
 	/* the queue hasn't finished any request, can't estimate */
 	if (cfq_cfqq_slice_new(cfqq))
-		return 1;
+		return true;
 	if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
 		cfqq->slice_end))
-		return 1;
+		return true;
 
-	return 0;
+	return false;
 }
 
 static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
--
cgit v1.2.3

From d2d59e18a1ea8ecdd1c0a52af320e9a7f5391cc4 Mon Sep 17 00:00:00 2001
From: Shaohua Li
Date: Mon, 8 Nov 2010 15:01:03 +0100
Subject: cfq-iosched: schedule dispatch for noidle queue

A queue is idle at cfq_dispatch_requests(), but it gets noidle later.
Unless another task explicitly does an unplug, or all requests are
drained, we will not deliver requests to the disk even if
cfq_arm_slice_timer doesn't make the queue idle. For example,
cfq_should_idle() returns true because service_tree->count == 1, and
then other queues are added.

Note: I didn't see obvious performance impacts with the patch so far,
but I just thought this could be a problem.

Signed-off-by: Shaohua Li
Signed-off-by: Jens Axboe
---
 block/cfq-iosched.c | 23 ++++++++++++++++++++++-
 1 file changed, 22 insertions(+), 1 deletion(-)

(limited to 'block/cfq-iosched.c')

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index b8174bb4a6a..986865e3fbc 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3255,6 +3255,10 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
 		return true;
 
+	/* An idle queue should not be idle now for some reason */
+	if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
+		return true;
+
 	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
 		return false;
 
@@ -3508,8 +3512,25 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 		}
 	}
 
-	if (!cfqd->rq_in_driver)
+	if (!cfqd->rq_in_driver) {
+		cfq_schedule_dispatch(cfqd);
+		return;
+	}
+	/*
+	 * A queue is idle at cfq_dispatch_requests(), but it gets noidle
+	 * later. We schedule a dispatch if the queue has no requests,
+	 * otherwise the disk is actually in idle till all requests
+	 * are finished even cfq_arm_slice_timer doesn't make the queue idle
+	 * */
+	cfqq = cfqd->active_queue;
+	if (!cfqq)
+		return;
+
+	if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq) &&
+	    (!cfqd->cfq_group_idle || cfqq->cfqg->nr_cfqq > 1)) {
+		cfq_del_timer(cfqd, cfqq);
 		cfq_schedule_dispatch(cfqd);
+	}
 }
 
 /*
--
cgit v1.2.3

From 8e1ac6655104bc6e1e79d67e2df88cc8fa9b6e07 Mon Sep 17 00:00:00 2001
From: Shaohua Li
Date: Mon, 8 Nov 2010 15:01:04 +0100
Subject: cfq-iosched: don't idle if a deep seek queue is slow

If a deep seek queue slowly delivers requests but the disk is much
faster, idling for the queue just wastes disk throughput. If the queue
delivers all requests before half its slice is used, the patch disables
idling for it. In my test, the application delivers 32 requests at a
time, the disk can accept 128 requests at maximum, and the disk is
fast. Without the patch, the throughput is just around 30MB/s, while
with it, the speed is about 80MB/s.

The disk is an SSD, but is detected as a rotational disk. I can
configure it as an SSD, but I thought the deep seek queue logic should
be fixed too, for example, considering a fast RAID.

Signed-off-by: Shaohua Li
Signed-off-by: Jens Axboe
---
 block/cfq-iosched.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

(limited to 'block/cfq-iosched.c')

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 986865e3fbc..ca4d1990724 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2285,6 +2285,17 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 		goto keep_queue;
 	}
 
+	/*
+	 * This is a deep seek queue, but the device is much faster than
+	 * the queue can deliver, don't idle
+	 **/
+	if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
+	    (cfq_cfqq_slice_new(cfqq) ||
+	    (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
+		cfq_clear_cfqq_deep(cfqq);
+		cfq_clear_cfqq_idle_window(cfqq);
+	}
+
 	if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
 		cfqq = NULL;
 		goto keep_queue;
--
cgit v1.2.3
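The half-slice condition added above is easy to misread, so here is the
same arithmetic as a standalone sketch (an editor's illustration, not
part of the patch; the helper name is invented). With all values in
jiffies, "remaining slice greater than elapsed slice" holds exactly
when less than half of the slice has been used:

#include <linux/jiffies.h>
#include <linux/types.h>

/*
 * remaining = slice_end - now, elapsed = now - slice_start.
 * remaining > elapsed  <=>  now < slice_start + (slice_end - slice_start) / 2,
 * i.e. the queue emptied before half of its slice was consumed, which is
 * the condition the patch uses to stop idling for a deep seek queue.
 */
static inline bool emptied_in_first_half(unsigned long slice_start,
					 unsigned long slice_end,
					 unsigned long now)
{
	return slice_end - now > now - slice_start;
}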
From 2b9408a45978dcda77407859148deeccf403c372 Mon Sep 17 00:00:00 2001
From: Shaohua Li
Date: Tue, 9 Nov 2010 14:51:13 +0100
Subject: cfq-iosched: don't schedule a dispatch for a non-idle queue

Vivek suggests we don't need to schedule a dispatch when an idle queue
becomes non-idle. And he is right: cfq_should_preempt() already covers
the logic.

Signed-off-by: Shaohua Li
Signed-off-by: Jens Axboe
---
 block/cfq-iosched.c | 19 +------------------
 1 file changed, 1 insertion(+), 18 deletions(-)

(limited to 'block/cfq-iosched.c')

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ca4d1990724..f90519430be 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3523,25 +3523,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 		}
 	}
 
-	if (!cfqd->rq_in_driver) {
+	if (!cfqd->rq_in_driver)
 		cfq_schedule_dispatch(cfqd);
-		return;
-	}
-	/*
-	 * A queue is idle at cfq_dispatch_requests(), but it gets noidle
-	 * later. We schedule a dispatch if the queue has no requests,
-	 * otherwise the disk is actually in idle till all requests
-	 * are finished even cfq_arm_slice_timer doesn't make the queue idle
-	 * */
-	cfqq = cfqd->active_queue;
-	if (!cfqq)
-		return;
-
-	if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq) &&
-	    (!cfqd->cfq_group_idle || cfqq->cfqg->nr_cfqq > 1)) {
-		cfq_del_timer(cfqd, cfqq);
-		cfq_schedule_dispatch(cfqd);
-	}
 }
 
 /*
--
cgit v1.2.3
From b54ce60eb7f61f8e314b8b241b0469eda3bb1d42 Mon Sep 17 00:00:00 2001
From: Gui Jianfeng
Date: Tue, 30 Nov 2010 20:52:46 +0100
Subject: cfq-iosched: Get rid of st->active

When a cfq group is running, it won't be dequeued from the service
tree, so there's no need to store the active one in st->active. Just
get rid of it.

Signed-off-by: Gui Jianfeng
Acked-by: Vivek Goyal
Signed-off-by: Jens Axboe
---
 block/cfq-iosched.c | 15 +--------------
 1 file changed, 1 insertion(+), 14 deletions(-)

(limited to 'block/cfq-iosched.c')

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 73a58628f54..e18d316ae65 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -87,7 +87,6 @@ struct cfq_rb_root {
 	unsigned count;
 	unsigned total_weight;
 	u64 min_vdisktime;
-	struct rb_node *active;
 };
 #define CFQ_RB_ROOT	(struct cfq_rb_root) { .rb = RB_ROOT, .left = NULL, \
 			.count = 0, .min_vdisktime = 0, }
@@ -563,11 +562,6 @@ static void update_min_vdisktime(struct cfq_rb_root *st)
 	u64 vdisktime = st->min_vdisktime;
 	struct cfq_group *cfqg;
 
-	if (st->active) {
-		cfqg = rb_entry_cfqg(st->active);
-		vdisktime = cfqg->vdisktime;
-	}
-
 	if (st->left) {
 		cfqg = rb_entry_cfqg(st->left);
 		vdisktime = min_vdisktime(vdisktime, cfqg->vdisktime);
@@ -894,9 +888,6 @@ cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
 	struct cfq_rb_root *st = &cfqd->grp_service_tree;
 
-	if (st->active == &cfqg->rb_node)
-		st->active = NULL;
-
 	BUG_ON(cfqg->nr_cfqq < 1);
 	cfqg->nr_cfqq--;
 
@@ -1095,7 +1086,7 @@ static void cfq_put_cfqg(struct cfq_group *cfqg)
 	if (!atomic_dec_and_test(&cfqg->ref))
 		return;
 	for_each_cfqg_st(cfqg, i, j, st)
-		BUG_ON(!RB_EMPTY_ROOT(&st->rb) || st->active != NULL);
+		BUG_ON(!RB_EMPTY_ROOT(&st->rb));
 	kfree(cfqg);
 }
 
@@ -1687,9 +1678,6 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	if (cfqq == cfqd->active_queue)
 		cfqd->active_queue = NULL;
 
-	if (&cfqq->cfqg->rb_node == cfqd->grp_service_tree.active)
-		cfqd->grp_service_tree.active = NULL;
-
 	if (cfqd->active_cic) {
 		put_io_context(cfqd->active_cic->ioc);
 		cfqd->active_cic = NULL;
@@ -2199,7 +2187,6 @@ static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
 	if (RB_EMPTY_ROOT(&st->rb))
 		return NULL;
 	cfqg = cfq_rb_first_group(st);
-	st->active = &cfqg->rb_node;
 	update_min_vdisktime(st);
 	return cfqg;
 }
--
cgit v1.2.3

From 760701bfe14faee8ea0608a9cab2046071d98a39 Mon Sep 17 00:00:00 2001
From: Gui Jianfeng
Date: Tue, 30 Nov 2010 20:52:47 +0100
Subject: cfq-iosched: Get rid of on_st flag

Whether a CFQ group is on a service tree can be checked via
"cfqg->rb_node". There's no need to maintain an extra flag here.

Signed-off-by: Gui Jianfeng
Acked-by: Vivek Goyal
Signed-off-by: Jens Axboe
---
 block/cfq-iosched.c | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

(limited to 'block/cfq-iosched.c')

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e18d316ae65..5d0349d602f 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -179,7 +179,6 @@ struct cfq_group {
 	/* group service_tree key */
 	u64 vdisktime;
 	unsigned int weight;
-	bool on_st;
 
 	/* number of cfqq currently on this group */
 	int nr_cfqq;
@@ -863,7 +862,7 @@ cfq_group_service_tree_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	struct rb_node *n;
 
 	cfqg->nr_cfqq++;
-	if (cfqg->on_st)
+	if (!RB_EMPTY_NODE(&cfqg->rb_node))
 		return;
 
 	/*
@@ -879,7 +878,6 @@ cfq_group_service_tree_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
 		cfqg->vdisktime = st->min_vdisktime;
 
 	__cfq_group_service_tree_add(st, cfqg);
-	cfqg->on_st = true;
 	st->total_weight += cfqg->weight;
 }
 
@@ -896,7 +894,6 @@ cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
 		return;
 
 	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
-	cfqg->on_st = false;
 	st->total_weight -= cfqg->weight;
 	if (!RB_EMPTY_NODE(&cfqg->rb_node))
 		cfq_rb_erase(&cfqg->rb_node, st);
--
cgit v1.2.3
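For reference, the rbtree idiom the on_st removal relies on, sketched
against the generic <linux/rbtree.h> API (an editor's illustration with
invented example_* names, not code from the patch): a node cleared with
RB_CLEAR_NODE() reports true from RB_EMPTY_NODE() until it is linked
into a tree, so tree membership can be tested without a separate flag,
provided the node is cleared again whenever it is erased:

#include <linux/rbtree.h>
#include <linux/types.h>

struct example_entity {
	struct rb_node rb_node;
};

/* mark the node "not on any tree"; done at init and after every erase */
static void example_init(struct example_entity *e)
{
	RB_CLEAR_NODE(&e->rb_node);
}

/* membership test that replaces an explicit flag such as on_st */
static bool example_on_tree(struct example_entity *e)
{
	return !RB_EMPTY_NODE(&e->rb_node);
}

static void example_erase(struct rb_root *root, struct example_entity *e)
{
	rb_erase(&e->rb_node, root);
	RB_CLEAR_NODE(&e->rb_node);	/* keep the membership test valid */
}

cfq-iosched already follows this discipline: its cfq_rb_erase() helper
clears the node after removal, which is what makes the RB_EMPTY_NODE()
checks in the hunks above safe.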
From e4ea0c16a85d221ebcc3a21f32e321440459e0fc Mon Sep 17 00:00:00 2001
From: Shaohua Li
Date: Mon, 13 Dec 2010 14:32:22 +0100
Subject: block cfq: select new workload if priority changed

If the priority is changed, continuing to check workload_expires and
the service tree count of the previous workload does not make sense. We
should always choose the workload with the lowest key of the new
priority in such a case.

Signed-off-by: Shaohua Li
Reviewed-by: Jeff Moyer
Signed-off-by: Jens Axboe
---
 block/cfq-iosched.c | 5 +++++
 1 file changed, 5 insertions(+)

(limited to 'block/cfq-iosched.c')

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 5d0349d602f..9b186fd6bf4 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2101,6 +2101,7 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	unsigned count;
 	struct cfq_rb_root *st;
 	unsigned group_slice;
+	enum wl_prio_t original_prio = cfqd->serving_prio;
 
 	if (!cfqg) {
 		cfqd->serving_prio = IDLE_WORKLOAD;
@@ -2119,6 +2120,9 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 		return;
 	}
 
+	if (original_prio != cfqd->serving_prio)
+		goto new_workload;
+
 	/*
 	 * For RT and BE, we have to choose also the type
 	 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
@@ -2133,6 +2137,7 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	if (count && !time_after(jiffies, cfqd->workload_expires))
 		return;
 
+new_workload:
 	/* otherwise select new workload type */
 	cfqd->serving_type = cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
 
--
cgit v1.2.3
From 7278c9c19bd85cf33213a2e0b538a18d3ac8ad00 Mon Sep 17 00:00:00 2001
From: Gui Jianfeng
Date: Fri, 17 Dec 2010 08:57:14 +0100
Subject: cfq-iosched: don't check cfqg in choose_service_tree()

When cfq_choose_cfqg() is called in select_queue(), there must be at
least one backlogged CFQ queue waiting for dispatch, hence there must
be at least one backlogged CFQ group on the service tree. So we never
call choose_service_tree() with cfqg == NULL.

Signed-off-by: Gui Jianfeng
Reviewed-by: Jeff Moyer
Acked-by: Vivek Goyal
Signed-off-by: Jens Axboe
---
 block/cfq-iosched.c | 6 ------
 1 file changed, 6 deletions(-)

(limited to 'block/cfq-iosched.c')

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 9b186fd6bf4..c19d015ac5a 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2103,12 +2103,6 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	unsigned group_slice;
 	enum wl_prio_t original_prio = cfqd->serving_prio;
 
-	if (!cfqg) {
-		cfqd->serving_prio = IDLE_WORKLOAD;
-		cfqd->workload_expires = jiffies + 1;
-		return;
-	}
-
 	/* Choose next priority. RT > BE > IDLE */
 	if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
 		cfqd->serving_prio = RT_WORKLOAD;
--
cgit v1.2.3
From 30d7b9448f03f2c82d0fd44738674cc156a8ce0a Mon Sep 17 00:00:00 2001
From: Shaohua Li
Date: Fri, 7 Jan 2011 08:46:59 +0100
Subject: block cfq: don't use atomic_t for cfq_queue

cfq_queue->ref is used with queue_lock held, so ref doesn't need to be
atomic, and atomic operations are slower.

Signed-off-by: Shaohua Li
Reviewed-by: Jeff Moyer
Acked-by: Vivek Goyal
Signed-off-by: Jens Axboe
---
 block/cfq-iosched.c | 27 ++++++++++++++++-----------
 1 file changed, 16 insertions(+), 11 deletions(-)

(limited to 'block/cfq-iosched.c')

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index c19d015ac5a..4cb4cf73ac0 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -96,7 +96,7 @@ struct cfq_rb_root {
  */
 struct cfq_queue {
 	/* reference count */
-	atomic_t ref;
+	int ref;
 	/* various state flags, see below */
 	unsigned int flags;
 	/* parent cfq_data */
@@ -2025,7 +2025,7 @@ static int cfqq_process_refs(struct cfq_queue *cfqq)
 	int process_refs, io_refs;
 
 	io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
-	process_refs = atomic_read(&cfqq->ref) - io_refs;
+	process_refs = cfqq->ref - io_refs;
 	BUG_ON(process_refs < 0);
 	return process_refs;
 }
@@ -2065,10 +2065,10 @@ static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
 	 */
 	if (new_process_refs >= process_refs) {
 		cfqq->new_cfqq = new_cfqq;
-		atomic_add(process_refs, &new_cfqq->ref);
+		new_cfqq->ref += process_refs;
 	} else {
 		new_cfqq->new_cfqq = cfqq;
-		atomic_add(new_process_refs, &cfqq->ref);
+		cfqq->ref += new_process_refs;
 	}
 }
 
@@ -2532,9 +2532,10 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 	struct cfq_data *cfqd = cfqq->cfqd;
 	struct cfq_group *cfqg, *orig_cfqg;
 
-	BUG_ON(atomic_read(&cfqq->ref) <= 0);
+	BUG_ON(cfqq->ref <= 0);
 
-	if (!atomic_dec_and_test(&cfqq->ref))
+	cfqq->ref--;
+	if (cfqq->ref)
 		return;
 
 	cfq_log_cfqq(cfqd, cfqq, "put_queue");
@@ -2837,7 +2838,7 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	RB_CLEAR_NODE(&cfqq->p_node);
 	INIT_LIST_HEAD(&cfqq->fifo);
 
-	atomic_set(&cfqq->ref, 0);
+	cfqq->ref = 0;
 	cfqq->cfqd = cfqd;
 
 	cfq_mark_cfqq_prio_changed(cfqq);
@@ -2973,11 +2974,11 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
 	 * pin the queue now that it's allocated, scheduler exit will prune it
 	 */
 	if (!is_sync && !(*async_cfqq)) {
-		atomic_inc(&cfqq->ref);
+		cfqq->ref++;
 		*async_cfqq = cfqq;
 	}
 
-	atomic_inc(&cfqq->ref);
+	cfqq->ref++;
 	return cfqq;
 }
 
@@ -3679,7 +3680,7 @@ new_queue:
 	}
 
 	cfqq->allocated[rw]++;
-	atomic_inc(&cfqq->ref);
+	cfqq->ref++;
 
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
@@ -3860,6 +3861,10 @@ static void *cfq_init_queue(struct request_queue *q)
 	if (!cfqd)
 		return NULL;
 
+	/*
+	 * Don't need take queue_lock in the routine, since we are
+	 * initializing the ioscheduler, and nobody is using cfqd
+	 */
 	cfqd->cic_index = i;
 
 	/* Init root service tree */
@@ -3899,7 +3904,7 @@ static void *cfq_init_queue(struct request_queue *q)
 	 * will not attempt to free it.
 	 */
 	cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
-	atomic_inc(&cfqd->oom_cfqq.ref);
+	cfqd->oom_cfqq.ref++;
 	cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);
 
 	INIT_LIST_HEAD(&cfqd->cic_list);
--
cgit v1.2.3
From 329a67815b596d23daf0caa588ae0800e925320f Mon Sep 17 00:00:00 2001
From: Shaohua Li
Date: Fri, 7 Jan 2011 08:48:28 +0100
Subject: block cfq: don't use atomic_t for cfq_group

cfq_group->ref is used with queue_lock held; the only exception is
cfq_set_request, which looks like a bug to me. So ref doesn't need to
be atomic, and atomic operations are slower.

Signed-off-by: Shaohua Li
Reviewed-by: Jeff Moyer
Acked-by: Vivek Goyal
Signed-off-by: Jens Axboe
---
 block/cfq-iosched.c | 23 ++++++++++++-----------
 1 file changed, 12 insertions(+), 11 deletions(-)

(limited to 'block/cfq-iosched.c')

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 4cb4cf73ac0..f083bda3054 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -207,7 +207,7 @@ struct cfq_group {
 	struct blkio_group blkg;
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
 	struct hlist_node cfqd_node;
-	atomic_t ref;
+	int ref;
 #endif
 	/* number of requests that are on the dispatch list or inside driver */
 	int dispatched;
@@ -1014,7 +1014,7 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
 	 * elevator which will be dropped by either elevator exit
 	 * or cgroup deletion path depending on who is exiting first.
 	 */
-	atomic_set(&cfqg->ref, 1);
+	cfqg->ref = 1;
 
 	/*
 	 * Add group onto cgroup list. It might happen that bdi->dev is
@@ -1059,7 +1059,7 @@ static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
 
 static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
 {
-	atomic_inc(&cfqg->ref);
+	cfqg->ref++;
 	return cfqg;
 }
 
@@ -1071,7 +1071,7 @@ static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
 	cfqq->cfqg = cfqg;
 
 	/* cfqq reference on cfqg */
-	atomic_inc(&cfqq->cfqg->ref);
+	cfqq->cfqg->ref++;
 }
 
 static void cfq_put_cfqg(struct cfq_group *cfqg)
@@ -1079,8 +1079,9 @@
 {
 	struct cfq_rb_root *st;
 	int i, j;
 
-	BUG_ON(atomic_read(&cfqg->ref) <= 0);
-	if (!atomic_dec_and_test(&cfqg->ref))
+	BUG_ON(cfqg->ref <= 0);
+	cfqg->ref--;
+	if (cfqg->ref)
 		return;
 	for_each_cfqg_st(cfqg, i, j, st)
 		BUG_ON(!RB_EMPTY_ROOT(&st->rb));
 	kfree(cfqg);
 }
@@ -1188,7 +1189,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		cfq_group_service_tree_del(cfqd, cfqq->cfqg);
 		cfqq->orig_cfqg = cfqq->cfqg;
 		cfqq->cfqg = &cfqd->root_group;
-		atomic_inc(&cfqd->root_group.ref);
+		cfqd->root_group.ref++;
 		group_changed = 1;
 	} else if (!cfqd->cfq_group_isolation &&
 		   cfqq_type(cfqq) == SYNC_WORKLOAD && cfqq->orig_cfqg) {
@@ -3681,12 +3682,12 @@ new_queue:
 
 	cfqq->allocated[rw]++;
 	cfqq->ref++;
-
-	spin_unlock_irqrestore(q->queue_lock, flags);
-
 	rq->elevator_private = cic;
 	rq->elevator_private2 = cfqq;
 	rq->elevator_private3 = cfq_ref_get_cfqg(cfqq->cfqg);
+
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
 	return 0;
 
 queue_fail:
@@ -3884,7 +3885,7 @@ static void *cfq_init_queue(struct request_queue *q)
 	 * Take a reference to root group which we never drop. This is just
 	 * to make sure that cfq_put_cfqg() does not try to kfree root group
 	 */
-	atomic_set(&cfqg->ref, 1);
+	cfqg->ref = 1;
 	rcu_read_lock();
 	cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
 					(void *)cfqd, 0);
--
cgit v1.2.3
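A closing note on the two atomic_t conversions above: both are instances
of the same pattern, where a reference count whose every access happens
under one spinlock can be demoted from atomic_t to a plain int, dropping
the cost of atomic read-modify-write operations. A minimal sketch of the
invariant (an editor's illustration with invented names; the
lockdep_assert_held() calls are just one way to document the locking
rule):

#include <linux/bug.h>
#include <linux/lockdep.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_obj {
	int ref;	/* always accessed with *lock held; no atomic_t needed */
};

/* caller must hold lock */
static void example_get(struct example_obj *obj, spinlock_t *lock)
{
	lockdep_assert_held(lock);
	obj->ref++;
}

/* caller must hold lock; returns true when the last reference is gone */
static bool example_put(struct example_obj *obj, spinlock_t *lock)
{
	lockdep_assert_held(lock);
	BUG_ON(obj->ref <= 0);
	return --obj->ref == 0;
}

This is also why the cfq_set_request exception called out in the last
commit message matters: an unlocked increment of a plain int, unlike
atomic_inc(), can be lost under concurrency, which is why the patch
moves the spin_unlock_irqrestore() below the cfq_ref_get_cfqg() call.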