 block/blk-cgroup.c | 23 ++++++++---------------
 1 file changed, 8 insertions(+), 15 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 7246fc256315..f93de34fe87d 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -877,14 +877,8 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
disk = ctx->bdev->bd_disk;
q = disk->queue;
- /*
- * blkcg_deactivate_policy() requires queue to be frozen, we can grab
- * q_usage_counter to prevent concurrent with blkcg_deactivate_policy().
- */
- ret = blk_queue_enter(q, 0);
- if (ret)
- goto fail;
-
+ /* Prevent concurrent blkcg_deactivate_policy() */
+ mutex_lock(&q->blkcg_mutex);
spin_lock_irq(&q->queue_lock);
if (!blkcg_policy_enabled(q, pol)) {
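The hunk above trades the q_usage_counter reference for q->blkcg_mutex. A minimal sketch of the exclusion this relies on, assuming blkcg_deactivate_policy() takes the same per-queue mutex; the function names and bodies below are illustrative, not the kernel's:

#include <linux/blkdev.h>
#include <linux/mutex.h>

/* Sketch: both paths serialize on one per-queue mutex, so a policy
 * cannot be deactivated while a blkg is being configured. */
static int conf_prep_sketch(struct request_queue *q)
{
	mutex_lock(&q->blkcg_mutex);	/* was: blk_queue_enter(q, 0) */
	/* ... check blkcg_policy_enabled(), allocate and insert blkg ... */
	mutex_unlock(&q->blkcg_mutex);	/* was: blk_queue_exit(q) */
	return 0;
}

static void deactivate_sketch(struct request_queue *q)
{
	mutex_lock(&q->blkcg_mutex);	/* excluded while conf_prep_sketch() runs */
	/* ... tear down per-policy data on every blkg ... */
	mutex_unlock(&q->blkcg_mutex);
}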
@@ -914,16 +908,16 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
- /* Drop locks to do new blkg allocation with GFP_KERNEL. */
+ /* Drop locks to do new blkg allocation with GFP_NOIO. */
spin_unlock_irq(&q->queue_lock);
- new_blkg = blkg_alloc(pos, disk, GFP_KERNEL);
+ new_blkg = blkg_alloc(pos, disk, GFP_NOIO);
if (unlikely(!new_blkg)) {
ret = -ENOMEM;
- goto fail_exit_queue;
+ goto fail_exit;
}
if (radix_tree_preload(GFP_KERNEL)) {
blkg_free(new_blkg);
ret = -ENOMEM;
- goto fail_exit_queue;
+ goto fail_exit;
}
spin_lock_irq(&q->queue_lock);
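With q->blkcg_mutex now held across the allocation, a GFP_KERNEL allocation could recurse into reclaim-driven block IO while a blkcg lock is held; GFP_NOIO forbids IO from the allocation (a rationale inferred from the locking change, not stated in the hunk). A sketch of two ways to express the constraint; the scoped variant is an alternative, not what the patch uses:

#include <linux/sched/mm.h>

/* As in the patch: mask IO out of this one allocation. */
new_blkg = blkg_alloc(pos, disk, GFP_NOIO);

/* Scoped alternative: every allocation in the region, including those
 * buried in callees, is implicitly restricted from GFP_KERNEL to NOIO. */
unsigned int noio_flags = memalloc_noio_save();
new_blkg = blkg_alloc(pos, disk, GFP_KERNEL);
memalloc_noio_restore(noio_flags);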
@@ -951,7 +945,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
goto success;
}
success:
- blk_queue_exit(q);
+ mutex_unlock(&q->blkcg_mutex);
ctx->blkg = blkg;
return 0;
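Note the asymmetry at success: the new mutex is dropped here, yet q->queue_lock remains held (only the failure path below unlocks it) and ctx->blkg is handed back to the caller. A sketch of the typical caller contract, assuming the upstream blkg_conf_prep()/blkg_conf_finish() pairing:

	struct blkg_conf_ctx ctx;
	int ret;

	ret = blkg_conf_prep(blkcg, pol, input, &ctx);
	if (ret)
		return ret;

	/* ctx.blkg is valid and queue_lock is still held here. */
	/* ... write the per-blkg policy configuration ... */

	blkg_conf_finish(&ctx);	/* releases queue_lock, drops references */
	return 0;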
@@ -959,9 +953,8 @@ fail_preloaded:
radix_tree_preload_end();
fail_unlock:
spin_unlock_irq(&q->queue_lock);
-fail_exit_queue:
- blk_queue_exit(q);
-fail:
+fail_exit:
+ mutex_unlock(&q->blkcg_mutex);
/*
* If queue was bypassing, we should retry. Do so after a
* short msleep(). It isn't strictly necessary but queue