author      Christoph Hellwig <hch@lst.de>  2025-01-31 13:03:47 +0100
committer   Jens Axboe <axboe@kernel.dk>    2025-01-31 07:20:08 -0700
commit      1e1a9cecfab3f22ebef0a976f849c87be8d03c1c (patch)
tree        34fa7958ed94c56127aa0fc55347bb409574af3a /block/blk-cgroup.c
parent      14ef49657ff3b7156952b2eadcf2e5bafd735795 (diff)
block: force noio scope in blk_mq_freeze_queue
When block drivers or the core block code perform allocations with a
frozen queue, this could try to recurse into the block device to reclaim
memory and deadlock. Thus all allocations done by a process that froze a
queue need to be done without __GFP_IO and __GFP_FS. Instead of trying
to track all of them down, force a noio scope as part of freezing the
queue.

Note that nvme is a bit of a mess here due to the non-owner freezes, and
they will be addressed separately.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20250131120352.1315351-2-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
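[Editor's note] The blkcg hunks below only show the caller side of the new
convention. The core idea on the blk-mq side is that freezing a queue now
also enters a noio allocation scope via memalloc_noio_save(), with the saved
flag word handed back to whoever froze the queue. A minimal sketch of that
pattern, assuming split helpers named blk_mq_freeze_queue_nomemsave() and
blk_mq_unfreeze_queue_nomemrestore() for the raw freeze/unfreeze steps (those
names are not shown in this diff):

/*
 * Sketch only: *_nomemsave/*_nomemrestore are assumed helper names.
 * memalloc_noio_save()/memalloc_noio_restore() are the regular
 * scoped-allocation helpers from <linux/sched/mm.h>.
 */
#include <linux/sched/mm.h>

static inline unsigned int __must_check blk_mq_freeze_queue(struct request_queue *q)
{
	blk_mq_freeze_queue_nomemsave(q);	/* the freeze itself */
	return memalloc_noio_save();		/* allocations from here on
						 * implicitly lose __GFP_IO
						 * and __GFP_FS */
}

static inline void blk_mq_unfreeze_queue(struct request_queue *q,
					 unsigned int memflags)
{
	memalloc_noio_restore(memflags);	/* leave the noio scope */
	blk_mq_unfreeze_queue_nomemrestore(q);
}

Returning the flags forces every freeze site to be paired with an unfreeze
that restores them, which is exactly the churn visible in the hunks below.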
Diffstat (limited to 'block/blk-cgroup.c')
-rw-r--r--  block/blk-cgroup.c  10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 45a395862fbc..c795fa3a30e1 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1545,6 +1545,7 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
 	struct request_queue *q = disk->queue;
 	struct blkg_policy_data *pd_prealloc = NULL;
 	struct blkcg_gq *blkg, *pinned_blkg = NULL;
+	unsigned int memflags;
 	int ret;
 
 	if (blkcg_policy_enabled(q, pol))
@@ -1559,7 +1560,7 @@ int blkcg_activate_policy(struct gendisk *disk, const struct blkcg_policy *pol)
 		return -EINVAL;
 
 	if (queue_is_mq(q))
-		blk_mq_freeze_queue(q);
+		memflags = blk_mq_freeze_queue(q);
 retry:
 	spin_lock_irq(&q->queue_lock);
 
@@ -1623,7 +1624,7 @@ retry:
 	spin_unlock_irq(&q->queue_lock);
 out:
 	if (queue_is_mq(q))
-		blk_mq_unfreeze_queue(q);
+		blk_mq_unfreeze_queue(q, memflags);
 	if (pinned_blkg)
 		blkg_put(pinned_blkg);
 	if (pd_prealloc)
@@ -1667,12 +1668,13 @@ void blkcg_deactivate_policy(struct gendisk *disk,
 {
 	struct request_queue *q = disk->queue;
 	struct blkcg_gq *blkg;
+	unsigned int memflags;
 
 	if (!blkcg_policy_enabled(q, pol))
 		return;
 
 	if (queue_is_mq(q))
-		blk_mq_freeze_queue(q);
+		memflags = blk_mq_freeze_queue(q);
 
 	mutex_lock(&q->blkcg_mutex);
 	spin_lock_irq(&q->queue_lock);
@@ -1696,7 +1698,7 @@ void blkcg_deactivate_policy(struct gendisk *disk,
 	mutex_unlock(&q->blkcg_mutex);
 
 	if (queue_is_mq(q))
-		blk_mq_unfreeze_queue(q);
+		blk_mq_unfreeze_queue(q, memflags);
 }
 EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
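[Editor's note] For completeness, a hypothetical caller (not part of this
patch) showing why the scope matters: any allocation made while the queue is
frozen is implicitly narrowed to GFP_NOIO, so it cannot recurse into the
frozen device during reclaim.

#include <linux/blk-mq.h>
#include <linux/slab.h>

/* Hypothetical driver function illustrating the new calling convention. */
static int example_reconfigure(struct request_queue *q)
{
	unsigned int memflags;
	void *buf;

	memflags = blk_mq_freeze_queue(q);

	/*
	 * GFP_KERNEL is implicitly narrowed to GFP_NOIO inside the
	 * scope, so this allocation cannot try to write back to the
	 * frozen device to reclaim memory.
	 */
	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf) {
		blk_mq_unfreeze_queue(q, memflags);
		return -ENOMEM;
	}

	/* ... apply the new configuration while the queue is frozen ... */

	blk_mq_unfreeze_queue(q, memflags);
	kfree(buf);
	return 0;
}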