path: root/block/blk.h
diff options
author: Christoph Hellwig <>	2020-05-16 20:28:01 +0200
committer: Jens Axboe <>	2020-05-19 09:34:29 -0600
commitac7c5675fa45a372fab27d78a72d2e10e4734959 (patch)
tree7add7a513b2d64e636acf3d3912f8cf292253ad7 /block/blk.h
parent35b371ff01410ec7c73312f1c1c320de35fcfd0f (diff)
blk-mq: allow blk_mq_make_request to consume the q_usage_counter reference
blk_mq_make_request currently needs to grab a q_usage_counter reference when allocating a request. This is because the block layer grabs one before calling blk_mq_make_request, but also releases it as soon as blk_mq_make_request returns. Remove the blk_queue_exit call after blk_mq_make_request returns, and instead let it consume the reference. This works perfectly fine for the block layer caller; just device mapper needs an extra reference, as the old problem still persists there. Open code blk_queue_enter_live in device mapper, as there should be no other callers and this allows better documenting why we do a non-try get. Signed-off-by: Christoph Hellwig <> Signed-off-by: Jens Axboe <>
Diffstat (limited to 'block/blk.h')
1 file changed, 0 insertions, 11 deletions
diff --git a/block/blk.h b/block/blk.h
index fc00537026a0..9e6ed5f11823 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -64,17 +64,6 @@ void blk_free_flush_queue(struct blk_flush_queue *q);
 void blk_freeze_queue(struct request_queue *q);
 
-static inline void blk_queue_enter_live(struct request_queue *q)
-{
-	/*
-	 * Given that running in generic_make_request() context
-	 * guarantees that a live reference against q_usage_counter has
-	 * been established, further references under that same context
-	 * need not check that the queue has been frozen (marked dead).
-	 */
-	percpu_ref_get(&q->q_usage_counter);
-}
-
 static inline bool biovec_phys_mergeable(struct request_queue *q,
 		struct bio_vec *vec1, struct bio_vec *vec2)