Diffstat (limited to 'block')
-rw-r--r--  block/bio.c           5
-rw-r--r--  block/blk-cgroup.c   13
-rw-r--r--  block/blk-map.c       6
-rw-r--r--  block/blk-mq-sched.c  2
-rw-r--r--  block/blk-mq-tag.c    5
-rw-r--r--  block/blk-mq.c        2
-rw-r--r--  block/blk-mq.h        3
-rw-r--r--  block/fops.c         13
8 files changed, 28 insertions, 21 deletions
diff --git a/block/bio.c b/block/bio.c
index 3a1a848940dd..b3a79285c278 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1316,7 +1316,7 @@ static int bio_iov_iter_align_down(struct bio *bio, struct iov_iter *iter,
}
/**
- * bio_iov_iter_get_pages_aligned - add user or kernel pages to a bio
+ * bio_iov_iter_get_pages - add user or kernel pages to a bio
* @bio: bio to add pages to
* @iter: iov iterator describing the region to be added
* @len_align_mask: the mask to align the total size to, 0 for any length
@@ -1336,7 +1336,7 @@ static int bio_iov_iter_align_down(struct bio *bio, struct iov_iter *iter,
* MM encounters an error pinning the requested pages, it stops. Error
* is returned only if 0 pages could be pinned.
*/
-int bio_iov_iter_get_pages_aligned(struct bio *bio, struct iov_iter *iter,
+int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter,
unsigned len_align_mask)
{
int ret = 0;
@@ -1360,7 +1360,6 @@ int bio_iov_iter_get_pages_aligned(struct bio *bio, struct iov_iter *iter,
return bio_iov_iter_align_down(bio, iter, len_align_mask);
return ret;
}
-EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages_aligned);
static void submit_bio_wait_endio(struct bio *bio)
{
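
For context (a sketch, not part of the patch): the renamed helper keeps the explicit length-alignment mask, so in-kernel callers choose their own alignment. A minimal caller sketch, assuming a block-layer user that wants the total bio size rounded down to the device's logical block size; the function name is hypothetical:

	/* Sketch only, not from this patch: fill a bio from an iov_iter and
	 * round the total length down to the logical block size of @bdev.
	 * Passing 0 instead of the mask would accept any length.
	 */
	static int example_fill_dio_bio(struct bio *bio, struct iov_iter *iter,
					struct block_device *bdev)
	{
		return bio_iov_iter_get_pages(bio, iter,
					      bdev_logical_block_size(bdev) - 1);
	}
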
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index f93de34fe87d..3cffb68ba5d8 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -812,8 +812,7 @@ int blkg_conf_open_bdev(struct blkg_conf_ctx *ctx)
}
/*
* Similar to blkg_conf_open_bdev, but additionally freezes the queue,
- * acquires q->elevator_lock, and ensures the correct locking order
- * between q->elevator_lock and q->rq_qos_mutex.
+ * ensures the correct locking order between freeze queue and q->rq_qos_mutex.
*
* This function returns negative error on failure. On success it returns
* memflags which must be saved and later passed to blkg_conf_exit_frozen
@@ -834,13 +833,11 @@ unsigned long __must_check blkg_conf_open_bdev_frozen(struct blkg_conf_ctx *ctx)
* At this point, we haven’t started protecting anything related to QoS,
* so we release q->rq_qos_mutex here, which was first acquired in blkg_
* conf_open_bdev. Later, we re-acquire q->rq_qos_mutex after freezing
- * the queue and acquiring q->elevator_lock to maintain the correct
- * locking order.
+ * the queue to maintain the correct locking order.
*/
mutex_unlock(&ctx->bdev->bd_queue->rq_qos_mutex);
memflags = blk_mq_freeze_queue(ctx->bdev->bd_queue);
- mutex_lock(&ctx->bdev->bd_queue->elevator_lock);
mutex_lock(&ctx->bdev->bd_queue->rq_qos_mutex);
return memflags;
@@ -995,9 +992,8 @@ void blkg_conf_exit(struct blkg_conf_ctx *ctx)
EXPORT_SYMBOL_GPL(blkg_conf_exit);
/*
- * Similar to blkg_conf_exit, but also unfreezes the queue and releases
- * q->elevator_lock. Should be used when blkg_conf_open_bdev_frozen
- * is used to open the bdev.
+ * Similar to blkg_conf_exit, but also unfreezes the queue. Should be used
+ * when blkg_conf_open_bdev_frozen is used to open the bdev.
*/
void blkg_conf_exit_frozen(struct blkg_conf_ctx *ctx, unsigned long memflags)
{
@@ -1005,7 +1001,6 @@ void blkg_conf_exit_frozen(struct blkg_conf_ctx *ctx, unsigned long memflags)
struct request_queue *q = ctx->bdev->bd_queue;
blkg_conf_exit(ctx);
- mutex_unlock(&q->elevator_lock);
blk_mq_unfreeze_queue(q, memflags);
}
}
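
For reference (a sketch, not part of the patch): with q->elevator_lock dropped from this path, a frozen-queue config update reduces to pairing the open/exit helpers as below. The handler name and body are hypothetical:

	/* Hypothetical cgroup config handler: the queue is frozen and
	 * rq_qos_mutex is held between these two calls; no elevator_lock
	 * is taken anymore.
	 */
	static ssize_t example_qos_config_write(struct blkg_conf_ctx *ctx)
	{
		unsigned long memflags;

		memflags = blkg_conf_open_bdev_frozen(ctx);
		if (IS_ERR_VALUE(memflags))
			return (ssize_t)memflags;

		/* ... apply the QoS change for ctx->bdev here ... */

		blkg_conf_exit_frozen(ctx, memflags);
		return 0;
	}
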
diff --git a/block/blk-map.c b/block/blk-map.c
index 165f2234f00f..60faf036fb6e 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -283,7 +283,11 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
if (!bio)
return -ENOMEM;
- ret = bio_iov_iter_get_pages(bio, iter);
+ /*
+ * No alignment requirements on our part to support arbitrary
+ * passthrough commands.
+ */
+ ret = bio_iov_iter_get_pages(bio, iter, 0);
if (ret)
goto out_put;
ret = blk_rq_append_bio(rq, bio);
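
To illustrate the new third argument (an assumed reading of the kerneldoc above, not code from this patch): the mask trims the total bio length, so 0 keeps whatever the iterator provides, which is what passthrough mapping wants.

	/* Illustration only: how a length-alignment mask would trim a total
	 * length.  mask == 0 keeps the full length; mask == 511 rounds the
	 * total down to a 512-byte multiple.
	 */
	static size_t example_trimmed_len(size_t total, unsigned int len_align_mask)
	{
		return total & ~(size_t)len_align_mask;
	}
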
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index d06bb137a743..e0bed16485c3 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -557,7 +557,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e,
if (blk_mq_is_shared_tags(flags)) {
/* Shared tags are stored at index 0 in @et->tags. */
q->sched_shared_tags = et->tags[0];
- blk_mq_tag_update_sched_shared_tags(q);
+ blk_mq_tag_update_sched_shared_tags(q, et->nr_requests);
}
queue_for_each_hw_ctx(q, hctx, i) {
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index c7a4d4b9cc87..5b664dbdf655 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -622,10 +622,11 @@ void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set, unsigned int size
sbitmap_queue_resize(&tags->bitmap_tags, size - set->reserved_tags);
}
-void blk_mq_tag_update_sched_shared_tags(struct request_queue *q)
+void blk_mq_tag_update_sched_shared_tags(struct request_queue *q,
+ unsigned int nr)
{
sbitmap_queue_resize(&q->sched_shared_tags->bitmap_tags,
- q->nr_requests - q->tag_set->reserved_tags);
+ nr - q->tag_set->reserved_tags);
}
/**
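
A hedged sketch of how the two-argument helper is meant to be called (the real call sites are the blk-mq-sched.c hunk above and the blk-mq.c hunk below); the wrapper name is hypothetical:

	/* Hypothetical wrapper: resize the shared scheduler tags to a new
	 * depth @nr chosen by the caller, rather than reading q->nr_requests.
	 */
	static void example_update_sched_depth(struct request_queue *q,
					       unsigned int nr)
	{
		if (blk_mq_is_shared_tags(q->tag_set->flags))
			blk_mq_tag_update_sched_shared_tags(q, nr);
	}
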
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 09f579414161..d626d32f6e57 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4941,7 +4941,7 @@ struct elevator_tags *blk_mq_update_nr_requests(struct request_queue *q,
* tags can't grow, see blk_mq_alloc_sched_tags().
*/
if (q->elevator)
- blk_mq_tag_update_sched_shared_tags(q);
+ blk_mq_tag_update_sched_shared_tags(q, nr);
else
blk_mq_tag_resize_shared_tags(set, nr);
} else if (!q->elevator) {
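
The comment in this hunk notes that sched tags can't grow; a minimal sketch of that constraint (assumed semantics, hypothetical helper, not from the patch):

	/* Assumed invariant: the requested depth never exceeds the depth the
	 * scheduler tags were allocated with, so only shrinking (or restoring)
	 * via sbitmap resize is required, never a reallocation.
	 */
	static unsigned int example_clamp_sched_depth(struct elevator_tags *et,
						      unsigned int nr)
	{
		return min(nr, et->nr_requests);
	}
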
diff --git a/block/blk-mq.h b/block/blk-mq.h
index af42dc018808..c4fccdeb5441 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -186,7 +186,8 @@ void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags);
void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set,
unsigned int size);
-void blk_mq_tag_update_sched_shared_tags(struct request_queue *q);
+void blk_mq_tag_update_sched_shared_tags(struct request_queue *q,
+ unsigned int nr);
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
diff --git a/block/fops.c b/block/fops.c
index c2c0396ea9ee..5e3db9fead77 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -43,6 +43,13 @@ static bool blkdev_dio_invalid(struct block_device *bdev, struct kiocb *iocb,
(bdev_logical_block_size(bdev) - 1);
}
+static inline int blkdev_iov_iter_get_pages(struct bio *bio,
+ struct iov_iter *iter, struct block_device *bdev)
+{
+ return bio_iov_iter_get_pages(bio, iter,
+ bdev_logical_block_size(bdev) - 1);
+}
+
#define DIO_INLINE_BIO_VECS 4
static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
@@ -78,7 +85,7 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
if (iocb->ki_flags & IOCB_ATOMIC)
bio.bi_opf |= REQ_ATOMIC;
- ret = bio_iov_iter_get_bdev_pages(&bio, iter, bdev);
+ ret = blkdev_iov_iter_get_pages(&bio, iter, bdev);
if (unlikely(ret))
goto out;
ret = bio.bi_iter.bi_size;
@@ -212,7 +219,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
bio->bi_end_io = blkdev_bio_end_io;
bio->bi_ioprio = iocb->ki_ioprio;
- ret = bio_iov_iter_get_bdev_pages(bio, iter, bdev);
+ ret = blkdev_iov_iter_get_pages(bio, iter, bdev);
if (unlikely(ret)) {
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
@@ -348,7 +355,7 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
*/
bio_iov_bvec_set(bio, iter);
} else {
- ret = bio_iov_iter_get_bdev_pages(bio, iter, bdev);
+ ret = blkdev_iov_iter_get_pages(bio, iter, bdev);
if (unlikely(ret))
goto out_bio_put;
}
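
A small illustration of what the new blkdev_iov_iter_get_pages wrapper passes as the mask (values assumed, sketch only; the helper name below is hypothetical):

	/* Example mask values for common logical block sizes:
	 *   512-byte LBS  -> mask 0x1ff
	 *   4096-byte LBS -> mask 0xfff
	 * so the bio's total length is rounded down to an LBS multiple.
	 */
	static unsigned int example_dio_len_align_mask(struct block_device *bdev)
	{
		return bdev_logical_block_size(bdev) - 1;
	}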