author     SeongJae Park <sj@kernel.org>               2025-02-05 22:15:17 -0800
committer  Andrew Morton <akpm@linux-foundation.org>   2025-03-16 22:06:04 -0700
commit     4000e3d0a367c5ff2035a0394b01b93974be6cb1
tree       cd8629fd29bc88ae02c83fc9a24dff6ab03e6988
parent     457753da6462024ad821bcb4df2d828cf2ef18be
mm/madvise: remove redundant mmap_lock operations from process_madvise()
Optimize away redundant mmap lock operations in process_madvise() by
taking the mmap lock once up front and then doing the remaining work
for all ranges inside the loop.

[akpm@linux-foundation.org: update comment, per Lorenzo]
Link: https://lkml.kernel.org/r/20250206061517.2958-5-sj@kernel.org
Signed-off-by: SeongJae Park <sj@kernel.org>
Reviewed-by: Shakeel Butt <shakeel.butt@linux.dev>
Reviewed-by: Liam R. Howlett <howlett@gmail.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r--  mm/madvise.c  28
1 file changed, 25 insertions(+), 3 deletions(-)
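For context, the shape of the change can be summarized with a minimal userspace sketch. Everything below (struct mm, madvise_lock(), madvise_unlock(), madvise_do_range(), process_ranges()) is a simplified stand-in rather than the kernel's real interfaces, and the fatal-signal check is omitted; it only illustrates the control flow the patch introduces: take the lock once before the loop, release it once after, and drop/reacquire it only on the rare syscall-restart path instead of locking and unlocking around every range.

/*
 * Userspace sketch only; not kernel code.  Mirrors the patched
 * vector_madvise() control flow: lock once, loop over ranges,
 * drop/reacquire only when a range asks for a restart.
 */
#include <stdio.h>
#include <stddef.h>
#include <sys/types.h>

#define ERESTART 85   /* stand-in for -ERESTARTNOINTR */

struct mm { int lock_depth; };

static int madvise_lock(struct mm *mm)    { mm->lock_depth++; return 0; }
static void madvise_unlock(struct mm *mm) { mm->lock_depth--; }

/* Pretend range 1 asks for a restart exactly once. */
static int madvise_do_range(struct mm *mm, size_t idx, int *restarts)
{
	(void)mm;
	if (idx == 1 && (*restarts)++ == 0)
		return -ERESTART;
	return 0;
}

static ssize_t process_ranges(struct mm *mm, size_t nr_ranges)
{
	size_t done = 0;
	int restarts = 0;
	int ret = madvise_lock(mm);

	if (ret)
		return ret;

	while (done < nr_ranges) {
		ret = madvise_do_range(mm, done, &restarts);
		if (ret == -ERESTART) {
			/* Drop and reacquire the lock, then retry this range. */
			madvise_unlock(mm);
			madvise_lock(mm);
			continue;
		}
		if (ret < 0)
			break;
		done++;   /* plays the role of iov_iter_advance() */
	}
	madvise_unlock(mm);

	/* Report partial progress if any, like the kernel's final ret. */
	return done ? (ssize_t)done : ret;
}

int main(void)
{
	struct mm mm = { 0 };
	ssize_t n = process_ranges(&mm, 3);

	printf("processed %zd ranges, lock depth %d\n", n, mm.lock_depth);
	return 0;
}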
diff --git a/mm/madvise.c b/mm/madvise.c
index 6e31e3202d71..6ecead476a80 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -1778,16 +1778,33 @@ static ssize_t vector_madvise(struct mm_struct *mm, struct iov_iter *iter,
total_len = iov_iter_count(iter);
+ ret = madvise_lock(mm, behavior);
+ if (ret)
+ return ret;
+
while (iov_iter_count(iter)) {
- ret = do_madvise(mm, (unsigned long)iter_iov_addr(iter),
- iter_iov_len(iter), behavior);
+ unsigned long start = (unsigned long)iter_iov_addr(iter);
+ size_t len_in = iter_iov_len(iter);
+ size_t len;
+
+ if (!is_valid_madvise(start, len_in, behavior)) {
+ ret = -EINVAL;
+ break;
+ }
+
+ len = PAGE_ALIGN(len_in);
+ if (start + len == start)
+ ret = 0;
+ else
+ ret = madvise_do_behavior(mm, start, len_in, len,
+ behavior);
/*
* An madvise operation is attempting to restart the syscall,
* but we cannot proceed as it would not be correct to repeat
* the operation in aggregate, and would be surprising to the
* user.
*
- * As we have already dropped locks, it is safe to just loop and
+ * We drop and reacquire locks so it is safe to just loop and
* try again. We check for fatal signals in case we need exit
* early anyway.
*/
@@ -1796,12 +1813,17 @@ static ssize_t vector_madvise(struct mm_struct *mm, struct iov_iter *iter,
ret = -EINTR;
break;
}
+
+ /* Drop and reacquire lock to unwind race. */
+ madvise_unlock(mm, behavior);
+ madvise_lock(mm, behavior);
continue;
}
if (ret < 0)
break;
iov_iter_advance(iter, iter_iov_len(iter));
}
+ madvise_unlock(mm, behavior);
ret = (total_len - iov_iter_count(iter)) ? : ret;