author | Matthew Wilcox (Oracle) <willy@infradead.org> | 2025-03-13 15:14:56 +0000 |
committer | Andrew Morton <akpm@linux-foundation.org> | 2025-03-17 22:07:06 -0700 |
commit | 8defffa4c7b5d19a9a480aec675003f9c9e7daf6 |
tree | 3be281d600ce5ccff904f3857107be72153bff75 /mm |
parent | f841ad9ca5007167c02de143980c9dc703f90b3d |
mm: convert lru_add_page_tail() to lru_add_split_folio()
Remove three hidden calls to compound_head() and accesses to page->lru.
Link: https://lkml.kernel.org/r/20250313151458.4145978-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
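The three hidden compound_head() calls appear to be the ones buried in PageLRU(), get_page() and SetPageLRU(): each takes a struct page * and must first resolve a possible tail page back to its head page before touching flags or the refcount, whereas the folio variants act on the head page directly. A minimal sketch of that difference, with simplified struct layouts and non-atomic refcounting purely for illustration (not the kernel's actual definitions):

```c
/* Simplified sketch: why page-based helpers hide a compound_head() call. */

struct page {
	unsigned long flags;
	unsigned long compound_head;	/* tail pages: head page pointer | 1 */
	int _refcount;			/* really an atomic_t in the kernel */
};

struct folio {
	struct page page;		/* a folio is never a tail page */
};

/* Resolve a possibly-tail page to the head page of its compound page. */
static struct page *compound_head(struct page *page)
{
	unsigned long head = page->compound_head;

	return (head & 1) ? (struct page *)(head - 1) : page;
}

static struct folio *page_folio(struct page *page)
{
	return (struct folio *)compound_head(page);	/* the hidden lookup */
}

static void folio_get(struct folio *folio)
{
	folio->page._refcount++;	/* acts on the head page directly */
}

static void get_page(struct page *page)
{
	folio_get(page_folio(page));	/* pays for compound_head() on every call */
}
```

Passing struct folio * through lru_add_split_folio() lets those three call sites skip that lookup, and new_folio->lru replaces the former access through page->lru.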
Diffstat (limited to 'mm')
-rw-r--r-- | mm/huge_memory.c | 18 |
1 file changed, 9 insertions, 9 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e3ed8e9523f5..10a86b681cf1 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3262,25 +3262,25 @@ static void remap_page(struct folio *folio, unsigned long nr, int flags)
 	}
 }
 
-static void lru_add_page_tail(struct folio *folio, struct page *tail,
+static void lru_add_split_folio(struct folio *folio, struct folio *new_folio,
 		struct lruvec *lruvec, struct list_head *list)
 {
-	VM_BUG_ON_FOLIO(PageLRU(tail), folio);
+	VM_BUG_ON_FOLIO(folio_test_lru(new_folio), folio);
 	lockdep_assert_held(&lruvec->lru_lock);
 
 	if (list) {
 		/* page reclaim is reclaiming a huge page */
 		VM_WARN_ON(folio_test_lru(folio));
-		get_page(tail);
-		list_add_tail(&tail->lru, list);
+		folio_get(new_folio);
+		list_add_tail(&new_folio->lru, list);
 	} else {
 		/* head is still on lru (and we have it frozen) */
 		VM_WARN_ON(!folio_test_lru(folio));
 		if (folio_test_unevictable(folio))
-			tail->mlock_count = 0;
+			new_folio->mlock_count = 0;
 		else
-			list_add_tail(&tail->lru, &folio->lru);
-		SetPageLRU(tail);
+			list_add_tail(&new_folio->lru, &folio->lru);
+		folio_set_lru(new_folio);
 	}
 }
 
@@ -3581,8 +3581,8 @@ after_split:
 				((mapping || swap_cache) ?
 					folio_nr_pages(release) : 0));
 
-		lru_add_page_tail(origin_folio, &release->page,
-				lruvec, list);
+		lru_add_split_folio(origin_folio, release, lruvec,
+				list);
 
 		/* Some pages can be beyond EOF: drop them from cache */
 		if (release->index >= end) {