Diffstat (limited to 'mm/migrate.c')
-rw-r--r--  mm/migrate.c  74
1 file changed, 41 insertions(+), 33 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index c0e9f15be2a2..5169f9717f60 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -16,7 +16,7 @@
#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
@@ -307,6 +307,7 @@ static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
VM_BUG_ON_PAGE(!PageAnon(page), page);
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(pte_present(old_pte), page);
+ VM_WARN_ON_ONCE_FOLIO(folio_is_device_private(folio), folio);
if (folio_test_mlocked(folio) || (pvmw->vma->vm_flags & VM_LOCKED) ||
mm_forbids_zeropage(pvmw->vma->vm_mm))
@@ -352,7 +353,7 @@ static bool remove_migration_pte(struct folio *folio,
rmap_t rmap_flags = RMAP_NONE;
pte_t old_pte;
pte_t pte;
- swp_entry_t entry;
+ softleaf_t entry;
struct page *new;
unsigned long idx = 0;
@@ -378,22 +379,22 @@ static bool remove_migration_pte(struct folio *folio,
folio_get(folio);
pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
- entry = pte_to_swp_entry(old_pte);
- if (!is_migration_entry_young(entry))
+ entry = softleaf_from_pte(old_pte);
+ if (!softleaf_is_migration_young(entry))
pte = pte_mkold(pte);
- if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
+ if (folio_test_dirty(folio) && softleaf_is_migration_dirty(entry))
pte = pte_mkdirty(pte);
if (pte_swp_soft_dirty(old_pte))
pte = pte_mksoft_dirty(pte);
else
pte = pte_clear_soft_dirty(pte);
- if (is_writable_migration_entry(entry))
+ if (softleaf_is_migration_write(entry))
pte = pte_mkwrite(pte, vma);
else if (pte_swp_uffd_wp(old_pte))
pte = pte_mkuffd_wp(pte);
- if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
+ if (folio_test_anon(folio) && !softleaf_is_migration_read(entry))
rmap_flags |= RMAP_EXCLUSIVE;
if (unlikely(is_device_private_page(new))) {
@@ -403,7 +404,7 @@ static bool remove_migration_pte(struct folio *folio,
else
entry = make_readable_device_private_entry(
page_to_pfn(new));
- pte = swp_entry_to_pte(entry);
+ pte = softleaf_to_pte(entry);
if (pte_swp_soft_dirty(old_pte))
pte = pte_swp_mksoft_dirty(pte);
if (pte_swp_uffd_wp(old_pte))
@@ -482,7 +483,7 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
spinlock_t *ptl;
pte_t *ptep;
pte_t pte;
- swp_entry_t entry;
+ softleaf_t entry;
ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
if (!ptep)
@@ -491,11 +492,11 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
pte = ptep_get(ptep);
pte_unmap(ptep);
- if (!is_swap_pte(pte))
+ if (pte_none(pte) || pte_present(pte))
goto out;
- entry = pte_to_swp_entry(pte);
- if (!is_migration_entry(entry))
+ entry = softleaf_from_pte(pte);
+ if (!softleaf_is_migration(entry))
goto out;
migration_entry_wait_on_locked(entry, ptl);
@@ -514,16 +515,18 @@ out:
void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
+ softleaf_t entry;
pte_t pte;
hugetlb_vma_assert_locked(vma);
spin_lock(ptl);
pte = huge_ptep_get(vma->vm_mm, addr, ptep);
- if (unlikely(!is_hugetlb_entry_migration(pte))) {
- spin_unlock(ptl);
- hugetlb_vma_unlock_read(vma);
- } else {
+ if (huge_pte_none(pte))
+ goto fail;
+
+ entry = softleaf_from_pte(pte);
+ if (softleaf_is_migration(entry)) {
/*
* If migration entry existed, safe to release vma lock
* here because the pgtable page won't be freed without the
@@ -531,8 +534,13 @@ void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, p
* lock release in migration_entry_wait_on_locked().
*/
hugetlb_vma_unlock_read(vma);
- migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
+ migration_entry_wait_on_locked(entry, ptl);
+ return;
}
+
+fail:
+ spin_unlock(ptl);
+ hugetlb_vma_unlock_read(vma);
}
#endif
@@ -542,9 +550,9 @@ void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
spinlock_t *ptl;
ptl = pmd_lock(mm, pmd);
- if (!is_pmd_migration_entry(*pmd))
+ if (!pmd_is_migration_entry(*pmd))
goto unlock;
- migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl);
+ migration_entry_wait_on_locked(softleaf_from_pmd(*pmd), ptl);
return;
unlock:
spin_unlock(ptl);
@@ -562,7 +570,7 @@ unlock:
static int __folio_migrate_mapping(struct address_space *mapping,
struct folio *newfolio, struct folio *folio, int expected_count)
{
- XA_STATE(xas, &mapping->i_pages, folio_index(folio));
+ XA_STATE(xas, &mapping->i_pages, folio->index);
struct swap_cluster_info *ci = NULL;
struct zone *oldzone, *newzone;
int dirty;
@@ -667,27 +675,27 @@ static int __folio_migrate_mapping(struct address_space *mapping,
old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
- __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
- __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
+ mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
+ mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
- __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
- __mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
+ mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
+ mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
if (folio_test_pmd_mappable(folio)) {
- __mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
- __mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
+ mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
+ mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
}
}
#ifdef CONFIG_SWAP
if (folio_test_swapcache(folio)) {
- __mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
- __mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
+ mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
+ mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
}
#endif
if (dirty && mapping_can_writeback(mapping)) {
- __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
+ mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
- __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
+ mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
}
}
@@ -715,7 +723,7 @@ EXPORT_SYMBOL(folio_migrate_mapping);
int migrate_huge_page_move_mapping(struct address_space *mapping,
struct folio *dst, struct folio *src)
{
- XA_STATE(xas, &mapping->i_pages, folio_index(src));
+ XA_STATE(xas, &mapping->i_pages, src->index);
int rc, expected_count = folio_expected_ref_count(src) + 1;
if (folio_ref_count(src) != expected_count)
@@ -2164,7 +2172,7 @@ struct folio *alloc_migration_target(struct folio *src, unsigned long private)
gfp_t gfp_mask;
unsigned int order = 0;
int nid;
- int zidx;
+ enum zone_type zidx;
mtc = (struct migration_target_control *)private;
gfp_mask = mtc->gfp_mask;
@@ -2190,7 +2198,7 @@ struct folio *alloc_migration_target(struct folio *src, unsigned long private)
gfp_mask |= GFP_TRANSHUGE;
order = folio_order(src);
}
- zidx = zone_idx(folio_zone(src));
+ zidx = folio_zonenum(src);
if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
gfp_mask |= __GFP_HIGHMEM;