summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--arch/s390/mm/gmap_helpers.c20
-rw-r--r--arch/s390/mm/pgtable.c12
-rw-r--r--fs/proc/task_mmu.c12
-rw-r--r--include/linux/swapops.h5
-rw-r--r--mm/filemap.c2
-rw-r--r--mm/hmm.c16
-rw-r--r--mm/madvise.c2
-rw-r--r--mm/memory.c36
-rw-r--r--mm/mincore.c2
-rw-r--r--mm/userfaultfd.c24
10 files changed, 63 insertions(+), 68 deletions(-)
diff --git a/arch/s390/mm/gmap_helpers.c b/arch/s390/mm/gmap_helpers.c
index d4c3c36855e2..549f14ad08af 100644
--- a/arch/s390/mm/gmap_helpers.c
+++ b/arch/s390/mm/gmap_helpers.c
@@ -11,27 +11,27 @@
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
#include <linux/pagewalk.h>
#include <linux/ksm.h>
#include <asm/gmap_helpers.h>
#include <asm/pgtable.h>
/**
- * ptep_zap_swap_entry() - discard a swap entry.
+ * ptep_zap_softleaf_entry() - discard a software leaf entry.
* @mm: the mm
- * @entry: the swap entry that needs to be zapped
+ * @entry: the software leaf entry that needs to be zapped
*
- * Discards the given swap entry. If the swap entry was an actual swap
- * entry (and not a migration entry, for example), the actual swapped
+ * Discards the given software leaf entry. If the leaf entry was an actual
+ * swap entry (and not a migration entry, for example), the actual swapped
* page is also discarded from swap.
*/
-static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
+static void ptep_zap_softleaf_entry(struct mm_struct *mm, softleaf_t entry)
{
- if (!non_swap_entry(entry))
+ if (softleaf_is_swap(entry))
dec_mm_counter(mm, MM_SWAPENTS);
- else if (is_migration_entry(entry))
- dec_mm_counter(mm, mm_counter(pfn_swap_entry_folio(entry)));
+ else if (softleaf_is_migration(entry))
+ dec_mm_counter(mm, mm_counter(softleaf_to_folio(entry)));
free_swap_and_cache(entry);
}
@@ -66,7 +66,7 @@ void gmap_helper_zap_one_page(struct mm_struct *mm, unsigned long vmaddr)
preempt_disable();
pgste = pgste_get_lock(ptep);
- ptep_zap_swap_entry(mm, pte_to_swp_entry(*ptep));
+ ptep_zap_softleaf_entry(mm, softleaf_from_pte(*ptep));
pte_clear(mm, vmaddr, ptep);
pgste_set_unlock(ptep, pgste);
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 0fde20bbc50b..d670bfb47d9b 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -16,7 +16,7 @@
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
-#include <linux/swapops.h>
+#include <linux/leafops.h>
#include <linux/sysctl.h>
#include <linux/ksm.h>
#include <linux/mman.h>
@@ -683,12 +683,12 @@ void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep)
pgste_set_unlock(ptep, pgste);
}
-static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
+static void ptep_zap_softleaf_entry(struct mm_struct *mm, softleaf_t entry)
{
- if (!non_swap_entry(entry))
+ if (softleaf_is_swap(entry))
dec_mm_counter(mm, MM_SWAPENTS);
- else if (is_migration_entry(entry)) {
- struct folio *folio = pfn_swap_entry_folio(entry);
+ else if (softleaf_is_migration(entry)) {
+ struct folio *folio = softleaf_to_folio(entry);
dec_mm_counter(mm, mm_counter(folio));
}
@@ -710,7 +710,7 @@ void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
if (!reset && pte_swap(pte) &&
((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
(pgstev & _PGSTE_GPS_ZERO))) {
- ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
+ ptep_zap_softleaf_entry(mm, softleaf_from_pte(pte));
pte_clear(mm, addr, ptep);
}
if (reset)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 898df952b6bc..1f49c81b3591 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1020,13 +1020,13 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
} else if (pte_none(ptent)) {
smaps_pte_hole_lookup(addr, walk);
} else {
- swp_entry_t swpent = pte_to_swp_entry(ptent);
+ const softleaf_t entry = softleaf_from_pte(ptent);
- if (!non_swap_entry(swpent)) {
+ if (softleaf_is_swap(entry)) {
int mapcount;
mss->swap += PAGE_SIZE;
- mapcount = swp_swapcount(swpent);
+ mapcount = swp_swapcount(entry);
if (mapcount >= 2) {
u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;
@@ -1035,10 +1035,10 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
} else {
mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
}
- } else if (is_pfn_swap_entry(swpent)) {
- if (is_device_private_entry(swpent))
+ } else if (softleaf_has_pfn(entry)) {
+ if (softleaf_is_device_private(entry))
present = true;
- page = pfn_swap_entry_to_page(swpent);
+ page = softleaf_to_page(entry);
}
}
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 41cfc6d59054..c8e6f927da48 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -492,10 +492,5 @@ static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
-static inline int non_swap_entry(swp_entry_t entry)
-{
- return swp_type(entry) >= MAX_SWAPFILES;
-}
-
#endif /* CONFIG_MMU */
#endif /* _LINUX_SWAPOPS_H */
diff --git a/mm/filemap.c b/mm/filemap.c
index 02355aa46324..07634b7d9934 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -4567,7 +4567,7 @@ static void filemap_cachestat(struct address_space *mapping,
swp_entry_t swp = radix_to_swp_entry(folio);
/* swapin error results in poisoned entry */
- if (non_swap_entry(swp))
+ if (!softleaf_is_swap(swp))
goto resched;
/*
diff --git a/mm/hmm.c b/mm/hmm.c
index e9735a9b6102..0158f2d1e027 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -258,17 +258,17 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
}
if (!pte_present(pte)) {
- swp_entry_t entry = pte_to_swp_entry(pte);
+ const softleaf_t entry = softleaf_from_pte(pte);
/*
* Don't fault in device private pages owned by the caller,
* just report the PFN.
*/
- if (is_device_private_entry(entry) &&
- page_pgmap(pfn_swap_entry_to_page(entry))->owner ==
+ if (softleaf_is_device_private(entry) &&
+ page_pgmap(softleaf_to_page(entry))->owner ==
range->dev_private_owner) {
cpu_flags = HMM_PFN_VALID;
- if (is_writable_device_private_entry(entry))
+ if (softleaf_is_device_private_write(entry))
cpu_flags |= HMM_PFN_WRITE;
new_pfn_flags = swp_offset_pfn(entry) | cpu_flags;
goto out;
@@ -279,16 +279,16 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
if (!required_fault)
goto out;
- if (!non_swap_entry(entry))
+ if (softleaf_is_swap(entry))
goto fault;
- if (is_device_private_entry(entry))
+ if (softleaf_is_device_private(entry))
goto fault;
- if (is_device_exclusive_entry(entry))
+ if (softleaf_is_device_exclusive(entry))
goto fault;
- if (is_migration_entry(entry)) {
+ if (softleaf_is_migration(entry)) {
pte_unmap(ptep);
hmm_vma_walk->last = addr;
migration_entry_wait(walk->mm, pmdp, addr);
diff --git a/mm/madvise.c b/mm/madvise.c
index 5979a4a39738..d8bc51e1bea7 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -249,7 +249,7 @@ static void shmem_swapin_range(struct vm_area_struct *vma,
continue;
entry = radix_to_swp_entry(folio);
/* There might be swapin error entries in shmem mapping. */
- if (non_swap_entry(entry))
+ if (!softleaf_is_swap(entry))
continue;
addr = vma->vm_start +
diff --git a/mm/memory.c b/mm/memory.c
index 95dac6a1cbc4..a3f001a47ecf 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -932,7 +932,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
struct folio *folio;
struct page *page;
- if (likely(!non_swap_entry(entry))) {
+ if (likely(softleaf_is_swap(entry))) {
if (swap_duplicate(entry) < 0)
return -EIO;
@@ -950,12 +950,12 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
set_pte_at(src_mm, addr, src_pte, pte);
}
rss[MM_SWAPENTS]++;
- } else if (is_migration_entry(entry)) {
- folio = pfn_swap_entry_folio(entry);
+ } else if (softleaf_is_migration(entry)) {
+ folio = softleaf_to_folio(entry);
rss[mm_counter(folio)]++;
- if (!is_readable_migration_entry(entry) &&
+ if (!softleaf_is_migration_read(entry) &&
is_cow_mapping(vm_flags)) {
/*
* COW mappings require pages in both parent and child
@@ -964,15 +964,15 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
*/
entry = make_readable_migration_entry(
swp_offset(entry));
- pte = swp_entry_to_pte(entry);
+ pte = softleaf_to_pte(entry);
if (pte_swp_soft_dirty(orig_pte))
pte = pte_swp_mksoft_dirty(pte);
if (pte_swp_uffd_wp(orig_pte))
pte = pte_swp_mkuffd_wp(pte);
set_pte_at(src_mm, addr, src_pte, pte);
}
- } else if (is_device_private_entry(entry)) {
- page = pfn_swap_entry_to_page(entry);
+ } else if (softleaf_is_device_private(entry)) {
+ page = softleaf_to_page(entry);
folio = page_folio(page);
/*
@@ -996,7 +996,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
* when a device driver is involved (you cannot easily
* save and restore device driver state).
*/
- if (is_writable_device_private_entry(entry) &&
+ if (softleaf_is_device_private_write(entry) &&
is_cow_mapping(vm_flags)) {
entry = make_readable_device_private_entry(
swp_offset(entry));
@@ -1005,7 +1005,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pte = pte_swp_mkuffd_wp(pte);
set_pte_at(src_mm, addr, src_pte, pte);
}
- } else if (is_device_exclusive_entry(entry)) {
+ } else if (softleaf_is_device_exclusive(entry)) {
/*
* Make device exclusive entries present by restoring the
* original entry then copying as for a present pte. Device
@@ -4625,7 +4625,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
rmap_t rmap_flags = RMAP_NONE;
bool need_clear_cache = false;
bool exclusive = false;
- swp_entry_t entry;
+ softleaf_t entry;
pte_t pte;
vm_fault_t ret = 0;
void *shadow = NULL;
@@ -4637,15 +4637,15 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
if (!pte_unmap_same(vmf))
goto out;
- entry = pte_to_swp_entry(vmf->orig_pte);
- if (unlikely(non_swap_entry(entry))) {
- if (is_migration_entry(entry)) {
+ entry = softleaf_from_pte(vmf->orig_pte);
+ if (unlikely(!softleaf_is_swap(entry))) {
+ if (softleaf_is_migration(entry)) {
migration_entry_wait(vma->vm_mm, vmf->pmd,
vmf->address);
- } else if (is_device_exclusive_entry(entry)) {
- vmf->page = pfn_swap_entry_to_page(entry);
+ } else if (softleaf_is_device_exclusive(entry)) {
+ vmf->page = softleaf_to_page(entry);
ret = remove_device_exclusive_entry(vmf);
- } else if (is_device_private_entry(entry)) {
+ } else if (softleaf_is_device_private(entry)) {
if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
/*
* migrate_to_ram is not yet ready to operate
@@ -4656,7 +4656,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
goto out;
}
- vmf->page = pfn_swap_entry_to_page(entry);
+ vmf->page = softleaf_to_page(entry);
vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
vmf->address, &vmf->ptl);
if (unlikely(!vmf->pte ||
@@ -4680,7 +4680,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
} else {
pte_unmap_unlock(vmf->pte, vmf->ptl);
}
- } else if (is_hwpoison_entry(entry)) {
+ } else if (softleaf_is_hwpoison(entry)) {
ret = VM_FAULT_HWPOISON;
} else if (softleaf_is_marker(entry)) {
ret = handle_pte_marker(vmf);
diff --git a/mm/mincore.c b/mm/mincore.c
index b3682488a65d..9a908d8bb706 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -74,7 +74,7 @@ static unsigned char mincore_swap(swp_entry_t entry, bool shmem)
* absent. Page table may contain migration or hwpoison
* entries which are always uptodate.
*/
- if (non_swap_entry(entry))
+ if (!softleaf_is_swap(entry))
return !shmem;
/*
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index 055ec1050776..bd1f74a7a5ac 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -1256,7 +1256,6 @@ static long move_pages_ptes(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd
unsigned long dst_addr, unsigned long src_addr,
unsigned long len, __u64 mode)
{
- swp_entry_t entry;
struct swap_info_struct *si = NULL;
pte_t orig_src_pte, orig_dst_pte;
pte_t src_folio_pte;
@@ -1430,19 +1429,20 @@ retry:
orig_dst_pte, orig_src_pte, dst_pmd,
dst_pmdval, dst_ptl, src_ptl, &src_folio,
len);
- } else {
+ } else { /* !pte_present() */
struct folio *folio = NULL;
+ const softleaf_t entry = softleaf_from_pte(orig_src_pte);
- entry = pte_to_swp_entry(orig_src_pte);
- if (non_swap_entry(entry)) {
- if (is_migration_entry(entry)) {
- pte_unmap(src_pte);
- pte_unmap(dst_pte);
- src_pte = dst_pte = NULL;
- migration_entry_wait(mm, src_pmd, src_addr);
- ret = -EAGAIN;
- } else
- ret = -EFAULT;
+ if (softleaf_is_migration(entry)) {
+ pte_unmap(src_pte);
+ pte_unmap(dst_pte);
+ src_pte = dst_pte = NULL;
+ migration_entry_wait(mm, src_pmd, src_addr);
+
+ ret = -EAGAIN;
+ goto out;
+ } else if (!softleaf_is_swap(entry)) {
+ ret = -EFAULT;
goto out;
}