author     Jani Nikula <jani.nikula@intel.com>  2024-06-19 11:38:31 +0300
committer  Jani Nikula <jani.nikula@intel.com>  2024-06-19 11:38:31 +0300
commit     d754ed2821fd9675d203cb73c4afcd593e28b7d0 (patch)
tree       cd16683cd956a7c334d7e1b3baf02e2e7baa729c /include/linux/ksm.h
parent     dcaacff03a9fa2838f936e1009b4b7ad56807152 (diff)
parent     1ddaaa244021aba8496536a6627b4ad2bc0f936a (diff)
Merge drm/drm-next into drm-intel-next
Sync to v6.10-rc3.
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Diffstat (limited to 'include/linux/ksm.h')
-rw-r--r--  include/linux/ksm.h  52
1 file changed, 30 insertions, 22 deletions
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 401348e9f92b..11690dacd986 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -33,28 +33,39 @@ void __ksm_exit(struct mm_struct *mm);
  */
 #define is_ksm_zero_pte(pte)	(is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))
 
-extern unsigned long ksm_zero_pages;
+extern atomic_long_t ksm_zero_pages;
+
+static inline void ksm_map_zero_page(struct mm_struct *mm)
+{
+	atomic_long_inc(&ksm_zero_pages);
+	atomic_long_inc(&mm->ksm_zero_pages);
+}
 
 static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
 {
 	if (is_ksm_zero_pte(pte)) {
-		ksm_zero_pages--;
-		mm->ksm_zero_pages--;
+		atomic_long_dec(&ksm_zero_pages);
+		atomic_long_dec(&mm->ksm_zero_pages);
 	}
 }
 
+static inline long mm_ksm_zero_pages(struct mm_struct *mm)
+{
+	return atomic_long_read(&mm->ksm_zero_pages);
+}
+
 static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
 {
-	int ret;
+	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
+		return __ksm_enter(mm);
 
-	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) {
-		ret = __ksm_enter(mm);
-		if (ret)
-			return ret;
-	}
+	return 0;
+}
 
-	if (test_bit(MMF_VM_MERGE_ANY, &oldmm->flags))
-		set_bit(MMF_VM_MERGE_ANY, &mm->flags);
+static inline int ksm_execve(struct mm_struct *mm)
+{
+	if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
+		return __ksm_enter(mm);
 
 	return 0;
 }
@@ -81,15 +92,9 @@ struct folio *ksm_might_need_to_copy(struct folio *folio,
 
 void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
 void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
-
-#ifdef CONFIG_MEMORY_FAILURE
-void collect_procs_ksm(struct page *page, struct list_head *to_kill,
-		       int force_early);
-#endif
-
-#ifdef CONFIG_PROC_FS
+void collect_procs_ksm(struct folio *folio, struct page *page,
+		struct list_head *to_kill, int force_early);
 long ksm_process_profit(struct mm_struct *);
-#endif /* CONFIG_PROC_FS */
 
 #else  /* !CONFIG_KSM */
 
@@ -107,6 +112,11 @@ static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
 	return 0;
 }
 
+static inline int ksm_execve(struct mm_struct *mm)
+{
+	return 0;
+}
+
 static inline void ksm_exit(struct mm_struct *mm)
 {
 }
@@ -115,12 +125,10 @@ static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
 {
 }
 
-#ifdef CONFIG_MEMORY_FAILURE
-static inline void collect_procs_ksm(struct page *page,
+static inline void collect_procs_ksm(struct folio *folio, struct page *page,
 		struct list_head *to_kill, int force_early)
 {
 }
-#endif
 
 #ifdef CONFIG_MMU
 static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
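
The substance of the ksm.h hunks above is the switch of the zero-page counters from a plain unsigned long to atomic_long_t, with ksm_map_zero_page() and ksm_might_unmap_zero_page() as the paired increment/decrement sides and mm_ksm_zero_pages() as the reader. Below is a minimal userspace sketch of why that conversion matters: C11 stdatomic.h and pthreads stand in for the kernel's atomic_long_* helpers, and the thread/iteration counts and names (worker, THREADS, ITERS) are illustrative choices, not anything from the kernel tree.

/*
 * Illustrative userspace analogue of the ksm_zero_pages conversion:
 * a shared counter bumped with ++ from several threads loses updates,
 * while an atomic counter (the kernel now uses atomic_long_t with
 * atomic_long_inc()/atomic_long_dec()/atomic_long_read()) stays exact.
 * Compile with: cc -O2 -pthread zero_page_counter.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define THREADS 4
#define ITERS   1000000

static long plain_counter;          /* racy, like the old unsigned long */
static atomic_long atomic_counter;  /* safe, like the new atomic_long_t */

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < ITERS; i++) {
		plain_counter++;                      /* load, add, store: updates can be lost */
		atomic_fetch_add(&atomic_counter, 1); /* one indivisible update */
	}
	return NULL;
}

int main(void)
{
	pthread_t tid[THREADS];

	for (int i = 0; i < THREADS; i++)
		pthread_create(&tid[i], NULL, worker, NULL);
	for (int i = 0; i < THREADS; i++)
		pthread_join(tid[i], NULL);

	/* expected: THREADS * ITERS = 4000000; the plain counter usually falls short */
	printf("plain:  %ld\n", plain_counter);
	printf("atomic: %ld\n", atomic_load(&atomic_counter));
	return 0;
}

The same pairing is visible in the hunks themselves: each ksm_map_zero_page() increment has a matching atomic_long_dec() in ksm_might_unmap_zero_page(), applied to both the global and the per-mm counter, which presumably lets mm_ksm_zero_pages() report a consistent per-process figure without taking a lock.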