Diffstat (limited to 'mm/ksm.c')
-rw-r--r--	mm/ksm.c	261
1 file changed, 242 insertions(+), 19 deletions(-)
diff --git a/mm/ksm.c b/mm/ksm.c
index 2b8d30068cbb..0156bded3a66 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -45,6 +45,9 @@
#include "internal.h"
#include "mm_slot.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/ksm.h>
+
#ifdef CONFIG_NUMA
#define NUMA(x) (x)
#define DO_NUMA(x) do { (x); } while (0)
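
The hunk above hooks mm/ksm.c up to a new set of trace events (TRACE_SYSTEM "ksm" in <trace/events/ksm.h>). As a quick illustration of how the events added further down can be consumed, here is a minimal userspace sketch that enables the whole event group through tracefs; the mount point and the helper program are assumptions for illustration only and are not part of this diff.

/* Illustrative sketch (not from this diff): enable the new ksm trace events.
 * Assumes tracefs is mounted at /sys/kernel/tracing and tracing is built in. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* TRACE_SYSTEM is "ksm", so the new events live under events/ksm/. */
	int fd = open("/sys/kernel/tracing/events/ksm/enable", O_WRONLY);

	if (fd < 0) {
		perror("open events/ksm/enable");
		return 1;
	}
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);

	/* Scan/merge activity now shows up in /sys/kernel/tracing/trace. */
	return 0;
}
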
@@ -512,6 +515,28 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
}
+static bool vma_ksm_compatible(struct vm_area_struct *vma)
+{
+ if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE | VM_PFNMAP |
+ VM_IO | VM_DONTEXPAND | VM_HUGETLB |
+ VM_MIXEDMAP))
+ return false; /* just ignore the advice */
+
+ if (vma_is_dax(vma))
+ return false;
+
+#ifdef VM_SAO
+ if (vma->vm_flags & VM_SAO)
+ return false;
+#endif
+#ifdef VM_SPARC_ADI
+ if (vma->vm_flags & VM_SPARC_ADI)
+ return false;
+#endif
+
+ return true;
+}
+
static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
unsigned long addr)
{
@@ -633,10 +658,12 @@ static void remove_node_from_stable_tree(struct ksm_stable_node *stable_node)
BUG_ON(stable_node->rmap_hlist_len < 0);
hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
- if (rmap_item->hlist.next)
+ if (rmap_item->hlist.next) {
ksm_pages_sharing--;
- else
+ trace_ksm_remove_rmap_item(stable_node->kpfn, rmap_item, rmap_item->mm);
+ } else {
ksm_pages_shared--;
+ }
rmap_item->mm->ksm_merging_pages--;
@@ -657,6 +684,7 @@ static void remove_node_from_stable_tree(struct ksm_stable_node *stable_node)
BUILD_BUG_ON(STABLE_NODE_DUP_HEAD <= &migrate_nodes);
BUILD_BUG_ON(STABLE_NODE_DUP_HEAD >= &migrate_nodes + 1);
+ trace_ksm_remove_ksm_page(stable_node->kpfn);
if (stable_node->head == &migrate_nodes)
list_del(&stable_node->list);
else
@@ -1020,6 +1048,7 @@ mm_exiting:
mm_slot_free(mm_slot_cache, mm_slot);
clear_bit(MMF_VM_MERGEABLE, &mm->flags);
+ clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
mmdrop(mm);
} else
spin_unlock(&ksm_mmlist_lock);
@@ -1324,6 +1353,8 @@ static int try_to_merge_with_ksm_page(struct ksm_rmap_item *rmap_item,
get_anon_vma(vma->anon_vma);
out:
mmap_read_unlock(mm);
+ trace_ksm_merge_with_ksm_page(kpage, page_to_pfn(kpage ? kpage : page),
+ rmap_item, mm, err);
return err;
}
@@ -2142,6 +2173,9 @@ static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_ite
if (vma) {
err = try_to_merge_one_page(vma, page,
ZERO_PAGE(rmap_item->address));
+ trace_ksm_merge_one_page(
+ page_to_pfn(ZERO_PAGE(rmap_item->address)),
+ rmap_item, mm, err);
} else {
/*
* If the vma is out of date, we do not need to
@@ -2264,6 +2298,8 @@ static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
mm_slot = ksm_scan.mm_slot;
if (mm_slot == &ksm_mm_head) {
+ trace_ksm_start_scan(ksm_scan.seqnr, ksm_rmap_items);
+
/*
* A number of pages can hang around indefinitely on per-cpu
* pagevecs, raised page count preventing write_protect_page
@@ -2395,6 +2431,7 @@ no_vmas:
mm_slot_free(mm_slot_cache, mm_slot);
clear_bit(MMF_VM_MERGEABLE, &mm->flags);
+ clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
mmap_read_unlock(mm);
mmdrop(mm);
} else {
@@ -2414,6 +2451,7 @@ no_vmas:
if (mm_slot != &ksm_mm_head)
goto next_mm;
+ trace_ksm_stop_scan(ksm_scan.seqnr, ksm_rmap_items);
ksm_scan.seqnr++;
return NULL;
}
@@ -2471,6 +2509,136 @@ static int ksm_scan_thread(void *nothing)
return 0;
}
+static void __ksm_add_vma(struct vm_area_struct *vma)
+{
+ unsigned long vm_flags = vma->vm_flags;
+
+ if (vm_flags & VM_MERGEABLE)
+ return;
+
+ if (vma_ksm_compatible(vma))
+ vm_flags_set(vma, VM_MERGEABLE);
+}
+
+static int __ksm_del_vma(struct vm_area_struct *vma)
+{
+ int err;
+
+ if (!(vma->vm_flags & VM_MERGEABLE))
+ return 0;
+
+ if (vma->anon_vma) {
+ err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end);
+ if (err)
+ return err;
+ }
+
+ vm_flags_clear(vma, VM_MERGEABLE);
+ return 0;
+}
+/**
+ * ksm_add_vma - Mark vma as mergeable if compatible
+ *
+ * @vma: Pointer to vma
+ */
+void ksm_add_vma(struct vm_area_struct *vma)
+{
+ struct mm_struct *mm = vma->vm_mm;
+
+ if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
+ __ksm_add_vma(vma);
+}
+
+static void ksm_add_vmas(struct mm_struct *mm)
+{
+ struct vm_area_struct *vma;
+
+ VMA_ITERATOR(vmi, mm, 0);
+ for_each_vma(vmi, vma)
+ __ksm_add_vma(vma);
+}
+
+static int ksm_del_vmas(struct mm_struct *mm)
+{
+ struct vm_area_struct *vma;
+ int err;
+
+ VMA_ITERATOR(vmi, mm, 0);
+ for_each_vma(vmi, vma) {
+ err = __ksm_del_vma(vma);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+/**
+ * ksm_enable_merge_any - Add mm to mm ksm list and enable merging on all
+ * compatible VMA's
+ *
+ * @mm: Pointer to mm
+ *
+ * Returns 0 on success, otherwise error code
+ */
+int ksm_enable_merge_any(struct mm_struct *mm)
+{
+ int err;
+
+ if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
+ return 0;
+
+ if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
+ err = __ksm_enter(mm);
+ if (err)
+ return err;
+ }
+
+ set_bit(MMF_VM_MERGE_ANY, &mm->flags);
+ ksm_add_vmas(mm);
+
+ return 0;
+}
+
+/**
+ * ksm_disable_merge_any - Disable merging on all compatible VMA's of the mm,
+ * previously enabled via ksm_enable_merge_any().
+ *
+ * Disabling merging implies unmerging any merged pages, like setting
+ * MADV_UNMERGEABLE would. If unmerging fails, the whole operation fails and
+ * merging on all compatible VMA's remains enabled.
+ *
+ * @mm: Pointer to mm
+ *
+ * Returns 0 on success, otherwise error code
+ */
+int ksm_disable_merge_any(struct mm_struct *mm)
+{
+ int err;
+
+ if (!test_bit(MMF_VM_MERGE_ANY, &mm->flags))
+ return 0;
+
+ err = ksm_del_vmas(mm);
+ if (err) {
+ ksm_add_vmas(mm);
+ return err;
+ }
+
+ clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
+ return 0;
+}
+
+int ksm_disable(struct mm_struct *mm)
+{
+ mmap_assert_write_locked(mm);
+
+ if (!test_bit(MMF_VM_MERGEABLE, &mm->flags))
+ return 0;
+ if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
+ return ksm_disable_merge_any(mm);
+ return ksm_del_vmas(mm);
+}
+
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
unsigned long end, int advice, unsigned long *vm_flags)
{
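
The hunk above introduces the process-wide opt-in API: ksm_enable_merge_any() registers the mm with ksmd and tags every compatible VMA, ksm_disable_merge_any() undoes that (unmerging first), and ksm_disable() is the catch-all used when merging must be switched off regardless of how it was enabled. A minimal caller sketch follows; the function name and the enable/disable entry point are assumptions, since the actual wiring (e.g. a prctl handler) lives outside mm/ksm.c and is not part of this diff.

/* Hypothetical caller sketch (not from this diff): toggle process-wide
 * merging for @mm. Updating vm_flags on every VMA requires the mmap lock
 * held for write, so the caller is assumed to take it here. */
static int memory_merge_sketch(struct mm_struct *mm, bool enable)
{
	int err;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	if (enable)
		/* sets MMF_VM_MERGE_ANY and VM_MERGEABLE on compatible VMAs */
		err = ksm_enable_merge_any(mm);
	else
		/* unmerges pages first, then clears VM_MERGEABLE again */
		err = ksm_disable_merge_any(mm);

	mmap_write_unlock(mm);
	return err;
}
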
@@ -2479,25 +2647,10 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
switch (advice) {
case MADV_MERGEABLE:
- /*
- * Be somewhat over-protective for now!
- */
- if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE |
- VM_PFNMAP | VM_IO | VM_DONTEXPAND |
- VM_HUGETLB | VM_MIXEDMAP))
- return 0; /* just ignore the advice */
-
- if (vma_is_dax(vma))
- return 0;
-
-#ifdef VM_SAO
- if (*vm_flags & VM_SAO)
+ if (vma->vm_flags & VM_MERGEABLE)
return 0;
-#endif
-#ifdef VM_SPARC_ADI
- if (*vm_flags & VM_SPARC_ADI)
+ if (!vma_ksm_compatible(vma))
return 0;
-#endif
if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
err = __ksm_enter(mm);
@@ -2565,6 +2718,7 @@ int __ksm_enter(struct mm_struct *mm)
if (needs_wakeup)
wake_up_interruptible(&ksm_thread_wait);
+ trace_ksm_enter(mm);
return 0;
}
@@ -2600,12 +2754,15 @@ void __ksm_exit(struct mm_struct *mm)
if (easy_to_free) {
mm_slot_free(mm_slot_cache, mm_slot);
+ clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
clear_bit(MMF_VM_MERGEABLE, &mm->flags);
mmdrop(mm);
} else if (mm_slot) {
mmap_write_lock(mm);
mmap_write_unlock(mm);
}
+
+ trace_ksm_exit(mm);
}
struct page *ksm_might_need_to_copy(struct page *page,
@@ -2721,6 +2878,51 @@ again:
goto again;
}
+#ifdef CONFIG_MEMORY_FAILURE
+/*
+ * Collect processes when a memory error hits a KSM page.
+ */
+void collect_procs_ksm(struct page *page, struct list_head *to_kill,
+ int force_early)
+{
+ struct ksm_stable_node *stable_node;
+ struct ksm_rmap_item *rmap_item;
+ struct folio *folio = page_folio(page);
+ struct vm_area_struct *vma;
+ struct task_struct *tsk;
+
+ stable_node = folio_stable_node(folio);
+ if (!stable_node)
+ return;
+ hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
+ struct anon_vma *av = rmap_item->anon_vma;
+
+ anon_vma_lock_read(av);
+ read_lock(&tasklist_lock);
+ for_each_process(tsk) {
+ struct anon_vma_chain *vmac;
+ unsigned long addr;
+ struct task_struct *t =
+ task_early_kill(tsk, force_early);
+ if (!t)
+ continue;
+ anon_vma_interval_tree_foreach(vmac, &av->rb_root, 0,
+ ULONG_MAX)
+ {
+ vma = vmac->vma;
+ if (vma->vm_mm == t->mm) {
+ addr = rmap_item->address & PAGE_MASK;
+ add_to_kill_ksm(t, page, vma, to_kill,
+ addr);
+ }
+ }
+ }
+ read_unlock(&tasklist_lock);
+ anon_vma_unlock_read(av);
+ }
+}
+#endif
+
#ifdef CONFIG_MIGRATION
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio)
{
@@ -2875,6 +3077,14 @@ static void wait_while_offlining(void)
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
+#ifdef CONFIG_PROC_FS
+long ksm_process_profit(struct mm_struct *mm)
+{
+ return mm->ksm_merging_pages * PAGE_SIZE -
+ mm->ksm_rmap_items * sizeof(struct ksm_rmap_item);
+}
+#endif /* CONFIG_PROC_FS */
+
#ifdef CONFIG_SYSFS
/*
* This all compiles without CONFIG_SYSFS, but is a waste of space.
@@ -3139,6 +3349,18 @@ static ssize_t pages_volatile_show(struct kobject *kobj,
}
KSM_ATTR_RO(pages_volatile);
+static ssize_t general_profit_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ long general_profit;
+
+ general_profit = ksm_pages_sharing * PAGE_SIZE -
+ ksm_rmap_items * sizeof(struct ksm_rmap_item);
+
+ return sysfs_emit(buf, "%ld\n", general_profit);
+}
+KSM_ATTR_RO(general_profit);
+
static ssize_t stable_node_dups_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -3203,6 +3425,7 @@ static struct attribute *ksm_attrs[] = {
&stable_node_dups_attr.attr,
&stable_node_chains_prune_millisecs_attr.attr,
&use_zero_pages_attr.attr,
+ &general_profit_attr.attr,
NULL,
};
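
The series also exposes a global profit estimate through the general_profit sysfs knob added above (ksm_pages_sharing * PAGE_SIZE minus the memory spent on rmap_items), alongside the per-mm ksm_process_profit() helper. Below is a small self-contained userspace sketch that opts a process into merge-any mode and reads the new knob; the PR_SET_MEMORY_MERGE/PR_GET_MEMORY_MERGE values belong to the matching uapi change and are only assumed here for the case where installed headers predate it.

/* Illustrative userspace sketch (not from this diff): opt this process into
 * KSM via the process-wide interface and read the global profit estimate.
 * The prctl constants are assumed to be 67/68 if the headers lack them. */
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_MEMORY_MERGE
#define PR_SET_MEMORY_MERGE	67
#define PR_GET_MEMORY_MERGE	68
#endif

int main(void)
{
	long profit;
	FILE *f;

	/* Ask KSM to consider every compatible VMA of this process. */
	if (prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0))
		perror("PR_SET_MEMORY_MERGE");

	printf("merge-any enabled: %d\n",
	       prctl(PR_GET_MEMORY_MERGE, 0, 0, 0, 0));

	/* general_profit = ksm_pages_sharing * PAGE_SIZE -
	 *                  ksm_rmap_items * sizeof(struct ksm_rmap_item) */
	f = fopen("/sys/kernel/mm/ksm/general_profit", "r");
	if (f && fscanf(f, "%ld", &profit) == 1)
		printf("general_profit: %ld bytes\n", profit);
	if (f)
		fclose(f);
	return 0;
}
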