 MAINTAINERS                    |   1 +
 include/linux/memory-failure.h |  17 +++++
 include/linux/mm.h             |   1 +
 include/ras/ras_event.h        |   1 +
 mm/Kconfig                     |   1 +
 mm/memory-failure.c            | 145 ++++++++++++++++++++++++++++++++-
 6 files changed, 165 insertions(+), 1 deletion(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 2625bc3d53d8..5cf6873569d3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -11557,6 +11557,7 @@ M: Miaohe Lin <linmiaohe@huawei.com>
R: Naoya Horiguchi <nao.horiguchi@gmail.com>
L: linux-mm@kvack.org
S: Maintained
+F: include/linux/memory-failure.h
F: mm/hwpoison-inject.c
F: mm/memory-failure.c
diff --git a/include/linux/memory-failure.h b/include/linux/memory-failure.h
new file mode 100644
index 000000000000..bc326503d2d2
--- /dev/null
+++ b/include/linux/memory-failure.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MEMORY_FAILURE_H
+#define _LINUX_MEMORY_FAILURE_H
+
+#include <linux/interval_tree.h>
+
+struct pfn_address_space;
+
+struct pfn_address_space {
+ struct interval_tree_node node;
+ struct address_space *mapping;
+};
+
+int register_pfn_address_space(struct pfn_address_space *pfn_space);
+void unregister_pfn_address_space(struct pfn_address_space *pfn_space);
+
+#endif /* _LINUX_MEMORY_FAILURE_H */
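
For illustration (not part of this patch), a minimal sketch of how a driver exposing device memory without struct pages might use the new interface. The mydev_* names and the PFN range are hypothetical; the mapping passed in is expected to be the address_space that the driver's mmap path attaches user VMAs to (e.g. the char device file's f_mapping), since that is the i_mmap tree the failure path walks.

#include <linux/fs.h>
#include <linux/memory-failure.h>

/* Hypothetical per-region driver state. */
struct mydev_region {
	struct pfn_address_space pfn_space;
};

/* Register the inclusive PFN range [first_pfn, first_pfn + nr_pfns - 1]. */
static int mydev_register_region(struct mydev_region *r,
				 struct address_space *mapping,
				 unsigned long first_pfn,
				 unsigned long nr_pfns)
{
	r->pfn_space.node.start = first_pfn;
	r->pfn_space.node.last = first_pfn + nr_pfns - 1;
	r->pfn_space.mapping = mapping;

	/* Fails with -EBUSY if the range overlaps an existing registration. */
	return register_pfn_address_space(&r->pfn_space);
}

static void mydev_unregister_region(struct mydev_region *r)
{
	unregister_pfn_address_space(&r->pfn_space);
}
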
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7bcd9e6fbc3c..b636d12bb651 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -4285,6 +4285,7 @@ enum mf_action_page_type {
MF_MSG_DAX,
MF_MSG_UNSPLIT_THP,
MF_MSG_ALREADY_POISONED,
+ MF_MSG_PFN_MAP,
MF_MSG_UNKNOWN,
};
diff --git a/include/ras/ras_event.h b/include/ras/ras_event.h
index c8cd0f00c845..fecfeb7c8be7 100644
--- a/include/ras/ras_event.h
+++ b/include/ras/ras_event.h
@@ -375,6 +375,7 @@ TRACE_EVENT(aer_event,
EM ( MF_MSG_DAX, "dax page" ) \
EM ( MF_MSG_UNSPLIT_THP, "unsplit thp" ) \
EM ( MF_MSG_ALREADY_POISONED, "already poisoned" ) \
+ EM ( MF_MSG_PFN_MAP, "non struct page pfn" ) \
EMe ( MF_MSG_UNKNOWN, "unknown page" )
/*
diff --git a/mm/Kconfig b/mm/Kconfig
index eae03b14f7de..d548976d0e0a 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -741,6 +741,7 @@ config MEMORY_FAILURE
depends on ARCH_SUPPORTS_MEMORY_FAILURE
bool "Enable recovery from hardware memory errors"
select RAS
+ select INTERVAL_TREE
help
Enables code to recover from some memory failures on systems
with MCA recovery. This allows a system to continue running
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 560884dd6250..77391b6f9f76 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -38,6 +38,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/memory-failure.h>
#include <linux/page-flags.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
@@ -154,6 +155,10 @@ static const struct ctl_table memory_failure_table[] = {
}
};
+static struct rb_root_cached pfn_space_itree = RB_ROOT_CACHED;
+
+static DEFINE_MUTEX(pfn_space_lock);
+
/*
* Return values:
* 1: the page is dissolved (if needed) and taken off from buddy,
@@ -885,6 +890,7 @@ static const char * const action_page_types[] = {
[MF_MSG_DAX] = "dax page",
[MF_MSG_UNSPLIT_THP] = "unsplit thp",
[MF_MSG_ALREADY_POISONED] = "already poisoned page",
+ [MF_MSG_PFN_MAP] = "non struct page pfn",
[MF_MSG_UNKNOWN] = "unknown page",
};
@@ -1277,7 +1283,7 @@ static int action_result(unsigned long pfn, enum mf_action_page_type type,
{
trace_memory_failure_event(pfn, type, result);
- if (type != MF_MSG_ALREADY_POISONED) {
+ if (type != MF_MSG_ALREADY_POISONED && type != MF_MSG_PFN_MAP) {
num_poisoned_pages_inc(pfn);
update_per_node_mf_stats(pfn, result);
}
@@ -2147,6 +2153,135 @@ static void kill_procs_now(struct page *p, unsigned long pfn, int flags,
kill_procs(&tokill, true, pfn, flags);
}
+int register_pfn_address_space(struct pfn_address_space *pfn_space)
+{
+ guard(mutex)(&pfn_space_lock);
+
+ if (interval_tree_iter_first(&pfn_space_itree,
+ pfn_space->node.start,
+ pfn_space->node.last))
+ return -EBUSY;
+
+ interval_tree_insert(&pfn_space->node, &pfn_space_itree);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(register_pfn_address_space);
+
+void unregister_pfn_address_space(struct pfn_address_space *pfn_space)
+{
+ guard(mutex)(&pfn_space_lock);
+
+ if (interval_tree_iter_first(&pfn_space_itree,
+ pfn_space->node.start,
+ pfn_space->node.last))
+ interval_tree_remove(&pfn_space->node, &pfn_space_itree);
+}
+EXPORT_SYMBOL_GPL(unregister_pfn_address_space);
+
+static void add_to_kill_pfn(struct task_struct *tsk,
+ struct vm_area_struct *vma,
+ struct list_head *to_kill,
+ unsigned long pfn)
+{
+ struct to_kill *tk;
+
+ tk = kmalloc(sizeof(*tk), GFP_ATOMIC);
+ if (!tk) {
+ pr_info("Unable to kill proc %d\n", tsk->pid);
+ return;
+ }
+
+ /* The raw PFN serves as the page offset for mappings without struct page */
+ tk->addr = vma_address(vma, pfn, 1);
+ tk->size_shift = PAGE_SHIFT;
+
+ if (tk->addr == -EFAULT)
+ pr_info("Unable to find address %lx in %s\n",
+ pfn, tsk->comm);
+
+ get_task_struct(tsk);
+ tk->tsk = tsk;
+ list_add_tail(&tk->nd, to_kill);
+}
+
+/*
+ * Collect processes when the error hit a PFN not backed by struct page.
+ */
+static void collect_procs_pfn(struct address_space *mapping,
+ unsigned long pfn, struct list_head *to_kill)
+{
+ struct vm_area_struct *vma;
+ struct task_struct *tsk;
+
+ i_mmap_lock_read(mapping);
+ rcu_read_lock();
+ for_each_process(tsk) {
+ struct task_struct *t;
+
+ t = task_early_kill(tsk, true);
+ if (!t)
+ continue;
+ vma_interval_tree_foreach(vma, &mapping->i_mmap, pfn, pfn) {
+ if (vma->vm_mm == t->mm)
+ add_to_kill_pfn(t, vma, to_kill, pfn);
+ }
+ }
+ rcu_read_unlock();
+ i_mmap_unlock_read(mapping);
+}
+
+/**
+ * memory_failure_pfn - Handle memory failure on a PFN not backed by
+ * struct page.
+ * @pfn: Page Number of the corrupted page
+ * @flags: fine tune action taken
+ *
+ * Return:
+ * 0 - success,
+ * -EBUSY - the PFN does not belong to any registered address space mapping.
+ */
+static int memory_failure_pfn(unsigned long pfn, int flags)
+{
+ struct interval_tree_node *node;
+ LIST_HEAD(tokill);
+
+ scoped_guard(mutex, &pfn_space_lock) {
+ bool mf_handled = false;
+
+ /*
+ * Modules register with MM the address space that maps the
+ * device memory they manage. Iterate to identify exactly
+ * which registered address space maps this failing
+ * PFN.
+ */
+ for (node = interval_tree_iter_first(&pfn_space_itree, pfn, pfn); node;
+ node = interval_tree_iter_next(node, pfn, pfn)) {
+ struct pfn_address_space *pfn_space =
+ container_of(node, struct pfn_address_space, node);
+
+ collect_procs_pfn(pfn_space->mapping, pfn, &tokill);
+
+ mf_handled = true;
+ }
+
+ if (!mf_handled)
+ return action_result(pfn, MF_MSG_PFN_MAP, MF_IGNORED);
+ }
+
+ /*
+ * Unlike System-RAM there is no possibility to swap in a different
+ * physical page at a given virtual address, so all userspace
+ * consumption of direct PFN memory necessitates SIGBUS (i.e.
+ * MF_MUST_KILL)
+ */
+ flags |= MF_ACTION_REQUIRED | MF_MUST_KILL;
+
+ kill_procs(&tokill, true, pfn, flags);
+
+ return action_result(pfn, MF_MSG_PFN_MAP, MF_RECOVERED);
+}
+
/**
* memory_failure - Handle memory failure of a page.
* @pfn: Page Number of the corrupted page
@@ -2196,6 +2331,14 @@ int memory_failure(unsigned long pfn, int flags)
if (res == 0)
goto unlock_mutex;
+ if (!pfn_valid(pfn) && !arch_is_platform_page(PFN_PHYS(pfn))) {
+ /*
+ * The PFN is not backed by struct page.
+ */
+ res = memory_failure_pfn(pfn, flags);
+ goto unlock_mutex;
+ }
+
if (pfn_valid(pfn)) {
pgmap = get_dev_pagemap(pfn);
put_ref_page(pfn, flags);
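
A closing observation on how the new path resolves victims: collect_procs_pfn() looks up VMAs in mapping->i_mmap indexed by the raw PFN, and add_to_kill_pfn() turns that same PFN back into a user address via vma_address(). The registered mapping is therefore expected to use the physical PFN as the file offset (vm_pgoff == pfn). A hypothetical driver mmap path under that assumption might look like the sketch below; mydev_first_pfn and mydev_nr_pfns are illustrative names, not from this patch.

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical bounds of the device memory region, filled in at probe time. */
static unsigned long mydev_first_pfn, mydev_nr_pfns;

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long nr = vma_pages(vma);

	/*
	 * Userspace mmap()s at offset PFN_PHYS(pfn), so vma->vm_pgoff is the
	 * starting PFN; that is what lets collect_procs_pfn() find this VMA
	 * in i_mmap by PFN after a poison event.
	 */
	if (vma->vm_pgoff < mydev_first_pfn ||
	    vma->vm_pgoff + nr > mydev_first_pfn + mydev_nr_pfns)
		return -EINVAL;

	/* remap_pfn_range() marks the VMA VM_IO | VM_PFNMAP itself. */
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       nr << PAGE_SHIFT, vma->vm_page_prot);
}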