author    | Pratyush Yadav <pratyush@kernel.org>      | 2025-09-17 14:56:54 +0200
committer | Andrew Morton <akpm@linux-foundation.org> | 2025-09-23 14:14:17 -0700
commit    | 89a3ecca49ee889cc1ab4def6caa0452df196efb
tree      | e046ee6fb61d80996c33c7c8c7ce58ce054f17f6
parent    | 20571b187051e5b78b48b99c9bdd425c94b29e18
kho: make sure page being restored is actually from KHO
When restoring a page, no sanity checks are done to make sure the page
actually came from a kexec handover. The caller is trusted to pass in the
right address. If the caller has a bug and passes in a wrong address, an
in-use page might be "restored" and returned, causing all sorts of memory
corruption.
Harden the page restore logic by stashing a magic number in
page->private along with the order. If the magic number does not match,
the page won't be touched. page->private is an unsigned long. The union
kho_page_info splits it into two parts, with one holding the order and the
other holding the magic number.
Link: https://lkml.kernel.org/r/20250917125725.665-2-pratyush@kernel.org
Signed-off-by: Pratyush Yadav <pratyush@kernel.org>
Cc: Alexander Graf <graf@amazon.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Changyuan Lyu <changyuanl@google.com>
Cc: Chris Li <chrisl@kernel.org>
Cc: Jason Gunthorpe <jgg@nvidia.com>
Cc: Mike Rapoport (Microsoft) <rppt@kernel.org>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r-- | kernel/kexec_handover.c | 41
1 file changed, 34 insertions, 7 deletions
diff --git a/kernel/kexec_handover.c b/kernel/kexec_handover.c
index c006a7544664..555488eb1a18 100644
--- a/kernel/kexec_handover.c
+++ b/kernel/kexec_handover.c
@@ -32,6 +32,22 @@
 #define PROP_PRESERVED_MEMORY_MAP "preserved-memory-map"
 #define PROP_SUB_FDT "fdt"
 
+#define KHO_PAGE_MAGIC 0x4b484f50U /* ASCII for 'KHOP' */
+
+/*
+ * KHO uses page->private, which is an unsigned long, to store page metadata.
+ * Use it to store both the magic and the order.
+ */
+union kho_page_info {
+	unsigned long page_private;
+	struct {
+		unsigned int order;
+		unsigned int magic;
+	};
+};
+
+static_assert(sizeof(union kho_page_info) == sizeof(((struct page *)0)->private));
+
 static bool kho_enable __ro_after_init;
 
 bool kho_is_enabled(void)
@@ -186,16 +202,24 @@ static int __kho_preserve_order(struct kho_mem_track *track, unsigned long pfn,
 static struct page *kho_restore_page(phys_addr_t phys)
 {
 	struct page *page = pfn_to_online_page(PHYS_PFN(phys));
-	unsigned int nr_pages, order;
+	union kho_page_info info;
+	unsigned int nr_pages;
 
 	if (!page)
 		return NULL;
 
-	order = page->private;
-	if (order > MAX_PAGE_ORDER)
+	info.page_private = page->private;
+	/*
+	 * deserialize_bitmap() only sets the magic on the head page. This magic
+	 * check also implicitly makes sure phys is order-aligned since for
+	 * non-order-aligned phys addresses, magic will never be set.
+	 */
+	if (WARN_ON_ONCE(info.magic != KHO_PAGE_MAGIC || info.order > MAX_PAGE_ORDER))
 		return NULL;
 
-	nr_pages = (1 << order);
+	nr_pages = (1 << info.order);
+	/* Clear private to make sure later restores on this page error out. */
+	page->private = 0;
 
 	/* Head page gets refcount of 1. */
 	set_page_count(page, 1);
@@ -203,8 +227,8 @@ static struct page *kho_restore_page(phys_addr_t phys)
 	for (unsigned int i = 1; i < nr_pages; i++)
 		set_page_count(page + i, 0);
 
-	if (order > 0)
-		prep_compound_page(page, order);
+	if (info.order > 0)
+		prep_compound_page(page, info.order);
 
 	adjust_managed_page_count(page, nr_pages);
 	return page;
@@ -341,10 +365,13 @@ static void __init deserialize_bitmap(unsigned int order,
 		phys_addr_t phys =
 			elm->phys_start + (bit << (order + PAGE_SHIFT));
 		struct page *page = phys_to_page(phys);
+		union kho_page_info info;
 
 		memblock_reserve(phys, sz);
 		memblock_reserved_mark_noinit(phys, sz);
-		page->private = order;
+		info.magic = KHO_PAGE_MAGIC;
+		info.order = order;
+		page->private = info.page_private;
 	}
 }
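For illustration only, the following is a small standalone userspace sketch (not kernel code) of the packing scheme the patch introduces: the same union kho_page_info overlays an unsigned long with an order/magic pair, a hypothetical pack() helper mimics what deserialize_bitmap() stores in page->private, and restore_ok() mirrors the accept/reject check in kho_restore_page(). The MAX_PAGE_ORDER value and the helper names are assumptions for the example, and the size check assumes an LP64 unsigned long so the two unsigned ints fit exactly, matching the kernel's static_assert.

/*
 * Standalone sketch of the KHO page->private packing. Helper names and
 * MAX_PAGE_ORDER are illustrative, not taken from the kernel.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define KHO_PAGE_MAGIC 0x4b484f50U /* ASCII for 'KHOP' */
#define MAX_PAGE_ORDER 10          /* illustrative value only */

union kho_page_info {
	unsigned long page_private;
	struct {
		unsigned int order;
		unsigned int magic;
	};
};

/* Pack order + magic the way deserialize_bitmap() stores them. */
static unsigned long pack(unsigned int order)
{
	union kho_page_info info = { .order = order, .magic = KHO_PAGE_MAGIC };

	return info.page_private;
}

/* Mirror the sanity check kho_restore_page() performs before restoring. */
static bool restore_ok(unsigned long page_private, unsigned int *order)
{
	union kho_page_info info = { .page_private = page_private };

	if (info.magic != KHO_PAGE_MAGIC || info.order > MAX_PAGE_ORDER)
		return false; /* not a KHO page, or corrupted metadata */

	*order = info.order;
	return true;
}

int main(void)
{
	unsigned int order;

	/* Assumes LP64: two unsigned ints fit exactly in an unsigned long. */
	static_assert(sizeof(union kho_page_info) == sizeof(unsigned long),
		      "order + magic must fit in page->private");

	assert(restore_ok(pack(3), &order) && order == 3);
	/* A page->private that was never set by KHO is rejected. */
	assert(!restore_ok(0, &order));
	/* So is one with the right magic but a bogus order. */
	assert(!restore_ok(pack(MAX_PAGE_ORDER + 1), &order));

	printf("magic/order packing behaves as expected\n");
	return 0;
}

The same mechanism is what makes the "clear private" step in the patch effective: once kho_restore_page() zeroes page->private, any later restore attempt on that page fails the magic check instead of handing out an already in-use page.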