-rw-r--r-- | arch/x86/kernel/machine_kexec_64.c   | 30 ++++++++++++++++++++++++------
-rw-r--r-- | arch/x86/kernel/relocate_kernel_64.S |  5 ++++-
2 files changed, 28 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 3a4cbac1a0c6..9567347f7a9b 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -157,7 +157,12 @@ static int init_transition_pgtable(struct kimage *image, pgd_t *pgd,
 	pmd_t *pmd;
 	pte_t *pte;
 
-	vaddr = (unsigned long)relocate_kernel;
+	/*
+	 * For the transition to the identity mapped page tables, the control
+	 * code page also needs to be mapped at the virtual address it starts
+	 * off running from.
+	 */
+	vaddr = (unsigned long)__va(control_page);
 	paddr = control_page;
 	pgd += pgd_index(vaddr);
 	if (!pgd_present(*pgd)) {
@@ -311,11 +316,17 @@ int machine_kexec_prepare(struct kimage *image)
 
 	__memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
 
+	set_memory_x((unsigned long)control_page, 1);
+
 	return 0;
 }
 
 void machine_kexec_cleanup(struct kimage *image)
 {
+	void *control_page = page_address(image->control_code_page);
+
+	set_memory_nx((unsigned long)control_page, 1);
+
 	free_transition_pgtable(image);
 }
 
@@ -325,6 +336,11 @@ void machine_kexec_cleanup(struct kimage *image)
  */
 void machine_kexec(struct kimage *image)
 {
+	unsigned long (*relocate_kernel_ptr)(unsigned long indirection_page,
+					     unsigned long page_list,
+					     unsigned long start_address,
+					     unsigned int preserve_context,
+					     unsigned int host_mem_enc_active);
 	unsigned long page_list[PAGES_NR];
 	unsigned int host_mem_enc_active;
 	int save_ftrace_enabled;
@@ -371,6 +387,8 @@ void machine_kexec(struct kimage *image)
 	page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
 				   << PAGE_SHIFT);
 
+	relocate_kernel_ptr = control_page;
+
 	/*
 	 * The segment registers are funny things, they have both a
 	 * visible and an invisible part.  Whenever the visible part is
@@ -390,11 +408,11 @@
 	native_gdt_invalidate();
 
 	/* now call it */
-	image->start = relocate_kernel((unsigned long)image->head,
-				       (unsigned long)page_list,
-				       image->start,
-				       image->preserve_context,
-				       host_mem_enc_active);
+	image->start = relocate_kernel_ptr((unsigned long)image->head,
+					   (unsigned long)page_list,
+					   image->start,
+					   image->preserve_context,
+					   host_mem_enc_active);
 
 #ifdef CONFIG_KEXEC_JUMP
 	if (image->preserve_context)
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index ca7f1e1d5b11..d0a87b39db6a 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -39,6 +39,7 @@
 #define CP_PA_TABLE_PAGE	DATA(0x20)
 #define CP_PA_SWAP_PAGE		DATA(0x28)
 #define CP_PA_BACKUP_PAGES_MAP	DATA(0x30)
+#define CP_VA_CONTROL_PAGE	DATA(0x38)
 
 	.text
 	.align PAGE_SIZE
@@ -99,6 +100,7 @@ SYM_CODE_START_NOALIGN(relocate_kernel)
 	movq	%r9, CP_PA_TABLE_PAGE(%r11)
 	movq	%r10, CP_PA_SWAP_PAGE(%r11)
 	movq	%rdi, CP_PA_BACKUP_PAGES_MAP(%r11)
+	movq	%r11, CP_VA_CONTROL_PAGE(%r11)
 
 	/* Save the preserve_context to %r11 as swap_pages clobbers %rcx. */
 	movq	%rcx, %r11
@@ -235,7 +237,8 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
 	movq	%rax, %cr3
 	lea	PAGE_SIZE(%r8), %rsp
 	call	swap_pages
-	movq	$virtual_mapped, %rax
+	movq	CP_VA_CONTROL_PAGE(%r8), %rax
+	addq	$(virtual_mapped - relocate_kernel), %rax
 	pushq	%rax
 	ANNOTATE_UNRET_SAFE
 	ret
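
The crux of the final .S hunk is the rebasing arithmetic: once relocate_kernel() executes from its copy in the control page, the absolute address $virtual_mapped still points into the original kernel text, so the code instead adds the link-time offset (virtual_mapped - relocate_kernel) to the control page's saved virtual address. Below is a minimal user-space C sketch of the same rebasing, using a plain data buffer instead of executable text; all names, sizes, and the offset value are illustrative and not taken from the patch:

    #include <stdio.h>
    #include <string.h>

    /*
     * Stand-ins for the kernel's layout: "original" plays the role of the
     * relocate_kernel text in vmlinux, and LABEL_OFFSET plays the role of
     * the link-time constant (virtual_mapped - relocate_kernel).
     */
    static char original[64] = "arbitrary bytes standing in for code";
    #define LABEL_OFFSET 16

    int main(void)
    {
    	char copy[64];	/* stand-in for the control page */

    	/* Like __memcpy(control_page, relocate_kernel, ...) in the patch. */
    	memcpy(copy, original, sizeof(copy));

    	/*
    	 * Rebase the label against the copy; the same arithmetic as:
    	 *	movq	CP_VA_CONTROL_PAGE(%r8), %rax
    	 *	addq	$(virtual_mapped - relocate_kernel), %rax
    	 */
    	char *label_in_copy = copy + LABEL_OFFSET;

    	printf("label in original: %p\n", (void *)(original + LABEL_OFFSET));
    	printf("label in copy:     %p\n", (void *)label_in_copy);
    	return 0;
    }

The offset between two labels inside the copied region is position independent, so it survives the copy even though absolute addresses do not; that is why the patch only needs to stash the control page's VA (CP_VA_CONTROL_PAGE) and can compute everything else from link-time constants.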