From 34f51147d2e64da842679a97ffcfd17c8a0e6943 Mon Sep 17 00:00:00 2001 From: Dave Young Date: Thu, 1 May 2014 21:15:48 +0800 Subject: x86/efi: earlyprintk=efi,keep fix earlyprintk=efi,keep will cause kernel hangs while freeing initmem like below: VFS: Mounted root (ext4 filesystem) readonly on device 254:2. devtmpfs: mounted Freeing unused kernel memory: 880K (ffffffff817d4000 - ffffffff818b0000) It is caused by efi earlyprintk use __init function which will be freed later. Such as early_efi_write is marked as __init, also it will use early_ioremap which is init function as well. To fix this issue, I added early initcall early_efi_map_fb which maps the whole efi fb for later use. OTOH, adding a wrapper function early_efi_map which calls early_ioremap before ioremap is available. With this patch applied efi boot ok with earlyprintk=efi,keep console=efi Signed-off-by: Dave Young Signed-off-by: Matt Fleming --- arch/x86/platform/efi/early_printk.c | 83 +++++++++++++++++++++++++++--------- 1 file changed, 64 insertions(+), 19 deletions(-) diff --git a/arch/x86/platform/efi/early_printk.c b/arch/x86/platform/efi/early_printk.c index 81b506d5befd..524142117296 100644 --- a/arch/x86/platform/efi/early_printk.c +++ b/arch/x86/platform/efi/early_printk.c @@ -14,48 +14,92 @@ static const struct font_desc *font; static u32 efi_x, efi_y; +static void *efi_fb; +static bool early_efi_keep; -static __init void early_efi_clear_scanline(unsigned int y) +/* + * efi earlyprintk need use early_ioremap to map the framebuffer. + * But early_ioremap is not usable for earlyprintk=efi,keep, ioremap should + * be used instead. ioremap will be available after paging_init() which is + * earlier than initcall callbacks. Thus adding this early initcall function + * early_efi_map_fb to map the whole efi framebuffer. + */ +static __init int early_efi_map_fb(void) { - unsigned long base, *dst; - u16 len; + unsigned long base, size; + + if (!early_efi_keep) + return 0; base = boot_params.screen_info.lfb_base; - len = boot_params.screen_info.lfb_linelength; + size = boot_params.screen_info.lfb_size; + efi_fb = ioremap(base, size); + + return efi_fb ? 0 : -ENOMEM; +} +early_initcall(early_efi_map_fb); + +/* + * early_efi_map maps efi framebuffer region [start, start + len -1] + * In case earlyprintk=efi,keep we have the whole framebuffer mapped already + * so just return the offset efi_fb + start. 
+ */ +static __init_refok void *early_efi_map(unsigned long start, unsigned long len) +{ + unsigned long base; + + base = boot_params.screen_info.lfb_base; + + if (efi_fb) + return (efi_fb + start); + else + return early_ioremap(base + start, len); +} - dst = early_ioremap(base + y*len, len); +static __init_refok void early_efi_unmap(void *addr, unsigned long len) +{ + if (!efi_fb) + early_iounmap(addr, len); +} + +static void early_efi_clear_scanline(unsigned int y) +{ + unsigned long *dst; + u16 len; + + len = boot_params.screen_info.lfb_linelength; + dst = early_efi_map(y*len, len); if (!dst) return; memset(dst, 0, len); - early_iounmap(dst, len); + early_efi_unmap(dst, len); } -static __init void early_efi_scroll_up(void) +static void early_efi_scroll_up(void) { - unsigned long base, *dst, *src; + unsigned long *dst, *src; u16 len; u32 i, height; - base = boot_params.screen_info.lfb_base; len = boot_params.screen_info.lfb_linelength; height = boot_params.screen_info.lfb_height; for (i = 0; i < height - font->height; i++) { - dst = early_ioremap(base + i*len, len); + dst = early_efi_map(i*len, len); if (!dst) return; - src = early_ioremap(base + (i + font->height) * len, len); + src = early_efi_map((i + font->height) * len, len); if (!src) { - early_iounmap(dst, len); + early_efi_unmap(dst, len); return; } memmove(dst, src, len); - early_iounmap(src, len); - early_iounmap(dst, len); + early_efi_unmap(src, len); + early_efi_unmap(dst, len); } } @@ -79,16 +123,14 @@ static void early_efi_write_char(u32 *dst, unsigned char c, unsigned int h) } } -static __init void +static void early_efi_write(struct console *con, const char *str, unsigned int num) { struct screen_info *si; - unsigned long base; unsigned int len; const char *s; void *dst; - base = boot_params.screen_info.lfb_base; si = &boot_params.screen_info; len = si->lfb_linelength; @@ -109,7 +151,7 @@ early_efi_write(struct console *con, const char *str, unsigned int num) for (h = 0; h < font->height; h++) { unsigned int n, x; - dst = early_ioremap(base + (efi_y + h) * len, len); + dst = early_efi_map((efi_y + h) * len, len); if (!dst) return; @@ -123,7 +165,7 @@ early_efi_write(struct console *con, const char *str, unsigned int num) s++; } - early_iounmap(dst, len); + early_efi_unmap(dst, len); } num -= count; @@ -179,6 +221,9 @@ static __init int early_efi_setup(struct console *con, char *options) for (i = 0; i < (yres - efi_y) / font->height; i++) early_efi_scroll_up(); + /* early_console_register will unset CON_BOOT in case ,keep */ + if (!(con->flags & CON_BOOT)) + early_efi_keep = true; return 0; } -- cgit From a3530e8fe980f756b823d451fe9243f26db95fa5 Mon Sep 17 00:00:00 2001 From: Dave Young Date: Fri, 30 May 2014 11:20:10 +0800 Subject: x86/efi: Do not export efi runtime map in case old map For ioremapped efi memory aka old_map the virt addresses are not persistant across kexec reboot. kexec-tools will read the runtime maps from sysfs then pass them to 2nd kernel and assuming kexec efi boot is ok. This will cause kexec boot failure. To address this issue do not export runtime maps in case efi old_map so userspace can use no efi boot instead. 
Signed-off-by: Dave Young Acked-by: Borislav Petkov Acked-by: Vivek Goyal Signed-off-by: Matt Fleming --- arch/x86/platform/efi/efi.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 3781dd39e8bd..4d36932ca4f2 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -919,6 +919,9 @@ static void __init save_runtime_map(void) void *tmp, *p, *q = NULL; int count = 0; + if (efi_enabled(EFI_OLD_MEMMAP)) + return; + for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { md = p; -- cgit From ac2a55395eddccd6e3e39532df9869d61e97b2ee Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Tue, 13 May 2014 11:39:34 -0400 Subject: x86: irq: Get correct available vectors for cpu disable check_irq_vectors_for_cpu_disable() can overestimate the number of available interrupt vectors, so the check for cpu down succeeds, but the actual cpu removal fails. It iterates from FIRST_EXTERNAL_VECTOR to NR_VECTORS, which is wrong because the systems vectors are not taken into account. Limit the search to first_system_vector instead of NR_VECTORS. The second indicator for vector availability the used_vectors bitmap is not taken into account at all. So system vectors, e.g. IA32_SYSCALL_VECTOR (0x80) and IRQ_MOVE_CLEANUP_VECTOR (0x20), are accounted as available. Add a check for the used_vectors bitmap and do not account vectors which are marked there. [ tglx: Simplified code. Rewrote changelog and code comments. ] Signed-off-by: Yinghai Lu Acked-by: Prarit Bhargava Cc: Seiji Aguchi Cc: Andi Kleen Cc: K. Y. Srinivasan Cc: Steven Rostedt (Red Hat) Cc: Ingo Molnar Cc: "H. Peter Anvin" Cc: "Elliott, Robert (Server Storage)" Cc: x86@kernel.org Link: http://lkml.kernel.org/r/1400160305-17774-2-git-send-email-prarit@redhat.com Signed-off-by: Thomas Gleixner --- arch/x86/kernel/irq.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 283a76a9cc40..11ccfb0a63e7 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -17,6 +17,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include @@ -334,10 +335,17 @@ int check_irq_vectors_for_cpu_disable(void) for_each_online_cpu(cpu) { if (cpu == this_cpu) continue; - for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; - vector++) { - if (per_cpu(vector_irq, cpu)[vector] < 0) - count++; + /* + * We scan from FIRST_EXTERNAL_VECTOR to first system + * vector. If the vector is marked in the used vectors + * bitmap or an irq is assigned to it, we don't count + * it as available. + */ + for (vector = FIRST_EXTERNAL_VECTOR; + vector < first_system_vector; vector++) { + if (!test_bit(vector, used_vectors) && + per_cpu(vector_irq, cpu)[vector] < 0) + count++; } } -- cgit From 89f898c1e195fa6235c869bb457e500b7b3ac49d Mon Sep 17 00:00:00 2001 From: Igor Mammedov Date: Thu, 5 Jun 2014 15:42:43 +0200 Subject: x86: Fix list/memory corruption on CPU hotplug currently if AP wake up is failed, master CPU marks AP as not present in do_boot_cpu() by calling set_cpu_present(cpu, false). That leads to following list corruption on the next physical CPU hotplug: [ 418.107336] WARNING: CPU: 1 PID: 45 at lib/list_debug.c:33 __list_add+0xbe/0xd0() [ 418.115268] list_add corruption. prev->next should be next (ffff88003dc57600), but was ffff88003e20c3a0. (prev=ffff88003e20c3a0). 
[ 418.123693] Modules linked in: nf_conntrack_netbios_ns nf_conntrack_broadcast ipt_MASQUERADE ip6t_REJECT ipt_REJECT cfg80211 xt_conntrack rfkill ee [ 418.138979] CPU: 1 PID: 45 Comm: kworker/u10:1 Not tainted 3.14.0-rc6+ #387 [ 418.149989] Hardware name: Red Hat KVM, BIOS 0.5.1 01/01/2007 [ 418.165750] Workqueue: kacpi_hotplug acpi_hotplug_work_fn [ 418.166433] 0000000000000021 ffff880038ca7988 ffffffff8159b22d 0000000000000021 [ 418.176460] ffff880038ca79d8 ffff880038ca79c8 ffffffff8106942c ffff880038ca79e8 [ 418.177453] ffff88003e20c3a0 ffff88003dc57600 ffff88003e20c3a0 00000000ffffffea [ 418.178445] Call Trace: [ 418.185811] [] dump_stack+0x49/0x5c [ 418.186440] [] warn_slowpath_common+0x8c/0xc0 [ 418.187192] [] warn_slowpath_fmt+0x46/0x50 [ 418.191231] [] ? acpi_ns_get_node+0xb7/0xc7 [ 418.193889] [] __list_add+0xbe/0xd0 [ 418.196649] [] kobject_add_internal+0x79/0x200 [ 418.208610] [] kobject_add_varg+0x38/0x60 [ 418.213831] [] kobject_add+0x44/0x70 [ 418.229961] [] device_add+0xd0/0x550 [ 418.234991] [] ? pm_runtime_init+0xe5/0xf0 [ 418.250226] [] device_register+0x1e/0x30 [ 418.255296] [] register_cpu+0xe3/0x130 [ 418.266539] [] arch_register_cpu+0x65/0x150 [ 418.285845] [] acpi_processor_hotadd_init+0x5a/0x9b ... Which is caused by the fact that generic_processor_info() allocates logical CPU id by calling: cpu = cpumask_next_zero(-1, cpu_present_mask); which returns id of previously failed to wake up CPU, since its bit is cleared by do_boot_cpu() and as result register_cpu() tries to register another CPU with the same id as already present but failed to be onlined CPU. Taking in account that AP will not do anything if master CPU failed to wake it up, there is no reason to mark that AP as not present and break next cpu hotplug attempts. As a side effect of not marking AP as not present, user would be allowed to online it again later. Also fix memory corruption in acpi_unmap_lsapic() if during CPU hotplug master CPU failed to wake up AP it set percpu x86_cpu_to_apicid to BAD_APICID=0xFFFF for AP. However following attempt to unplug that CPU will lead to out of bound write access to __apicid_to_node[] which is 32768 items long on x86_64 kernel. So with above fix of cpu_present_mask make sure that a present CPU has a valid APIC ID by not setting x86_cpu_to_apicid to BAD_APICID in do_boot_cpu() on failure and allow acpi_processor_remove()->acpi_unmap_lsapic() cleanly remove CPU. Signed-off-by: Igor Mammedov Acked-by: Toshi Kani Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1401975765-22328-2-git-send-email-imammedo@redhat.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 34826934d4a7..2988f69f673e 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -859,9 +859,6 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle) /* was set by cpu_init() */ cpumask_clear_cpu(cpu, cpu_initialized_mask); - - set_cpu_present(cpu, false); - per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID; } /* mark "stuck" area as not stuck */ -- cgit From feef1e8ecbadf24f8e6829c935df8683cabae41b Mon Sep 17 00:00:00 2001 From: Igor Mammedov Date: Thu, 5 Jun 2014 15:42:44 +0200 Subject: x86/smpboot: Log error on secondary CPU wakeup failure at ERR level If system is running without debug level logging, it will not log error if do_boot_cpu() failed to wakeup AP. It may lead to silent AP bringup failures at boot time. 
Change message level to KERN_ERR to make error visible to user as it's done on other architectures. Signed-off-by: Igor Mammedov Acked-by: Toshi Kani Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1401975765-22328-3-git-send-email-imammedo@redhat.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 2988f69f673e..ae2fd975b782 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -918,7 +918,7 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle) err = do_boot_cpu(apicid, cpu, tidle); if (err) { - pr_debug("do_boot_cpu failed %d\n", err); + pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu); return -EIO; } -- cgit From 3e1a878b7ccdb31da6d9d2b855c72ad87afeba3f Mon Sep 17 00:00:00 2001 From: Igor Mammedov Date: Thu, 5 Jun 2014 15:42:45 +0200 Subject: x86/smpboot: Initialize secondary CPU only if master CPU will wait for it Hang is observed on virtual machines during CPU hotplug, especially in big guests with many CPUs. (It reproducible more often if host is over-committed). It happens because master CPU gives up waiting on secondary CPU and allows it to run wild. As result AP causes locking or crashing system. For example as described here: https://lkml.org/lkml/2014/3/6/257 If master CPU have sent STARTUP IPI successfully, and AP signalled to master CPU that it's ready to start initialization, make master CPU wait indefinitely till AP is onlined. To ensure that AP won't ever run wild, make it wait at early startup till master CPU confirms its intention to wait for AP. If AP doesn't respond in 10 seconds, the master CPU will timeout and cancel AP onlining. Signed-off-by: Igor Mammedov Acked-by: Toshi Kani Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1401975765-22328-4-git-send-email-imammedo@redhat.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/common.c | 27 +++++++----- arch/x86/kernel/smpboot.c | 99 ++++++++++++++------------------------------ 2 files changed, 47 insertions(+), 79 deletions(-) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index a135239badb7..a4bcbacdbe0b 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1221,6 +1221,17 @@ static void dbg_restore_debug_regs(void) #define dbg_restore_debug_regs() #endif /* ! CONFIG_KGDB */ +static void wait_for_master_cpu(int cpu) +{ + /* + * wait for ACK from master CPU before continuing + * with AP initialization + */ + WARN_ON(cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)); + while (!cpumask_test_cpu(cpu, cpu_callout_mask)) + cpu_relax(); +} + /* * cpu_init() initializes state that is per-CPU. Some data is already * initialized (naturally) in the bootstrap process, such as the GDT @@ -1236,16 +1247,17 @@ void cpu_init(void) struct task_struct *me; struct tss_struct *t; unsigned long v; - int cpu; + int cpu = stack_smp_processor_id(); int i; + wait_for_master_cpu(cpu); + /* * Load microcode on this cpu if a valid microcode is available. * This is early microcode loading procedure. 
*/ load_ucode_ap(); - cpu = stack_smp_processor_id(); t = &per_cpu(init_tss, cpu); oist = &per_cpu(orig_ist, cpu); @@ -1257,9 +1269,6 @@ void cpu_init(void) me = current; - if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) - panic("CPU#%d already initialized!\n", cpu); - pr_debug("Initializing CPU#%d\n", cpu); clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE); @@ -1336,13 +1345,9 @@ void cpu_init(void) struct tss_struct *t = &per_cpu(init_tss, cpu); struct thread_struct *thread = &curr->thread; - show_ucode_info_early(); + wait_for_master_cpu(cpu); - if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) { - printk(KERN_WARNING "CPU#%d already initialized!\n", cpu); - for (;;) - local_irq_enable(); - } + show_ucode_info_early(); printk(KERN_INFO "Initializing CPU#%d\n", cpu); diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index ae2fd975b782..bc52fac39dd3 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -111,7 +111,6 @@ atomic_t init_deasserted; static void smp_callin(void) { int cpuid, phys_id; - unsigned long timeout; /* * If waken up by an INIT in an 82489DX configuration @@ -130,37 +129,6 @@ static void smp_callin(void) * (This works even if the APIC is not enabled.) */ phys_id = read_apic_id(); - if (cpumask_test_cpu(cpuid, cpu_callin_mask)) { - panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__, - phys_id, cpuid); - } - pr_debug("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id); - - /* - * STARTUP IPIs are fragile beasts as they might sometimes - * trigger some glue motherboard logic. Complete APIC bus - * silence for 1 second, this overestimates the time the - * boot CPU is spending to send the up to 2 STARTUP IPIs - * by a factor of two. This should be enough. - */ - - /* - * Waiting 2s total for startup (udelay is not yet working) - */ - timeout = jiffies + 2*HZ; - while (time_before(jiffies, timeout)) { - /* - * Has the boot CPU finished it's STARTUP sequence? - */ - if (cpumask_test_cpu(cpuid, cpu_callout_mask)) - break; - cpu_relax(); - } - - if (!time_before(jiffies, timeout)) { - panic("%s: CPU%d started up but did not get a callout!\n", - __func__, cpuid); - } /* * the boot CPU has finished the init stage and is spinning @@ -750,8 +718,8 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle) unsigned long start_ip = real_mode_header->trampoline_start; unsigned long boot_error = 0; - int timeout; int cpu0_nmi_registered = 0; + unsigned long timeout; /* Just in case we booted with a single CPU. */ alternatives_enable_smp(); @@ -798,6 +766,15 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle) } } + /* + * AP might wait on cpu_callout_mask in cpu_init() with + * cpu_initialized_mask set if previous attempt to online + * it timed-out. Clear cpu_initialized_mask so that after + * INIT/SIPI it could start with a clean state. + */ + cpumask_clear_cpu(cpu, cpu_initialized_mask); + smp_mb(); + /* * Wake up a CPU in difference cases: * - Use the method in the APIC driver if it's defined @@ -810,55 +787,41 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle) boot_error = wakeup_cpu_via_init_nmi(cpu, start_ip, apicid, &cpu0_nmi_registered); + if (!boot_error) { /* - * allow APs to start initializing. 
+ * Wait 10s total for a response from AP */ - pr_debug("Before Callout %d\n", cpu); - cpumask_set_cpu(cpu, cpu_callout_mask); - pr_debug("After Callout %d\n", cpu); + boot_error = -1; + timeout = jiffies + 10*HZ; + while (time_before(jiffies, timeout)) { + if (cpumask_test_cpu(cpu, cpu_initialized_mask)) { + /* + * Tell AP to proceed with initialization + */ + cpumask_set_cpu(cpu, cpu_callout_mask); + boot_error = 0; + break; + } + udelay(100); + schedule(); + } + } + if (!boot_error) { /* - * Wait 5s total for a response + * Wait till AP completes initial initialization */ - for (timeout = 0; timeout < 50000; timeout++) { - if (cpumask_test_cpu(cpu, cpu_callin_mask)) - break; /* It has booted */ - udelay(100); + while (!cpumask_test_cpu(cpu, cpu_callin_mask)) { /* * Allow other tasks to run while we wait for the * AP to come online. This also gives a chance * for the MTRR work(triggered by the AP coming online) * to be completed in the stop machine context. */ + udelay(100); schedule(); } - - if (cpumask_test_cpu(cpu, cpu_callin_mask)) { - print_cpu_msr(&cpu_data(cpu)); - pr_debug("CPU%d: has booted.\n", cpu); - } else { - boot_error = 1; - if (*trampoline_status == 0xA5A5A5A5) - /* trampoline started but...? */ - pr_err("CPU%d: Stuck ??\n", cpu); - else - /* trampoline code not run */ - pr_err("CPU%d: Not responding\n", cpu); - if (apic->inquire_remote_apic) - apic->inquire_remote_apic(apicid); - } - } - - if (boot_error) { - /* Try to put things back the way they were before ... */ - numa_remove_cpu(cpu); /* was set by numa_add_cpu */ - - /* was set by do_boot_cpu() */ - cpumask_clear_cpu(cpu, cpu_callout_mask); - - /* was set by cpu_init() */ - cpumask_clear_cpu(cpu, cpu_initialized_mask); } /* mark "stuck" area as not stuck */ -- cgit From 745c51673e289acf4d9ffc2835524de73ef923fd Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Sat, 7 Jun 2014 12:26:20 +0100 Subject: x86/boot: EFI_MIXED should not prohibit loading above 4G commit 7d453eee36ae ("x86/efi: Wire up CONFIG_EFI_MIXED") introduced a regression for the functionality to load kernels above 4G. The relevant (incorrect) reasoning behind this change can be seen in the commit message, "The xloadflags field in the bzImage header is also updated to reflect that the kernel supports both entry points by setting both of XLF_EFI_HANDOVER_32 and XLF_EFI_HANDOVER_64 when CONFIG_EFI_MIXED=y. XLF_CAN_BE_LOADED_ABOVE_4G is disabled so that the kernel text is guaranteed to be addressable with 32-bits." This is obviously bogus since 32-bit EFI loaders will never place the kernel above the 4G mark. So this restriction is entirely unnecessary. But things are worse than that - since we want to encourage people to always compile with CONFIG_EFI_MIXED=y so that their kernels work out of the box for both 32-bit and 64-bit firmware, commit 7d453eee36ae effectively disables XLF_CAN_BE_LOADED_ABOVE_4G completely. Remove the overzealous and superfluous restriction and restore the XLF_CAN_BE_LOADED_ABOVE_4G functionality. Cc: "H. Peter Anvin" Cc: Dave Young Cc: Vivek Goyal Signed-off-by: Matt Fleming Link: http://lkml.kernel.org/r/1402140380-15377-1-git-send-email-matt@console-pimps.org Signed-off-by: H. 
Peter Anvin
---
 arch/x86/boot/header.S | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 0ca9a5c362bc..84c223479e3c 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -375,8 +375,7 @@ xloadflags:
 # define XLF0 0
 #endif
 
-#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_X86_64) && \
- !defined(CONFIG_EFI_MIXED)
+#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_X86_64)
 /* kernel/boot_param/ramdisk could be loaded above 4g */
 # define XLF1 XLF_CAN_BE_LOADED_ABOVE_4G
 #else
-- cgit
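The XLF_CAN_BE_LOADED_ABOVE_4G flag restored by the last patch is consumed by boot loaders and kexec-style tools rather than by the kernel itself. As a rough, hypothetical sketch (not part of this series), the code below shows how such a tool might test the flag in a bzImage before placing the kernel above 4 GiB. The header offsets used here (0x202 for the "HdrS" magic, 0x206 for the boot protocol version, 0x236 for xloadflags, available from protocol 2.12 on) follow the x86 boot protocol documentation as best recalled and should be verified against Documentation/x86/boot.txt; the function name and file handling are purely illustrative.

```c
/*
 * Hypothetical loader-side check for XLF_CAN_BE_LOADED_ABOVE_4G.
 * The setup header starts at offset 0x1f1 of the bzImage; offsets below
 * should be double-checked against Documentation/x86/boot.txt.
 * Assumes a little-endian host, the only case that matters for x86 loaders.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define XLF_KERNEL_64			(1 << 0)
#define XLF_CAN_BE_LOADED_ABOVE_4G	(1 << 1)

static int can_load_above_4g(const uint8_t *image, size_t len)
{
	uint16_t version, xloadflags;

	if (len < 0x238)
		return 0;
	if (memcmp(image + 0x202, "HdrS", 4) != 0)	/* setup header magic */
		return 0;
	memcpy(&version, image + 0x206, sizeof(version));
	if (version < 0x020c)		/* xloadflags exists from protocol 2.12 on */
		return 0;
	memcpy(&xloadflags, image + 0x236, sizeof(xloadflags));
	return !!(xloadflags & XLF_CAN_BE_LOADED_ABOVE_4G);
}

int main(int argc, char **argv)
{
	static uint8_t buf[0x4000];	/* the setup header fits well inside this */
	size_t n;
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s bzImage\n", argv[0]);
		return 1;
	}
	f = fopen(argv[1], "rb");
	if (!f) {
		perror("fopen");
		return 1;
	}
	n = fread(buf, 1, sizeof(buf), f);
	fclose(f);

	printf("%s\n", can_load_above_4g(buf, n) ?
	       "kernel may be loaded above 4G" :
	       "kernel must stay below 4G");
	return 0;
}
```

A 32-bit EFI loader never places the kernel above 4 GiB anyway, which is why the series can advertise the flag unconditionally for relocatable 64-bit kernels regardless of CONFIG_EFI_MIXED.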