Diffstat (limited to 'arch/x86/kernel/cpu')

 arch/x86/kernel/cpu/aperfmperf.c     | 20
 arch/x86/kernel/cpu/bugs.c           | 22
 arch/x86/kernel/cpu/intel_epb.c      | 16
 arch/x86/kernel/cpu/mce/core.c       | 14
 arch/x86/kernel/cpu/microcode/core.c | 15
 arch/x86/kernel/cpu/mtrr/legacy.c    | 12
 arch/x86/kernel/cpu/scattered.c      |  1
 arch/x86/kernel/cpu/sgx/driver.c     |  2
 arch/x86/kernel/cpu/umwait.c         | 10

 9 files changed, 69 insertions(+), 43 deletions(-)
diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
index a315b0627dfb..7ffc78d5ebf2 100644
--- a/arch/x86/kernel/cpu/aperfmperf.c
+++ b/arch/x86/kernel/cpu/aperfmperf.c
@@ -37,7 +37,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct aperfmperf, cpu_samples) = {
 	.seq = SEQCNT_ZERO(cpu_samples.seq)
 };
 
-static void init_counter_refs(void)
+static void init_counter_refs(void *data)
 {
 	u64 aperf, mperf;
 
@@ -289,16 +289,20 @@ out:
 }
 
 #ifdef CONFIG_PM_SLEEP
-static struct syscore_ops freq_invariance_syscore_ops = {
+static const struct syscore_ops freq_invariance_syscore_ops = {
 	.resume = init_counter_refs,
 };
 
-static void register_freq_invariance_syscore_ops(void)
+static struct syscore freq_invariance_syscore = {
+	.ops = &freq_invariance_syscore_ops,
+};
+
+static void register_freq_invariance_syscore(void)
 {
-	register_syscore_ops(&freq_invariance_syscore_ops);
+	register_syscore(&freq_invariance_syscore);
 }
 #else
-static inline void register_freq_invariance_syscore_ops(void) {}
+static inline void register_freq_invariance_syscore(void) {}
 #endif
 
 static void freq_invariance_enable(void)
@@ -308,7 +312,7 @@ static void freq_invariance_enable(void)
 		return;
 	}
 	static_branch_enable_cpuslocked(&arch_scale_freq_key);
-	register_freq_invariance_syscore_ops();
+	register_freq_invariance_syscore();
 	pr_info("Estimated ratio of average max frequency by base frequency (times 1024): %llu\n",
 		arch_max_freq_ratio);
 }
@@ -535,7 +539,7 @@ static int __init bp_init_aperfmperf(void)
 	if (!cpu_feature_enabled(X86_FEATURE_APERFMPERF))
 		return 0;
 
-	init_counter_refs();
+	init_counter_refs(NULL);
 	bp_init_freq_invariance();
 	return 0;
 }
@@ -544,5 +548,5 @@ early_initcall(bp_init_aperfmperf);
 void ap_init_aperfmperf(void)
 {
 	if (cpu_feature_enabled(X86_FEATURE_APERFMPERF))
-		init_counter_refs();
+		init_counter_refs(NULL);
 }
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index d8660770dc6a..d0a2847a4bb0 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -145,14 +145,6 @@ EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);
  */
 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
 
-/*
- * Controls CPU Fill buffer clear before VMenter. This is a subset of
- * X86_FEATURE_CLEAR_CPU_BUF, and should only be enabled when KVM-only
- * mitigation is required.
- */
-DEFINE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
-EXPORT_SYMBOL_FOR_KVM(cpu_buf_vm_clear);
-
 #undef pr_fmt
 #define pr_fmt(fmt)	"mitigations: " fmt
 
@@ -349,8 +341,8 @@ static enum rfds_mitigations rfds_mitigation __ro_after_init =
 	IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_AUTO : RFDS_MITIGATION_OFF;
 
 /*
- * Set if any of MDS/TAA/MMIO/RFDS are going to enable VERW clearing
- * through X86_FEATURE_CLEAR_CPU_BUF on kernel and guest entry.
+ * Set if any of MDS/TAA/MMIO/RFDS are going to enable VERW clearing on exit to
+ * userspace *and* on entry to KVM guests.
  */
 static bool verw_clear_cpu_buf_mitigation_selected __ro_after_init;
 
@@ -396,6 +388,7 @@ static void __init mds_apply_mitigation(void)
 	if (mds_mitigation == MDS_MITIGATION_FULL ||
 	    mds_mitigation == MDS_MITIGATION_VMWERV) {
 		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
 		if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
 		    (mds_nosmt || smt_mitigations == SMT_MITIGATIONS_ON))
 			cpu_smt_disable(false);
@@ -507,6 +500,7 @@ static void __init taa_apply_mitigation(void)
 	 * present on host, enable the mitigation for UCODE_NEEDED as well.
 	 */
 	setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+	setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
 
 	if (taa_nosmt || smt_mitigations == SMT_MITIGATIONS_ON)
 		cpu_smt_disable(false);
@@ -608,9 +602,9 @@ static void __init mmio_apply_mitigation(void)
 	 */
 	if (verw_clear_cpu_buf_mitigation_selected) {
 		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
-		static_branch_disable(&cpu_buf_vm_clear);
+		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
 	} else {
-		static_branch_enable(&cpu_buf_vm_clear);
+		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM_MMIO);
 	}
 
 	/*
@@ -699,8 +693,10 @@ static void __init rfds_update_mitigation(void)
 
 static void __init rfds_apply_mitigation(void)
 {
-	if (rfds_mitigation == RFDS_MITIGATION_VERW)
+	if (rfds_mitigation == RFDS_MITIGATION_VERW) {
 		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
+		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
+	}
 }
 
 static __init int rfds_parse_cmdline(char *str)
diff --git a/arch/x86/kernel/cpu/intel_epb.c b/arch/x86/kernel/cpu/intel_epb.c
index bc7671f920a7..2c56f8730f59 100644
--- a/arch/x86/kernel/cpu/intel_epb.c
+++ b/arch/x86/kernel/cpu/intel_epb.c
@@ -75,7 +75,7 @@ static u8 energ_perf_values[] = {
 	[EPB_INDEX_POWERSAVE] = ENERGY_PERF_BIAS_POWERSAVE,
 };
 
-static int intel_epb_save(void)
+static int intel_epb_save(void *data)
 {
 	u64 epb;
 
@@ -89,7 +89,7 @@ static int intel_epb_save(void)
 	return 0;
 }
 
-static void intel_epb_restore(void)
+static void intel_epb_restore(void *data)
 {
 	u64 val = this_cpu_read(saved_epb);
 	u64 epb;
@@ -114,11 +114,15 @@ static void intel_epb_restore(void)
 	wrmsrq(MSR_IA32_ENERGY_PERF_BIAS, (epb & ~EPB_MASK) | val);
 }
 
-static struct syscore_ops intel_epb_syscore_ops = {
+static const struct syscore_ops intel_epb_syscore_ops = {
 	.suspend = intel_epb_save,
 	.resume = intel_epb_restore,
 };
 
+static struct syscore intel_epb_syscore = {
+	.ops = &intel_epb_syscore_ops,
+};
+
 static const char * const energy_perf_strings[] = {
 	[EPB_INDEX_PERFORMANCE] = "performance",
 	[EPB_INDEX_BALANCE_PERFORMANCE] = "balance-performance",
@@ -185,7 +189,7 @@ static int intel_epb_online(unsigned int cpu)
 {
 	struct device *cpu_dev = get_cpu_device(cpu);
 
-	intel_epb_restore();
+	intel_epb_restore(NULL);
 	if (!cpuhp_tasks_frozen)
 		sysfs_merge_group(&cpu_dev->kobj, &intel_epb_attr_group);
 
@@ -199,7 +203,7 @@ static int intel_epb_offline(unsigned int cpu)
 	if (!cpuhp_tasks_frozen)
 		sysfs_unmerge_group(&cpu_dev->kobj, &intel_epb_attr_group);
 
-	intel_epb_save();
+	intel_epb_save(NULL);
 	return 0;
 }
 
@@ -230,7 +234,7 @@ static __init int intel_epb_init(void)
 	if (ret < 0)
 		goto err_out_online;
 
-	register_syscore_ops(&intel_epb_syscore_ops);
+	register_syscore(&intel_epb_syscore);
 	return 0;
 
 err_out_online:
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
index 4aff14e04287..6297416647ed 100644
--- a/arch/x86/kernel/cpu/mce/core.c
+++ b/arch/x86/kernel/cpu/mce/core.c
@@ -2439,13 +2439,13 @@ static void vendor_disable_error_reporting(void)
 	mce_disable_error_reporting();
 }
 
-static int mce_syscore_suspend(void)
+static int mce_syscore_suspend(void *data)
 {
 	vendor_disable_error_reporting();
 	return 0;
 }
 
-static void mce_syscore_shutdown(void)
+static void mce_syscore_shutdown(void *data)
 {
 	vendor_disable_error_reporting();
 }
@@ -2455,7 +2455,7 @@ /*
  * Only one CPU is active at this time, the others get re-added later using
  * CPU hotplug:
  */
-static void mce_syscore_resume(void)
+static void mce_syscore_resume(void *data)
 {
 	__mcheck_cpu_init_generic();
 	__mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info));
@@ -2463,12 +2463,16 @@ static void mce_syscore_resume(void)
 	cr4_set_bits(X86_CR4_MCE);
 }
 
-static struct syscore_ops mce_syscore_ops = {
+static const struct syscore_ops mce_syscore_ops = {
 	.suspend = mce_syscore_suspend,
 	.shutdown = mce_syscore_shutdown,
 	.resume = mce_syscore_resume,
 };
 
+static struct syscore mce_syscore = {
+	.ops = &mce_syscore_ops,
+};
+
 /*
  * mce_device: Sysfs support
  */
@@ -2869,7 +2873,7 @@ static __init int mcheck_init_device(void)
 	if (err < 0)
 		goto err_out_online;
 
-	register_syscore_ops(&mce_syscore_ops);
+	register_syscore(&mce_syscore);
 
 	return 0;
 
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index ccc83b0bf63c..68049f171860 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -823,8 +823,17 @@ void microcode_bsp_resume(void)
 		reload_early_microcode(cpu);
 }
 
-static struct syscore_ops mc_syscore_ops = {
-	.resume = microcode_bsp_resume,
+static void microcode_bsp_syscore_resume(void *data)
+{
+	microcode_bsp_resume();
+}
+
+static const struct syscore_ops mc_syscore_ops = {
+	.resume = microcode_bsp_syscore_resume,
+};
+
+static struct syscore mc_syscore = {
+	.ops = &mc_syscore_ops,
 };
 
 static int mc_cpu_online(unsigned int cpu)
@@ -903,7 +912,7 @@ static int __init microcode_init(void)
 		}
 	}
 
-	register_syscore_ops(&mc_syscore_ops);
+	register_syscore(&mc_syscore);
 	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
 			  mc_cpu_online, mc_cpu_down_prep);
 
diff --git a/arch/x86/kernel/cpu/mtrr/legacy.c b/arch/x86/kernel/cpu/mtrr/legacy.c
index d25882fcf181..2415ffaaf02c 100644
--- a/arch/x86/kernel/cpu/mtrr/legacy.c
+++ b/arch/x86/kernel/cpu/mtrr/legacy.c
@@ -41,7 +41,7 @@ struct mtrr_value {
 
 static struct mtrr_value *mtrr_value;
 
-static int mtrr_save(void)
+static int mtrr_save(void *data)
 {
 	int i;
 
@@ -56,7 +56,7 @@ static int mtrr_save(void)
 	return 0;
 }
 
-static void mtrr_restore(void)
+static void mtrr_restore(void *data)
 {
 	int i;
 
@@ -69,11 +69,15 @@ static void mtrr_restore(void)
 	}
 }
 
-static struct syscore_ops mtrr_syscore_ops = {
+static const struct syscore_ops mtrr_syscore_ops = {
 	.suspend = mtrr_save,
 	.resume = mtrr_restore,
 };
 
+static struct syscore mtrr_syscore = {
+	.ops = &mtrr_syscore_ops,
+};
+
 void mtrr_register_syscore(void)
 {
 	mtrr_value = kcalloc(num_var_ranges, sizeof(*mtrr_value), GFP_KERNEL);
@@ -86,5 +90,5 @@ void mtrr_register_syscore(void)
 	 * TBD: is there any system with such CPU which supports
 	 * suspend/resume? If no, we should remove the code.
 	 */
-	register_syscore_ops(&mtrr_syscore_ops);
+	register_syscore(&mtrr_syscore);
 }
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index cde4b6cd3471..42c7eac0c387 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -53,6 +53,7 @@ static const struct cpuid_bit cpuid_bits[] = {
 	{ X86_FEATURE_PROC_FEEDBACK,	CPUID_EDX, 11, 0x80000007, 0 },
 	{ X86_FEATURE_AMD_FAST_CPPC,	CPUID_EDX, 15, 0x80000007, 0 },
 	{ X86_FEATURE_MBA,		CPUID_EBX,  6, 0x80000008, 0 },
+	{ X86_FEATURE_X2AVIC_EXT,	CPUID_ECX,  6, 0x8000000a, 0 },
 	{ X86_FEATURE_COHERENCY_SFW_NO,	CPUID_EBX, 31, 0x8000001f, 0 },
 	{ X86_FEATURE_SMBA,		CPUID_EBX,  2, 0x80000020, 0 },
 	{ X86_FEATURE_BMEC,		CPUID_EBX,  3, 0x80000020, 0 },
diff --git a/arch/x86/kernel/cpu/sgx/driver.c b/arch/x86/kernel/cpu/sgx/driver.c
index 79d6020dfe9c..a42c7180900b 100644
--- a/arch/x86/kernel/cpu/sgx/driver.c
+++ b/arch/x86/kernel/cpu/sgx/driver.c
@@ -130,7 +130,7 @@ static unsigned long sgx_get_unmapped_area(struct file *file,
 	if (flags & MAP_FIXED)
 		return addr;
 
-	return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags);
+	return mm_get_unmapped_area(file, addr, len, pgoff, flags);
 }
 
 #ifdef CONFIG_COMPAT
diff --git a/arch/x86/kernel/cpu/umwait.c b/arch/x86/kernel/cpu/umwait.c
index 933fcd7ff250..e4a31c536642 100644
--- a/arch/x86/kernel/cpu/umwait.c
+++ b/arch/x86/kernel/cpu/umwait.c
@@ -86,15 +86,19 @@ static int umwait_cpu_offline(unsigned int cpu)
  * trust the firmware nor does it matter if the same value is written
  * again.
  */
-static void umwait_syscore_resume(void)
+static void umwait_syscore_resume(void *data)
 {
 	umwait_update_control_msr(NULL);
 }
 
-static struct syscore_ops umwait_syscore_ops = {
+static const struct syscore_ops umwait_syscore_ops = {
 	.resume = umwait_syscore_resume,
 };
 
+static struct syscore umwait_syscore = {
+	.ops = &umwait_syscore_ops,
+};
+
 /* sysfs interface */
 
 /*
@@ -226,7 +230,7 @@ static int __init umwait_init(void)
 		return ret;
 	}
 
-	register_syscore_ops(&umwait_syscore_ops);
+	register_syscore(&umwait_syscore);
 
 	/*
 	 * Add umwait control interface. Ignore failure, so at least the
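
Every converted file above (aperfmperf.c, intel_epb.c, mce/core.c, microcode/core.c, mtrr/legacy.c, umwait.c) receives the same mechanical change: syscore callbacks gain a "void *data" parameter, the ops table becomes const, and registration takes a separate mutable struct syscore instance pointing at the ops rather than the ops table itself. Below is a minimal sketch of the resulting pattern, assuming only the interface visible in the hunks above (register_syscore(), struct syscore, and the new callback signatures); the example_* names and the includes are illustrative, not part of this series:

#include <linux/init.h>
#include <linux/syscore_ops.h>

/* Callbacks take a "void *data" cookie; every converted callback here ignores it. */
static int example_suspend(void *data)
{
	/* Runs late in suspend, when only one CPU is still active. */
	return 0;
}

static void example_resume(void *data)
{
	/* Undo example_suspend() early during resume. */
}

/* The ops table is const and can live in read-only memory... */
static const struct syscore_ops example_syscore_ops = {
	.suspend = example_suspend,
	.resume  = example_resume,
};

/* ...while the registered object is a separate mutable instance. */
static struct syscore example_syscore = {
	.ops = &example_syscore_ops,
};

static int __init example_init(void)
{
	/* Register the instance, not the ops table. */
	register_syscore(&example_syscore);
	return 0;
}
early_initcall(example_init);

Splitting the const ops table from the registered instance lets the function pointers stay read-only while the registration list links through the per-user struct syscore; the data cookie presumably comes back as the callback argument, and the NULL passed on the direct call paths (init_counter_refs(NULL), intel_epb_restore(NULL), mtrr_save/intel_epb_save via offline paths) suggests none of these users needs it.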
