diff options
| author | Ingo Molnar <mingo@kernel.org> | 2018-12-03 11:44:00 +0100 | 
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2018-12-03 11:44:00 +0100 | 
| commit | 989a4222c13a3e148772730d362fceb0727852f5 (patch) | |
| tree | c66538440a53713e8d3fc52db62e174c6966f831 /arch/x86/kernel/cpu/bugs.c | |
| parent | e8da8794a7fd9eef1ec9a07f0d4897c68581c72b (diff) | |
| parent | 2595646791c319cadfdbf271563aac97d0843dc7 (diff) | |
Merge tag 'v4.20-rc5' into irq/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/kernel/cpu/bugs.c')
| -rw-r--r-- | arch/x86/kernel/cpu/bugs.c | 525 | 
1 file changed, 388 insertions, 137 deletions
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index c37e66e493bf..500278f5308e 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -14,6 +14,7 @@  #include <linux/module.h>  #include <linux/nospec.h>  #include <linux/prctl.h> +#include <linux/sched/smt.h>  #include <asm/spec-ctrl.h>  #include <asm/cmdline.h> @@ -53,6 +54,13 @@ static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;  u64 __ro_after_init x86_amd_ls_cfg_base;  u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask; +/* Control conditional STIPB in switch_to() */ +DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp); +/* Control conditional IBPB in switch_mm() */ +DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb); +/* Control unconditional IBPB in switch_mm() */ +DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb); +  void __init check_bugs(void)  {  	identify_boot_cpu(); @@ -123,31 +131,6 @@ void __init check_bugs(void)  #endif  } -/* The kernel command line selection */ -enum spectre_v2_mitigation_cmd { -	SPECTRE_V2_CMD_NONE, -	SPECTRE_V2_CMD_AUTO, -	SPECTRE_V2_CMD_FORCE, -	SPECTRE_V2_CMD_RETPOLINE, -	SPECTRE_V2_CMD_RETPOLINE_GENERIC, -	SPECTRE_V2_CMD_RETPOLINE_AMD, -}; - -static const char *spectre_v2_strings[] = { -	[SPECTRE_V2_NONE]			= "Vulnerable", -	[SPECTRE_V2_RETPOLINE_MINIMAL]		= "Vulnerable: Minimal generic ASM retpoline", -	[SPECTRE_V2_RETPOLINE_MINIMAL_AMD]	= "Vulnerable: Minimal AMD ASM retpoline", -	[SPECTRE_V2_RETPOLINE_GENERIC]		= "Mitigation: Full generic retpoline", -	[SPECTRE_V2_RETPOLINE_AMD]		= "Mitigation: Full AMD retpoline", -	[SPECTRE_V2_IBRS_ENHANCED]		= "Mitigation: Enhanced IBRS", -}; - -#undef pr_fmt -#define pr_fmt(fmt)     "Spectre V2 : " fmt - -static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = -	SPECTRE_V2_NONE; -  void  x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)  { @@ -169,6 +152,10 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)  		    
static_cpu_has(X86_FEATURE_AMD_SSBD))  			hostval |= ssbd_tif_to_spec_ctrl(ti->flags); +		/* Conditional STIBP enabled? */ +		if (static_branch_unlikely(&switch_to_cond_stibp)) +			hostval |= stibp_tif_to_spec_ctrl(ti->flags); +  		if (hostval != guestval) {  			msrval = setguest ? guestval : hostval;  			wrmsrl(MSR_IA32_SPEC_CTRL, msrval); @@ -202,7 +189,7 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)  		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :  				 ssbd_spec_ctrl_to_tif(hostval); -		speculative_store_bypass_update(tif); +		speculation_ctrl_update(tif);  	}  }  EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl); @@ -217,6 +204,15 @@ static void x86_amd_ssb_disable(void)  		wrmsrl(MSR_AMD64_LS_CFG, msrval);  } +#undef pr_fmt +#define pr_fmt(fmt)     "Spectre V2 : " fmt + +static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = +	SPECTRE_V2_NONE; + +static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init = +	SPECTRE_V2_USER_NONE; +  #ifdef RETPOLINE  static bool spectre_v2_bad_module; @@ -238,67 +234,217 @@ static inline const char *spectre_v2_module_string(void)  static inline const char *spectre_v2_module_string(void) { return ""; }  #endif -static void __init spec2_print_if_insecure(const char *reason) +static inline bool match_option(const char *arg, int arglen, const char *opt)  { -	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) -		pr_info("%s selected on command line.\n", reason); +	int len = strlen(opt); + +	return len == arglen && !strncmp(arg, opt, len);  } -static void __init spec2_print_if_secure(const char *reason) +/* The kernel command line selection for spectre v2 */ +enum spectre_v2_mitigation_cmd { +	SPECTRE_V2_CMD_NONE, +	SPECTRE_V2_CMD_AUTO, +	SPECTRE_V2_CMD_FORCE, +	SPECTRE_V2_CMD_RETPOLINE, +	SPECTRE_V2_CMD_RETPOLINE_GENERIC, +	SPECTRE_V2_CMD_RETPOLINE_AMD, +}; + +enum spectre_v2_user_cmd { +	SPECTRE_V2_USER_CMD_NONE, +	SPECTRE_V2_USER_CMD_AUTO, +	SPECTRE_V2_USER_CMD_FORCE, +	
SPECTRE_V2_USER_CMD_PRCTL, +	SPECTRE_V2_USER_CMD_PRCTL_IBPB, +	SPECTRE_V2_USER_CMD_SECCOMP, +	SPECTRE_V2_USER_CMD_SECCOMP_IBPB, +}; + +static const char * const spectre_v2_user_strings[] = { +	[SPECTRE_V2_USER_NONE]		= "User space: Vulnerable", +	[SPECTRE_V2_USER_STRICT]	= "User space: Mitigation: STIBP protection", +	[SPECTRE_V2_USER_PRCTL]		= "User space: Mitigation: STIBP via prctl", +	[SPECTRE_V2_USER_SECCOMP]	= "User space: Mitigation: STIBP via seccomp and prctl", +}; + +static const struct { +	const char			*option; +	enum spectre_v2_user_cmd	cmd; +	bool				secure; +} v2_user_options[] __initdata = { +	{ "auto",		SPECTRE_V2_USER_CMD_AUTO,		false }, +	{ "off",		SPECTRE_V2_USER_CMD_NONE,		false }, +	{ "on",			SPECTRE_V2_USER_CMD_FORCE,		true  }, +	{ "prctl",		SPECTRE_V2_USER_CMD_PRCTL,		false }, +	{ "prctl,ibpb",		SPECTRE_V2_USER_CMD_PRCTL_IBPB,		false }, +	{ "seccomp",		SPECTRE_V2_USER_CMD_SECCOMP,		false }, +	{ "seccomp,ibpb",	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,	false }, +}; + +static void __init spec_v2_user_print_cond(const char *reason, bool secure)  { -	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) -		pr_info("%s selected on command line.\n", reason); +	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure) +		pr_info("spectre_v2_user=%s forced on command line.\n", reason);  } -static inline bool retp_compiler(void) +static enum spectre_v2_user_cmd __init +spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)  { -	return __is_defined(RETPOLINE); +	char arg[20]; +	int ret, i; + +	switch (v2_cmd) { +	case SPECTRE_V2_CMD_NONE: +		return SPECTRE_V2_USER_CMD_NONE; +	case SPECTRE_V2_CMD_FORCE: +		return SPECTRE_V2_USER_CMD_FORCE; +	default: +		break; +	} + +	ret = cmdline_find_option(boot_command_line, "spectre_v2_user", +				  arg, sizeof(arg)); +	if (ret < 0) +		return SPECTRE_V2_USER_CMD_AUTO; + +	for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) { +		if (match_option(arg, ret, v2_user_options[i].option)) { +			
spec_v2_user_print_cond(v2_user_options[i].option, +						v2_user_options[i].secure); +			return v2_user_options[i].cmd; +		} +	} + +	pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg); +	return SPECTRE_V2_USER_CMD_AUTO;  } -static inline bool match_option(const char *arg, int arglen, const char *opt) +static void __init +spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)  { -	int len = strlen(opt); +	enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE; +	bool smt_possible = IS_ENABLED(CONFIG_SMP); +	enum spectre_v2_user_cmd cmd; -	return len == arglen && !strncmp(arg, opt, len); +	if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP)) +		return; + +	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED || +	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED) +		smt_possible = false; + +	cmd = spectre_v2_parse_user_cmdline(v2_cmd); +	switch (cmd) { +	case SPECTRE_V2_USER_CMD_NONE: +		goto set_mode; +	case SPECTRE_V2_USER_CMD_FORCE: +		mode = SPECTRE_V2_USER_STRICT; +		break; +	case SPECTRE_V2_USER_CMD_PRCTL: +	case SPECTRE_V2_USER_CMD_PRCTL_IBPB: +		mode = SPECTRE_V2_USER_PRCTL; +		break; +	case SPECTRE_V2_USER_CMD_AUTO: +	case SPECTRE_V2_USER_CMD_SECCOMP: +	case SPECTRE_V2_USER_CMD_SECCOMP_IBPB: +		if (IS_ENABLED(CONFIG_SECCOMP)) +			mode = SPECTRE_V2_USER_SECCOMP; +		else +			mode = SPECTRE_V2_USER_PRCTL; +		break; +	} + +	/* Initialize Indirect Branch Prediction Barrier */ +	if (boot_cpu_has(X86_FEATURE_IBPB)) { +		setup_force_cpu_cap(X86_FEATURE_USE_IBPB); + +		switch (cmd) { +		case SPECTRE_V2_USER_CMD_FORCE: +		case SPECTRE_V2_USER_CMD_PRCTL_IBPB: +		case SPECTRE_V2_USER_CMD_SECCOMP_IBPB: +			static_branch_enable(&switch_mm_always_ibpb); +			break; +		case SPECTRE_V2_USER_CMD_PRCTL: +		case SPECTRE_V2_USER_CMD_AUTO: +		case SPECTRE_V2_USER_CMD_SECCOMP: +			static_branch_enable(&switch_mm_cond_ibpb); +			break; +		default: +			break; +		} + +		pr_info("mitigation: Enabling %s Indirect Branch 
Prediction Barrier\n", +			static_key_enabled(&switch_mm_always_ibpb) ? +			"always-on" : "conditional"); +	} + +	/* If enhanced IBRS is enabled no STIPB required */ +	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) +		return; + +	/* +	 * If SMT is not possible or STIBP is not available clear the STIPB +	 * mode. +	 */ +	if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP)) +		mode = SPECTRE_V2_USER_NONE; +set_mode: +	spectre_v2_user = mode; +	/* Only print the STIBP mode when SMT possible */ +	if (smt_possible) +		pr_info("%s\n", spectre_v2_user_strings[mode]);  } +static const char * const spectre_v2_strings[] = { +	[SPECTRE_V2_NONE]			= "Vulnerable", +	[SPECTRE_V2_RETPOLINE_GENERIC]		= "Mitigation: Full generic retpoline", +	[SPECTRE_V2_RETPOLINE_AMD]		= "Mitigation: Full AMD retpoline", +	[SPECTRE_V2_IBRS_ENHANCED]		= "Mitigation: Enhanced IBRS", +}; +  static const struct {  	const char *option;  	enum spectre_v2_mitigation_cmd cmd;  	bool secure; -} mitigation_options[] = { -	{ "off",               SPECTRE_V2_CMD_NONE,              false }, -	{ "on",                SPECTRE_V2_CMD_FORCE,             true }, -	{ "retpoline",         SPECTRE_V2_CMD_RETPOLINE,         false }, -	{ "retpoline,amd",     SPECTRE_V2_CMD_RETPOLINE_AMD,     false }, -	{ "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false }, -	{ "auto",              SPECTRE_V2_CMD_AUTO,              false }, +} mitigation_options[] __initdata = { +	{ "off",		SPECTRE_V2_CMD_NONE,		  false }, +	{ "on",			SPECTRE_V2_CMD_FORCE,		  true  }, +	{ "retpoline",		SPECTRE_V2_CMD_RETPOLINE,	  false }, +	{ "retpoline,amd",	SPECTRE_V2_CMD_RETPOLINE_AMD,	  false }, +	{ "retpoline,generic",	SPECTRE_V2_CMD_RETPOLINE_GENERIC, false }, +	{ "auto",		SPECTRE_V2_CMD_AUTO,		  false },  }; +static void __init spec_v2_print_cond(const char *reason, bool secure) +{ +	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure) +		pr_info("%s selected on command line.\n", reason); +} +  static enum spectre_v2_mitigation_cmd 
__init spectre_v2_parse_cmdline(void)  { +	enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;  	char arg[20];  	int ret, i; -	enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;  	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))  		return SPECTRE_V2_CMD_NONE; -	else { -		ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg)); -		if (ret < 0) -			return SPECTRE_V2_CMD_AUTO; -		for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) { -			if (!match_option(arg, ret, mitigation_options[i].option)) -				continue; -			cmd = mitigation_options[i].cmd; -			break; -		} +	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg)); +	if (ret < 0) +		return SPECTRE_V2_CMD_AUTO; -		if (i >= ARRAY_SIZE(mitigation_options)) { -			pr_err("unknown option (%s). Switching to AUTO select\n", arg); -			return SPECTRE_V2_CMD_AUTO; -		} +	for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) { +		if (!match_option(arg, ret, mitigation_options[i].option)) +			continue; +		cmd = mitigation_options[i].cmd; +		break; +	} + +	if (i >= ARRAY_SIZE(mitigation_options)) { +		pr_err("unknown option (%s). 
Switching to AUTO select\n", arg); +		return SPECTRE_V2_CMD_AUTO;  	}  	if ((cmd == SPECTRE_V2_CMD_RETPOLINE || @@ -316,54 +462,11 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)  		return SPECTRE_V2_CMD_AUTO;  	} -	if (mitigation_options[i].secure) -		spec2_print_if_secure(mitigation_options[i].option); -	else -		spec2_print_if_insecure(mitigation_options[i].option); - +	spec_v2_print_cond(mitigation_options[i].option, +			   mitigation_options[i].secure);  	return cmd;  } -static bool stibp_needed(void) -{ -	if (spectre_v2_enabled == SPECTRE_V2_NONE) -		return false; - -	if (!boot_cpu_has(X86_FEATURE_STIBP)) -		return false; - -	return true; -} - -static void update_stibp_msr(void *info) -{ -	wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); -} - -void arch_smt_update(void) -{ -	u64 mask; - -	if (!stibp_needed()) -		return; - -	mutex_lock(&spec_ctrl_mutex); -	mask = x86_spec_ctrl_base; -	if (cpu_smt_control == CPU_SMT_ENABLED) -		mask |= SPEC_CTRL_STIBP; -	else -		mask &= ~SPEC_CTRL_STIBP; - -	if (mask != x86_spec_ctrl_base) { -		pr_info("Spectre v2 cross-process SMT mitigation: %s STIBP\n", -				cpu_smt_control == CPU_SMT_ENABLED ? -				"Enabling" : "Disabling"); -		x86_spec_ctrl_base = mask; -		on_each_cpu(update_stibp_msr, NULL, 1); -	} -	mutex_unlock(&spec_ctrl_mutex); -} -  static void __init spectre_v2_select_mitigation(void)  {  	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline(); @@ -417,14 +520,12 @@ retpoline_auto:  			pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");  			goto retpoline_generic;  		} -		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD : -					 SPECTRE_V2_RETPOLINE_MINIMAL_AMD; +		mode = SPECTRE_V2_RETPOLINE_AMD;  		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);  		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);  	} else {  	retpoline_generic: -		mode = retp_compiler() ? 
SPECTRE_V2_RETPOLINE_GENERIC : -					 SPECTRE_V2_RETPOLINE_MINIMAL; +		mode = SPECTRE_V2_RETPOLINE_GENERIC;  		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);  	} @@ -443,12 +544,6 @@ specv2_set_mode:  	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);  	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n"); -	/* Initialize Indirect Branch Prediction Barrier if supported */ -	if (boot_cpu_has(X86_FEATURE_IBPB)) { -		setup_force_cpu_cap(X86_FEATURE_USE_IBPB); -		pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n"); -	} -  	/*  	 * Retpoline means the kernel is safe because it has no indirect  	 * branches. Enhanced IBRS protects firmware too, so, enable restricted @@ -465,10 +560,67 @@ specv2_set_mode:  		pr_info("Enabling Restricted Speculation for firmware calls\n");  	} +	/* Set up IBPB and STIBP depending on the general spectre V2 command */ +	spectre_v2_user_select_mitigation(cmd); +  	/* Enable STIBP if appropriate */  	arch_smt_update();  } +static void update_stibp_msr(void * __unused) +{ +	wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); +} + +/* Update x86_spec_ctrl_base in case SMT state changed. */ +static void update_stibp_strict(void) +{ +	u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP; + +	if (sched_smt_active()) +		mask |= SPEC_CTRL_STIBP; + +	if (mask == x86_spec_ctrl_base) +		return; + +	pr_info("Update user space SMT mitigation: STIBP %s\n", +		mask & SPEC_CTRL_STIBP ? "always-on" : "off"); +	x86_spec_ctrl_base = mask; +	on_each_cpu(update_stibp_msr, NULL, 1); +} + +/* Update the static key controlling the evaluation of TIF_SPEC_IB */ +static void update_indir_branch_cond(void) +{ +	if (sched_smt_active()) +		static_branch_enable(&switch_to_cond_stibp); +	else +		static_branch_disable(&switch_to_cond_stibp); +} + +void arch_smt_update(void) +{ +	/* Enhanced IBRS implies STIBP. No update required. 
*/ +	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) +		return; + +	mutex_lock(&spec_ctrl_mutex); + +	switch (spectre_v2_user) { +	case SPECTRE_V2_USER_NONE: +		break; +	case SPECTRE_V2_USER_STRICT: +		update_stibp_strict(); +		break; +	case SPECTRE_V2_USER_PRCTL: +	case SPECTRE_V2_USER_SECCOMP: +		update_indir_branch_cond(); +		break; +	} + +	mutex_unlock(&spec_ctrl_mutex); +} +  #undef pr_fmt  #define pr_fmt(fmt)	"Speculative Store Bypass: " fmt @@ -483,7 +635,7 @@ enum ssb_mitigation_cmd {  	SPEC_STORE_BYPASS_CMD_SECCOMP,  }; -static const char *ssb_strings[] = { +static const char * const ssb_strings[] = {  	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",  	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",  	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl", @@ -493,7 +645,7 @@ static const char *ssb_strings[] = {  static const struct {  	const char *option;  	enum ssb_mitigation_cmd cmd; -} ssb_mitigation_options[] = { +} ssb_mitigation_options[]  __initdata = {  	{ "auto",	SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */  	{ "on",		SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */  	{ "off",	SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */ @@ -604,10 +756,25 @@ static void ssb_select_mitigation(void)  #undef pr_fmt  #define pr_fmt(fmt)     "Speculation prctl: " fmt -static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) +static void task_update_spec_tif(struct task_struct *tsk)  { -	bool update; +	/* Force the update of the real TIF bits */ +	set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE); +	/* +	 * Immediately update the speculation control MSRs for the current +	 * task, but for a non-current task delay setting the CPU +	 * mitigation until it is scheduled next. +	 * +	 * This can only happen for SECCOMP mitigation. For PRCTL it's +	 * always the current task. 
+	 */ +	if (tsk == current) +		speculation_ctrl_update_current(); +} + +static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) +{  	if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&  	    ssb_mode != SPEC_STORE_BYPASS_SECCOMP)  		return -ENXIO; @@ -618,28 +785,56 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)  		if (task_spec_ssb_force_disable(task))  			return -EPERM;  		task_clear_spec_ssb_disable(task); -		update = test_and_clear_tsk_thread_flag(task, TIF_SSBD); +		task_update_spec_tif(task);  		break;  	case PR_SPEC_DISABLE:  		task_set_spec_ssb_disable(task); -		update = !test_and_set_tsk_thread_flag(task, TIF_SSBD); +		task_update_spec_tif(task);  		break;  	case PR_SPEC_FORCE_DISABLE:  		task_set_spec_ssb_disable(task);  		task_set_spec_ssb_force_disable(task); -		update = !test_and_set_tsk_thread_flag(task, TIF_SSBD); +		task_update_spec_tif(task);  		break;  	default:  		return -ERANGE;  	} +	return 0; +} -	/* -	 * If being set on non-current task, delay setting the CPU -	 * mitigation until it is next scheduled. -	 */ -	if (task == current && update) -		speculative_store_bypass_update_current(); - +static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) +{ +	switch (ctrl) { +	case PR_SPEC_ENABLE: +		if (spectre_v2_user == SPECTRE_V2_USER_NONE) +			return 0; +		/* +		 * Indirect branch speculation is always disabled in strict +		 * mode. +		 */ +		if (spectre_v2_user == SPECTRE_V2_USER_STRICT) +			return -EPERM; +		task_clear_spec_ib_disable(task); +		task_update_spec_tif(task); +		break; +	case PR_SPEC_DISABLE: +	case PR_SPEC_FORCE_DISABLE: +		/* +		 * Indirect branch speculation is always allowed when +		 * mitigation is force disabled. 
+		 */ +		if (spectre_v2_user == SPECTRE_V2_USER_NONE) +			return -EPERM; +		if (spectre_v2_user == SPECTRE_V2_USER_STRICT) +			return 0; +		task_set_spec_ib_disable(task); +		if (ctrl == PR_SPEC_FORCE_DISABLE) +			task_set_spec_ib_force_disable(task); +		task_update_spec_tif(task); +		break; +	default: +		return -ERANGE; +	}  	return 0;  } @@ -649,6 +844,8 @@ int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,  	switch (which) {  	case PR_SPEC_STORE_BYPASS:  		return ssb_prctl_set(task, ctrl); +	case PR_SPEC_INDIRECT_BRANCH: +		return ib_prctl_set(task, ctrl);  	default:  		return -ENODEV;  	} @@ -659,6 +856,8 @@ void arch_seccomp_spec_mitigate(struct task_struct *task)  {  	if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)  		ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE); +	if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP) +		ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);  }  #endif @@ -681,11 +880,35 @@ static int ssb_prctl_get(struct task_struct *task)  	}  } +static int ib_prctl_get(struct task_struct *task) +{ +	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) +		return PR_SPEC_NOT_AFFECTED; + +	switch (spectre_v2_user) { +	case SPECTRE_V2_USER_NONE: +		return PR_SPEC_ENABLE; +	case SPECTRE_V2_USER_PRCTL: +	case SPECTRE_V2_USER_SECCOMP: +		if (task_spec_ib_force_disable(task)) +			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; +		if (task_spec_ib_disable(task)) +			return PR_SPEC_PRCTL | PR_SPEC_DISABLE; +		return PR_SPEC_PRCTL | PR_SPEC_ENABLE; +	case SPECTRE_V2_USER_STRICT: +		return PR_SPEC_DISABLE; +	default: +		return PR_SPEC_NOT_AFFECTED; +	} +} +  int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)  {  	switch (which) {  	case PR_SPEC_STORE_BYPASS:  		return ssb_prctl_get(task); +	case PR_SPEC_INDIRECT_BRANCH: +		return ib_prctl_get(task);  	default:  		return -ENODEV;  	} @@ -823,7 +1046,7 @@ early_param("l1tf", l1tf_cmdline);  #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"  #if IS_ENABLED(CONFIG_KVM_INTEL) -static const 
char *l1tf_vmx_states[] = { +static const char * const l1tf_vmx_states[] = {  	[VMENTER_L1D_FLUSH_AUTO]		= "auto",  	[VMENTER_L1D_FLUSH_NEVER]		= "vulnerable",  	[VMENTER_L1D_FLUSH_COND]		= "conditional cache flushes", @@ -839,13 +1062,14 @@ static ssize_t l1tf_show_state(char *buf)  	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||  	    (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER && -	     cpu_smt_control == CPU_SMT_ENABLED)) +	     sched_smt_active())) {  		return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,  			       l1tf_vmx_states[l1tf_vmx_mitigation]); +	}  	return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,  		       l1tf_vmx_states[l1tf_vmx_mitigation], -		       cpu_smt_control == CPU_SMT_ENABLED ? "vulnerable" : "disabled"); +		       sched_smt_active() ? "vulnerable" : "disabled");  }  #else  static ssize_t l1tf_show_state(char *buf) @@ -854,11 +1078,39 @@ static ssize_t l1tf_show_state(char *buf)  }  #endif +static char *stibp_state(void) +{ +	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) +		return ""; + +	switch (spectre_v2_user) { +	case SPECTRE_V2_USER_NONE: +		return ", STIBP: disabled"; +	case SPECTRE_V2_USER_STRICT: +		return ", STIBP: forced"; +	case SPECTRE_V2_USER_PRCTL: +	case SPECTRE_V2_USER_SECCOMP: +		if (static_key_enabled(&switch_to_cond_stibp)) +			return ", STIBP: conditional"; +	} +	return ""; +} + +static char *ibpb_state(void) +{ +	if (boot_cpu_has(X86_FEATURE_IBPB)) { +		if (static_key_enabled(&switch_mm_always_ibpb)) +			return ", IBPB: always-on"; +		if (static_key_enabled(&switch_mm_cond_ibpb)) +			return ", IBPB: conditional"; +		return ", IBPB: disabled"; +	} +	return ""; +} +  static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,  			       char *buf, unsigned int bug)  { -	int ret; -  	if (!boot_cpu_has_bug(bug))  		return sprintf(buf, "Not affected\n"); @@ -876,13 +1128,12 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr  		
return sprintf(buf, "Mitigation: __user pointer sanitization\n");  	case X86_BUG_SPECTRE_V2: -		ret = sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], -			       boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "", +		return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], +			       ibpb_state(),  			       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", -			       (x86_spec_ctrl_base & SPEC_CTRL_STIBP) ? ", STIBP" : "", +			       stibp_state(),  			       boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",  			       spectre_v2_module_string()); -		return ret;  	case X86_BUG_SPEC_STORE_BYPASS:  		return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);  | 
