Diffstat (limited to 'kernel')
-rw-r--r--    kernel/cpu.c    72
1 file changed, 48 insertions(+), 24 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 0fbf3e59cf14..49acbd2fa81a 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -54,6 +54,7 @@ struct cpuhp_cpu_state {
	bool			rollback;
	bool			single;
	bool			bringup;
+	bool			booted_once;
	struct hlist_node	*node;
	enum cpuhp_state	cb_state;
	int			result;
@@ -355,6 +356,40 @@ void cpu_hotplug_enable(void)
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
#endif /* CONFIG_HOTPLUG_CPU */
+#ifdef CONFIG_HOTPLUG_SMT
+enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
+
+static int __init smt_cmdline_disable(char *str)
+{
+	cpu_smt_control = CPU_SMT_DISABLED;
+	if (str && !strcmp(str, "force")) {
+		pr_info("SMT: Force disabled\n");
+		cpu_smt_control = CPU_SMT_FORCE_DISABLED;
+	}
+	return 0;
+}
+early_param("nosmt", smt_cmdline_disable);
+
+static inline bool cpu_smt_allowed(unsigned int cpu)
+{
+	if (cpu_smt_control == CPU_SMT_ENABLED)
+		return true;
+
+	if (topology_is_primary_thread(cpu))
+		return true;
+
+	/*
+	 * On x86 it's required to boot all logical CPUs at least once so
+	 * that the init code can get a chance to set CR4.MCE on each
+	 * CPU. Otherwise, a broadcast MCE observing CR4.MCE=0b on any
+	 * core will shut down the machine.
+	 */
+	return !per_cpu(cpuhp_state, cpu).booted_once;
+}
+#else
+static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
+#endif
+
/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
@@ -431,6 +466,16 @@ static int bringup_wait_for_ap(unsigned int cpu)
	stop_machine_unpark(cpu);
	kthread_unpark(st->thread);

+	/*
+	 * SMT soft disabling on x86 requires bringing the CPU out of the
+	 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
+	 * CPU marked itself as booted_once in notify_cpu_starting() so the
+	 * cpu_smt_allowed() check will now return false if this is not the
+	 * primary sibling.
+	 */
+	if (!cpu_smt_allowed(cpu))
+		return -ECANCELED;
+
	/* Should we go further up ? */
	if (st->target > CPUHP_AP_ONLINE_IDLE) {
		__cpuhp_kick_ap_work(st);
@@ -978,29 +1023,6 @@ int cpu_down(unsigned int cpu)
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/
-#ifdef CONFIG_HOTPLUG_SMT
-enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
-
-static int __init smt_cmdline_disable(char *str)
-{
-	cpu_smt_control = CPU_SMT_DISABLED;
-	if (str && !strcmp(str, "force")) {
-		pr_info("SMT: Force disabled\n");
-		cpu_smt_control = CPU_SMT_FORCE_DISABLED;
-	}
-	return 0;
-}
-early_param("nosmt", smt_cmdline_disable);
-
-static inline bool cpu_smt_allowed(unsigned int cpu)
-{
-	return cpu_smt_control == CPU_SMT_ENABLED ||
-	       topology_is_primary_thread(cpu);
-}
-#else
-static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
-#endif
-
/**
 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
 * @cpu: cpu that just started
@@ -1014,6 +1036,7 @@ void notify_cpu_starting(unsigned int cpu)
	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);

	rcu_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
+	st->booted_once = true;
	while (st->state < target) {
		st->state++;
		cpuhp_invoke_callback(cpu, st->state, true, NULL);
@@ -2114,5 +2137,6 @@ void __init boot_cpu_init(void)
*/
void __init boot_cpu_hotplug_init(void)
{
-	per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
+	this_cpu_write(cpuhp_state.booted_once, true);
+	this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
}
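
For reference, the cpu_smt_control states used by this patch are declared outside kernel/cpu.c; the diff above only shows their use. A minimal sketch of the enum, assuming the declaration in include/linux/cpu.h carries the identifiers referenced here (the exact member list and any additional states are an assumption, not part of this diff):

/* Sketch only -- the authoritative declaration lives in include/linux/cpu.h. */
enum cpuhp_smt_control {
	CPU_SMT_ENABLED,		/* default: sibling threads may come online */
	CPU_SMT_DISABLED,		/* "nosmt" on the kernel command line */
	CPU_SMT_FORCE_DISABLED,		/* "nosmt=force" on the kernel command line */
};

As the early_param() handler above shows, booting with "nosmt" selects CPU_SMT_DISABLED and "nosmt=force" selects CPU_SMT_FORCE_DISABLED before any secondary CPU is brought up. With either setting, cpu_smt_allowed() still lets each non-primary sibling start exactly once (so its startup code can set CR4.MCE), after which the booted_once flag makes bringup_wait_for_ap() reject it with -ECANCELED.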