Merge branch 'core/speculation' of git://git.kernel.org/pub/scm/linux/kernel/git...
author Will Deacon <will.deacon@arm.com>
Fri, 26 Apr 2019 12:32:20 +0000 (13:32 +0100)
committer Will Deacon <will.deacon@arm.com>
Fri, 26 Apr 2019 12:32:20 +0000 (13:32 +0100)
Pull in core support for the "mitigations=" cmdline option from Thomas
Gleixner via -tip, which we can build on top of when we expose our
mitigation state via sysfs.

Documentation/admin-guide/kernel-parameters.txt
arch/powerpc/kernel/security.c
arch/powerpc/kernel/setup_64.c
arch/x86/kernel/cpu/bugs.c
arch/x86/mm/pti.c
kernel/cpu.c

index 2b8ee90bb64470d0d6d6ccadccf8b8fbbf86509d,1ae93872b79fd8f165dc7564d5a73c6b2d412811..cf82bac648cd909751b4c18350de66393135067f
                        possible to determine what the correct size should be.
                        This option provides an override for these situations.
  
 +      carrier_timeout=
 +                      [NET] Specifies the amount of time (in seconds) that
 +                      the kernel should wait for a network carrier. By default
 +                      it waits 120 seconds.
 +
        ca_keys=        [KEYS] This parameter identifies a specific key(s) on
                        the system trusted keyring to be used for certificate
                        trust validation.
                        The filter can be disabled or changed to another
                        driver later using sysfs.
  
 +      driver_async_probe=  [KNL]
 +                      List of driver names to be probed asynchronously.
 +                      Format: <driver_name1>,<driver_name2>...
 +
        drm.edid_firmware=[<connector>:]<file>[,[<connector>:]<file>]
                        Broken monitors, graphic adapters, KVMs and EDIDless
                        panels may send no or incorrect EDID data sets.
                        specified address. The serial port must already be
                        setup and configured. Options are not yet supported.
  
 +              efifb,[options]
 +                      Start an early, unaccelerated console on the EFI
 +                      memory mapped framebuffer (if available). On cache
 +                      coherent non-x86 systems that use system memory for
 +                      the framebuffer, pass the 'ram' option so that it is
 +                      mapped with the correct attributes.
 +
        earlyprintk=    [X86,SH,ARM,M68k,S390]
                        earlyprintk=vga
 -                      earlyprintk=efi
                        earlyprintk=sclp
                        earlyprintk=xen
                        earlyprintk=serial[,ttySn[,baudrate]]
                        arch/x86/kernel/cpu/cpufreq/elanfreq.c.
  
        elevator=       [IOSCHED]
 -                      Format: {"cfq" | "deadline" | "noop"}
 -                      See Documentation/block/cfq-iosched.txt and
 -                      Documentation/block/deadline-iosched.txt for details.
 +                      Format: { "mq-deadline" | "kyber" | "bfq" }
 +                      See Documentation/block/deadline-iosched.txt,
 +                      Documentation/block/kyber-iosched.txt and
 +                      Documentation/block/bfq-iosched.txt for details.
  
        elfcorehdr=[size[KMG]@]offset[KMG] [IA64,PPC,SH,X86,S390]
                        Specifies physical address of start of kernel core
                        to let secondary kernels in charge of setting up
                        LPIs.
  
 +      irqchip.gicv3_pseudo_nmi= [ARM64]
 +                      Enables support for pseudo-NMIs in the kernel. This
 +                      requires the kernel to be built with
 +                      CONFIG_ARM64_PSEUDO_NMI.
 +
        irqfixup        [HW]
                        When an interrupt is not handled search all handlers
                        for it. Intended to get systems with badly broken
                        Built with CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y,
                        the default is off.
  
 +      kpti=           [ARM64] Control page table isolation of user
 +                      and kernel address spaces.
 +                      Default: enabled on cores which need mitigation.
 +                      0: force disabled
 +                      1: force enabled
 +
        kvm.ignore_msrs=[KVM] Ignore guest accesses to unhandled MSRs.
                        Default is 0 (don't ignore, but inject #GP)
  
  
        lsm.debug       [SECURITY] Enable LSM initialization debugging output.
  
 +      lsm=lsm1,...,lsmN
 +                      [SECURITY] Choose order of LSM initialization. This
 +                      overrides CONFIG_LSM and the "security=" parameter.
 +
        machvec=        [IA-64] Force the use of a particular machine-vector
                        (machvec) in a generic kernel.
                        Example: machvec=hpzx1_swiotlb
                        in the "bleeding edge" mini2440 support kernel at
                        http://repo.or.cz/w/linux-2.6/mini2440.git
  
+       mitigations=
+                       [X86,PPC,S390] Control optional mitigations for CPU
+                       vulnerabilities.  This is a set of curated,
+                       arch-independent options, each of which is an
+                       aggregation of existing arch-specific options.
+                       off
+                               Disable all optional CPU mitigations.  This
+                               improves system performance, but it may also
+                               expose users to several CPU vulnerabilities.
+                               Equivalent to: nopti [X86,PPC]
+                                              nospectre_v1 [PPC]
+                                              nobp=0 [S390]
+                                              nospectre_v2 [X86,PPC,S390]
+                                              spectre_v2_user=off [X86]
+                                              spec_store_bypass_disable=off [X86,PPC]
+                                              l1tf=off [X86]
+                       auto (default)
+                               Mitigate all CPU vulnerabilities, but leave SMT
+                               enabled, even if it's vulnerable.  This is for
+                               users who don't want to be surprised by SMT
+                               getting disabled across kernel upgrades, or who
+                               have other ways of avoiding SMT-based attacks.
+                               Equivalent to: (default behavior)
+                       auto,nosmt
+                               Mitigate all CPU vulnerabilities, disabling SMT
+                               if needed.  This is for users who always want to
+                               be fully mitigated, even if it means losing SMT.
+                               Equivalent to: l1tf=flush,nosmt [X86]
        mminit_loglevel=
                        [KNL] When CONFIG_DEBUG_MEMORY_INIT is set, this
                        parameter allows control of the logging verbosity for
                        latencies, which will choose a value aligned
                        with the appropriate hardware boundaries.
  
 -      rcutree.jiffies_till_sched_qs= [KNL]
 -                      Set required age in jiffies for a
 -                      given grace period before RCU starts
 -                      soliciting quiescent-state help from
 -                      rcu_note_context_switch().  If not specified, the
 -                      kernel will calculate a value based on the most
 -                      recent settings of rcutree.jiffies_till_first_fqs
 -                      and rcutree.jiffies_till_next_fqs.
 -                      This calculated value may be viewed in
 -                      rcutree.jiffies_to_sched_qs.  Any attempt to
 -                      set rcutree.jiffies_to_sched_qs will be
 -                      cheerfully overwritten.
 -
        rcutree.jiffies_till_first_fqs= [KNL]
                        Set delay from grace-period initialization to
                        first attempt to force quiescent states.
                        quiescent states.  Units are jiffies, minimum
                        value is one, and maximum value is HZ.
  
 +      rcutree.jiffies_till_sched_qs= [KNL]
 +                      Set required age in jiffies for a
 +                      given grace period before RCU starts
 +                      soliciting quiescent-state help from
 +                      rcu_note_context_switch() and cond_resched().
 +                      If not specified, the kernel will calculate
 +                      a value based on the most recent settings
 +                      of rcutree.jiffies_till_first_fqs
 +                      and rcutree.jiffies_till_next_fqs.
 +                      This calculated value may be viewed in
 +                      rcutree.jiffies_to_sched_qs.  Any attempt to set
 +                      rcutree.jiffies_to_sched_qs will be cheerfully
 +                      overwritten.
 +
        rcutree.kthread_prio=    [KNL,BOOT]
                        Set the SCHED_FIFO priority of the RCU per-CPU
                        kthreads (rcuc/N). This value is also used for
                        This wake_up() will be accompanied by a
                        WARN_ONCE() splat and an ftrace_dump().
  
 +      rcutree.sysrq_rcu= [KNL]
 +                      Commandeer a sysrq key to dump out Tree RCU's
 +                      rcu_node tree with an eye towards determining
 +                      why a new grace period has not yet started.
 +
        rcuperf.gp_async= [KNL]
                        Measure performance of asynchronous
                        grace-period primitives such as call_rcu().
                        Note: increases power consumption, thus should only be
                        enabled if running jitter sensitive (HPC/RT) workloads.
  
 -      security=       [SECURITY] Choose a security module to enable at boot.
 -                      If this boot parameter is not specified, only the first
 -                      security module asking for security registration will be
 -                      loaded. An invalid security module name will be treated
 -                      as if no module has been chosen.
 +      security=       [SECURITY] Choose a legacy "major" security module to
 +                      enable at boot. This has been deprecated by the
 +                      "lsm=" parameter.
  
        selinux=        [SELINUX] Disable or enable SELinux at boot time.
                        Format: { "0" | "1" }
        usbcore.authorized_default=
                        [USB] Default USB device authorization:
                        (default -1 = authorized except for wireless USB,
 -                      0 = not authorized, 1 = authorized)
 +                      0 = not authorized, 1 = authorized, 2 = authorized
 +                      if device connected to internal port)
  
        usbcore.autosuspend=
                        [USB] The autosuspend time delay (in seconds) used
                        or other driver-specific files in the
                        Documentation/watchdog/ directory.
  
 +      watchdog_thresh=
 +                      [KNL]
 +                      Set the hard lockup detector stall duration
 +                      threshold in seconds. The soft lockup detector
 +                      threshold is set to twice the value. A value of 0
 +                      disables both lockup detectors. Default is 10
 +                      seconds.
 +
        workqueue.watchdog_thresh=
                        If CONFIG_WQ_WATCHDOG is configured, workqueue can
                        warn stall conditions and dump internal state to
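
As a concrete example of the "mitigations=" entry above: booting with

	mitigations=off

is, per the equivalence list in that entry, the same as passing the individual
arch options (nopti, nospectre_v2, spectre_v2_user=off,
spec_store_bypass_disable=off and l1tf=off on x86), while

	mitigations=auto,nosmt

keeps all mitigations enabled and additionally lets the kernel disable SMT
where a mitigation calls for it (equivalent to l1tf=flush,nosmt on x86).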
index b33bafb8fcea1f7a964ad99e203ee0a2cf3103cb,cdf3e73000e94d36698b9c760f2a9ca87d54ee15..70568ccbd9fd5eae17014473aa415d9b472b7d86
@@@ -57,7 -57,7 +57,7 @@@ void setup_barrier_nospec(void
        enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
                 security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR);
  
-       if (!no_nospec)
+       if (!no_nospec && !cpu_mitigations_off())
                enable_barrier_nospec(enable);
  }
  
@@@ -116,7 -116,7 +116,7 @@@ static int __init handle_nospectre_v2(c
  early_param("nospectre_v2", handle_nospectre_v2);
  void setup_spectre_v2(void)
  {
-       if (no_spectrev2)
+       if (no_spectrev2 || cpu_mitigations_off())
                do_btb_flush_fixups();
        else
                btb_flush_enabled = true;
@@@ -190,22 -190,29 +190,22 @@@ ssize_t cpu_show_spectre_v2(struct devi
        bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
        ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);
  
 -      if (bcs || ccd || count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
 -              bool comma = false;
 +      if (bcs || ccd) {
                seq_buf_printf(&s, "Mitigation: ");
  
 -              if (bcs) {
 +              if (bcs)
                        seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");
 -                      comma = true;
 -              }
  
 -              if (ccd) {
 -                      if (comma)
 -                              seq_buf_printf(&s, ", ");
 -                      seq_buf_printf(&s, "Indirect branch cache disabled");
 -                      comma = true;
 -              }
 -
 -              if (comma)
 +              if (bcs && ccd)
                        seq_buf_printf(&s, ", ");
  
 -              seq_buf_printf(&s, "Software count cache flush");
 +              if (ccd)
 +                      seq_buf_printf(&s, "Indirect branch cache disabled");
 +      } else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
 +              seq_buf_printf(&s, "Mitigation: Software count cache flush");
  
                if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
 -                      seq_buf_printf(&s, "(hardware accelerated)");
 +                      seq_buf_printf(&s, " (hardware accelerated)");
        } else if (btb_flush_enabled) {
                seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
        } else {
@@@ -300,7 -307,7 +300,7 @@@ void setup_stf_barrier(void
  
        stf_enabled_flush_types = type;
  
-       if (!no_stf_barrier)
+       if (!no_stf_barrier && !cpu_mitigations_off())
                stf_barrier_enable(enable);
  }
  
index ba404dd9ce1d88809e0a6e70f0decc286caf576a,c7ec27ba8926a8b1a974e3c8cc56855467d35b70..4f49e1a3594c2d3423ae232152cda2b53e730483
@@@ -634,17 -634,19 +634,17 @@@ __init u64 ppc64_bolted_size(void
  
  static void *__init alloc_stack(unsigned long limit, int cpu)
  {
 -      unsigned long pa;
 +      void *ptr;
  
        BUILD_BUG_ON(STACK_INT_FRAME_SIZE % 16);
  
 -      pa = memblock_alloc_base_nid(THREAD_SIZE, THREAD_SIZE, limit,
 -                                      early_cpu_to_node(cpu), MEMBLOCK_NONE);
 -      if (!pa) {
 -              pa = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
 -              if (!pa)
 -                      panic("cannot allocate stacks");
 -      }
 +      ptr = memblock_alloc_try_nid(THREAD_SIZE, THREAD_SIZE,
 +                                   MEMBLOCK_LOW_LIMIT, limit,
 +                                   early_cpu_to_node(cpu));
 +      if (!ptr)
 +              panic("cannot allocate stacks");
  
 -      return __va(pa);
 +      return ptr;
  }
  
  void __init irqstack_early_init(void)
@@@ -689,6 -691,24 +689,6 @@@ void __init exc_lvl_early_init(void
  }
  #endif
  
 -/*
 - * Emergency stacks are used for a range of things, from asynchronous
 - * NMIs (system reset, machine check) to synchronous, process context.
 - * We set preempt_count to zero, even though that isn't necessarily correct. To
 - * get the right value we'd need to copy it from the previous thread_info, but
 - * doing that might fault causing more problems.
 - * TODO: what to do with accounting?
 - */
 -static void emerg_stack_init_thread_info(struct thread_info *ti, int cpu)
 -{
 -      ti->task = NULL;
 -      ti->cpu = cpu;
 -      ti->preempt_count = 0;
 -      ti->local_flags = 0;
 -      ti->flags = 0;
 -      klp_init_thread_info(ti);
 -}
 -
  /*
   * Stack space used when we detect a bad kernel stack pointer, and
   * early in SMP boots before relocation is enabled. Exclusive emergency
@@@ -716,14 -736,25 +716,14 @@@ void __init emergency_stack_init(void
        limit = min(ppc64_bolted_size(), ppc64_rma_size);
  
        for_each_possible_cpu(i) {
 -              struct thread_info *ti;
 -
 -              ti = alloc_stack(limit, i);
 -              memset(ti, 0, THREAD_SIZE);
 -              emerg_stack_init_thread_info(ti, i);
 -              paca_ptrs[i]->emergency_sp = (void *)ti + THREAD_SIZE;
 +              paca_ptrs[i]->emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;
  
  #ifdef CONFIG_PPC_BOOK3S_64
                /* emergency stack for NMI exception handling. */
 -              ti = alloc_stack(limit, i);
 -              memset(ti, 0, THREAD_SIZE);
 -              emerg_stack_init_thread_info(ti, i);
 -              paca_ptrs[i]->nmi_emergency_sp = (void *)ti + THREAD_SIZE;
 +              paca_ptrs[i]->nmi_emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;
  
                /* emergency stack for machine check exception handling. */
 -              ti = alloc_stack(limit, i);
 -              memset(ti, 0, THREAD_SIZE);
 -              emerg_stack_init_thread_info(ti, i);
 -              paca_ptrs[i]->mc_emergency_sp = (void *)ti + THREAD_SIZE;
 +              paca_ptrs[i]->mc_emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;
  #endif
        }
  }
@@@ -902,13 -933,8 +902,13 @@@ static void __ref init_fallback_flush(v
         * hardware prefetch runoff. We don't have a recipe for load patterns to
         * reliably avoid the prefetcher.
         */
 -      l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2, l1d_size, limit));
 -      memset(l1d_flush_fallback_area, 0, l1d_size * 2);
 +      l1d_flush_fallback_area = memblock_alloc_try_nid(l1d_size * 2,
 +                                              l1d_size, MEMBLOCK_LOW_LIMIT,
 +                                              limit, NUMA_NO_NODE);
 +      if (!l1d_flush_fallback_area)
 +              panic("%s: Failed to allocate %llu bytes align=0x%llx max_addr=%pa\n",
 +                    __func__, l1d_size * 2, l1d_size, &limit);
 +
  
        for_each_possible_cpu(cpu) {
                struct paca_struct *paca = paca_ptrs[cpu];
@@@ -932,7 -958,7 +932,7 @@@ void setup_rfi_flush(enum l1d_flush_typ
  
        enabled_flush_types = types;
  
-       if (!no_rfi_flush)
+       if (!no_rfi_flush && !cpu_mitigations_off())
                rfi_flush_enable(enable);
  }
  
index 2da82eff0eb4f8498c8cdd65bd9f9dd5fa1fa6eb,435c078c29483abcc9dcc4589a238477b98f7eb5..8043a21f36bea925f2be1b61eecdf84dcc85b4f0
@@@ -440,7 -440,8 +440,8 @@@ static enum spectre_v2_mitigation_cmd _
        char arg[20];
        int ret, i;
  
-       if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
+       if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
+           cpu_mitigations_off())
                return SPECTRE_V2_CMD_NONE;
  
        ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
@@@ -672,7 -673,8 +673,8 @@@ static enum ssb_mitigation_cmd __init s
        char arg[20];
        int ret, i;
  
-       if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
+       if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
+           cpu_mitigations_off()) {
                return SPEC_STORE_BYPASS_CMD_NONE;
        } else {
                ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
@@@ -798,25 -800,15 +800,25 @@@ static int ssb_prctl_set(struct task_st
                if (task_spec_ssb_force_disable(task))
                        return -EPERM;
                task_clear_spec_ssb_disable(task);
 +              task_clear_spec_ssb_noexec(task);
                task_update_spec_tif(task);
                break;
        case PR_SPEC_DISABLE:
                task_set_spec_ssb_disable(task);
 +              task_clear_spec_ssb_noexec(task);
                task_update_spec_tif(task);
                break;
        case PR_SPEC_FORCE_DISABLE:
                task_set_spec_ssb_disable(task);
                task_set_spec_ssb_force_disable(task);
 +              task_clear_spec_ssb_noexec(task);
 +              task_update_spec_tif(task);
 +              break;
 +      case PR_SPEC_DISABLE_NOEXEC:
 +              if (task_spec_ssb_force_disable(task))
 +                      return -EPERM;
 +              task_set_spec_ssb_disable(task);
 +              task_set_spec_ssb_noexec(task);
                task_update_spec_tif(task);
                break;
        default:
@@@ -895,8 -887,6 +897,8 @@@ static int ssb_prctl_get(struct task_st
        case SPEC_STORE_BYPASS_PRCTL:
                if (task_spec_ssb_force_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
 +              if (task_spec_ssb_noexec(task))
 +                      return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
                if (task_spec_ssb_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
                return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
@@@ -1008,6 -998,11 +1010,11 @@@ static void __init l1tf_select_mitigati
        if (!boot_cpu_has_bug(X86_BUG_L1TF))
                return;
  
+       if (cpu_mitigations_off())
+               l1tf_mitigation = L1TF_MITIGATION_OFF;
+       else if (cpu_mitigations_auto_nosmt())
+               l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
        override_cache_bits(&boot_cpu_data);
  
        switch (l1tf_mitigation) {
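
The PR_SPEC_DISABLE_NOEXEC mode added to ssb_prctl_set()/ssb_prctl_get() above
is driven from userspace via prctl(). A minimal sketch of a caller, assuming
the uapi constant from this series (with a fallback define for older headers):

	#include <stdio.h>
	#include <sys/prctl.h>
	#include <linux/prctl.h>

	#ifndef PR_SPEC_DISABLE_NOEXEC
	#define PR_SPEC_DISABLE_NOEXEC (1UL << 4)	/* uapi value added by this series */
	#endif

	int main(void)
	{
		/* Enable the SSB mitigation for this task only until execve(),
		 * at which point the new bookkeeping in ssb_prctl_set() has
		 * arranged for it to be cleared. */
		if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
			  PR_SPEC_DISABLE_NOEXEC, 0, 0))
			perror("PR_SET_SPECULATION_CTRL");

		/* ssb_prctl_get() reports the per-task state back. */
		printf("ssb state: 0x%lx\n",
		       (unsigned long)prctl(PR_GET_SPECULATION_CTRL,
					    PR_SPEC_STORE_BYPASS, 0, 0, 0));
		return 0;
	}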
diff --combined arch/x86/mm/pti.c
index 139b28a01ce47f90b770b0eea4a98f35664a6ef5,5890f09bfc1959e6f3b48e72511e33d75c75b2c1..d0255d64edced4f887efe21799b0f527c063aeea
@@@ -35,6 -35,7 +35,7 @@@
  #include <linux/spinlock.h>
  #include <linux/mm.h>
  #include <linux/uaccess.h>
+ #include <linux/cpu.h>
  
  #include <asm/cpufeature.h>
  #include <asm/hypervisor.h>
@@@ -77,7 -78,7 +78,7 @@@ static void __init pti_print_if_secure(
                pr_info("%s\n", reason);
  }
  
 -enum pti_mode {
 +static enum pti_mode {
        PTI_AUTO = 0,
        PTI_FORCE_OFF,
        PTI_FORCE_ON
@@@ -115,7 -116,8 +116,8 @@@ void __init pti_check_boottime_disable(
                }
        }
  
-       if (cmdline_find_option_bool(boot_command_line, "nopti")) {
+       if (cmdline_find_option_bool(boot_command_line, "nopti") ||
+           cpu_mitigations_off()) {
                pti_mode = PTI_FORCE_OFF;
                pti_print_if_insecure("disabled on command line.");
                return;
@@@ -602,7 -604,7 +604,7 @@@ static void pti_clone_kernel_text(void
        set_memory_global(start, (end_global - start) >> PAGE_SHIFT);
  }
  
 -void pti_set_kernel_image_nonglobal(void)
 +static void pti_set_kernel_image_nonglobal(void)
  {
        /*
         * The identity map is created with PMDs, regardless of the
diff --combined kernel/cpu.c
index 6754f3ecfd943c97af0b865197d01366abac7c73,e70a90634b41cd57de7a6fed4d3ff40d24055590..43e741e88691c2993de99c50ad3e4031bac91ec9
@@@ -313,15 -313,6 +313,15 @@@ void cpus_write_unlock(void
  
  void lockdep_assert_cpus_held(void)
  {
 +      /*
 +       * We can't have hotplug operations before userspace starts running,
 +       * and some init codepaths will knowingly not take the hotplug lock.
 +       * This is all valid, so mute lockdep until it makes sense to report
 +       * unheld locks.
 +       */
 +      if (system_state < SYSTEM_RUNNING)
 +              return;
 +
        percpu_rwsem_assert_held(&cpu_hotplug_lock);
  }
  
@@@ -564,20 -555,6 +564,20 @@@ static void undo_cpu_up(unsigned int cp
                cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
  }
  
 +static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
 +{
 +      if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
 +              return true;
 +      /*
 +       * When CPU hotplug is disabled, then taking the CPU down is not
 +       * possible because takedown_cpu() and the architecture and
 +       * subsystem specific mechanisms are not available. So the CPU
 +       * which would be completely unplugged again needs to stay around
 +       * in the current state.
 +       */
 +      return st->state <= CPUHP_BRINGUP_CPU;
 +}
 +
  static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
                              enum cpuhp_state target)
  {
                st->state++;
                ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
                if (ret) {
 -                      st->target = prev_state;
 -                      undo_cpu_up(cpu, st);
 +                      if (can_rollback_cpu(st)) {
 +                              st->target = prev_state;
 +                              undo_cpu_up(cpu, st);
 +                      }
                        break;
                }
        }
@@@ -2304,3 -2279,18 +2304,18 @@@ void __init boot_cpu_hotplug_init(void
  #endif
        this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
  }
+ enum cpu_mitigations cpu_mitigations __ro_after_init = CPU_MITIGATIONS_AUTO;
+ static int __init mitigations_parse_cmdline(char *arg)
+ {
+       if (!strcmp(arg, "off"))
+               cpu_mitigations = CPU_MITIGATIONS_OFF;
+       else if (!strcmp(arg, "auto"))
+               cpu_mitigations = CPU_MITIGATIONS_AUTO;
+       else if (!strcmp(arg, "auto,nosmt"))
+               cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
+       return 0;
+ }
+ early_param("mitigations", mitigations_parse_cmdline);
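
The include/linux/cpu.h side of this series, which defines enum cpu_mitigations
and the accessors the arch hunks above call, is not shown in this combined
diff. For reference, a sketch of those helpers, reconstructed from the calls
made in the hunks above:

	extern enum cpu_mitigations cpu_mitigations;

	/* mitigations=off */
	static inline bool cpu_mitigations_off(void)
	{
		return cpu_mitigations == CPU_MITIGATIONS_OFF;
	}

	/* mitigations=auto,nosmt */
	static inline bool cpu_mitigations_auto_nosmt(void)
	{
		return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
	}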