/*
 * Common boot and setup code.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/export.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/utsname.h>
#include <linux/tty.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/memblock.h>
#include <linux/pci.h>
#include <linux/lockdep.h>
#include <linux/memory.h>
#include <linux/nmi.h>

#include <asm/debugfs.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/dt_cpu_ftrs.h>
#include <asm/sections.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/code-patching.h>
#include <asm/livepatch.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>
#include <asm/hw_irq.h>
#include <asm/feature-fixups.h>
#include <asm/kup.h>
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif
int spinning_secondaries;
/* Safe 64-byte line defaults until initialize_cache_info() probes the tree */
struct ppc64_caches ppc64_caches = {
	.l1d = {
		.block_size = 0x40,
		.log_block_size = 6,
	},
	.l1i = {
		.block_size = 0x40,
		.log_block_size = 6
	},
};
EXPORT_SYMBOL_GPL(ppc64_caches);
#if defined(CONFIG_PPC_BOOK3E) && defined(CONFIG_SMP)
void __init setup_tlb_core_data(void)
{
	int cpu;

	BUILD_BUG_ON(offsetof(struct tlb_core_data, lock) != 0);

	for_each_possible_cpu(cpu) {
		int first = cpu_first_thread_sibling(cpu);

		/*
		 * If we boot via kdump on a non-primary thread,
		 * make sure we point at the thread that actually
		 * set up this TLB.
		 */
		if (cpu_first_thread_sibling(boot_cpuid) == first)
			first = boot_cpuid;

		paca_ptrs[cpu]->tcd_ptr = &paca_ptrs[first]->tcd;

		/*
		 * If we have threads, we need either tlbsrx.
		 * or e6500 tablewalk mode, or else TLB handlers
		 * will be racy and could produce duplicate entries.
		 * Should we panic instead?
		 */
		WARN_ONCE(smt_enabled_at_boot >= 2 &&
			  !mmu_has_feature(MMU_FTR_USE_TLBRSRV) &&
			  book3e_htw_mode != PPC_HTW_E6500,
			  "%s: unsupported MMU configuration\n", __func__);
	}
}
#endif
#ifdef CONFIG_SMP

static char *smt_enabled_cmdline;

/* Look for ibm,smt-enabled OF option */
void __init check_smt_enabled(void)
{
	struct device_node *dn;
	const char *smt_option;

	/* Default to enabling all threads */
	smt_enabled_at_boot = threads_per_core;

	/* Allow the command line to overrule the OF option */
	if (smt_enabled_cmdline) {
		if (!strcmp(smt_enabled_cmdline, "on"))
			smt_enabled_at_boot = threads_per_core;
		else if (!strcmp(smt_enabled_cmdline, "off"))
			smt_enabled_at_boot = 0;
		else {
			int smt;
			int rc;

			rc = kstrtoint(smt_enabled_cmdline, 10, &smt);
			if (!rc)
				smt_enabled_at_boot =
					min(threads_per_core, smt);
		}
	} else {
		dn = of_find_node_by_path("/options");
		if (dn) {
			smt_option = of_get_property(dn, "ibm,smt-enabled",
						     NULL);

			if (smt_option) {
				if (!strcmp(smt_option, "on"))
					smt_enabled_at_boot = threads_per_core;
				else if (!strcmp(smt_option, "off"))
					smt_enabled_at_boot = 0;
			}

			of_node_put(dn);
		}
	}
}

/* Look for smt-enabled= cmdline option */
static int __init early_smt_enabled(char *p)
{
	smt_enabled_cmdline = p;
	return 0;
}
early_param("smt-enabled", early_smt_enabled);

#endif /* CONFIG_SMP */
/* Fix up paca fields required for the boot cpu */
static void __init fixup_boot_paca(void)
{
	/* The boot cpu is started */
	get_paca()->cpu_start = 1;
	/* Allow percpu accesses to work until we setup percpu data */
	get_paca()->data_offset = 0;
	/* Mark interrupts disabled in PACA */
	irq_soft_mask_set(IRQS_DISABLED);
}
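/*
 * Note this can run before the MMU or percpu areas are up because, on
 * ppc64, the PACA pointer is carried in r13: once setup_paca() has
 * loaded r13, get_paca() is usable. Clearing data_offset makes any
 * stray early per-cpu access resolve to the static percpu template
 * rather than to a bogus offset.
 */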
static void __init configure_exceptions(void)
{
	/*
	 * Setup the trampolines from the lowmem exception vectors
	 * to the kdump kernel when not using a relocatable kernel.
	 */
	setup_kdump_trampoline();

	/* Under a PAPR hypervisor, we need hypercalls */
	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		/* Enable AIL if possible */
		pseries_enable_reloc_on_exc();

		/*
		 * Tell the hypervisor that we want our exceptions to
		 * be taken in little endian mode.
		 *
		 * We don't call this for big endian as our calling convention
		 * makes us always enter in BE, and the call may fail under
		 * some circumstances with kdump.
		 */
#ifdef __LITTLE_ENDIAN__
		pseries_little_endian_exceptions();
#endif
	} else {
		/* Set endian mode using OPAL */
		if (firmware_has_feature(FW_FEATURE_OPAL))
			opal_configure_cores();

		/* AIL on native is done in cpu_ready_for_interrupts() */
	}
}
static void cpu_ready_for_interrupts(void)
{
	/*
	 * Enable AIL if supported, and we are in hypervisor mode. This
	 * is called once for every processor.
	 *
	 * If we are not in hypervisor mode the job is done once for
	 * the whole partition in configure_exceptions().
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S)) {
		unsigned long lpcr = mfspr(SPRN_LPCR);
		mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
	}

	/*
	 * Set HFSCR:TM based on CPU features:
	 * In the special case of TM no suspend (P9N DD2.1), Linux is
	 * told TM is off via the dt-ftrs but told to (partially) use
	 * it via OPAL_REINIT_CPUS_TM_SUSPEND_DISABLED. So HFSCR[TM]
	 * will be off from dt-ftrs but we need to turn it on for the
	 * no suspend case.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		if (cpu_has_feature(CPU_FTR_TM_COMP))
			mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) | HFSCR_TM);
		else
			mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
	}

	/* Set IR and DR in PACA MSR */
	get_paca()->kernel_msr = MSR_KERNEL;
}
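/*
 * For orientation: AIL ("Alternate Interrupt Location") = 3 means
 * interrupts are delivered with relocation already on, vectored high
 * instead of at the low real-mode vectors. The effective change is
 * roughly:
 *
 *	AIL=0: vector 0x0000000000000x00, MSR[IR]=MSR[DR]=0
 *	AIL=3: vector 0xc000000000004x00, MSR[IR]=MSR[DR]=1
 */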
unsigned long spr_default_dscr = 0;

void __init record_spr_defaults(void)
{
	if (early_cpu_has_feature(CPU_FTR_DSCR))
		spr_default_dscr = mfspr(SPRN_DSCR);
}
/*
 * Early initialization entry point. This is called by head.S
 * with MMU translation disabled. We rely on the "feature" of
 * the CPU that ignores the top 2 bits of the address in real
 * mode so we can access kernel globals normally provided we
 * only toy with things in the RMO region. From here, we do
 * some early parsing of the device-tree to set up our MEMBLOCK
 * data structures, and allocate & initialize the hash table
 * and segment tables so we can start running with translation
 * on.
 *
 * It is this function which will call the probe() callback of
 * the various platform types and copy the matching one to the
 * global ppc_md structure. Your platform can eventually do
 * some very early initializations from the probe() routine, but
 * this is not recommended; be very careful as, for example, the
 * device-tree is not accessible via normal means at this point.
 */
void __init early_setup(unsigned long dt_ptr)
{
	static __initdata struct paca_struct boot_paca;

	/* -------- printk is _NOT_ safe to use here ! ------- */

	/* Try new device tree based feature discovery ... */
	if (!dt_cpu_ftrs_init(__va(dt_ptr)))
		/* Otherwise use the old style CPU table */
		identify_cpu(0, mfspr(SPRN_PVR));

	/* Assume we're on cpu 0 for now. Don't write to the paca yet! */
	initialise_paca(&boot_paca, 0);
	setup_paca(&boot_paca);
	fixup_boot_paca();

	/* -------- printk is now safe to use ------- */

	/* Enable early debugging if any specified (see udbg.h) */
	udbg_early_init();

	DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr);

	/*
	 * Do early initialization using the flattened device
	 * tree, such as retrieving the physical memory map or
	 * calculating/retrieving the hash table size.
	 */
	early_init_devtree(__va(dt_ptr));

	/* Now we know the logical id of our boot cpu, setup the paca. */
	if (boot_cpuid != 0) {
		/* Poison paca_ptrs[0] again if it's not the boot cpu */
		memset(&paca_ptrs[0], 0x88, sizeof(paca_ptrs[0]));
	}
	setup_paca(paca_ptrs[boot_cpuid]);
	fixup_boot_paca();

	/*
	 * Configure exception handlers. This includes setting up trampolines
	 * if needed, setting exception endian mode, etc...
	 */
	configure_exceptions();

	/*
	 * Configure Kernel Userspace Protection. This needs to happen before
	 * feature fixups for platforms that implement this using features.
	 */
	setup_kup();

	/* Apply all the dynamic patching */
	apply_feature_fixups();
	setup_feature_keys();

	/* Initialize the hash table or TLB handling */
	early_init_mmu();

	/*
	 * After firmware and early platform setup code has set things up,
	 * we note the SPR values for configurable control/performance
	 * registers, and use those as initial defaults.
	 */
	record_spr_defaults();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been setup), so adjust the MSR in the PACA to
	 * have IR and DR set and enable AIL if it exists
	 */
	cpu_ready_for_interrupts();

	/*
	 * We enable ftrace here, but since we only support DYNAMIC_FTRACE, it
	 * will only actually get enabled on the boot cpu much later once
	 * ftrace itself has been initialized.
	 */
	this_cpu_enable_ftrace();

	DBG(" <- early_setup()\n");

#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
	/*
	 * This needs to be done *last* (after the above DBG() even)
	 *
	 * Right after we return from this function, we turn on the MMU
	 * which means the real-mode access trick that btext does will
	 * no longer work, it needs to switch to using a real MMU
	 * mapping. This call will ensure that it does.
	 */
	btext_map();
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
}
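/*
 * For orientation, the call sequence on the usual 64-bit boot path is
 * roughly (see head_64.S):
 *
 *	__start -> ... -> start_here_multiplatform
 *		-> early_setup()	(real mode, MMU off)
 *		-> start_here_common
 *		-> start_kernel()	(translation on)
 */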
#ifdef CONFIG_SMP
void early_setup_secondary(void)
{
	/* Mark interrupts disabled in PACA */
	irq_soft_mask_set(IRQS_DISABLED);

	/* Initialize the hash table or TLB handling */
	early_init_mmu_secondary();

	/* Perform any KUP setup that is per-cpu */
	setup_kup();

	/*
	 * At this point, we can let interrupts switch to virtual mode
	 * (the MMU has been setup), so adjust the MSR in the PACA to
	 * have IR and DR set.
	 */
	cpu_ready_for_interrupts();
}

#endif /* CONFIG_SMP */
void panic_smp_self_stop(void)
{
	hard_irq_disable();
	spin_begin();
	while (1)
		spin_cpu_relax();
}
#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
static bool use_spinloop(void)
{
	if (IS_ENABLED(CONFIG_PPC_BOOK3S)) {
		/*
		 * See comments in head_64.S -- not all platforms insert
		 * secondaries at __secondary_hold and wait at the spin
		 * loop.
		 */
		if (firmware_has_feature(FW_FEATURE_OPAL))
			return false;
		return true;
	}

	/*
	 * When book3e boots from kexec, the ePAPR spin table does
	 * not get used.
	 */
	return of_property_read_bool(of_chosen, "linux,booted-from-kexec");
}
void smp_release_cpus(void)
{
	unsigned long *ptr;
	int i;

	if (!use_spinloop())
		return;

	DBG(" -> smp_release_cpus()\n");

	/* All secondary cpus are spinning on a common spinloop, release them
	 * all now so they can start to spin on their individual paca
	 * spinloops. For non SMP kernels, the secondary cpus never get out
	 * of the common spinloop.
	 */

	ptr  = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
			- PHYSICAL_START);
	*ptr = ppc_function_entry(generic_secondary_smp_init);

	/* And wait a bit for them to catch up */
	for (i = 0; i < 100000; i++) {
		mb();
		HMT_low();
		if (spinning_secondaries == 0)
			break;
		udelay(1);
	}
	DBG("spinning_secondaries = %d\n", spinning_secondaries);

	DBG(" <- smp_release_cpus()\n");
}
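/*
 * The handshake with the secondaries is, roughly:
 *
 *	secondary (head_64.S)			boot cpu (above)
 *	---------------------			----------------
 *	spin while *spinloop == 0		*spinloop = entry point
 *	branch to generic_secondary_smp_init	poll spinning_secondaries
 *	decrement spinning_secondaries		continue boot
 *	spin on its own paca spinloop
 */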
#endif /* CONFIG_SMP || CONFIG_KEXEC_CORE */
/*
 * Initialize some remaining members of the ppc64_caches and systemcfg
 * structures (at least until we get rid of them completely). This is
 * mostly some cache information about the CPU that will be used by cache
 * flush routines and/or provided to userland.
 */

static void init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize,
			    u32 bsize, u32 sets)
{
	info->size = size;
	info->sets = sets;
	info->line_size = lsize;
	info->block_size = bsize;
	info->log_block_size = __ilog2(bsize);
	if (bsize)
		info->blocks_per_page = PAGE_SIZE / bsize;
	else
		info->blocks_per_page = 0;

	if (sets == 0)
		info->assoc = 0xffff;
	else
		info->assoc = size / (sets * lsize);
}
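/*
 * Worked example: a 32KB (0x8000) cache with 128-byte lines arranged
 * as 64 sets gives assoc = 0x8000 / (64 * 128) = 4, i.e. 4-way set
 * associative. sets == 0 (fully associative, see the remapping in
 * parse_cache_info()) gets the 0xffff sentinel instead.
 */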
static bool __init parse_cache_info(struct device_node *np,
				    bool icache,
				    struct ppc_cache_info *info)
{
	static const char *ipropnames[] __initdata = {
		"i-cache-size",
		"i-cache-sets",
		"i-cache-block-size",
		"i-cache-line-size",
	};
	static const char *dpropnames[] __initdata = {
		"d-cache-size",
		"d-cache-sets",
		"d-cache-block-size",
		"d-cache-line-size",
	};
	const char **propnames = icache ? ipropnames : dpropnames;
	const __be32 *sizep, *lsizep, *bsizep, *setsp;
	u32 size, lsize, bsize, sets;
	bool success = true;

	size = 0;
	sets = -1u;
	lsize = bsize = cur_cpu_spec->dcache_bsize;
	sizep = of_get_property(np, propnames[0], NULL);
	if (sizep != NULL)
		size = be32_to_cpu(*sizep);
	setsp = of_get_property(np, propnames[1], NULL);
	if (setsp != NULL)
		sets = be32_to_cpu(*setsp);
	bsizep = of_get_property(np, propnames[2], NULL);
	lsizep = of_get_property(np, propnames[3], NULL);
	if (bsizep == NULL)
		bsizep = lsizep;
	if (lsizep != NULL)
		lsize = be32_to_cpu(*lsizep);
	if (bsizep != NULL)
		bsize = be32_to_cpu(*bsizep);
	if (sizep == NULL || bsizep == NULL || lsizep == NULL)
		success = false;

	/*
	 * OF is weird .. it represents fully associative caches
	 * as "1 way" which doesn't make much sense and doesn't
	 * leave room for direct mapped. We'll assume that 0
	 * in OF means direct mapped for that reason.
	 */
	if (sets == 1)
		sets = 0;
	else if (sets == 0)
		sets = 1;

	init_cache_info(info, size, lsize, bsize, sets);

	return success;
}
void __init initialize_cache_info(void)
{
	struct device_node *cpu = NULL, *l2, *l3 = NULL;
	u32 pvr;

	DBG(" -> initialize_cache_info()\n");

	/*
	 * All shipping POWER8 machines have a firmware bug that
	 * puts incorrect information in the device-tree. This will
	 * be (hopefully) fixed for future chips but for now hard
	 * code the values if we are running on one of these.
	 */
	pvr = PVR_VER(mfspr(SPRN_PVR));
	if (pvr == PVR_POWER8 || pvr == PVR_POWER8E ||
	    pvr == PVR_POWER8NVL) {
						/* size    lsize   blk  sets */
		init_cache_info(&ppc64_caches.l1i, 0x8000,   128,  128, 32);
		init_cache_info(&ppc64_caches.l1d, 0x10000,  128,  128, 64);
		init_cache_info(&ppc64_caches.l2,  0x80000,  128,  0,   512);
		init_cache_info(&ppc64_caches.l3,  0x800000, 128,  0,   8192);
	} else
		cpu = of_find_node_by_type(NULL, "cpu");

	/*
	 * We're assuming *all* of the CPUs have the same
	 * d-cache and i-cache sizes... -Peter
	 */
	if (cpu) {
		if (!parse_cache_info(cpu, false, &ppc64_caches.l1d))
			DBG("Argh, can't find dcache properties !\n");

		if (!parse_cache_info(cpu, true, &ppc64_caches.l1i))
			DBG("Argh, can't find icache properties !\n");

		/*
		 * Try to find the L2 and L3 if any. Assume they are
		 * unified and use the D-side properties.
		 */
		l2 = of_find_next_cache_node(cpu);
		of_node_put(cpu);
		if (l2) {
			parse_cache_info(l2, false, &ppc64_caches.l2);
			l3 = of_find_next_cache_node(l2);
			of_node_put(l2);
		}
		if (l3) {
			parse_cache_info(l3, false, &ppc64_caches.l3);
			of_node_put(l3);
		}
	}

	/* For use by binfmt_elf */
	dcache_bsize = ppc64_caches.l1d.block_size;
	icache_bsize = ppc64_caches.l1i.block_size;

	cur_cpu_spec->dcache_bsize = dcache_bsize;
	cur_cpu_spec->icache_bsize = icache_bsize;

	DBG(" <- initialize_cache_info()\n");
}
/*
 * This returns the limit below which memory accesses to the linear
 * mapping are guaranteed not to cause an architectural exception (e.g.,
 * TLB or SLB miss fault).
 *
 * This is used to allocate PACAs and various interrupt stacks that
 * are accessed early in interrupt handlers and must not cause
 * re-entrant interrupts.
 */
__init u64 ppc64_bolted_size(void)
{
#ifdef CONFIG_PPC_BOOK3E
	/* Freescale BookE bolts the entire linear mapping */
	/* XXX: BookE ppc64_rma_limit setup seems to disagree? */
	if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		return linear_map_top;
	/* Other BookE, we assume the first GB is bolted */
	return 1ul << 30;
#else
	/* BookS radix, does not take faults on linear mapping */
	if (early_radix_enabled())
		return ULONG_MAX;

	/* BookS hash, the first segment is bolted */
	if (early_mmu_has_feature(MMU_FTR_1T_SEGMENT))
		return 1UL << SID_SHIFT_1T;
	return 1UL << SID_SHIFT;
#endif
}
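/*
 * Concretely, with the usual segment sizes (SID_SHIFT = 28,
 * SID_SHIFT_1T = 40) the hash cases above work out to 256MB and 1TB
 * respectively -- the reach of the single bolted SLB entry covering
 * the kernel's first segment.
 */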
static void *__init alloc_stack(unsigned long limit, int cpu)
{
	void *ptr;

	BUILD_BUG_ON(STACK_INT_FRAME_SIZE % 16);

	ptr = memblock_alloc_try_nid(THREAD_SIZE, THREAD_SIZE,
				     MEMBLOCK_LOW_LIMIT, limit,
				     early_cpu_to_node(cpu));
	if (!ptr)
		panic("cannot allocate stacks");

	return ptr;
}
void __init irqstack_early_init(void)
{
	u64 limit = ppc64_bolted_size();
	unsigned int i;

	/*
	 * Interrupt stacks must be in the first segment since we
	 * cannot afford to take SLB misses on them. They are not
	 * accessed in realmode.
	 */
	for_each_possible_cpu(i) {
		softirq_ctx[i] = alloc_stack(limit, i);
		hardirq_ctx[i] = alloc_stack(limit, i);
	}
}
#ifdef CONFIG_PPC_BOOK3E
void __init exc_lvl_early_init(void)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		void *sp;

		sp = alloc_stack(ULONG_MAX, i);
		critirq_ctx[i] = sp;
		paca_ptrs[i]->crit_kstack = sp + THREAD_SIZE;

		sp = alloc_stack(ULONG_MAX, i);
		dbgirq_ctx[i] = sp;
		paca_ptrs[i]->dbg_kstack = sp + THREAD_SIZE;

		sp = alloc_stack(ULONG_MAX, i);
		mcheckirq_ctx[i] = sp;
		paca_ptrs[i]->mc_kstack = sp + THREAD_SIZE;
	}

	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
		patch_exception(0x040, exc_debug_debug_book3e);
}
#endif
/*
 * Stack space used when we detect a bad kernel stack pointer, and
 * early in SMP boots before relocation is enabled. Exclusive emergency
 * stack for machine checks.
 */
void __init emergency_stack_init(void)
{
	u64 limit;
	unsigned int i;

	/*
	 * Emergency stacks must be under 256MB, we cannot afford to take
	 * SLB misses on them. The ABI also requires them to be 128-byte
	 * aligned.
	 *
	 * Since we use these as temporary stacks during secondary CPU
	 * bringup, machine check, system reset, and HMI, we need to get
	 * at them in real mode. This means they must also be within the RMO
	 * region.
	 *
	 * The IRQ stacks allocated elsewhere in this file are zeroed and
	 * initialized in kernel/irq.c. These are initialized here in order
	 * to have emergency stacks available as early as possible.
	 */
	limit = min(ppc64_bolted_size(), ppc64_rma_size);

	for_each_possible_cpu(i) {
		paca_ptrs[i]->emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;

#ifdef CONFIG_PPC_BOOK3S_64
		/* emergency stack for NMI exception handling. */
		paca_ptrs[i]->nmi_emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;

		/* emergency stack for machine check exception handling. */
		paca_ptrs[i]->mc_emergency_sp = alloc_stack(limit, i) + THREAD_SIZE;
#endif
	}
}
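/*
 * Note the + THREAD_SIZE above: stacks grow down, so what gets stored
 * in the paca is the top of each stack; e.g. with a 16K THREAD_SIZE a
 * block allocated at 0x...8000 is recorded as 0x...8000 + 0x4000.
 */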
#ifdef CONFIG_SMP
#define PCPU_DYN_SIZE		()

static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	return memblock_alloc_try_nid(size, align, __pa(MAX_DMA_ADDRESS),
				      MEMBLOCK_ALLOC_ACCESSIBLE,
				      early_cpu_to_node(cpu));
}

static void __init pcpu_fc_free(void *ptr, size_t size)
{
	memblock_free(__pa(ptr), size);
}

static int pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	else
		return REMOTE_DISTANCE;
}

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

void __init setup_per_cpu_areas(void)
{
	const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
	size_t atom_size;
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Linear mapping is one of 4K, 1M and 16M. For 4K, no need
	 * to group units. For larger mappings, use 1M atom which
	 * should be large enough to contain a number of units.
	 */
	if (mmu_linear_psize == MMU_PAGE_4K)
		atom_size = PAGE_SIZE;
	else
		atom_size = 1 << 20;

	rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
				    pcpu_fc_alloc, pcpu_fc_free);
	if (rc < 0)
		panic("cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
		paca_ptrs[cpu]->data_offset = __per_cpu_offset[cpu];
	}
}
#endif
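/*
 * After this, a per-cpu access amounts to, roughly:
 *
 *	ptr = &per_cpu_var + __per_cpu_offset[cpu];
 *
 * and the fast path on ppc64 uses the copy of that offset cached in
 * the PACA (data_offset, reached via r13) rather than the array.
 */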
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
unsigned long memory_block_size_bytes(void)
{
	if (ppc_md.memory_block_size)
		return ppc_md.memory_block_size();

	return MIN_MEMORY_BLOCK_SIZE;
}
#endif

#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);
#endif
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
	return ppc_proc_freq * watchdog_thresh;
}
#endif

/*
 * The perf based hardlockup detector breaks PMU event based branches, so
 * disable it by default. Book3S has a soft-nmi hardlockup detector based
 * on the decrementer interrupt, so it does not suffer from this problem.
 *
 * It is likely to get false positives in VM guests, so disable it there
 * by default too.
 */
static int __init disable_hardlockup_detector(void)
{
#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
	hardlockup_detector_disable();
#else
	if (firmware_has_feature(FW_FEATURE_LPAR))
		hardlockup_detector_disable();
#endif

	return 0;
}
early_initcall(disable_hardlockup_detector);
#ifdef CONFIG_PPC_BOOK3S_64
static enum l1d_flush_type enabled_flush_types;
static void *l1d_flush_fallback_area;
static bool no_rfi_flush;
bool rfi_flush;

static int __init handle_no_rfi_flush(char *p)
{
	pr_info("rfi-flush: disabled on command line.\n");
	no_rfi_flush = true;
	return 0;
}
early_param("no_rfi_flush", handle_no_rfi_flush);

/*
 * The RFI flush is not KPTI, but because users will see doco that says to use
 * nopti we hijack that option here to also disable the RFI flush.
 */
static int __init handle_no_pti(char *p)
{
	pr_info("rfi-flush: disabling due to 'nopti' on command line.\n");
	handle_no_rfi_flush(NULL);
	return 0;
}
early_param("nopti", handle_no_pti);
static void do_nothing(void *unused)
{
	/*
	 * We don't need to do the flush explicitly, just enter+exit kernel is
	 * sufficient, the RFI exit handlers will do the right thing.
	 */
}

void rfi_flush_enable(bool enable)
{
	if (enable) {
		do_rfi_flush_fixups(enabled_flush_types);
		on_each_cpu(do_nothing, NULL, 1);
	} else
		do_rfi_flush_fixups(L1D_FLUSH_NONE);

	rfi_flush = enable;
}
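/*
 * The on_each_cpu(do_nothing) round trip is what makes enabling take
 * effect immediately: once the exit paths have been patched, forcing
 * every CPU to enter and leave the kernel guarantees each one has
 * performed at least one flush before this function returns.
 */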
static void __ref init_fallback_flush(void)
{
	u64 l1d_size, limit;
	int cpu;

	/* Only allocate the fallback flush area once (at boot time). */
	if (l1d_flush_fallback_area)
		return;

	l1d_size = ppc64_caches.l1d.size;

	/*
	 * If there is no d-cache-size property in the device tree, l1d_size
	 * could be zero. That leads to the loop in the asm wrapping around to
	 * 2^64-1, and then walking off the end of the fallback area and
	 * eventually causing a page fault which is fatal. Just default to
	 * something vaguely sane.
	 */
	if (!l1d_size)
		l1d_size = (64 * 1024);

	limit = min(ppc64_bolted_size(), ppc64_rma_size);

	/*
	 * Align to L1d size, and size it at 2x L1d size, to catch possible
	 * hardware prefetch runoff. We don't have a recipe for load patterns
	 * to reliably avoid the prefetcher.
	 */
	l1d_flush_fallback_area = memblock_alloc_try_nid(l1d_size * 2,
						l1d_size, MEMBLOCK_LOW_LIMIT,
						limit, NUMA_NO_NODE);
	if (!l1d_flush_fallback_area)
		panic("%s: Failed to allocate %llu bytes align=0x%llx max_addr=%pa\n",
		      __func__, l1d_size * 2, l1d_size, &limit);

	for_each_possible_cpu(cpu) {
		struct paca_struct *paca = paca_ptrs[cpu];
		paca->rfi_flush_fallback_area = l1d_flush_fallback_area;
		paca->l1d_flush_size = l1d_size;
	}
}
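/*
 * Roughly, the fallback is a displacement flush: on exit the asm walks
 * rfi_flush_fallback_area with cache-line strided loads, refilling
 * every L1D set from the buffer so that anything the kernel left in
 * the cache is evicted. The 2x sizing and L1d alignment above exist
 * only to absorb prefetch overshoot.
 */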
void setup_rfi_flush(enum l1d_flush_type types, bool enable)
{
	if (types & L1D_FLUSH_FALLBACK) {
		pr_info("rfi-flush: fallback displacement flush available\n");
		init_fallback_flush();
	}

	if (types & L1D_FLUSH_ORI)
		pr_info("rfi-flush: ori type flush available\n");

	if (types & L1D_FLUSH_MTTRIG)
		pr_info("rfi-flush: mttrig type flush available\n");

	enabled_flush_types = types;

	if (!no_rfi_flush && !cpu_mitigations_off())
		rfi_flush_enable(enable);
}
#ifdef CONFIG_DEBUG_FS
static int rfi_flush_set(void *data, u64 val)
{
	bool enable;

	if (val == 1)
		enable = true;
	else if (val == 0)
		enable = false;
	else
		return -EINVAL;

	/* Only do anything if we're changing state */
	if (enable != rfi_flush)
		rfi_flush_enable(enable);

	return 0;
}

static int rfi_flush_get(void *data, u64 *val)
{
	*val = rfi_flush ? 1 : 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");

static __init int rfi_flush_debugfs_init(void)
{
	debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush);
	return 0;
}
device_initcall(rfi_flush_debugfs_init);
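/*
 * With debugfs mounted in the usual place this appears as
 * /sys/kernel/debug/powerpc/rfi_flush; e.g.:
 *
 *	echo 0 > /sys/kernel/debug/powerpc/rfi_flush	# disable at runtime
 *	cat /sys/kernel/debug/powerpc/rfi_flush		# read current state
 */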
#endif /* CONFIG_DEBUG_FS */
#endif /* CONFIG_PPC_BOOK3S_64 */