// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/export.h>
#include <linux/syscore_ops.h>
#include <linux/time.h>
#include <linux/tracepoint.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>

#include <asm/cpu.h>
#include <asm/idle.h>
#include <asm/loongson.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/time.h>

int __cpu_number_map[NR_CPUS];   /* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];  /* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Representing the threads (siblings) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* Representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

static DECLARE_COMPLETION(cpu_starting);
static DECLARE_COMPLETION(cpu_running);

/*
 * A logical cpu mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;

struct secondary_data cpuboot_data;
static DEFINE_PER_CPU(int, cpu_state);
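
/* Printable names for the per-CPU IPI statistics reported by show_ipi_list() */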
static const char *ipi_types[NR_IPI] __tracepoint_string = {
	[IPI_RESCHEDULE] = "Rescheduling interrupts",
	[IPI_CALL_FUNCTION] = "Function call interrupts",
};

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i, prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ", per_cpu(irq_stat, cpu).ipi_irqs[i]);
		seq_printf(p, " LoongArch %d %s\n", i + 1, ipi_types[i]);
	}
}
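
/* Record the CPUs that share a physical package with @cpu in cpu_core_map */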
static inline void set_cpu_core_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_core_setup_map);

	for_each_cpu(i, &cpu_core_setup_map) {
		if (cpu_data[cpu].package == cpu_data[i].package) {
			cpumask_set_cpu(i, &cpu_core_map[cpu]);
			cpumask_set_cpu(cpu, &cpu_core_map[i]);
		}
	}
}

static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

	for_each_cpu(i, &cpu_sibling_setup_map) {
		if (cpus_are_siblings(cpu, i)) {
			cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
			cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
		}
	}
}

static inline void clear_cpu_sibling_map(int cpu)
{
	int i;

	for_each_cpu(i, &cpu_sibling_setup_map) {
		if (cpus_are_siblings(cpu, i)) {
			cpumask_clear_cpu(i, &cpu_sibling_map[cpu]);
			cpumask_clear_cpu(cpu, &cpu_sibling_map[i]);
		}
	}

	cpumask_clear_cpu(cpu, &cpu_sibling_setup_map);
}

/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears.
 */
void calculate_cpu_foreign_map(void)
{
	int i, k, core_present;
	cpumask_t temp_foreign_map;

	/* Re-calculate the mask */
	cpumask_clear(&temp_foreign_map);
	for_each_online_cpu(i) {
		core_present = 0;
		for_each_cpu(k, &temp_foreign_map)
			if (cpus_are_siblings(i, k))
				core_present = 1;
		if (!core_present)
			cpumask_set_cpu(i, &temp_foreign_map);
	}

	for_each_online_cpu(i)
		cpumask_andnot(&cpu_foreign_map[i],
			       &temp_foreign_map, &cpu_sibling_map[i]);
}

/* Send mailbox buffer via Mail_Send */
static void csr_mail_send(uint64_t data, int cpu, int mailbox)
{
	uint64_t val;

	/* Send high 32 bits */
	val = IOCSR_MBUF_SEND_BLOCKING;
	val |= (IOCSR_MBUF_SEND_BOX_HI(mailbox) << IOCSR_MBUF_SEND_BOX_SHIFT);
	val |= (cpu << IOCSR_MBUF_SEND_CPU_SHIFT);
	val |= (data & IOCSR_MBUF_SEND_H32_MASK);
	iocsr_write64(val, LOONGARCH_IOCSR_MBUF_SEND);

	/* Send low 32 bits */
	val = IOCSR_MBUF_SEND_BLOCKING;
	val |= (IOCSR_MBUF_SEND_BOX_LO(mailbox) << IOCSR_MBUF_SEND_BOX_SHIFT);
	val |= (cpu << IOCSR_MBUF_SEND_CPU_SHIFT);
	val |= (data << IOCSR_MBUF_SEND_BUF_SHIFT);
	iocsr_write64(val, LOONGARCH_IOCSR_MBUF_SEND);
}

static u32 ipi_read_clear(int cpu)
{
	u32 action;

	/* Load the ipi register to figure out what we're supposed to do */
	action = iocsr_read32(LOONGARCH_IOCSR_IPI_STATUS);
	/* Clear the ipi register to clear the interrupt */
	iocsr_write32(action, LOONGARCH_IOCSR_IPI_CLEAR);

	return action;
}
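
/* Send one IPI_SEND write per bit set in @action; each write carries a single vector (the bit index) to the target CPU */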
static void ipi_write_action(int cpu, u32 action)
{
	unsigned int irq = 0;

	while ((irq = ffs(action))) {
		uint32_t val = IOCSR_IPI_SEND_BLOCKING;

		val |= (irq - 1);
		val |= (cpu << IOCSR_IPI_SEND_CPU_SHIFT);
		iocsr_write32(val, LOONGARCH_IOCSR_IPI_SEND);
		action &= ~BIT(irq - 1);
	}
}

void loongson_send_ipi_single(int cpu, unsigned int action)
{
	ipi_write_action(cpu_logical_map(cpu), (u32)action);
}

void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	unsigned int i;

	for_each_cpu(i, mask)
		ipi_write_action(cpu_logical_map(i), (u32)action);
}

/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void arch_smp_send_reschedule(int cpu)
{
	loongson_send_ipi_single(cpu, SMP_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);

irqreturn_t loongson_ipi_interrupt(int irq, void *dev)
{
	unsigned int action;
	unsigned int cpu = smp_processor_id();

	action = ipi_read_clear(cpu_logical_map(cpu));

	if (action & SMP_RESCHEDULE) {
		scheduler_ipi();
		per_cpu(irq_stat, cpu).ipi_irqs[IPI_RESCHEDULE]++;
	}

	if (action & SMP_CALL_FUNCTION) {
		generic_smp_call_function_interrupt();
		per_cpu(irq_stat, cpu).ipi_irqs[IPI_CALL_FUNCTION]++;
	}

	return IRQ_HANDLED;
}
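
/* Enumerate CPUs from the devicetree and fill in the logical<->physical CPU maps */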
static void __init fdt_smp_setup(void)
{
#ifdef CONFIG_OF
	unsigned int cpu, cpuid;
	struct device_node *node = NULL;

	for_each_of_cpu_node(node) {
		if (!of_device_is_available(node))
			continue;

		cpuid = of_get_cpu_hwid(node, 0);
		if (cpuid >= nr_cpu_ids)
			continue;

		if (cpuid == loongson_sysconf.boot_cpu_id) {
			cpu = 0;
		} else {
			cpu = cpumask_next_zero(-1, cpu_present_mask);
		}

		num_processors++;
		set_cpu_possible(cpu, true);
		set_cpu_present(cpu, true);
		__cpu_number_map[cpuid] = cpu;
		__cpu_logical_map[cpu] = cpuid;
	}

	loongson_sysconf.nr_cpus = num_processors;
	set_bit(0, loongson_sysconf.cores_io_master);
#endif
}

void __init loongson_smp_setup(void)
{
	fdt_smp_setup();

	if (loongson_sysconf.cores_per_package == 0)
		loongson_sysconf.cores_per_package = num_processors;

	cpu_data[0].core = cpu_logical_map(0) % loongson_sysconf.cores_per_package;
	cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package;

	iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
	pr_info("Detected %i available CPU(s)\n", loongson_sysconf.nr_cpus);
}

void __init loongson_prepare_cpus(unsigned int max_cpus)
{
	int i = 0;

	parse_acpi_topology();

	for (i = 0; i < loongson_sysconf.nr_cpus; i++) {
		set_cpu_present(i, true);
		csr_mail_send(0, __cpu_logical_map[i], 0);
		cpu_data[i].global_id = __cpu_logical_map[i];
	}

	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
}

/*
 * Setup the PC, SP, and TP of a secondary processor and start it running!
 */
void loongson_boot_secondary(int cpu, struct task_struct *idle)
{
	unsigned long entry;

	pr_info("Booting CPU#%d...\n", cpu);

	entry = __pa_symbol((unsigned long)&smpboot_entry);
	cpuboot_data.stack = (unsigned long)__KSTK_TOS(idle);
	cpuboot_data.thread_info = (unsigned long)task_thread_info(idle);

	csr_mail_send(entry, cpu_logical_map(cpu), 0);

	loongson_send_ipi_single(cpu, SMP_BOOT_CPU);
}

/*
 * SMP init and finish on secondary CPUs
 */
void loongson_init_secondary(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int imask = ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 |
			     ECFGF_IPI | ECFGF_PMC | ECFGF_TIMER;

	change_csr_ecfg(ECFG0_IM, imask);

	iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);

#ifdef CONFIG_NUMA
	numa_add_cpu(cpu);
#endif
	per_cpu(cpu_state, cpu) = CPU_ONLINE;
	cpu_data[cpu].package =
		cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
	cpu_data[cpu].core = pptt_enabled ? cpu_data[cpu].core :
		cpu_logical_map(cpu) % loongson_sysconf.cores_per_package;
}
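
/* Per-CPU tail of bringup: enable interrupts on this CPU and clear the boot mailbox */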
void loongson_smp_finish(void)
{
	local_irq_enable();
	iocsr_write64(0, LOONGARCH_IOCSR_MBUF0);
	pr_info("CPU#%d finished\n", smp_processor_id());
}

#ifdef CONFIG_HOTPLUG_CPU

int loongson_cpu_disable(void)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();

	if (io_master(cpu))
		return -EBUSY;

#ifdef CONFIG_NUMA
	numa_remove_cpu(cpu);
#endif
	set_cpu_online(cpu, false);
	clear_cpu_sibling_map(cpu);
	calculate_cpu_foreign_map();
	local_irq_save(flags);
	irq_migrate_all_off_this_cpu();
	clear_csr_ecfg(ECFG0_IM);
	local_irq_restore(flags);
	local_flush_tlb_all();

	return 0;
}
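
/* Wait for the dying CPU to mark itself CPU_DEAD before the hotplug teardown continues */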
void loongson_cpu_die(unsigned int cpu)
{
	while (per_cpu(cpu_state, cpu) != CPU_DEAD)
		cpu_relax();

	mb();
}
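
/* Idle loop of an offlined CPU: sleep until the boot mailbox is written with a new entry point, then jump to it */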
void __noreturn arch_cpu_idle_dead(void)
{
	register uint64_t addr;
	register void (*init_fn)(void);

	idle_task_exit();
	local_irq_enable();
	set_csr_ecfg(ECFGF_IPI);
	__this_cpu_write(cpu_state, CPU_DEAD);

	__smp_mb();
	do {
		__asm__ __volatile__("idle 0\n\t");
		addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
	} while (addr == 0);

	local_irq_disable();
	init_fn = (void *)TO_CACHE(addr);
	iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);

	init_fn();
	BUG();
}

#endif

#ifdef CONFIG_PM

static int loongson_ipi_suspend(void)
{
	return 0;
}

static void loongson_ipi_resume(void)
{
	iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
}

static struct syscore_ops loongson_ipi_syscore_ops = {
	.resume = loongson_ipi_resume,
	.suspend = loongson_ipi_suspend,
};

/*
 * Enable boot cpu ipi before enabling nonboot cpus
 * during syscore_resume.
 */
static int __init ipi_pm_init(void)
{
	register_syscore_ops(&loongson_ipi_syscore_ops);
	return 0;
}

core_initcall(ipi_pm_init);

#endif

/* Preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
	unsigned int cpu, node, rr_node;

	set_cpu_possible(0, true);
	set_cpu_online(0, true);
	set_my_cpu_offset(per_cpu_offset(0));

	rr_node = first_node(node_online_map);
	for_each_possible_cpu(cpu) {
		node = early_cpu_to_node(cpu);

		/*
		 * The mapping between present cpus and nodes has been
		 * built during MADT and SRAT parsing.
		 *
		 * If possible cpus = present cpus here, early_cpu_to_node
		 * will return valid node.
		 *
		 * If possible cpus > present cpus here (e.g. some possible
		 * cpus will be added by cpu-hotplug later), for possible but
		 * not present cpus, early_cpu_to_node will return NUMA_NO_NODE,
		 * and we just map them to online nodes in round-robin way.
		 * Once hotplugged, new correct mapping will be built for them.
		 */
		if (node != NUMA_NO_NODE)
			set_cpu_numa_node(cpu, node);
		else {
			set_cpu_numa_node(cpu, rr_node);
			rr_node = next_node_in(rr_node, node_online_map);
		}
	}
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	loongson_prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
	set_cpu_core_map(0);
	calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	loongson_boot_secondary(cpu, tidle);

	/* Wait for CPU to start and be ready to sync counters */
	if (!wait_for_completion_timeout(&cpu_starting,
					 msecs_to_jiffies(5000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}

	/* Wait for CPU to finish startup & mark itself online before return */
	wait_for_completion(&cpu_running);

	return 0;
}

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu;

	sync_counter();
	cpu = raw_smp_processor_id();
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_probe();
	constant_clockevent_init();
	loongson_init_secondary();

	set_cpu_sibling_map(cpu);
	set_cpu_core_map(cpu);

	notify_cpu_starting(cpu);

	/* Notify boot CPU that we're starting */
	complete(&cpu_starting);

	/* The CPU is running, now mark it online */
	set_cpu_online(cpu, true);

	calculate_cpu_foreign_map();

	/*
	 * Notify boot CPU that we're up & online and it can safely return
	 * from __cpu_up()
	 */
	complete(&cpu_running);

	/*
	 * irq will be enabled in loongson_smp_finish(), enabling it too
	 * early is dangerous.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	loongson_smp_finish();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}
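
/* IPI callback for smp_send_stop(): take this CPU offline and spin with interrupts disabled */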
static void stop_this_cpu(void *dummy)
{
	set_cpu_online(smp_processor_id(), false);
	calculate_cpu_foreign_map();
	local_irq_disable();
	while (true);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

#ifdef CONFIG_PROFILING
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
#endif
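
/*
 * Cross-CPU TLB maintenance: each helper below either IPIs the CPUs that may
 * hold stale entries, or, when the mm is only active on the current CPU,
 * invalidates the other CPUs' ASIDs and flushes locally.
 */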
static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	local_flush_tlb_mm((struct mm_struct *)mm);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	if (atomic_read(&mm->mm_users) == 0)
		return;	/* happens as a result of exit_mmap() */

	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		on_each_cpu_mask(mm_cpumask(mm), flush_tlb_mm_ipi, mm, 1);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
		local_flush_tlb_mm(mm);
	}

	preempt_enable();
}
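
/* Argument block handed to the ranged/per-page TLB-flush IPI handlers */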
struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		on_each_cpu_mask(mm_cpumask(mm), flush_tlb_range_ipi, &fd, 1);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				cpu_context(cpu, mm) = 0;
		}
		local_flush_tlb_range(vma, start, end);
	}

	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	preempt_disable();

	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		on_each_cpu_mask(mm_cpumask(vma->vm_mm), flush_tlb_page_ipi, &fd, 1);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
				cpu_context(cpu, vma->vm_mm) = 0;
		}
		local_flush_tlb_page(vma, page);
	}

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	on_each_cpu(flush_tlb_one_ipi, (void *)vaddr, 1);
}
EXPORT_SYMBOL(flush_tlb_one);