arch/loongarch/kernel/smp.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/export.h>
#include <linux/syscore_ops.h>
#include <linux/time.h>
#include <linux/tracepoint.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>

#include <asm/cpu.h>
#include <asm/idle.h>
#include <asm/loongson.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/time.h>

int __cpu_number_map[NR_CPUS];   /* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];         /* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Representing the threads (siblings) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* Representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

static DECLARE_COMPLETION(cpu_starting);
static DECLARE_COMPLETION(cpu_running);

/*
 * A logical cpu mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;

struct secondary_data cpuboot_data;
static DEFINE_PER_CPU(int, cpu_state);

enum ipi_msg_type {
        IPI_RESCHEDULE,
        IPI_CALL_FUNCTION,
};

static const char *ipi_types[NR_IPI] __tracepoint_string = {
        [IPI_RESCHEDULE] = "Rescheduling interrupts",
        [IPI_CALL_FUNCTION] = "Function call interrupts",
};

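/* Dump the per-CPU count of each IPI type (used by /proc/interrupts) */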
void show_ipi_list(struct seq_file *p, int prec)
{
        unsigned int cpu, i;

        for (i = 0; i < NR_IPI; i++) {
                seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i, prec >= 4 ? " " : "");
                for_each_online_cpu(cpu)
                        seq_printf(p, "%10u ", per_cpu(irq_stat, cpu).ipi_irqs[i]);
                seq_printf(p, " LoongArch  %d  %s\n", i + 1, ipi_types[i]);
        }
}

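/*
 * Record @cpu in the core map: it becomes a core sibling of every CPU
 * already set up in the same physical package, and vice versa.
 */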
static inline void set_cpu_core_map(int cpu)
{
        int i;

        cpumask_set_cpu(cpu, &cpu_core_setup_map);

        for_each_cpu(i, &cpu_core_setup_map) {
                if (cpu_data[cpu].package == cpu_data[i].package) {
                        cpumask_set_cpu(i, &cpu_core_map[cpu]);
                        cpumask_set_cpu(cpu, &cpu_core_map[i]);
                }
        }
}

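/*
 * Record @cpu in the sibling map: it becomes a sibling of every CPU
 * already set up that shares its core, and vice versa.
 */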
static inline void set_cpu_sibling_map(int cpu)
{
        int i;

        cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

        for_each_cpu(i, &cpu_sibling_setup_map) {
                if (cpus_are_siblings(cpu, i)) {
                        cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
                        cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
                }
        }
}

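/* Undo set_cpu_sibling_map() when @cpu is taken down */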
static inline void clear_cpu_sibling_map(int cpu)
{
        int i;

        for_each_cpu(i, &cpu_sibling_setup_map) {
                if (cpus_are_siblings(cpu, i)) {
                        cpumask_clear_cpu(i, &cpu_sibling_map[cpu]);
                        cpumask_clear_cpu(cpu, &cpu_sibling_map[i]);
                }
        }

        cpumask_clear_cpu(cpu, &cpu_sibling_setup_map);
}

/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears.
 */
void calculate_cpu_foreign_map(void)
{
        int i, k, core_present;
        cpumask_t temp_foreign_map;

        /* Re-calculate the mask */
        cpumask_clear(&temp_foreign_map);
        for_each_online_cpu(i) {
                core_present = 0;
                for_each_cpu(k, &temp_foreign_map)
                        if (cpus_are_siblings(i, k))
                                core_present = 1;
                if (!core_present)
                        cpumask_set_cpu(i, &temp_foreign_map);
        }

        for_each_online_cpu(i)
                cpumask_andnot(&cpu_foreign_map[i],
                               &temp_foreign_map, &cpu_sibling_map[i]);
}

/* Send mailbox buffer via Mail_Send */
static void csr_mail_send(uint64_t data, int cpu, int mailbox)
{
        uint64_t val;

        /* Send high 32 bits */
        val = IOCSR_MBUF_SEND_BLOCKING;
        val |= (IOCSR_MBUF_SEND_BOX_HI(mailbox) << IOCSR_MBUF_SEND_BOX_SHIFT);
        val |= (cpu << IOCSR_MBUF_SEND_CPU_SHIFT);
        val |= (data & IOCSR_MBUF_SEND_H32_MASK);
        iocsr_write64(val, LOONGARCH_IOCSR_MBUF_SEND);

        /* Send low 32 bits */
        val = IOCSR_MBUF_SEND_BLOCKING;
        val |= (IOCSR_MBUF_SEND_BOX_LO(mailbox) << IOCSR_MBUF_SEND_BOX_SHIFT);
        val |= (cpu << IOCSR_MBUF_SEND_CPU_SHIFT);
        val |= (data << IOCSR_MBUF_SEND_BUF_SHIFT);
        iocsr_write64(val, LOONGARCH_IOCSR_MBUF_SEND);
}

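/* Read this CPU's pending IPI actions and acknowledge them in one go */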
static u32 ipi_read_clear(int cpu)
{
        u32 action;

        /* Load the ipi register to figure out what we're supposed to do */
        action = iocsr_read32(LOONGARCH_IOCSR_IPI_STATUS);
        /* Clear the ipi register to clear the interrupt */
        iocsr_write32(action, LOONGARCH_IOCSR_IPI_CLEAR);
        wbflush();

        return action;
}

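/* Raise one IPI per bit set in @action on the CPU with physical id @cpu */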
static void ipi_write_action(int cpu, u32 action)
{
        unsigned int irq = 0;

        while ((irq = ffs(action))) {
                uint32_t val = IOCSR_IPI_SEND_BLOCKING;

                val |= (irq - 1);
                val |= (cpu << IOCSR_IPI_SEND_CPU_SHIFT);
                iocsr_write32(val, LOONGARCH_IOCSR_IPI_SEND);
                action &= ~BIT(irq - 1);
        }
}

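/* Send the given IPI action(s) to a single CPU, identified by its logical id */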
void loongson_send_ipi_single(int cpu, unsigned int action)
{
        ipi_write_action(cpu_logical_map(cpu), (u32)action);
}

void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
        unsigned int i;

        for_each_cpu(i, mask)
                ipi_write_action(cpu_logical_map(i), (u32)action);
}

/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void arch_smp_send_reschedule(int cpu)
{
        loongson_send_ipi_single(cpu, SMP_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);

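/*
 * Common IPI handler: acknowledge the pending actions, then dispatch them
 * to the scheduler and/or the generic SMP call-function code and account
 * them in the per-CPU statistics.
 */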
irqreturn_t loongson_ipi_interrupt(int irq, void *dev)
{
        unsigned int action;
        unsigned int cpu = smp_processor_id();

        action = ipi_read_clear(cpu_logical_map(cpu));

        if (action & SMP_RESCHEDULE) {
                scheduler_ipi();
                per_cpu(irq_stat, cpu).ipi_irqs[IPI_RESCHEDULE]++;
        }

        if (action & SMP_CALL_FUNCTION) {
                generic_smp_call_function_interrupt();
                per_cpu(irq_stat, cpu).ipi_irqs[IPI_CALL_FUNCTION]++;
        }

        return IRQ_HANDLED;
}

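/*
 * Enumerate CPUs from the device tree: build the physical<->logical id
 * maps and mark every discovered CPU possible and present.
 */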
static void __init fdt_smp_setup(void)
{
#ifdef CONFIG_OF
        unsigned int cpu, cpuid;
        struct device_node *node = NULL;

        for_each_of_cpu_node(node) {
                if (!of_device_is_available(node))
                        continue;

                cpuid = of_get_cpu_hwid(node, 0);
                if (cpuid >= nr_cpu_ids)
                        continue;

                if (cpuid == loongson_sysconf.boot_cpu_id) {
                        cpu = 0;
                        numa_add_cpu(cpu);
                } else {
                        cpu = cpumask_next_zero(-1, cpu_present_mask);
                }

                num_processors++;
                set_cpu_possible(cpu, true);
                set_cpu_present(cpu, true);
                __cpu_number_map[cpuid] = cpu;
                __cpu_logical_map[cpu] = cpuid;
        }

        loongson_sysconf.nr_cpus = num_processors;
        set_bit(0, loongson_sysconf.cores_io_master);
#endif
}

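/*
 * Early SMP setup on the boot CPU: enumerate CPUs described in the
 * device tree (if any), derive the boot CPU's core/package ids and
 * unmask all IPI sources.
 */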
void __init loongson_smp_setup(void)
{
        fdt_smp_setup();

        if (loongson_sysconf.cores_per_package == 0)
                loongson_sysconf.cores_per_package = num_processors;

        cpu_data[0].core = cpu_logical_map(0) % loongson_sysconf.cores_per_package;
        cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package;

        iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
        pr_info("Detected %i available CPU(s)\n", loongson_sysconf.nr_cpus);
}

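/*
 * Mark every configured CPU present, clear its boot mailbox and record
 * its global (physical) id; the calling (boot) CPU is flagged online.
 */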
void __init loongson_prepare_cpus(unsigned int max_cpus)
{
        int i = 0;

        parse_acpi_topology();

        for (i = 0; i < loongson_sysconf.nr_cpus; i++) {
                set_cpu_present(i, true);
                csr_mail_send(0, __cpu_logical_map[i], 0);
                cpu_data[i].global_id = __cpu_logical_map[i];
        }

        per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
}

/*
 * Setup the PC, SP, and TP of a secondary processor and start it running!
 */
void loongson_boot_secondary(int cpu, struct task_struct *idle)
{
        unsigned long entry;

        pr_info("Booting CPU#%d...\n", cpu);

        entry = __pa_symbol((unsigned long)&smpboot_entry);
        cpuboot_data.stack = (unsigned long)__KSTK_TOS(idle);
        cpuboot_data.thread_info = (unsigned long)task_thread_info(idle);

        csr_mail_send(entry, cpu_logical_map(cpu), 0);

        loongson_send_ipi_single(cpu, SMP_BOOT_CPU);
}

/*
 * SMP init and finish on secondary CPUs
 */
void loongson_init_secondary(void)
{
        unsigned int cpu = smp_processor_id();
        unsigned int imask = ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 |
                             ECFGF_IPI | ECFGF_PMC | ECFGF_TIMER;

        change_csr_ecfg(ECFG0_IM, imask);

        iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);

#ifdef CONFIG_NUMA
        numa_add_cpu(cpu);
#endif
        per_cpu(cpu_state, cpu) = CPU_ONLINE;
        cpu_data[cpu].package =
                     cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
        cpu_data[cpu].core = pptt_enabled ? cpu_data[cpu].core :
                     cpu_logical_map(cpu) % loongson_sysconf.cores_per_package;
}

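/* Final step of CPU bring-up: enable interrupts and clear the boot mailbox */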
void loongson_smp_finish(void)
{
        local_irq_enable();
        iocsr_write64(0, LOONGARCH_IOCSR_MBUF0);
        pr_info("CPU#%d finished\n", smp_processor_id());
}

#ifdef CONFIG_HOTPLUG_CPU

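/*
 * Take the calling CPU out of service: I/O master CPUs cannot be
 * offlined; otherwise mark the CPU offline, drop it from the sibling
 * and foreign maps, migrate its interrupts away, mask its exceptions
 * and flush its TLB.
 */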
int loongson_cpu_disable(void)
{
        unsigned long flags;
        unsigned int cpu = smp_processor_id();

        if (io_master(cpu))
                return -EBUSY;

#ifdef CONFIG_NUMA
        numa_remove_cpu(cpu);
#endif
        set_cpu_online(cpu, false);
        clear_cpu_sibling_map(cpu);
        calculate_cpu_foreign_map();
        local_irq_save(flags);
        irq_migrate_all_off_this_cpu();
        clear_csr_ecfg(ECFG0_IM);
        local_irq_restore(flags);
        local_flush_tlb_all();

        return 0;
}

void loongson_cpu_die(unsigned int cpu)
{
        while (per_cpu(cpu_state, cpu) != CPU_DEAD)
                cpu_relax();

        mb();
}

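/*
 * Final code run by a dead CPU: idle until a new kernel entry address
 * shows up in mailbox 0, then jump to it with interrupts disabled.
 */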
void __noreturn arch_cpu_idle_dead(void)
{
        register uint64_t addr;
        register void (*init_fn)(void);

        idle_task_exit();
        local_irq_enable();
        set_csr_ecfg(ECFGF_IPI);
        __this_cpu_write(cpu_state, CPU_DEAD);

        __smp_mb();
        do {
                __asm__ __volatile__("idle 0\n\t");
                addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
        } while (addr == 0);

        local_irq_disable();
        init_fn = (void *)TO_CACHE(addr);
        iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);

        init_fn();
        BUG();
}

#endif

/*
 * Power management
 */
#ifdef CONFIG_PM

static int loongson_ipi_suspend(void)
{
        return 0;
}

static void loongson_ipi_resume(void)
{
        iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
}

static struct syscore_ops loongson_ipi_syscore_ops = {
        .resume         = loongson_ipi_resume,
        .suspend        = loongson_ipi_suspend,
};

/*
 * Enable boot cpu ipi before enabling nonboot cpus
 * during syscore_resume.
 */
static int __init ipi_pm_init(void)
{
        register_syscore_ops(&loongson_ipi_syscore_ops);
        return 0;
}

core_initcall(ipi_pm_init);
#endif

/* Preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
        unsigned int cpu, node, rr_node;

        set_cpu_possible(0, true);
        set_cpu_online(0, true);
        set_my_cpu_offset(per_cpu_offset(0));

        rr_node = first_node(node_online_map);
        for_each_possible_cpu(cpu) {
                node = early_cpu_to_node(cpu);

                /*
                 * The mapping between present cpus and nodes has been
                 * built during MADT and SRAT parsing.
                 *
                 * If possible cpus = present cpus here, early_cpu_to_node
                 * will return a valid node.
                 *
                 * If possible cpus > present cpus here (e.g. some possible
                 * cpus will be added by cpu-hotplug later), then for possible
                 * but not present cpus, early_cpu_to_node will return
                 * NUMA_NO_NODE, and we just map them to online nodes in a
                 * round-robin way. Once hotplugged, the correct mapping will
                 * be built for them.
                 */
                if (node != NUMA_NO_NODE)
                        set_cpu_numa_node(cpu, node);
                else {
                        set_cpu_numa_node(cpu, rr_node);
                        rr_node = next_node_in(rr_node, node_online_map);
                }
        }
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        init_new_context(current, &init_mm);
        current_thread_info()->cpu = 0;
        loongson_prepare_cpus(max_cpus);
        set_cpu_sibling_map(0);
        set_cpu_core_map(0);
        calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
        init_cpu_present(cpu_possible_mask);
#endif
}

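/* Boot a secondary CPU and wait until it has marked itself online */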
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        loongson_boot_secondary(cpu, tidle);

        /* Wait for CPU to start and be ready to sync counters */
        if (!wait_for_completion_timeout(&cpu_starting,
                                         msecs_to_jiffies(5000))) {
                pr_crit("CPU%u: failed to start\n", cpu);
                return -EIO;
        }

        /* Wait for CPU to finish startup & mark itself online before return */
        wait_for_completion(&cpu_running);

        return 0;
}

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
        unsigned int cpu;

        sync_counter();
        cpu = raw_smp_processor_id();
        set_my_cpu_offset(per_cpu_offset(cpu));

        cpu_probe();
        constant_clockevent_init();
        loongson_init_secondary();

        set_cpu_sibling_map(cpu);
        set_cpu_core_map(cpu);

        notify_cpu_starting(cpu);

        /* Notify boot CPU that we're starting */
        complete(&cpu_starting);

        /* The CPU is running, now mark it online */
        set_cpu_online(cpu, true);

        calculate_cpu_foreign_map();

        /*
         * Notify boot CPU that we're up & online and it can safely return
         * from __cpu_up()
         */
        complete(&cpu_running);

        /*
         * irq will be enabled in loongson_smp_finish(), enabling it too
         * early is dangerous.
         */
        WARN_ON_ONCE(!irqs_disabled());
        loongson_smp_finish();

        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

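/* IPI callback for smp_send_stop(): park the CPU with interrupts disabled */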
static void stop_this_cpu(void *dummy)
{
        set_cpu_online(smp_processor_id(), false);
        calculate_cpu_foreign_map();
        local_irq_disable();
        while (true);
}

void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 0);
}

#ifdef CONFIG_PROFILING
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}
#endif

static void flush_tlb_all_ipi(void *info)
{
        local_flush_tlb_all();
}

void flush_tlb_all(void)
{
        on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
        local_flush_tlb_mm((struct mm_struct *)mm);
}

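/*
 * Flush an mm: if other CPUs may be using it, flush them via IPI;
 * otherwise flush only locally and zero the mm's context (ASID) on the
 * other CPUs so that a fresh one is allocated when they next use it.
 */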
void flush_tlb_mm(struct mm_struct *mm)
{
        if (atomic_read(&mm->mm_users) == 0)
                return;         /* happens as a result of exit_mmap() */

        preempt_disable();

        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                on_each_cpu_mask(mm_cpumask(mm), flush_tlb_mm_ipi, mm, 1);
        } else {
                unsigned int cpu;

                for_each_online_cpu(cpu) {
                        if (cpu != smp_processor_id() && cpu_context(cpu, mm))
                                cpu_context(cpu, mm) = 0;
                }
                local_flush_tlb_mm(mm);
        }

        preempt_enable();
}

struct flush_tlb_data {
        struct vm_area_struct *vma;
        unsigned long addr1;
        unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();
        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                struct flush_tlb_data fd = {
                        .vma = vma,
                        .addr1 = start,
                        .addr2 = end,
                };

                on_each_cpu_mask(mm_cpumask(mm), flush_tlb_range_ipi, &fd, 1);
        } else {
                unsigned int cpu;

                for_each_online_cpu(cpu) {
                        if (cpu != smp_processor_id() && cpu_context(cpu, mm))
                                cpu_context(cpu, mm) = 0;
                }
                local_flush_tlb_range(vma, start, end);
        }
        preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        struct flush_tlb_data fd = {
                .addr1 = start,
                .addr2 = end,
        };

        on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        preempt_disable();
        if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
                struct flush_tlb_data fd = {
                        .vma = vma,
                        .addr1 = page,
                };

                on_each_cpu_mask(mm_cpumask(vma->vm_mm), flush_tlb_page_ipi, &fd, 1);
        } else {
                unsigned int cpu;

                for_each_online_cpu(cpu) {
                        if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
                                cpu_context(cpu, vma->vm_mm) = 0;
                }
                local_flush_tlb_page(vma, page);
        }
        preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

static void flush_tlb_one_ipi(void *info)
{
        unsigned long vaddr = (unsigned long) info;

        local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
        on_each_cpu(flush_tlb_one_ipi, (void *)vaddr, 1);
}
EXPORT_SYMBOL(flush_tlb_one);