/*
 * SMP related functions
 *
 * Copyright IBM Corp. 1999, 2012
 * Author(s): Denis Joseph Barrow,
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 * based on other smp stuff by
 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
 * (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers, only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */
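
/*
 * (Illustrative note, not part of the original header:) the translation
 * table is the pcpu_devices[] array below -- logical cpu N's physical
 * address is pcpu_devices[N].address, and smp_find_processor_id() does
 * the reverse lookup.
 */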
#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/crash_dump.h>
#include <asm/asm-offsets.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/vtimer.h>
#include <asm/lowcore.h>
#include <asm/debug.h>
#include <asm/os_info.h>
	ec_call_function_single,

struct pcpu {
	struct _lowcore *lowcore;	/* lowcore page(s) for the cpu */
	unsigned long async_stack;	/* async stack for the cpu */
	unsigned long panic_stack;	/* panic stack for the cpu */
	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
	int state;			/* physical cpu state */
	int polarization;		/* physical polarization */
	u16 address;			/* physical cpu address */
};

static u8 boot_cpu_type;
static struct pcpu pcpu_devices[NR_CPUS];

unsigned int smp_cpu_mt_shift;
EXPORT_SYMBOL(smp_cpu_mt_shift);

unsigned int smp_cpu_mtid;
EXPORT_SYMBOL(smp_cpu_mtid);
static unsigned int smp_max_threads __initdata = -1U;

static int __init early_nosmt(char *s)

early_param("nosmt", early_nosmt);

static int __init early_smt(char *s)
	get_option(&s, &smp_max_threads);

early_param("smt", early_smt);
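
/*
 * Usage sketch (illustrative): given the get_option() call above,
 * booting with "smt=2" caps the machine at two threads per core, while
 * "nosmt" (whose handler body is not shown here) disables SMT entirely.
 */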
/*
 * The smp_cpu_state_mutex must be held when changing the state or polarization
 * member of a pcpu data structure within the pcpu_devices array.
 */
DEFINE_MUTEX(smp_cpu_state_mutex);
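
/*
 * Illustrative sketch of the locking convention stated above (not part
 * of the original file):
 *
 *	mutex_lock(&smp_cpu_state_mutex);
 *	pcpu_devices[cpu].state = CPU_STATE_STANDBY;
 *	smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
 *	mutex_unlock(&smp_cpu_state_mutex);
 */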
/*
 * Signal processor helper functions.
 */
static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm,
		cc = __pcpu_sigp(addr, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)

static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
	for (retry = 0; ; retry++) {
		cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)

static inline int pcpu_stopped(struct pcpu *pcpu)
	u32 uninitialized_var(status);

	if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
			0, &status) != SIGP_CC_STATUS_STORED)
	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));

static inline int pcpu_running(struct pcpu *pcpu)
	if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
			0, NULL) != SIGP_CC_STATUS_STORED)
	/* Status stored condition code is equivalent to cpu not running. */
/*
 * Find struct pcpu by cpu address.
 */
static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
	for_each_cpu(cpu, mask)
		if (pcpu_devices[cpu].address == address)
			return pcpu_devices + cpu;

static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
	if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
	order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
	pcpu_sigp_retry(pcpu, order, 0);
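
/*
 * (Descriptive note:) the test_and_set_bit() in pcpu_ec_call() coalesces
 * requests -- if the bit was already pending, a signal is on its way and
 * no second sigp is needed. A running CPU is poked with an external
 * call, a waiting one with an emergency signal, presumably because the
 * emergency signal also wakes a CPU from enabled wait.
 */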
static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
	if (pcpu != &pcpu_devices[0]) {
		pcpu->lowcore = (struct _lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
		pcpu->async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		pcpu->panic_stack = __get_free_page(GFP_KERNEL);
		if (!pcpu->lowcore || !pcpu->panic_stack || !pcpu->async_stack)
	memcpy(lc, &S390_lowcore, 512);
	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
	lc->async_stack = pcpu->async_stack + ASYNC_SIZE
		- STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->panic_stack = pcpu->panic_stack + PAGE_SIZE
		- STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	if (MACHINE_HAS_IEEE) {
		lc->extended_save_area_addr = get_zeroed_page(GFP_KERNEL);
		if (!lc->extended_save_area_addr)
	lc->vector_save_area_addr =
		(unsigned long) &lc->vector_save_area;
	if (vdso_alloc_per_cpu(lc))
	lowcore_ptr[cpu] = lc;
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
	if (pcpu != &pcpu_devices[0]) {
		free_page(pcpu->panic_stack);
		free_pages(pcpu->async_stack, ASYNC_ORDER);
		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
#ifdef CONFIG_HOTPLUG_CPU

static void pcpu_free_lowcore(struct pcpu *pcpu)
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
	lowcore_ptr[pcpu - pcpu_devices] = NULL;
	if (MACHINE_HAS_IEEE) {
		struct _lowcore *lc = pcpu->lowcore;

		free_page((unsigned long) lc->extended_save_area_addr);
		lc->extended_save_area_addr = 0;
	vdso_free_per_cpu(pcpu->lowcore);
	if (pcpu != &pcpu_devices[0]) {
		free_page(pcpu->panic_stack);
		free_pages(pcpu->async_stack, ASYNC_ORDER);
		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);

#endif /* CONFIG_HOTPLUG_CPU */
static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
	struct _lowcore *lc = pcpu->lowcore;

	if (MACHINE_HAS_TLB_LC)
		cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
	atomic_inc(&init_mm.context.attach_count);
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->percpu_offset = __per_cpu_offset[cpu];
	lc->kernel_asce = S390_lowcore.kernel_asce;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->user_timer = lc->system_timer = lc->steal_timer = 0;
	__ctl_store(lc->cregs_save_area, 0, 15);
	save_access_regs((unsigned int *) lc->access_regs_save_area);
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,

static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
	struct _lowcore *lc = pcpu->lowcore;
	struct thread_info *ti = task_thread_info(tsk);

	lc->kernel_stack = (unsigned long) task_stack_page(tsk)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->thread_info = (unsigned long) task_thread_info(tsk);
	lc->current_task = (unsigned long) tsk;
	lc->user_timer = ti->user_timer;
	lc->system_timer = ti->system_timer;
static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
	struct _lowcore *lc = pcpu->lowcore;

	lc->restart_stack = lc->kernel_stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	lc->restart_source = -1UL;
	pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);

/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
			  void *data, unsigned long stack)
	struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
	unsigned long source_cpu = stap();

	__load_psw_mask(PSW_KERNEL_BITS);
	if (pcpu->address == source_cpu)
		func(data);	/* should not return */
	/* Stop target cpu (if func returns this stops the current cpu). */
	pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
	/* Restart func on the target cpu and stop the current cpu. */
	mem_assign_absolute(lc->restart_stack, stack);
	mem_assign_absolute(lc->restart_fn, (unsigned long) func);
	mem_assign_absolute(lc->restart_data, (unsigned long) data);
	mem_assign_absolute(lc->restart_source, source_cpu);
	asm volatile(
		"0:	sigp	0,%0,%2	# sigp restart to target cpu\n"
		"	brc	2,0b	# busy, try again\n"
		"1:	sigp	0,%1,%3	# sigp stop to current cpu\n"
		"	brc	2,1b	# busy, try again\n"
		: : "d" (pcpu->address), "d" (source_cpu),
		    "K" (SIGP_RESTART), "K" (SIGP_STOP)
/*
 * Enable additional logical cpus for multi-threading.
 */
static int pcpu_set_smt(unsigned int mtid)
	register unsigned long reg1 asm ("1") = (unsigned long) mtid;

	if (smp_cpu_mtid == mtid)
	asm volatile(
		"	sigp	%1,0,%2	# sigp set multi-threading\n"
		: "=d" (cc) : "d" (reg1), "K" (SIGP_SET_MULTI_THREADING)
	smp_cpu_mt_shift = 0;
	while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
	pcpu_devices[0].address = stap();
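
/*
 * Addressing example (sketch): with mtid = 1 a core runs two threads,
 * smp_cpu_mt_shift ends up as 1, and a physical CPU address is composed
 * as (core_id << smp_cpu_mt_shift) + thread -- see smp_store_cpu_states()
 * and __smp_rescan_cpus() below.
 */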
/*
 * Call function on an online CPU.
 */
void smp_call_online_cpu(void (*func)(void *), void *data)
	/* Use the current cpu if it is online. */
	pcpu = pcpu_find_address(cpu_online_mask, stap());
		/* Use the first online cpu. */
		pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
	pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);

/*
 * Call function on the ipl CPU.
 */
void smp_call_ipl_cpu(void (*func)(void *), void *data)
	pcpu_delegate(&pcpu_devices[0], func, data,
		      pcpu_devices->panic_stack + PAGE_SIZE);

int smp_find_processor_id(u16 address)
	for_each_present_cpu(cpu)
		if (pcpu_devices[cpu].address == address)

int smp_vcpu_scheduled(int cpu)
	return pcpu_running(pcpu_devices + cpu);
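
/*
 * (Descriptive note, an assumption based on the diagnose codes used
 * below: "diag 9c" is a directed yield of the caller's time slice in
 * favor of the named physical CPU, "diag 44" an undirected voluntary
 * time slice end.)
 */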
void smp_yield_cpu(int cpu)
	if (MACHINE_HAS_DIAG9C)
		asm volatile("diag %0,0,0x9c"
			     : : "d" (pcpu_devices[cpu].address));
	else if (MACHINE_HAS_DIAG44)
		asm volatile("diag 0,0,0x44");
/*
 * Send cpus emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
static void smp_emergency_stop(cpumask_t *cpumask)
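	/* Grace period of roughly one second: the TOD clock carries 4096
	 * units per microsecond, hence the "<< 12" below (descriptive
	 * note, not in the original). */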
	end = get_tod_clock() + (1000000UL << 12);
	for_each_cpu(cpu, cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		set_bit(ec_stop_cpu, &pcpu->ec_mask);
		while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
				   0, NULL) == SIGP_CC_BUSY &&
		       get_tod_clock() < end)
	while (get_tod_clock() < end) {
		for_each_cpu(cpu, cpumask)
			if (pcpu_stopped(pcpu_devices + cpu))
				cpumask_clear_cpu(cpu, cpumask);
		if (cpumask_empty(cpumask))
/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
	/* Disable all interrupts/machine checks */
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	trace_hardirqs_off();

	debug_set_critical();
	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

	if (oops_in_progress)
		smp_emergency_stop(&cpumask);

	/* stop all processors */
	for_each_cpu(cpu, &cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
		while (!pcpu_stopped(pcpu))
/*
 * This is the main routine where commands issued by other
 * CPUs are handled.
 */
static void smp_handle_ext_call(void)
	/* handle bit signal external calls */
	bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
	if (test_bit(ec_stop_cpu, &bits))
	if (test_bit(ec_schedule, &bits))
	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();

static void do_ext_call_interrupt(struct ext_code ext_code,
				  unsigned int param32, unsigned long param64)
	inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
	smp_handle_ext_call();

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
	for_each_cpu(cpu, mask)
		pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);

void arch_send_call_function_single_ipi(int cpu)
	pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);

/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
static void smp_ptlb_callback(void *info)

void smp_ptlb_all(void)
	on_each_cpu(smp_ptlb_callback, NULL, 1);

EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */
/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long andval;

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];

	__ctl_store(cregs, 0, 15);
	cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
	__ctl_load(cregs, 0, 15);

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
	struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);

EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
	struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);

EXPORT_SYMBOL(smp_ctl_clear_bit);
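
/*
 * Usage sketch (illustrative, callers live elsewhere in the tree):
 *
 *	smp_ctl_set_bit(0, bit);	set a CR0 bit on every CPU
 *	smp_ctl_clear_bit(0, bit);	and clear it again
 *
 * The parms struct encodes both operations as an and/or mask pair, so
 * the single callback serves both directions.
 */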
#ifdef CONFIG_CRASH_DUMP

static inline void __smp_store_cpu_state(int cpu, u16 address, int is_boot_cpu)
	void *lc = pcpu_devices[0].lowcore;
	struct save_area_ext *sa_ext;

	sa_ext = dump_save_area_create(cpu);
		panic("could not allocate memory for save area\n");
	/* Copy the registers of the boot CPU. */
	copy_oldmem_page(1, (void *) &sa_ext->sa, sizeof(sa_ext->sa),
			 SAVE_AREA_BASE - PAGE_SIZE, 0);
		save_vx_regs_safe(sa_ext->vx_regs);
	/* Get the registers of a non-boot cpu. */
	__pcpu_sigp_relax(address, SIGP_STOP_AND_STORE_STATUS, 0, NULL);
	memcpy_real(&sa_ext->sa, lc + SAVE_AREA_BASE, sizeof(sa_ext->sa));
	/* Get the VX registers */
	vx_sa = __get_free_page(GFP_KERNEL);
		panic("could not allocate memory for VX save area\n");
	__pcpu_sigp_relax(address, SIGP_STORE_ADDITIONAL_STATUS, vx_sa, NULL);
	memcpy(sa_ext->vx_regs, (void *) vx_sa, sizeof(sa_ext->vx_regs));
/*
 * Collect CPU state of the previous, crashed system.
 * There are four cases:
 * 1) standard zfcp dump
 *    condition: OLDMEM_BASE == NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The boot CPU state is located in
 *    the absolute lowcore of the memory stored in the HSA. The zcore code
 *    will allocate the save area and copy the boot CPU state from the HSA.
 * 2) stand-alone kdump for SCSI (zfcp dump with swapped memory)
 *    condition: OLDMEM_BASE != NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The firmware or the boot-loader
 *    stored the registers of the boot CPU in the absolute lowcore in the
 *    memory of the old system.
 * 3) kdump and the old kernel did not store the CPU state,
 *    or stand-alone kdump for DASD
 *    condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The kexec code or the boot-loader
 *    stored the registers of the boot CPU in the memory of the old system.
 * 4) kdump and the old kernel stored the CPU state
 *    condition: OLDMEM_BASE != NULL && is_kdump_kernel()
 *    The state of all CPUs is stored in ELF sections in the memory of the
 *    old system. The ELF sections are picked up by the crash_dump code
 *    via elfcorehdr_addr.
 */
static void __init smp_store_cpu_states(struct sclp_cpu_info *info)
	unsigned int cpu, address, i, j;

	if (is_kdump_kernel())
		/* Previous system stored the CPU states. Nothing to do. */
	if (!(OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP))
		/* No previous system present, normal boot. */
	/* Set multi-threading state to the previous system. */
	pcpu_set_smt(sclp_get_mtid_prev());
	/* Collect CPU states. */
	for (i = 0; i < info->configured; i++) {
		/* Skip CPUs with different CPU type. */
		if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
		for (j = 0; j <= smp_cpu_mtid; j++, cpu++) {
			address = (info->cpu[i].core_id << smp_cpu_mt_shift) + j;
			is_boot_cpu = (address == pcpu_devices[0].address);
			if (is_boot_cpu && !OLDMEM_BASE)
				/* Skip boot CPU for standard zfcp dump. */
			/* Get state for this CPU. */
			__smp_store_cpu_state(cpu, address, is_boot_cpu);

int smp_store_status(int cpu)
	pcpu = pcpu_devices + cpu;
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STOP_AND_STORE_STATUS,
			      0, NULL) != SIGP_CC_ORDER_CODE_ACCEPTED)
	vx_sa = __pa(pcpu->lowcore->vector_save_area_addr);
	__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,

#endif /* CONFIG_CRASH_DUMP */
void smp_cpu_set_polarization(int cpu, int val)
	pcpu_devices[cpu].polarization = val;

int smp_cpu_get_polarization(int cpu)
	return pcpu_devices[cpu].polarization;

static struct sclp_cpu_info *smp_get_cpu_info(void)
	static int use_sigp_detection;
	struct sclp_cpu_info *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info && (use_sigp_detection || sclp_get_cpu_info(info))) {
		use_sigp_detection = 1;
		for (address = 0; address <= MAX_CPU_ADDRESS;
		     address += (1U << smp_cpu_mt_shift)) {
			if (__pcpu_sigp_relax(address, SIGP_SENSE, 0, NULL) ==
			    SIGP_CC_NOT_OPERATIONAL)
			info->cpu[info->configured].core_id =
				address >> smp_cpu_mt_shift;
		info->combined = info->configured;
static int smp_add_present_cpu(int cpu);

static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add)
	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	cpu = cpumask_first(&avail);
	for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
		if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
		address = info->cpu[i].core_id << smp_cpu_mt_shift;
		for (j = 0; j <= smp_cpu_mtid; j++) {
			if (pcpu_find_address(cpu_present_mask, address + j))
			pcpu = pcpu_devices + cpu;
			pcpu->address = address + j;
			pcpu->state =
				(cpu >= info->configured * (smp_cpu_mtid + 1)) ?
				CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
			set_cpu_present(cpu, true);
			if (sysfs_add && smp_add_present_cpu(cpu) != 0)
				set_cpu_present(cpu, false);
			cpu = cpumask_next(cpu, &avail);
			if (cpu >= nr_cpu_ids)
static void __init smp_detect_cpus(void)
	unsigned int cpu, mtid, c_cpus, s_cpus;
	struct sclp_cpu_info *info;

	/* Get CPU information */
	info = smp_get_cpu_info();
		panic("smp_detect_cpus failed to allocate memory\n");

	/* Find boot CPU type */
	if (info->has_cpu_type) {
		for (cpu = 0; cpu < info->combined; cpu++)
			if (info->cpu[cpu].core_id == address) {
				/* The boot cpu dictates the cpu type. */
				boot_cpu_type = info->cpu[cpu].type;
		if (cpu >= info->combined)
			panic("Could not find boot CPU type");

#ifdef CONFIG_CRASH_DUMP
	/* Collect CPU state of previous system */
	smp_store_cpu_states(info);

	/* Set multi-threading state for the current system */
	mtid = sclp_get_mtid(boot_cpu_type);
	mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;

	/* Print number of CPUs */
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type)
		if (cpu < info->configured)
			c_cpus += smp_cpu_mtid + 1;
		else
			s_cpus += smp_cpu_mtid + 1;
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);

	/* Add CPUs present at boot */
	__smp_rescan_cpus(info, 0);
/*
 * Activate a secondary processor.
 */
static void smp_start_secondary(void *cpuvoid)
	S390_lowcore.last_update_clock = get_tod_clock();
	S390_lowcore.restart_stack = (unsigned long) restart_stack;
	S390_lowcore.restart_fn = (unsigned long) do_restart;
	S390_lowcore.restart_data = 0;
	S390_lowcore.restart_source = -1UL;
	restore_access_regs(S390_lowcore.access_regs_save_area);
	__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	notify_cpu_starting(smp_processor_id());
	set_cpu_online(smp_processor_id(), true);
	inc_irq_stat(CPU_RST);
	cpu_startup_entry(CPUHP_ONLINE);
/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
	pcpu = pcpu_devices + cpu;
	if (pcpu->state != CPU_STATE_CONFIGURED)
	base = cpu - (cpu % (smp_cpu_mtid + 1));
	for (i = 0; i <= smp_cpu_mtid; i++) {
		if (base + i < nr_cpu_ids)
			if (cpu_online(base + i))
	/*
	 * If this is the first CPU of the core to get online
	 * do an initial CPU reset.
	 */
	if (i > smp_cpu_mtid &&
	    pcpu_sigp_retry(pcpu_devices + base, SIGP_INITIAL_CPU_RESET, 0) !=
	    SIGP_CC_ORDER_CODE_ACCEPTED)
	rc = pcpu_alloc_lowcore(pcpu, cpu);
	pcpu_prepare_secondary(pcpu, cpu);
	pcpu_attach_task(pcpu, tidle);
	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
	while (!cpu_online(cpu))
static unsigned int setup_possible_cpus __initdata;

static int __init _setup_possible_cpus(char *s)
	get_option(&s, &setup_possible_cpus);

early_param("possible_cpus", _setup_possible_cpus);
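
/*
 * Usage sketch (illustrative): "possible_cpus=4" on the kernel command
 * line limits cpu_possible_mask to four CPUs; see
 * smp_fill_possible_mask() below for how it is combined with the SCLP
 * limits.
 */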
#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
	unsigned long cregs[16];

	/* Handle possible pending IPIs */
	smp_handle_ext_call();
	set_cpu_online(smp_processor_id(), false);
	/* Disable pseudo page faults on this cpu. */
	/* Disable interrupt sources via control register. */
	__ctl_store(cregs, 0, 15);
	cregs[0] &= ~0x0000ee70UL;	/* disable all external interrupts */
	cregs[6] &= ~0xff000000UL;	/* disable all I/O interrupts */
	cregs[14] &= ~0x1f000000UL;	/* disable most machine checks */
	__ctl_load(cregs, 0, 15);
	clear_cpu_flag(CIF_NOHZ_DELAY);

void __cpu_die(unsigned int cpu)
	/* Wait until target cpu is down */
	pcpu = pcpu_devices + cpu;
	while (!pcpu_stopped(pcpu))
	pcpu_free_lowcore(pcpu);
	atomic_dec(&init_mm.context.attach_count);
	cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
	if (MACHINE_HAS_TLB_LC)
		cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);

void __noreturn cpu_die(void)
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);

#endif /* CONFIG_HOTPLUG_CPU */
void __init smp_fill_possible_mask(void)
	unsigned int possible, sclp, cpu;

	sclp = min(smp_max_threads, sclp_get_mtid_max() + 1);
	sclp = sclp_get_max_cpu() * sclp ?: nr_cpu_ids;
	possible = setup_possible_cpus ?: nr_cpu_ids;
	possible = min(possible, sclp);
	for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);

void __init smp_prepare_cpus(unsigned int max_cpus)
	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1201");
	/* request the 0x1202 external call external interrupt */
	if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1202");
void __init smp_prepare_boot_cpu(void)
	struct pcpu *pcpu = pcpu_devices;

	pcpu->state = CPU_STATE_CONFIGURED;
	pcpu->address = stap();
	pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
	pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE
		+ STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
	pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE
		+ STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
	set_cpu_present(0, true);
	set_cpu_online(0, true);

void __init smp_cpus_done(unsigned int max_cpus)

void __init smp_setup_processor_id(void)
	S390_lowcore.cpu_nr = 0;
	S390_lowcore.spinlock_lockval = arch_spin_lockval(0);

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
	mutex_unlock(&smp_cpu_state_mutex);

static ssize_t cpu_configure_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
	if (sscanf(buf, "%d %c", &val, &delim) != 1)
	if (val != 0 && val != 1)

	mutex_lock(&smp_cpu_state_mutex);
	/* disallow configuration changes of online cpus and cpu 0 */
	cpu -= cpu % (smp_cpu_mtid + 1);
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_online(cpu + i))
	pcpu = pcpu_devices + cpu;
		if (pcpu->state != CPU_STATE_CONFIGURED)
		rc = sclp_cpu_deconfigure(pcpu->address >> smp_cpu_mt_shift);
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
			pcpu[i].state = CPU_STATE_STANDBY;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		topology_expect_change();
		if (pcpu->state != CPU_STATE_STANDBY)
		rc = sclp_cpu_configure(pcpu->address >> smp_cpu_mt_shift);
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
			pcpu[i].state = CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		topology_expect_change();
	mutex_unlock(&smp_cpu_state_mutex);
	return rc ? rc : count;

static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
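
/*
 * Usage sketch (illustrative): writing 0 or 1 to
 * /sys/devices/system/cpu/cpuN/configure deconfigures or configures the
 * whole core containing cpuN; all threads of the core must be offline
 * for the operation to succeed.
 */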
#endif /* CONFIG_HOTPLUG_CPU */

static ssize_t show_cpu_address(struct device *dev,
				struct device_attribute *attr, char *buf)
	return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);

static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&dev_attr_configure.attr,
#endif
	&dev_attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static struct attribute *cpu_online_attrs[] = {
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};

static int smp_cpu_notify(struct notifier_block *self, unsigned long action,
			  void *hcpu)
	unsigned int cpu = (unsigned int)(long)hcpu;
	struct cpu *c = pcpu_devices[cpu].cpu;
	struct device *s = &c->dev;

	switch (action & ~CPU_TASKS_FROZEN) {
		err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
	return notifier_from_errno(err);
static int smp_add_present_cpu(int cpu)
	c = kzalloc(sizeof(*c), GFP_KERNEL);
	pcpu_devices[cpu].cpu = c;
	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (cpu_online(cpu)) {
		rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
	rc = topology_cpu_init(c);
	if (cpu_online(cpu))
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
#ifdef CONFIG_HOTPLUG_CPU

#ifdef CONFIG_HOTPLUG_CPU

int __ref smp_rescan_cpus(void)
	struct sclp_cpu_info *info;

	info = smp_get_cpu_info();
	mutex_lock(&smp_cpu_state_mutex);
	nr = __smp_rescan_cpus(info, 1);
	mutex_unlock(&smp_cpu_state_mutex);
	topology_schedule_update();

static ssize_t __ref rescan_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
	rc = smp_rescan_cpus();
	return rc ? rc : count;

static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
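
/*
 * Usage sketch (illustrative): any write, e.g.
 * "echo 1 > /sys/devices/system/cpu/rescan", triggers smp_rescan_cpus()
 * and adds newly available CPUs to the present mask.
 */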
#endif /* CONFIG_HOTPLUG_CPU */
static int __init s390_smp_init(void)
#ifdef CONFIG_HOTPLUG_CPU
	rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
#endif
	cpu_notifier_register_begin();
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
	__hotcpu_notifier(smp_cpu_notify, 0);
	cpu_notifier_register_done();

subsys_initcall(s390_smp_init);