// SPDX-License-Identifier: GPL-2.0
/*
 *  SMP related functions
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Denis Joseph Barrow,
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers, only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/workqueue.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/kmemleak.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/crash_dump.h>
#include <linux/memblock.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
#include <asm/vtimer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/os_info.h>
#include <asm/sigp.h>
#include <asm/idle.h>
#include <asm/nmi.h>
#include <asm/topology.h>
#include "entry.h"

enum {
	ec_schedule = 0,
	ec_call_function_single,
	ec_stop_cpu,
};

enum {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

static DEFINE_PER_CPU(struct cpu *, cpu_device);

struct pcpu {
	struct lowcore *lowcore;	/* lowcore page(s) for the cpu */
	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
	unsigned long ec_clk;		/* sigp timestamp for ec_xxx */
	signed char state;		/* physical cpu state */
	signed char polarization;	/* physical polarization */
	u16 address;			/* physical cpu address */
};

static u8 boot_core_type;
static struct pcpu pcpu_devices[NR_CPUS];

static struct kmem_cache *pcpu_mcesa_cache;

unsigned int smp_cpu_mt_shift;
EXPORT_SYMBOL(smp_cpu_mt_shift);

unsigned int smp_cpu_mtid;
EXPORT_SYMBOL(smp_cpu_mtid);
#ifdef CONFIG_CRASH_DUMP
__vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
#endif

static unsigned int smp_max_threads __initdata = -1U;

static int __init early_nosmt(char *s)
{
	smp_max_threads = 1;
	return 0;
}
early_param("nosmt", early_nosmt);

static int __init early_smt(char *s)
{
	get_option(&s, &smp_max_threads);
	return 0;
}
early_param("smt", early_smt);

/*
 * The smp_cpu_state_mutex must be held when changing the state or polarization
 * member of a pcpu data structure within the pcpu_devices array.
 */
DEFINE_MUTEX(smp_cpu_state_mutex);
/*
 * Signal processor helper functions.
 */
static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm)
{
	int cc;

	while (1) {
		cc = __pcpu_sigp(addr, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			return cc;
		cpu_relax();
	}
}

static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
{
	int cc, retry;

	for (retry = 0; ; retry++) {
		cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			break;
		if (retry >= 3)
			udelay(10);
	}
	return cc;
}
static inline int pcpu_stopped(struct pcpu *pcpu)
{
	u32 uninitialized_var(status);

	if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
			0, &status) != SIGP_CC_STATUS_STORED)
		return 0;
	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}

static inline int pcpu_running(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
			0, NULL) != SIGP_CC_STATUS_STORED)
		return 1;
	/* Status stored condition code is equivalent to cpu not running. */
	return 0;
}
/*
 * Find struct pcpu by cpu address.
 */
static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
{
	int cpu;

	for_each_cpu(cpu, mask)
		if (pcpu_devices[cpu].address == address)
			return pcpu_devices + cpu;
	return NULL;
}
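/*
 * Post an ec bit for a target cpu and kick it: if the bit was not
 * already pending, deliver it via external call if the cpu is running,
 * or via emergency signal if it is waiting.
 */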
static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
	int order;

	if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
		return;
	order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
	pcpu->ec_clk = get_tod_clock_fast();
	pcpu_sigp_retry(pcpu, order, 0);
}
#define ASYNC_FRAME_OFFSET (ASYNC_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
#define PANIC_FRAME_OFFSET (PAGE_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
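/*
 * Allocate the lowcore, the async and panic stacks and, if the machine
 * has the vector or guarded storage facility, the machine check
 * extended save area for a cpu. The boot cpu reuses the areas that
 * were set up during early boot.
 */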
static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
	unsigned long async_stack, panic_stack;
	unsigned long mcesa_origin, mcesa_bits;
	struct lowcore *lc;

	mcesa_origin = mcesa_bits = 0;
	if (pcpu != &pcpu_devices[0]) {
		pcpu->lowcore = (struct lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
		async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		panic_stack = __get_free_page(GFP_KERNEL);
		if (!pcpu->lowcore || !panic_stack || !async_stack)
			goto out;
		if (MACHINE_HAS_VX || MACHINE_HAS_GS) {
			mcesa_origin = (unsigned long)
				kmem_cache_alloc(pcpu_mcesa_cache, GFP_KERNEL);
			if (!mcesa_origin)
				goto out;
			/* The pointer is stored with mcesa_bits ORed in */
			kmemleak_not_leak((void *) mcesa_origin);
			mcesa_bits = MACHINE_HAS_GS ? 11 : 0;
		}
	} else {
		async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET;
		panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET;
		mcesa_origin = pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK;
		mcesa_bits = pcpu->lowcore->mcesad & MCESA_LC_MASK;
	}
	lc = pcpu->lowcore;
	memcpy(lc, &S390_lowcore, 512);
	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
	lc->async_stack = async_stack + ASYNC_FRAME_OFFSET;
	lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
	lc->mcesad = mcesa_origin | mcesa_bits;
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	if (vdso_alloc_per_cpu(lc))
		goto out;
	lowcore_ptr[cpu] = lc;
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
	return 0;
out:
	if (pcpu != &pcpu_devices[0]) {
		if (mcesa_origin)
			kmem_cache_free(pcpu_mcesa_cache,
					(void *) mcesa_origin);
		free_page(panic_stack);
		free_pages(async_stack, ASYNC_ORDER);
		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
	}
	return -ENOMEM;
}
#ifdef CONFIG_HOTPLUG_CPU

static void pcpu_free_lowcore(struct pcpu *pcpu)
{
	unsigned long mcesa_origin;

	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
	lowcore_ptr[pcpu - pcpu_devices] = NULL;
	vdso_free_per_cpu(pcpu->lowcore);
	if (pcpu == &pcpu_devices[0])
		return;
	if (MACHINE_HAS_VX || MACHINE_HAS_GS) {
		mcesa_origin = pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK;
		kmem_cache_free(pcpu_mcesa_cache, (void *) mcesa_origin);
	}
	free_page(pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET);
	free_pages(pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET, ASYNC_ORDER);
	free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
}

#endif /* CONFIG_HOTPLUG_CPU */
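/*
 * Initialize the lowcore of a secondary cpu from the values of the
 * boot cpu before the new cpu is started.
 */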
static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
	struct lowcore *lc = pcpu->lowcore;

	cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->percpu_offset = __per_cpu_offset[cpu];
	lc->kernel_asce = S390_lowcore.kernel_asce;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->user_timer = lc->system_timer = lc->steal_timer = 0;
	__ctl_store(lc->cregs_save_area, 0, 15);
	save_access_regs((unsigned int *) lc->access_regs_save_area);
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       MAX_FACILITY_BIT/8);
}
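/*
 * Point the cpu's lowcore to the kernel stack, the task and the cputime
 * accounting fields of tsk.
 */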
static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
	struct lowcore *lc = pcpu->lowcore;

	lc->kernel_stack = (unsigned long) task_stack_page(tsk)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->current_task = (unsigned long) tsk;
	lc->lpp = LPP_MAGIC;
	lc->current_pid = tsk->pid;
	lc->user_timer = tsk->thread.user_timer;
	lc->guest_timer = tsk->thread.guest_timer;
	lc->system_timer = tsk->thread.system_timer;
	lc->hardirq_timer = tsk->thread.hardirq_timer;
	lc->softirq_timer = tsk->thread.softirq_timer;
	lc->steal_timer = 0;
}
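/* Let a stopped cpu enter func by means of a sigp restart. */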
static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
	struct lowcore *lc = pcpu->lowcore;

	lc->restart_stack = lc->kernel_stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	lc->restart_source = -1UL;
	pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
}
/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
			  void *data, unsigned long stack)
{
	struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
	unsigned long source_cpu = stap();

	__load_psw_mask(PSW_KERNEL_BITS);
	if (pcpu->address == source_cpu)
		func(data);	/* should not return */
	/* Stop target cpu (if func returns this stops the current cpu). */
	pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
	/* Restart func on the target cpu and stop the current cpu. */
	mem_assign_absolute(lc->restart_stack, stack);
	mem_assign_absolute(lc->restart_fn, (unsigned long) func);
	mem_assign_absolute(lc->restart_data, (unsigned long) data);
	mem_assign_absolute(lc->restart_source, source_cpu);
	asm volatile(
		"0:	sigp	0,%0,%2	# sigp restart to target cpu\n"
		"	brc	2,0b	# busy, try again\n"
		"1:	sigp	0,%1,%3	# sigp stop to current cpu\n"
		"	brc	2,1b	# busy, try again\n"
		: : "d" (pcpu->address), "d" (source_cpu),
		    "K" (SIGP_RESTART), "K" (SIGP_STOP)
		: "0", "1", "cc");
	for (;;) ;
}
/*
 * Enable additional logical cpus for multi-threading.
 */
static int pcpu_set_smt(unsigned int mtid)
{
	int cc;

	if (smp_cpu_mtid == mtid)
		return 0;
	cc = __pcpu_sigp(0, SIGP_SET_MULTI_THREADING, mtid, NULL);
	if (cc != SIGP_CC_ORDER_CODE_ACCEPTED)
		return cc;
	smp_cpu_mtid = mtid;
	smp_cpu_mt_shift = 0;
	while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
		smp_cpu_mt_shift++;
	pcpu_devices[0].address = stap();
	return 0;
}
/*
 * Call function on an online CPU.
 */
void smp_call_online_cpu(void (*func)(void *), void *data)
{
	struct pcpu *pcpu;

	/* Use the current cpu if it is online. */
	pcpu = pcpu_find_address(cpu_online_mask, stap());
	if (!pcpu)
		/* Use the first online cpu. */
		pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
	pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
}
/*
 * Call function on the ipl CPU.
 */
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
	pcpu_delegate(&pcpu_devices[0], func, data,
		      pcpu_devices->lowcore->panic_stack -
		      PANIC_FRAME_OFFSET + PAGE_SIZE);
}
int smp_find_processor_id(u16 address)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (pcpu_devices[cpu].address == address)
			return cpu;
	return -1;
}
bool arch_vcpu_is_preempted(int cpu)
{
	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
		return false;
	if (pcpu_running(pcpu_devices + cpu))
		return false;
	return true;
}
EXPORT_SYMBOL(arch_vcpu_is_preempted);
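/*
 * Give the hypervisor a scheduling hint: diag 0x9c yields to the
 * hypervisor in favor of the given cpu, diag 0x44 just gives up the
 * current time slice.
 */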
void smp_yield_cpu(int cpu)
{
	if (MACHINE_HAS_DIAG9C) {
		diag_stat_inc_norecursion(DIAG_STAT_X09C);
		asm volatile("diag %0,0,0x9c"
			     : : "d" (pcpu_devices[cpu].address));
	} else if (MACHINE_HAS_DIAG44) {
		diag_stat_inc_norecursion(DIAG_STAT_X044);
		asm volatile("diag 0,0,0x44");
	}
}
/*
 * Send cpus emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
static void smp_emergency_stop(cpumask_t *cpumask)
{
	u64 end;
	int cpu;

	end = get_tod_clock() + (1000000UL << 12);
	for_each_cpu(cpu, cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		set_bit(ec_stop_cpu, &pcpu->ec_mask);
		while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
				   0, NULL) == SIGP_CC_BUSY &&
		       get_tod_clock() < end)
			cpu_relax();
	}
	while (get_tod_clock() < end) {
		for_each_cpu(cpu, cpumask)
			if (pcpu_stopped(pcpu_devices + cpu))
				cpumask_clear_cpu(cpu, cpumask);
		if (cpumask_empty(cpumask))
			break;
		cpu_relax();
	}
}
/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
	cpumask_t cpumask;
	int cpu;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	trace_hardirqs_off();

	debug_set_critical();
	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

	if (oops_in_progress)
		smp_emergency_stop(&cpumask);

	/* stop all processors */
	for_each_cpu(cpu, &cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
		while (!pcpu_stopped(pcpu))
			cpu_relax();
	}
}
/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void smp_handle_ext_call(void)
{
	unsigned long bits;

	/* handle bit signal external calls */
	bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
	if (test_bit(ec_stop_cpu, &bits))
		smp_stop_cpu();
	if (test_bit(ec_schedule, &bits))
		scheduler_ipi();
	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
}
static void do_ext_call_interrupt(struct ext_code ext_code,
				  unsigned int param32, unsigned long param64)
{
	inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
	smp_handle_ext_call();
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

void arch_send_call_function_single_ipi(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}
/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
}
/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orval;
	unsigned long andval;
	int cr;
};
/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];

	__ctl_store(cregs, 0, 15);
	cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
	__ctl_load(cregs, 0, 15);
}
/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);
#ifdef CONFIG_CRASH_DUMP
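/*
 * Store the register state of a remote cpu into its lowcore save areas
 * by means of sigp store-status orders, so it can be picked up for a
 * crash dump.
 */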
int smp_store_status(int cpu)
{
	struct pcpu *pcpu = pcpu_devices + cpu;
	unsigned long pa;

	pa = __pa(&pcpu->lowcore->floating_pt_save_area);
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	if (!MACHINE_HAS_VX && !MACHINE_HAS_GS)
		return 0;
	pa = __pa(pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK);
	if (MACHINE_HAS_GS)
		pa |= pcpu->lowcore->mcesad & MCESA_LC_MASK;
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	return 0;
}
/*
 * Collect CPU state of the previous, crashed system.
 * There are four cases:
 * 1) standard zfcp dump
 *    condition: OLDMEM_BASE == NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The boot CPU state is located in
 *    the absolute lowcore of the memory stored in the HSA. The zcore code
 *    will copy the boot CPU state from the HSA.
 * 2) stand-alone kdump for SCSI (zfcp dump with swapped memory)
 *    condition: OLDMEM_BASE != NULL && ipl_info.type == IPL_TYPE_FCP_DUMP
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The firmware or the boot-loader
 *    stored the registers of the boot CPU in the absolute lowcore in the
 *    memory of the old system.
 * 3) kdump and the old kernel did not store the CPU state,
 *    or stand-alone kdump for DASD
 *    condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The kexec code or the boot-loader
 *    stored the registers of the boot CPU in the memory of the old system.
 * 4) kdump and the old kernel stored the CPU state
 *    condition: OLDMEM_BASE != NULL && is_kdump_kernel()
 *    This case does not exist for s390 anymore, setup_arch explicitly
 *    deactivates the elfcorehdr= kernel parameter
 */
static __init void smp_save_cpu_vxrs(struct save_area *sa, u16 addr,
				     bool is_boot_cpu, unsigned long page)
{
	__vector128 *vxrs = (__vector128 *) page;

	if (is_boot_cpu)
		vxrs = boot_cpu_vector_save_area;
	else
		__pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, page);
	save_area_add_vxrs(sa, vxrs);
}
static __init void smp_save_cpu_regs(struct save_area *sa, u16 addr,
				     bool is_boot_cpu, unsigned long page)
{
	void *regs = (void *) page;

	if (is_boot_cpu)
		copy_oldmem_kernel(regs, (void *) __LC_FPREGS_SAVE_AREA, 512);
	else
		__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, page);
	save_area_add_regs(sa, regs);
}
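/*
 * Walk all cpu addresses of the previous system, including all threads
 * per core (mtid_prev), and save the register state of each cpu that
 * answers sigp sense.
 */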
void __init smp_save_dump_cpus(void)
{
	int addr, boot_cpu_addr, max_cpu_addr;
	struct save_area *sa;
	unsigned long page;
	bool is_boot_cpu;

	if (!(OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP))
		/* No previous system present, normal boot. */
		return;
	/* Allocate a page as dumping area for the store status sigps */
	page = memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, 1UL << 31);
	/* Set multi-threading state to the previous system. */
	pcpu_set_smt(sclp.mtid_prev);
	boot_cpu_addr = stap();
	max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
	for (addr = 0; addr <= max_cpu_addr; addr++) {
		if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0) ==
		    SIGP_CC_NOT_OPERATIONAL)
			continue;
		is_boot_cpu = (addr == boot_cpu_addr);
		/* Allocate save area */
		sa = save_area_alloc(is_boot_cpu);
		if (!sa)
			panic("could not allocate memory for save area\n");
		if (MACHINE_HAS_VX)
			/* Get the vector registers */
			smp_save_cpu_vxrs(sa, addr, is_boot_cpu, page);
		/*
		 * For a zfcp dump OLDMEM_BASE == NULL and the registers
		 * of the boot CPU are stored in the HSA. To retrieve
		 * these registers an SCLP request is required which is
		 * done by drivers/s390/char/zcore.c:init_cpu_info()
		 */
		if (!is_boot_cpu || OLDMEM_BASE)
			/* Get the CPU registers */
			smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
	}
	memblock_free(page, PAGE_SIZE);
	diag308_reset();
	pcpu_set_smt(0);
}
#endif /* CONFIG_CRASH_DUMP */
void smp_cpu_set_polarization(int cpu, int val)
{
	pcpu_devices[cpu].polarization = val;
}

int smp_cpu_get_polarization(int cpu)
{
	return pcpu_devices[cpu].polarization;
}
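/*
 * Get the cpu topology from the SCLP layer; if that fails fall back,
 * once and for all, to probing all cpu addresses with sigp sense.
 */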
static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
{
	static int use_sigp_detection;
	int address;

	if (use_sigp_detection || sclp_get_core_info(info, early)) {
		use_sigp_detection = 1;
		for (address = 0;
		     address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
		     address += (1U << smp_cpu_mt_shift)) {
			if (__pcpu_sigp_relax(address, SIGP_SENSE, 0) ==
			    SIGP_CC_NOT_OPERATIONAL)
				continue;
			info->core[info->configured].core_id =
				address >> smp_cpu_mt_shift;
			info->configured++;
		}
		info->combined = info->configured;
	}
}
static int smp_add_present_cpu(int cpu);
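/*
 * Add the cpus described by info that are not yet in the present mask,
 * one logical cpu per thread of each core. Returns the number of cpus
 * that were added.
 */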
static int __smp_rescan_cpus(struct sclp_core_info *info, int sysfs_add)
{
	struct pcpu *pcpu;
	cpumask_t avail;
	int cpu, nr, i, j;
	u16 address;

	nr = 0;
	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	cpu = cpumask_first(&avail);
	for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
		if (sclp.has_core_type && info->core[i].type != boot_core_type)
			continue;
		address = info->core[i].core_id << smp_cpu_mt_shift;
		for (j = 0; j <= smp_cpu_mtid; j++) {
			if (pcpu_find_address(cpu_present_mask, address + j))
				continue;
			pcpu = pcpu_devices + cpu;
			pcpu->address = address + j;
			pcpu->state =
				(cpu >= info->configured*(smp_cpu_mtid + 1)) ?
				CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
			set_cpu_present(cpu, true);
			if (sysfs_add && smp_add_present_cpu(cpu) != 0)
				set_cpu_present(cpu, false);
			else
				nr++;
			cpu = cpumask_next(cpu, &avail);
			if (cpu >= nr_cpu_ids)
				break;
		}
	}
	return nr;
}
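/*
 * Detect the cpus present at boot time, determine the type of the boot
 * cpu and enable multi-threading if the machine supports it.
 */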
void __init smp_detect_cpus(void)
{
	unsigned int cpu, mtid, c_cpus, s_cpus;
	struct sclp_core_info *info;
	u16 address;

	/* Get CPU information */
	info = memblock_virt_alloc(sizeof(*info), 8);
	smp_get_core_info(info, 1);
	/* Find boot CPU type */
	if (sclp.has_core_type) {
		address = stap();
		for (cpu = 0; cpu < info->combined; cpu++)
			if (info->core[cpu].core_id == address) {
				/* The boot cpu dictates the cpu type. */
				boot_core_type = info->core[cpu].type;
				break;
			}
		if (cpu >= info->combined)
			panic("Could not find boot CPU type");
	}

	/* Set multi-threading state for the current system */
	mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp;
	mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
	pcpu_set_smt(mtid);

	/* Print number of CPUs */
	c_cpus = s_cpus = 0;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (sclp.has_core_type &&
		    info->core[cpu].type != boot_core_type)
			continue;
		if (cpu < info->configured)
			c_cpus += smp_cpu_mtid + 1;
		else
			s_cpus += smp_cpu_mtid + 1;
	}
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);

	/* Add CPUs present at boot */
	get_online_cpus();
	__smp_rescan_cpus(info, 0);
	put_online_cpus();
	memblock_free_early((unsigned long)info, sizeof(*info));
}
/*
 *	Activate a secondary processor.
 */
static void smp_start_secondary(void *cpuvoid)
{
	S390_lowcore.last_update_clock = get_tod_clock();
	S390_lowcore.restart_stack = (unsigned long) restart_stack;
	S390_lowcore.restart_fn = (unsigned long) do_restart;
	S390_lowcore.restart_data = 0;
	S390_lowcore.restart_source = -1UL;
	restore_access_regs(S390_lowcore.access_regs_save_area);
	__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	cpu_init();
	preempt_disable();
	init_cpu_timer();
	vtime_init();
	pfault_init();
	notify_cpu_starting(smp_processor_id());
	set_cpu_online(smp_processor_id(), true);
	inc_irq_stat(CPU_RST);
	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
/* Upping and downing of CPUs */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	struct pcpu *pcpu;
	int base, i, rc;

	pcpu = pcpu_devices + cpu;
	if (pcpu->state != CPU_STATE_CONFIGURED)
		return -EIO;
	base = smp_get_base_cpu(cpu);
	for (i = 0; i <= smp_cpu_mtid; i++) {
		if (base + i < nr_cpu_ids)
			if (cpu_online(base + i))
				break;
	}
	/*
	 * If this is the first CPU of the core to get online
	 * do an initial CPU reset.
	 */
	if (i > smp_cpu_mtid &&
	    pcpu_sigp_retry(pcpu_devices + base, SIGP_INITIAL_CPU_RESET, 0) !=
	    SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;

	rc = pcpu_alloc_lowcore(pcpu, cpu);
	if (rc)
		return rc;
	pcpu_prepare_secondary(pcpu, cpu);
	pcpu_attach_task(pcpu, tidle);
	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}
static unsigned int setup_possible_cpus __initdata;

static int __init _setup_possible_cpus(char *s)
{
	get_option(&s, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);
#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
{
	unsigned long cregs[16];

	/* Handle possible pending IPIs */
	smp_handle_ext_call();
	set_cpu_online(smp_processor_id(), false);
	/* Disable pseudo page faults on this cpu. */
	pfault_fini();
	/* Disable interrupt sources via control register. */
	__ctl_store(cregs, 0, 15);
	cregs[0]  &= ~0x0000ee70UL;	/* disable all external interrupts */
	cregs[6]  &= ~0xff000000UL;	/* disable all I/O interrupts */
	cregs[14] &= ~0x1f000000UL;	/* disable most machine checks */
	__ctl_load(cregs, 0, 15);
	clear_cpu_flag(CIF_NOHZ_DELAY);
	return 0;
}
void __cpu_die(unsigned int cpu)
{
	struct pcpu *pcpu;

	/* Wait until target cpu is down */
	pcpu = pcpu_devices + cpu;
	while (!pcpu_stopped(pcpu))
		cpu_relax();
	pcpu_free_lowcore(pcpu);
	cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
	cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
}
void __noreturn cpu_die(void)
{
	idle_task_exit();
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
	for (;;) ;
}

#endif /* CONFIG_HOTPLUG_CPU */
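/*
 * The possible cpu mask is limited by the smaller of the
 * "possible_cpus=" kernel parameter and the maximum number of cores
 * times threads per core reported by the SCLP layer.
 */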
void __init smp_fill_possible_mask(void)
{
	unsigned int possible, sclp_max, cpu;

	sclp_max = max(sclp.mtid, sclp.mtid_cp) + 1;
	sclp_max = min(smp_max_threads, sclp_max);
	sclp_max = (sclp.max_cores * sclp_max) ?: nr_cpu_ids;
	possible = setup_possible_cpus ?: nr_cpu_ids;
	possible = min(possible, sclp_max);
	for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
}
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned long size;

	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1201");
	/* request the 0x1202 external call external interrupt */
	if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1202");
	/* create slab cache for the machine-check-extended-save-areas */
	if (MACHINE_HAS_VX || MACHINE_HAS_GS) {
		size = 1UL << (MACHINE_HAS_GS ? 11 : 10);
		pcpu_mcesa_cache = kmem_cache_create("nmi_save_areas",
						     size, size, 0, NULL);
		if (!pcpu_mcesa_cache)
			panic("Couldn't create nmi save area cache");
	}
}
void __init smp_prepare_boot_cpu(void)
{
	struct pcpu *pcpu = pcpu_devices;

	WARN_ON(!cpu_present(0) || !cpu_online(0));
	pcpu->state = CPU_STATE_CONFIGURED;
	pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix();
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
}
void __init smp_cpus_done(unsigned int max_cpus)
{
}
void __init smp_setup_processor_id(void)
{
	pcpu_devices[0].address = stap();
	S390_lowcore.cpu_nr = 0;
	S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
}
/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
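/*
 * Writing 0/1 to the "configure" attribute deconfigures/configures the
 * cpu and all sibling threads of its core via SCLP. Online cpus and
 * cpu 0 may not be changed.
 */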
static ssize_t cpu_configure_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct pcpu *pcpu;
	int cpu, val, rc, i;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	/* disallow configuration changes of online cpus and cpu 0 */
	cpu = dev->id;
	cpu = smp_get_base_cpu(cpu);
	if (cpu == 0)
		goto out;
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_online(cpu + i))
			goto out;
	pcpu = pcpu_devices + cpu;
	rc = 0;
	switch (val) {
	case 0:
		if (pcpu->state != CPU_STATE_CONFIGURED)
			break;
		rc = sclp_core_deconfigure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_STANDBY;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	case 1:
		if (pcpu->state != CPU_STATE_STANDBY)
			break;
		rc = sclp_core_configure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
#endif /* CONFIG_HOTPLUG_CPU */
static ssize_t show_cpu_address(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);
static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&dev_attr_configure.attr,
#endif
	&dev_attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static struct attribute *cpu_online_attrs[] = {
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};
static int smp_cpu_online(unsigned int cpu)
{
	struct device *s = &per_cpu(cpu_device, cpu)->dev;

	return sysfs_create_group(&s->kobj, &cpu_online_attr_group);
}
static int smp_cpu_pre_down(unsigned int cpu)
{
	struct device *s = &per_cpu(cpu_device, cpu)->dev;

	sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
	return 0;
}
static int smp_add_present_cpu(int cpu)
{
	struct device *s;
	struct cpu *c;
	int rc;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;
	per_cpu(cpu_device, cpu) = c;
	s = &c->dev;
	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	rc = topology_cpu_init(c);
	if (rc)
		goto out_topology;
	return 0;

out_topology:
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu(c);
#endif
out:
	return rc;
}
#ifdef CONFIG_HOTPLUG_CPU

int __ref smp_rescan_cpus(void)
{
	struct sclp_core_info *info;
	int nr;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	smp_get_core_info(info, 0);
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	nr = __smp_rescan_cpus(info, 1);
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	kfree(info);
	if (nr)
		topology_schedule_update();
	return 0;
}
static ssize_t __ref rescan_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = smp_rescan_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
#endif /* CONFIG_HOTPLUG_CPU */
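/*
 * Register the rescan attribute, a device for each present cpu and the
 * cpuhp callbacks that manage the per-cpu online sysfs attributes.
 */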
static int __init s390_smp_init(void)
{
	int cpu, rc = 0;

#ifdef CONFIG_HOTPLUG_CPU
	rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
	if (rc)
		return rc;
#endif
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			goto out;
	}

	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "s390/smp:online",
			       smp_cpu_online, smp_cpu_pre_down);
	rc = rc <= 0 ? rc : 0;
out:
	return rc;
}
subsys_initcall(s390_smp_init);