select GENERIC_PCI_IOMAP
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_SMP_IDLE_THREAD
- select GENERIC_CPU_DEVICES
+ select GENERIC_ARCH_TOPOLOGY if SMP
select GENERIC_LIB_DEVMEM_IS_ALLOWED
select SYSCTL_ARCH_UNALIGN_ALLOW
select SYSCTL_EXCEPTION_TRACE
select HAVE_ARCH_TRACEHOOK
select HAVE_REGS_AND_STACK_ACCESS_API
select GENERIC_SCHED_CLOCK
+ select GENERIC_IRQ_MIGRATION if SMP
select HAVE_UNSTABLE_SCHED_CLOCK if SMP
select LEGACY_TIMER_TICK
select CPU_NO_EFFICIENT_FFS
If you don't know what to do here, say N.
-config PARISC_CPU_TOPOLOGY
- bool "Support cpu topology definition"
- depends on SMP
- default y
- help
- Support PARISC cpu topology definition.
-
config SCHED_MC
bool "Multi-core scheduler support"
- depends on PARISC_CPU_TOPOLOGY && PA8X00
+ depends on GENERIC_ARCH_TOPOLOGY && PA8X00
help
Multi-core scheduler support improves the CPU scheduler's decision
making when dealing with multi-core CPU chips at a cost of slightly
# Set default 32 bits cross compilers for vdso
CC_ARCHES_32 = hppa hppa2.0 hppa1.1
-CC_SUFFIXES = linux linux-gnu unknown-linux-gnu
+CC_SUFFIXES = linux linux-gnu unknown-linux-gnu suse-linux
CROSS32_COMPILE := $(call cc-cross-prefix, \
$(foreach a,$(CC_ARCHES_32), \
$(foreach s,$(CC_SUFFIXES),$(a)-$(s)-)))
# Set default cross compiler for kernel build
ifdef cross_compiling
ifeq ($(CROSS_COMPILE),)
- CC_SUFFIXES = linux linux-gnu unknown-linux-gnu
+ CC_SUFFIXES = linux linux-gnu unknown-linux-gnu suse-linux
CROSS_COMPILE := $(call cc-cross-prefix, \
$(foreach a,$(CC_ARCHES), \
$(foreach s,$(CC_SUFFIXES),$(a)-$(s)-)))
unsigned long glob_cfg);
int __pdc_cpu_rendezvous(void);
+void pdc_cpu_rendezvous_lock(void);
+void pdc_cpu_rendezvous_unlock(void);
+
static inline char * os_id_to_string(u16 os_id) {
switch(os_id) {
case OS_ID_NONE: return "No OS";
#define PDC_PAT_CPU_RENDEZVOUS 6L /* Rendezvous CPU */
#define PDC_PAT_CPU_GET_CLOCK_INFO 7L /* Return CPU Clock info */
#define PDC_PAT_CPU_GET_RENDEZVOUS_STATE 8L /* Return Rendezvous State */
+#define PDC_PAT_CPU_GET_PDC_ENTRYPOINT 11L /* Return PDC Entry point */
#define PDC_PAT_CPU_PLUNGE_FABRIC 128L /* Plunge Fabric */
#define PDC_PAT_CPU_UPDATE_CACHE_CLEANSING 129L /* Manipulate Cache
* Cleansing Mode */
typedef struct pdc_pat_cell_mod_maddr_block pdc_pat_cell_mod_maddr_block_t;
-
+extern int pdc_pat_get_PDC_entrypoint(unsigned long *pdc_entry);
extern int pdc_pat_chassis_send_log(unsigned long status, unsigned long data);
extern int pdc_pat_cell_get_number(struct pdc_pat_cell_num *cell_info);
extern int pdc_pat_cell_info(struct pdc_pat_cell_info_rtn_block *info,
extern struct system_cpuinfo_parisc boot_cpu_data;
DECLARE_PER_CPU(struct cpuinfo_parisc, cpu_data);
+extern int time_keeper_id; /* CPU used for timekeeping */
#define CPU_HVERSION ((boot_cpu_data.hversion >> 4) & 0x0FFF)
#define NO_PROC_ID 0xFF /* No processor magic marker */
#define ANY_PROC_ID 0xFF /* Any processor magic marker */
-static inline int __cpu_disable (void) {
- return 0;
-}
-static inline void __cpu_die (unsigned int cpu) {
- while(1)
- ;
-}
+int __cpu_disable(void);
+void __cpu_die(unsigned int cpu);
#endif /* __ASM_SMP_H */
pa; \
})
+#define CR_EIEM 15 /* External Interrupt Enable Mask */
+#define CR_CR16 16 /* CR16 Interval Timer */
+#define CR_EIRR 23 /* External Interrupt Request Register */
+
#define mfctl(reg) ({ \
unsigned long cr; \
__asm__ __volatile__( \
- "mfctl " #reg ",%0" : \
- "=r" (cr) \
+ "mfctl %1,%0" : \
+ "=r" (cr) : "i" (reg) \
); \
cr; \
})
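
With the register number now passed through an "i" constraint instead of token pasting, callers can use the new symbolic CR_* names as well as plain numbers. A minimal usage sketch, reader-added rather than part of the patch:

    /* Illustrative only: read the interval timer and mask external
     * interrupts using the new symbolic names. */
    unsigned long now = mfctl(CR_CR16);        /* previously mfctl(16) */
    unsigned long old_eiem = mfctl(CR_EIEM);   /* previously mfctl(15) */

    mtctl(0UL, CR_EIEM);                       /* mask all external interrupts */
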
: /* no outputs */ \
: "r" (gr), "i" (cr) : "memory")
-/* these are here to de-mystefy the calling code, and to provide hooks */
-/* which I needed for debugging EIEM problems -PB */
-#define get_eiem() mfctl(15)
-static inline void set_eiem(unsigned long val)
-{
- mtctl(val, 15);
-}
+#define get_eiem() mfctl(CR_EIEM)
+#define set_eiem(val) mtctl(val, CR_EIEM)
#define mfsp(reg) ({ \
unsigned long cr; \
#ifndef _ASM_PARISC_TOPOLOGY_H
#define _ASM_PARISC_TOPOLOGY_H
-#ifdef CONFIG_PARISC_CPU_TOPOLOGY
+#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY
#include <linux/cpumask.h>
-
-struct cputopo_parisc {
- int thread_id;
- int core_id;
- int socket_id;
- cpumask_t thread_sibling;
- cpumask_t core_sibling;
-};
-
-extern struct cputopo_parisc cpu_topology[NR_CPUS];
-
-#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
-#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
-#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
-#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
-
-void init_cpu_topology(void);
-void store_cpu_topology(unsigned int cpuid);
-const struct cpumask *cpu_coregroup_mask(int cpu);
+#include <linux/arch_topology.h>
#else
static inline void init_cpu_topology(void) { }
static inline void store_cpu_topology(unsigned int cpuid) { }
+static inline void reset_cpu_topology(void) { }
#endif
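
For reference, the definitions that replace the removed ones now come from the shared header when CONFIG_GENERIC_ARCH_TOPOLOGY is set; an abridged, paraphrased sketch of <linux/arch_topology.h> from this era (not verbatim):

    /* Abridged from <linux/arch_topology.h>; package_id takes over the
     * role of the old parisc socket_id. */
    struct cpu_topology {
            int thread_id;
            int core_id;
            int package_id;
            cpumask_t thread_sibling;
            cpumask_t core_sibling;
    };
    extern struct cpu_topology cpu_topology[NR_CPUS];

    #define topology_physical_package_id(cpu) (cpu_topology[cpu].package_id)
    #define topology_core_id(cpu)             (cpu_topology[cpu].core_id)
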
obj64-$(CONFIG_AUDIT) += compat_audit.o
# only supported for PCX-W/U in 64-bit mode at the moment
obj-$(CONFIG_64BIT) += perf.o perf_asm.o $(obj64-y)
-obj-$(CONFIG_PARISC_CPU_TOPOLOGY) += topology.o
+obj-$(CONFIG_GENERIC_ARCH_TOPOLOGY) += topology.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
}
}
-void __init disable_sr_hashing(void)
+void disable_sr_hashing(void)
{
int srhash_type, retval;
unsigned long space_bits;
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
if (pfn_valid(pfn)) {
- flush_tlb_page(vma, vmaddr);
if (likely(vma->vm_mm->context.space_id)) {
+ flush_tlb_page(vma, vmaddr);
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
} else {
__purge_cache_page(vma, vmaddr, PFN_PHYS(pfn));
{
unsigned long start = (unsigned long)vaddr;
unsigned long end = start + size;
- unsigned long flags, physaddr;
if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
(unsigned long)size >= parisc_cache_flush_threshold) {
return;
}
- while (start < end) {
- physaddr = lpa(start);
- purge_tlb_start(flags);
- pdtlb(SR_KERNEL, start);
- purge_tlb_end(flags);
- flush_dcache_page_asm(physaddr, start);
- start += PAGE_SIZE;
- }
+ flush_kernel_dcache_range_asm(start, end);
+ flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);
{
unsigned long start = (unsigned long)vaddr;
unsigned long end = start + size;
- unsigned long flags, physaddr;
if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
(unsigned long)size >= parisc_cache_flush_threshold) {
return;
}
- while (start < end) {
- physaddr = lpa(start);
- purge_tlb_start(flags);
- pdtlb(SR_KERNEL, start);
- purge_tlb_end(flags);
- purge_dcache_page_asm(physaddr, start);
- start += PAGE_SIZE;
- }
+ purge_kernel_dcache_range_asm(start, end);
+ flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);
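
The contract of both helpers is unchanged (cf. Documentation/core-api/cachetlb.rst): flush the vmap alias before starting I/O on the underlying pages, invalidate it before reading the data back. A hypothetical caller, for illustration only:

    void *buf = vmalloc(size);

    /* ... CPU writes to buf through the vmap alias ... */
    flush_kernel_vmap_range(buf, size);       /* push dirty lines before DMA */
    /* ... device performs DMA on the underlying pages ... */
    invalidate_kernel_vmap_range(buf, size);  /* drop stale alias lines */
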
-/* Firmware needs to be initially set to narrow to determine the
- * actual firmware width. */
-int parisc_narrow_firmware __ro_after_init = 1;
+/* Firmware needs to be initially set to narrow to determine the
+ * actual firmware width. A value of 2 means "narrow, but not yet
+ * verified", so set_firmware_width() runs the detection only once. */
+int parisc_narrow_firmware __ro_after_init = 2;
#endif
/* On most currently-supported platforms, IODC I/O calls are 32-bit calls
void set_firmware_width(void)
{
unsigned long flags;
+
+ /* already initialized? */
+ if (parisc_narrow_firmware != 2)
+ return;
+
spin_lock_irqsave(&pdc_lock, flags);
set_firmware_width_unlocked();
spin_unlock_irqrestore(&pdc_lock, flags);
return mem_pdc_call(PDC_PROC, 1, 0);
}
+/**
+ * pdc_cpu_rendezvous_lock - Lock PDC while transitioning to rendezvous state
+ */
+void pdc_cpu_rendezvous_lock(void)
+{
+ spin_lock(&pdc_lock);
+}
+
+/**
+ * pdc_cpu_rendezvous_unlock - Unlock PDC after reaching rendezvous state
+ */
+void pdc_cpu_rendezvous_unlock(void)
+{
+ spin_unlock(&pdc_lock);
+}
+
+/**
+ * pdc_pat_get_PDC_entrypoint - Get PDC entry point for current CPU
+ * @pdc_entry: pointer to where the PDC entry point should be stored
+ */
+int pdc_pat_get_PDC_entrypoint(unsigned long *pdc_entry)
+{
+ int retval = 0;
+ unsigned long flags;
+
+ if (!IS_ENABLED(CONFIG_SMP) || !is_pdc_pat()) {
+ *pdc_entry = MEM_PDC;
+ return 0;
+ }
+
+ spin_lock_irqsave(&pdc_lock, flags);
+ retval = mem_pdc_call(PDC_PAT_CPU, PDC_PAT_CPU_GET_PDC_ENTRYPOINT,
+ __pa(pdc_result));
+ *pdc_entry = pdc_result[0];
+ spin_unlock_irqrestore(&pdc_lock, flags);
+ return retval;
+}
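
A hedged usage sketch (the pdce_proc local is illustrative, not taken from this patch); note the helper degrades gracefully to MEM_PDC on non-PAT or non-SMP configurations:

    unsigned long pdce_proc;    /* hypothetical local */

    if (pdc_pat_get_PDC_entrypoint(&pdce_proc))
            pr_warn("failed to get PDC entry point\n");
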
/**
* pdc_chassis_warn - Fetches chassis warnings
* @retval: -1 on error, 0 on success
/* FALLTHROUGH */
.procend
+#ifdef CONFIG_HOTPLUG_CPU
+ /* common_stext is far away in another section... jump there */
+ load32 PA(common_stext), %rp
+ bv,n (%rp)
+
+ /* common_stext and smp_slave_stext need to be in the .text section */
+ .text
+#endif
+
/*
** Code Common to both Monarch and Slave processors.
** Entry:
.procend
#endif /* CONFIG_SMP */
-ENDPROC(parisc_kernel_start)
-
#ifndef CONFIG_64BIT
.section .data..ro_after_init
if (irqd_is_per_cpu(d))
return -EINVAL;
- /* whatever mask they set, we just allow one CPU */
- cpu_dest = cpumask_next_and(d->irq & (num_online_cpus()-1),
- dest, cpu_online_mask);
+ cpu_dest = cpumask_first_and(dest, cpu_online_mask);
if (cpu_dest >= nr_cpu_ids)
- cpu_dest = cpumask_first_and(dest, cpu_online_mask);
+ cpu_dest = cpumask_first(cpu_online_mask);
return cpu_dest;
}
-
-static int cpu_set_affinity_irq(struct irq_data *d, const struct cpumask *dest,
- bool force)
-{
- int cpu_dest;
-
- cpu_dest = cpu_check_affinity(d, dest);
- if (cpu_dest < 0)
- return -1;
-
- cpumask_copy(irq_data_get_affinity_mask(d), dest);
-
- return 0;
-}
#endif
static struct irq_chip cpu_interrupt_type = {
.irq_unmask = cpu_unmask_irq,
.irq_ack = cpu_ack_irq,
.irq_eoi = cpu_eoi_irq,
-#ifdef CONFIG_SMP
- .irq_set_affinity = cpu_set_affinity_irq,
-#endif
/* XXX: Needs to be written. We managed without it so far, but
* we really ought to write it.
*/
#endif
}
-void __init init_IRQ(void)
+void init_IRQ(void)
{
local_irq_disable(); /* PARANOID - should already be disabled */
mtctl(~0UL, 23); /* EIRR : clear all pending external intr */
nop
ENDPROC_CFI(flush_kernel_icache_range_asm)
- __INIT
+ .text
/* align should cover use of rfi in disable_sr_hashing_asm and
* srdis_done.
*need_unmap = 1;
set_fixmap(fixmap, page_to_phys(page));
- if (flags)
- raw_spin_lock_irqsave(&patch_lock, *flags);
- else
- __acquire(&patch_lock);
+ raw_spin_lock_irqsave(&patch_lock, *flags);
return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
}
{
clear_fixmap(fixmap);
- if (flags)
- raw_spin_unlock_irqrestore(&patch_lock, *flags);
- else
- __release(&patch_lock);
+ raw_spin_unlock_irqrestore(&patch_lock, *flags);
}
void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
int mapped;
/* Make sure we don't have any aliases in cache */
- flush_kernel_vmap_range(addr, len);
- flush_icache_range(start, end);
+ flush_kernel_dcache_range_asm(start, end);
+ flush_kernel_icache_range_asm(start, end);
+ flush_tlb_kernel_range(start, end);
p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags, &mapped);
* We're crossing a page boundary, so
* need to remap
*/
- flush_kernel_vmap_range((void *)fixmap,
- (p-fixmap) * sizeof(*p));
+ flush_kernel_dcache_range_asm((unsigned long)fixmap,
+ (unsigned long)p);
+ flush_tlb_kernel_range((unsigned long)fixmap,
+ (unsigned long)p);
if (mapped)
patch_unmap(FIX_TEXT_POKE0, &flags);
p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags,
}
}
- flush_kernel_vmap_range((void *)fixmap, (p-fixmap) * sizeof(*p));
+ flush_kernel_dcache_range_asm((unsigned long)fixmap, (unsigned long)p);
+ flush_tlb_kernel_range((unsigned long)fixmap, (unsigned long)p);
if (mapped)
patch_unmap(FIX_TEXT_POKE0, &flags);
- flush_icache_range(start, end);
}
void __kprobes __patch_text(void *addr, u32 insn)
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/nmi.h>
+#include <linux/sched/hotplug.h>
#include <asm/io.h>
#include <asm/asm-offsets.h>
#include <asm/pdc_chassis.h>
#include <asm/unwind.h>
#include <asm/sections.h>
+#include <asm/cacheflush.h>
#define COMMAND_GLOBAL F_EXTEND(0xfffe0030)
#define CMD_RESET 5 /* reset any module */
int running_on_qemu __ro_after_init;
EXPORT_SYMBOL(running_on_qemu);
-void __cpuidle arch_cpu_idle_dead(void)
+/*
+ * Called from the idle thread of the CPU which has been shut down.
+ */
+void arch_cpu_idle_dead(void)
{
- /* nop on real hardware, qemu will offline CPU. */
- asm volatile("or %%r31,%%r31,%%r31\n":::);
+#ifdef CONFIG_HOTPLUG_CPU
+ idle_task_exit();
+
+ local_irq_disable();
+
+ /* Tell __cpu_die() that this CPU is now safe to dispose of. */
+ (void)cpu_report_death();
+
+ /* Ensure that the cache lines are written out. */
+ flush_cache_all_local();
+ flush_tlb_all_local(NULL);
+
+ /* Let PDC firmware put CPU into firmware idle loop. */
+ __pdc_cpu_rendezvous();
+
+ pr_warn("PDC does not provide rendezvous function.\n");
+#endif
+ while (1);
}
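
cpu_report_death() above pairs with the cpu_wait_death() call in __cpu_die() further down; both are generic helpers from <linux/cpu.h> / kernel/smpboot.c in this kernel generation. Their declarations, reproduced here only to make the handshake explicit:

    bool cpu_report_death(void);    /* dying CPU: "safe to dispose of me" */
    bool cpu_wait_death(unsigned int cpu, int seconds);    /* waiter polls */
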
void __cpuidle arch_cpu_idle(void)
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/cpu.h>
+#include <asm/topology.h>
#include <asm/param.h>
#include <asm/cache.h>
#include <asm/hardware.h> /* for register_parisc_driver() stuff */
*
* o Enable CPU profiling hooks.
*/
-int __init init_per_cpu(int cpunum)
+int init_per_cpu(int cpunum)
{
int ret;
struct pdc_coproc_cfg coproc_cfg;
boot_cpu_data.cpu_hz / 1000000,
boot_cpu_data.cpu_hz % 1000000 );
-#ifdef CONFIG_PARISC_CPU_TOPOLOGY
+#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY
seq_printf(m, "physical id\t: %d\n",
topology_physical_package_id(cpu));
seq_printf(m, "siblings\t: %d\n",
*/
void __init processor_init(void)
{
+ reset_cpu_topology();
register_parisc_driver(&cpu_driver);
}
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/kgdb.h>
+#include <linux/sched/hotplug.h>
#include <linux/atomic.h>
#include <asm/current.h>
/* track which CPU is booting */
static volatile int cpu_now_booting;
-static int parisc_max_cpus = 1;
-
static DEFINE_PER_CPU(spinlock_t, ipi_lock);
enum ipi_message_type {
/*
* Called by secondaries to update state and initialize CPU registers.
*/
-static void __init
+static void
smp_cpu_init(int cpunum)
{
extern void init_IRQ(void); /* arch/parisc/kernel/irq.c */
* Slaves start using C here. Indirectly called from smp_slave_stext.
* Do what start_kernel() and main() do for boot strap processor (aka monarch)
*/
-void __init smp_callin(unsigned long pdce_proc)
+void smp_callin(unsigned long pdce_proc)
{
int slave_id = cpu_now_booting;
/*
* Bring one cpu online.
*/
-int smp_boot_one_cpu(int cpuid, struct task_struct *idle)
+static int smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{
const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
long timeout;
+#ifdef CONFIG_HOTPLUG_CPU
+ int i;
+
+ /* reset irq statistics for this CPU */
+ memset(&per_cpu(irq_stat, cpuid), 0, sizeof(irq_cpustat_t));
+ for (i = 0; i < NR_IRQS; i++) {
+ struct irq_desc *desc = irq_to_desc(i);
+
+ if (desc && desc->kstat_irqs)
+ *per_cpu_ptr(desc->kstat_irqs, cpuid) = 0;
+ }
+#endif
+
+ /* wait until the last booting CPU has started. */
+ while (cpu_now_booting)
+ ;
+
/* Let _start know what logical CPU we're booting
** (offset into init_tasks[],cpu_data[])
*/
if(cpu_online(cpuid)) {
/* Which implies Slave has started up */
cpu_now_booting = 0;
- smp_init_current_idle_task = NULL;
goto alive ;
}
udelay(100);
spin_lock_init(&per_cpu(ipi_lock, cpu));
init_cpu_present(cpumask_of(0));
-
- parisc_max_cpus = max_cpus;
- if (!max_cpus)
- printk(KERN_INFO "SMP mode deactivated.\n");
}
-void smp_cpus_done(unsigned int cpu_max)
+void __init smp_cpus_done(unsigned int cpu_max)
{
- return;
}
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
- if (cpu != 0 && cpu < parisc_max_cpus && smp_boot_one_cpu(cpu, tidle))
- return -ENOSYS;
+ if (cpu_online(cpu))
+ return 0;
+
+ if (num_online_cpus() < setup_max_cpus && smp_boot_one_cpu(cpu, tidle))
+ return -EIO;
+
+ return cpu_online(cpu) ? 0 : -EIO;
+}
+
+/*
+ * __cpu_disable runs on the processor to be shutdown.
+ */
+int __cpu_disable(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+ unsigned int cpu = smp_processor_id();
+
+ remove_cpu_topology(cpu);
+
+ /*
+ * Take this CPU offline. Once we clear this, we can't return,
+ * and we must not schedule until we're ready to give up the cpu.
+ */
+ set_cpu_online(cpu, false);
+
+ /* Find a new timesync master */
+ if (cpu == time_keeper_id) {
+ time_keeper_id = cpumask_first(cpu_online_mask);
+ pr_info("CPU %d is now promoted to time-keeper master\n", time_keeper_id);
+ }
+
+ disable_percpu_irq(IPI_IRQ);
+
+ irq_migrate_all_off_this_cpu();
+
+ flush_cache_all_local();
+ flush_tlb_all_local(NULL);
+
+ /* disable all irqs, including timer irq */
+ local_irq_disable();
+
+ /* wait for next timer irq ... */
+ mdelay(1000/HZ+100);
+
+ /* ... and then clear all pending external irqs */
+ set_eiem(0);
+ mtctl(~0UL, CR_EIRR);
+ mfctl(CR_EIRR);
+ mtctl(0, CR_EIRR);
+#endif
+ return 0;
+}
+
+/*
+ * Called on the thread which is asking for a CPU to be shut down -
+ * waits until shutdown has completed, or times out.
+ */
+void __cpu_die(unsigned int cpu)
+{
+ pdc_cpu_rendezvous_lock();
+
+ if (!cpu_wait_death(cpu, 5)) {
+ pr_crit("CPU%u: cpu didn't die\n", cpu);
+ return;
+ }
+ pr_info("CPU%u: is shutting down\n", cpu);
+
+ /* set task's state to interruptible sleep */
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout((IS_ENABLED(CONFIG_64BIT) ? 8:2) * HZ);
- return cpu_online(cpu) ? 0 : -ENOSYS;
+
+ pdc_cpu_rendezvous_unlock();
}
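
With both halves in place, offlining can be exercised from userspace through /sys/devices/system/cpu/cpuN/online. An equivalent in-kernel smoke test might look like the sketch below; the test function itself is hypothetical, but remove_cpu()/add_cpu() are the generic <linux/cpu.h> helpers:

    #include <linux/cpu.h>
    #include <linux/init.h>

    /* Hypothetical smoke test, not part of this patch. */
    static int __init parisc_hotplug_smoke_test(void)
    {
            int ret = remove_cpu(1);    /* drives __cpu_disable()/__cpu_die() */

            if (ret)
                    return ret;
            return add_cpu(1);          /* boots the CPU again via __cpu_up() */
    }
    late_initcall(parisc_hotplug_smoke_test);
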
#ifdef CONFIG_PROC_FS
#include <linux/timex.h>
+int time_keeper_id __read_mostly; /* CPU used for timekeeping. */
+
static unsigned long clocktick __ro_after_init; /* timer cycles per tick */
/*
cpuinfo->it_value = next_tick;
/* Go do system house keeping. */
- if (cpu != 0)
+ if (IS_ENABLED(CONFIG_SMP) && (cpu != time_keeper_id))
ticks_elapsed = 0;
legacy_timer_tick(ticks_elapsed);
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-void __init start_cpu_itimer(void)
+void start_cpu_itimer(void)
{
unsigned int cpu = smp_processor_id();
unsigned long next_tick = mfctl(16) + clocktick;
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
+#include <linux/cpu.h>
#include <asm/topology.h>
+#include <asm/sections.h>
- /*
- * cpu topology table
- */
-struct cputopo_parisc cpu_topology[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL_GPL(cpu_topology);
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
-const struct cpumask *cpu_coregroup_mask(int cpu)
-{
- return &cpu_topology[cpu].core_sibling;
-}
-
-static void update_siblings_masks(unsigned int cpuid)
-{
- struct cputopo_parisc *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
- int cpu;
-
- /* update core and thread sibling masks */
- for_each_possible_cpu(cpu) {
- cpu_topo = &cpu_topology[cpu];
-
- if (cpuid_topo->socket_id != cpu_topo->socket_id)
- continue;
-
- cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
- if (cpu != cpuid)
- cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
-
- if (cpuid_topo->core_id != cpu_topo->core_id)
- continue;
-
- cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
- if (cpu != cpuid)
- cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
- }
- smp_wmb();
-}
-
-static int dualcores_found __initdata;
+static int dualcores_found;
/*
* store_cpu_topology is called at boot when only one cpu is running
* and with the mutex cpu_hotplug.lock locked, when several cpus have booted,
* which prevents simultaneous write access to cpu_topology array
*/
-void __init store_cpu_topology(unsigned int cpuid)
+void store_cpu_topology(unsigned int cpuid)
{
- struct cputopo_parisc *cpuid_topo = &cpu_topology[cpuid];
+ struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
struct cpuinfo_parisc *p;
int max_socket = -1;
unsigned long cpu;
if (cpuid_topo->core_id != -1)
return;
+#ifdef CONFIG_HOTPLUG_CPU
+ per_cpu(cpu_devices, cpuid).hotpluggable = 1;
+#endif
+ if (register_cpu(&per_cpu(cpu_devices, cpuid), cpuid))
+ pr_warn("Failed to register CPU%d device", cpuid);
+
/* create cpu topology mapping */
cpuid_topo->thread_id = -1;
cpuid_topo->core_id = 0;
cpuid_topo->core_id = cpu_topology[cpu].core_id;
if (p->cpu_loc) {
cpuid_topo->core_id++;
- cpuid_topo->socket_id = cpu_topology[cpu].socket_id;
+ cpuid_topo->package_id = cpu_topology[cpu].package_id;
dualcores_found = 1;
continue;
}
}
- if (cpuid_topo->socket_id == -1)
- max_socket = max(max_socket, cpu_topology[cpu].socket_id);
+ if (cpuid_topo->package_id == -1)
+ max_socket = max(max_socket, cpu_topology[cpu].package_id);
}
- if (cpuid_topo->socket_id == -1)
- cpuid_topo->socket_id = max_socket + 1;
+ if (cpuid_topo->package_id == -1)
+ cpuid_topo->package_id = max_socket + 1;
update_siblings_masks(cpuid);
pr_info("CPU%u: cpu core %d of socket %d\n",
cpuid,
cpu_topology[cpuid].core_id,
- cpu_topology[cpuid].socket_id);
+ cpu_topology[cpuid].package_id);
}
static struct sched_domain_topology_level parisc_mc_topology[] = {
*/
void __init init_cpu_topology(void)
{
- unsigned int cpu;
-
- /* init core mask and capacity */
- for_each_possible_cpu(cpu) {
- struct cputopo_parisc *cpu_topo = &(cpu_topology[cpu]);
-
- cpu_topo->thread_id = -1;
- cpu_topo->core_id = -1;
- cpu_topo->socket_id = -1;
- cpumask_clear(&cpu_topo->core_sibling);
- cpumask_clear(&cpu_topo->thread_sibling);
- }
- smp_wmb();
-
/* Set scheduler topology descriptor */
if (dualcores_found)
set_sched_topology(parisc_mc_topology);
{
struct pci_hba_data hba; /* 'C' inheritance - must be first */
spinlock_t dinosaur_pen;
- unsigned long txn_addr; /* EIR addr to generate interrupt */
- u32 txn_data; /* EIR data assign to each dino */
u32 imr; /* IRQ's which are enabled */
+ struct gsc_irq gsc_irq;
int global_irq[DINO_LOCAL_IRQS]; /* map IMR bit to global irq */
#ifdef DINO_DEBUG
unsigned int dino_irr0; /* save most recent IRQ line stat */
if (tmp & DINO_MASK_IRQ(local_irq)) {
DBG(KERN_WARNING "%s(): IRQ asserted! (ILR 0x%x)\n",
__func__, tmp);
- gsc_writel(dino_dev->txn_data, dino_dev->txn_addr);
+ gsc_writel(dino_dev->gsc_irq.txn_data, dino_dev->gsc_irq.txn_addr);
}
}
+#ifdef CONFIG_SMP
+static int dino_set_affinity_irq(struct irq_data *d, const struct cpumask *dest,
+ bool force)
+{
+ struct dino_device *dino_dev = irq_data_get_irq_chip_data(d);
+ struct cpumask tmask;
+ int cpu_irq;
+ u32 eim;
+
+ if (!cpumask_and(&tmask, dest, cpu_online_mask))
+ return -EINVAL;
+
+ cpu_irq = cpu_check_affinity(d, &tmask);
+ if (cpu_irq < 0)
+ return cpu_irq;
+
+ dino_dev->gsc_irq.txn_addr = txn_affinity_addr(d->irq, cpu_irq);
+ eim = ((u32) dino_dev->gsc_irq.txn_addr) | dino_dev->gsc_irq.txn_data;
+ __raw_writel(eim, dino_dev->hba.base_addr+DINO_IAR0);
+
+ irq_data_update_effective_affinity(d, &tmask);
+
+ return IRQ_SET_MASK_OK;
+}
+#endif
+
static struct irq_chip dino_interrupt_type = {
.name = "GSC-PCI",
.irq_unmask = dino_unmask_irq,
.irq_mask = dino_mask_irq,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = dino_set_affinity_irq,
+#endif
};
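
For context, txn_affinity_addr() used above is the pre-existing helper in arch/parisc/kernel/irq.c; paraphrased (not part of this patch), it records the new affinity and returns the chosen CPU's EIR address, which the I/O chip then directs interrupt transactions to:

    /* Paraphrased from arch/parisc/kernel/irq.c. */
    unsigned long txn_affinity_addr(unsigned int irq, int cpu)
    {
    #ifdef CONFIG_SMP
            struct irq_data *d = irq_get_irq_data(irq);

            cpumask_copy(irq_data_get_affinity_mask(d), cpumask_of(cpu));
    #endif
            return per_cpu(cpu_data, cpu).txn_addr;
    }
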
{
int status;
u32 eim;
- struct gsc_irq gsc_irq;
struct resource *res;
pcibios_register_hba(&dino_dev->hba);
** still only has 11 IRQ input lines - just map some of them
** to a different processor.
*/
- dev->irq = gsc_alloc_irq(&gsc_irq);
- dino_dev->txn_addr = gsc_irq.txn_addr;
- dino_dev->txn_data = gsc_irq.txn_data;
- eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data;
+ dev->irq = gsc_alloc_irq(&dino_dev->gsc_irq);
+ eim = ((u32) dino_dev->gsc_irq.txn_addr) | dino_dev->gsc_irq.txn_data;
/*
** Dino needs a PA "IRQ" to get a processor's attention.
*/
}
+#ifdef CONFIG_SMP
+static int gsc_set_affinity_irq(struct irq_data *d, const struct cpumask *dest,
+ bool force)
+{
+ struct gsc_asic *gsc_dev = irq_data_get_irq_chip_data(d);
+ struct cpumask tmask;
+ int cpu_irq;
+
+ if (!cpumask_and(&tmask, dest, cpu_online_mask))
+ return -EINVAL;
+
+ cpu_irq = cpu_check_affinity(d, &tmask);
+ if (cpu_irq < 0)
+ return cpu_irq;
+
+ gsc_dev->gsc_irq.txn_addr = txn_affinity_addr(d->irq, cpu_irq);
+ gsc_dev->eim = ((u32) gsc_dev->gsc_irq.txn_addr) | gsc_dev->gsc_irq.txn_data;
+
+ /* switch IRQ's for devices below LASI/WAX to other CPU */
+ gsc_writel(gsc_dev->eim, gsc_dev->hpa + OFFSET_IAR);
+
+ irq_data_update_effective_affinity(d, &tmask);
+
+ return IRQ_SET_MASK_OK;
+}
+#endif
+
static struct irq_chip gsc_asic_interrupt_type = {
.name = "GSC-ASIC",
.irq_unmask = gsc_asic_unmask_irq,
.irq_mask = gsc_asic_mask_irq,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = gsc_set_affinity_irq,
+#endif
};
int gsc_assign_irq(struct irq_chip *type, void *data)
int version;
int type;
int eim;
+ struct gsc_irq gsc_irq;
int global_irq[32];
};
{
extern void (*chassis_power_off)(void);
struct gsc_asic *lasi;
- struct gsc_irq gsc_irq;
int ret;
lasi = kzalloc(sizeof(*lasi), GFP_KERNEL);
lasi_init_irq(lasi);
/* the IRQ lasi should use */
- dev->irq = gsc_alloc_irq(&gsc_irq);
+ dev->irq = gsc_alloc_irq(&lasi->gsc_irq);
if (dev->irq < 0) {
printk(KERN_ERR "%s(): cannot get GSC irq\n",
__func__);
return -EBUSY;
}
- lasi->eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data;
+ lasi->eim = ((u32) lasi->gsc_irq.txn_addr) | lasi->gsc_irq.txn_data;
- ret = request_irq(gsc_irq.irq, gsc_asic_intr, 0, "lasi", lasi);
+ ret = request_irq(lasi->gsc_irq.irq, gsc_asic_intr, 0, "lasi", lasi);
if (ret < 0) {
kfree(lasi);
return ret;
{
struct gsc_asic *wax;
struct parisc_device *parent;
- struct gsc_irq gsc_irq;
int ret;
wax = kzalloc(sizeof(*wax), GFP_KERNEL);
wax_init_irq(wax);
/* the IRQ wax should use */
- dev->irq = gsc_claim_irq(&gsc_irq, WAX_GSC_IRQ);
+ dev->irq = gsc_claim_irq(&wax->gsc_irq, WAX_GSC_IRQ);
if (dev->irq < 0) {
printk(KERN_ERR "%s(): cannot get GSC irq\n",
__func__);
return -EBUSY;
}
- wax->eim = ((u32) gsc_irq.txn_addr) | gsc_irq.txn_data;
+ wax->eim = ((u32) wax->gsc_irq.txn_addr) | wax->gsc_irq.txn_data;
- ret = request_irq(gsc_irq.irq, gsc_asic_intr, 0, "wax", wax);
+ ret = request_irq(wax->gsc_irq.irq, gsc_asic_intr, 0, "wax", wax);
if (ret < 0) {
kfree(wax);
return ret;