/* -*- mode: c; c-basic-offset: 8 -*- */

/* Copyright (C) 1999,2001
 *
 * Author: J.E.J.Bottomley@HansenPartnership.com
 *
 * linux/arch/i386/kernel/voyager_smp.c
 *
 * This file provides all the same external entries as smp.c but uses
 * the voyager hal to provide the functionality
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bootmem.h>
#include <linux/completion.h>
#include <asm/desc.h>
#include <asm/voyager.h>
#include <asm/vic.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/arch_hooks.h>

/* TLB state -- visible externally, indexed physically */
DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0 };

/* CPU IRQ affinity -- set to all ones initially */
static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned = { [0 ... NR_CPUS-1] = ~0UL };

/* per CPU data structure (for /proc/cpuinfo et al), visible externally
 * indexed physically */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_data);

/* physical ID of the CPU used to boot the system */
unsigned char boot_cpu_id;

/* The memory line addresses for the Quad CPIs */
struct voyager_qic_cpi *voyager_quad_cpi_addr[NR_CPUS] __cacheline_aligned;

/* The masks for the Extended VIC processors, filled in by cat_init */
__u32 voyager_extended_vic_processors = 0;

/* Masks for the extended Quad processors which cannot be VIC booted */
__u32 voyager_allowed_boot_processors = 0;

/* The mask for the Quad Processors (both extended and non-extended) */
__u32 voyager_quad_processors = 0;

/* Total count of live CPUs, used in process.c to display
 * the CPU information and in irq.c for the per CPU irq
 * activity count.  Finally exported by i386_ksyms.c */
static int voyager_extended_cpus = 1;

/* Have we found an SMP box - used by time.c to do the profiling
   interrupt for timeslicing; do not set to 1 until the per CPU timer
   interrupt is active */
int smp_found_config = 0;

/* Used for the invalidate map that's also checked in the spinlock */
static volatile unsigned long smp_invalidate_needed;

/* Bitmask of currently online CPUs - used by setup.c for
   /proc/cpuinfo, visible externally but still physical */
cpumask_t cpu_online_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_online_map);

/* Bitmask of CPUs present in the system - exported by i386_ksyms.c, used
 * by scheduler but indexed physically */
cpumask_t phys_cpu_present_map = CPU_MASK_NONE;

/* The internal functions */
static void send_CPI(__u32 cpuset, __u8 cpi);
static void ack_CPI(__u8 cpi);
static int ack_QIC_CPI(__u8 cpi);
static void ack_special_QIC_CPI(__u8 cpi);
static void ack_VIC_CPI(__u8 cpi);
static void send_CPI_allbutself(__u8 cpi);
static void mask_vic_irq(unsigned int irq);
static void unmask_vic_irq(unsigned int irq);
static unsigned int startup_vic_irq(unsigned int irq);
static void enable_local_vic_irq(unsigned int irq);
static void disable_local_vic_irq(unsigned int irq);
static void before_handle_vic_irq(unsigned int irq);
static void after_handle_vic_irq(unsigned int irq);
static void set_vic_irq_affinity(unsigned int irq, cpumask_t mask);
static void ack_vic_irq(unsigned int irq);
static void vic_enable_cpi(void);
static void do_boot_cpu(__u8 cpuid);
static void do_quad_bootstrap(void);

int hard_smp_processor_id(void);
int safe_smp_processor_id(void);

/* Inline functions */
static inline void
send_one_QIC_CPI(__u8 cpu, __u8 cpi)
{
        voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi =
                (smp_processor_id() << 16) + cpi;
}

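/* For illustration: as the write above shows, the QIC CPI word packs
 * the sending CPU's id into the high 16 bits and the CPI number into
 * the low 16 bits, so CPU 2 sending CPI 3 writes 0x00020003 into the
 * target's qic_cpi memory line. */
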
static inline void
send_QIC_CPI(__u32 cpuset, __u8 cpi)
{
        int cpu;

        for_each_online_cpu(cpu) {
                if(cpuset & (1<<cpu)) {
#ifdef VOYAGER_DEBUG
                        if(!cpu_isset(cpu, cpu_online_map))
                                VDEBUG(("CPU%d sending cpi %d to CPU%d not in cpu_online_map\n", hard_smp_processor_id(), cpi, cpu));
#endif
                        send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET);
                }
        }
}

static inline void
wrapper_smp_local_timer_interrupt(void)
{
        irq_enter();
        smp_local_timer_interrupt();
        irq_exit();
}

static void
send_one_CPI(__u8 cpu, __u8 cpi)
{
        if(voyager_quad_processors & (1<<cpu))
                send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET);
        else
                send_CPI(1<<cpu, cpi);
}

static void
send_CPI_allbutself(__u8 cpi)
{
        __u8 cpu = smp_processor_id();
        __u32 mask = cpus_addr(cpu_online_map)[0] & ~(1 << cpu);
        send_CPI(mask, cpi);
}

static inline int
is_cpu_quad(void)
{
        __u8 cpumask = inb(VIC_PROC_WHO_AM_I);
        return ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER);
}

static inline int
is_cpu_extended(void)
{
        __u8 cpu = hard_smp_processor_id();

        return(voyager_extended_vic_processors & (1<<cpu));
}

static inline int
is_cpu_vic_boot(void)
{
        __u8 cpu = hard_smp_processor_id();

        return(voyager_extended_vic_processors
               & voyager_allowed_boot_processors & (1<<cpu));
}

static void
ack_CPI(__u8 cpi)
{
        switch(cpi) {
        case VIC_CPU_BOOT_CPI:
                if(is_cpu_quad() && !is_cpu_vic_boot())
                        ack_QIC_CPI(cpi);
                else
                        ack_VIC_CPI(cpi);
                break;
        case VIC_SYS_INT:
        case VIC_CMN_INT:
                /* These are slightly strange.  Even on the Quad card,
                 * they are vectored as VIC CPIs */
                if(is_cpu_quad())
                        ack_special_QIC_CPI(cpi);
                else
                        ack_VIC_CPI(cpi);
                break;
        default:
                printk("VOYAGER ERROR: CPI%d is in common CPI code\n", cpi);
                break;
        }
}

/* local variables */

/* The VIC IRQ descriptors -- these look almost identical to the
 * 8259 IRQs except that masks and things must be kept per processor
 */
static struct irq_chip vic_chip = {
        .name = "VIC",
        .startup = startup_vic_irq,
        .mask = mask_vic_irq,
        .unmask = unmask_vic_irq,
        .set_affinity = set_vic_irq_affinity,
};

/* used to count up as CPUs are brought on line (starts at 0) */
static int cpucount = 0;

/* steal a page from the bottom of memory for the trampoline and
 * squirrel its address away here.  This will be in kernel virtual
 * space */
static __u32 trampoline_base;

/* The per cpu profile stuff - used in smp_local_timer_interrupt */
static DEFINE_PER_CPU(int, prof_multiplier) = 1;
static DEFINE_PER_CPU(int, prof_old_multiplier) = 1;
static DEFINE_PER_CPU(int, prof_counter) = 1;

/* the map used to check if a CPU has booted */
static __u32 cpu_booted_map;

/* the synchronize flag used to hold all secondary CPUs spinning in
 * a tight loop until the boot sequence is ready for them */
static cpumask_t smp_commenced_mask = CPU_MASK_NONE;

/* This is for the new dynamic CPU boot code */
cpumask_t cpu_callin_map = CPU_MASK_NONE;
cpumask_t cpu_callout_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_callout_map);
cpumask_t cpu_possible_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_possible_map);

/* The per processor IRQ masks (these are usually kept in sync) */
static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;

/* the list of IRQs to be enabled by the VIC_ENABLE_IRQ_CPI */
static __u16 vic_irq_enable_mask[NR_CPUS] __cacheline_aligned = { 0 };

/* Lock for enable/disable of VIC interrupts */
static __cacheline_aligned DEFINE_SPINLOCK(vic_irq_lock);

/* The boot processor is correctly set up in PC mode when it
 * comes up, but the secondaries need their master/slave 8259
 * pairs initializing correctly */

/* Interrupt counters (per cpu) and total - used to try to
 * even up the interrupt handling routines */
static long vic_intr_total = 0;
static long vic_intr_count[NR_CPUS] __cacheline_aligned = { 0 };
static unsigned long vic_tick[NR_CPUS] __cacheline_aligned = { 0 };

/* Since we can only use CPI0, we fake all the other CPIs */
static unsigned long vic_cpi_mailbox[NR_CPUS] __cacheline_aligned;

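/* How the faking works (see send_CPI() and smp_vic_cpi_interrupt()
 * below): the sender sets the wanted CPI's bit in the target's
 * vic_cpi_mailbox and then raises the real CPI0 at that CPU; the
 * receiver test_and_clear's each mailbox bit and dispatches the
 * matching handler.  A fake VIC_RESCHEDULE_CPI therefore amounts to:
 *
 *      set_bit(VIC_RESCHEDULE_CPI, &vic_cpi_mailbox[cpu]);
 *      outb((__u8)(1<<cpu), VIC_CPI_Registers[VIC_CPI_LEVEL0]);
 */
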
/* debugging routine to read the isr of the cpu's pic */
static __u16
vic_read_isr(void)
{
        __u16 isr;

        outb(0x0b, 0xa0);
        isr = inb(0xa0) << 8;
        outb(0x0b, 0x20);
        isr |= inb(0x20);

        return isr;
}

static void
qic_setup(void)
{
        if(!is_cpu_quad()) {
                /* not a quad, no setup */
                return;
        }
        outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0);
        outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1);

        if(is_cpu_extended()) {
                /* the QIC duplicate of the VIC base register */
                outb(VIC_DEFAULT_CPI_BASE, QIC_VIC_CPI_BASE_REGISTER);
                outb(QIC_DEFAULT_CPI_BASE, QIC_CPI_BASE_REGISTER);

                /* FIXME: should set up the QIC timer and memory parity
                 * error vectors here */
        }
}

static void
vic_setup_pic(void)
{
        outb(1, VIC_REDIRECT_REGISTER_1);
        /* clear the claim registers for dynamic routing */
        outb(0, VIC_CLAIM_REGISTER_0);
        outb(0, VIC_CLAIM_REGISTER_1);

        outb(0, VIC_PRIORITY_REGISTER);
        /* Set the Primary and Secondary Microchannel vector
         * bases to be the same as the ordinary interrupts
         *
         * FIXME: This would be more efficient using separate
         * vectors. */
        outb(FIRST_EXTERNAL_VECTOR, VIC_PRIMARY_MC_BASE);
        outb(FIRST_EXTERNAL_VECTOR, VIC_SECONDARY_MC_BASE);
        /* Now initialise the master PIC belonging to this CPU by
         * sending the four ICWs */

        /* ICW1: level triggered, ICW4 needed */
        outb(0x19, 0x20);

        /* ICW2: vector base */
        outb(FIRST_EXTERNAL_VECTOR, 0x21);

        /* ICW3: slave at line 2 */
        outb(0x04, 0x21);

        /* ICW4: 8086 mode */
        outb(0x01, 0x21);

        /* now the same for the slave PIC */

        /* ICW1: level trigger, ICW4 needed */
        outb(0x19, 0xA0);

        /* ICW2: slave vector base */
        outb(FIRST_EXTERNAL_VECTOR + 8, 0xA1);

        /* ICW3: slave ID */
        outb(0x02, 0xA1);

        /* ICW4: 8086 mode */
        outb(0x01, 0xA1);
}

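/* The four-ICW sequence above is the standard 8259A initialisation
 * protocol.  As a sketch (not used by this file; the values are the
 * ones vic_setup_pic() writes, 0x19 being level trigger + ICW4
 * needed), the same dance for one master/slave pair could be factored
 * as:
 *
 *      static void init_8259_pair(__u8 master_base, __u8 slave_base)
 *      {
 *              outb(0x19, 0x20);         ICW1: level trigger, ICW4 needed
 *              outb(master_base, 0x21);  ICW2: master vector base
 *              outb(0x04, 0x21);         ICW3: slave on IR2
 *              outb(0x01, 0x21);         ICW4: 8086 mode
 *              outb(0x19, 0xA0);         and the same for the slave,
 *              outb(slave_base, 0xA1);   with its own vector base,
 *              outb(0x02, 0xA1);         slave ID 2,
 *              outb(0x01, 0xA1);         and 8086 mode
 *      }
 */
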
static void
do_quad_bootstrap(void)
{
        if(is_cpu_quad() && is_cpu_vic_boot()) {
                int i;
                unsigned long flags;
                __u8 cpuid = hard_smp_processor_id();

                local_irq_save(flags);

                for(i = 0; i<4; i++) {
                        /* FIXME: this would be >>3 &0x7 on the 32 way */
                        if(((cpuid >> 2) & 0x03) == i)
                                /* don't lower our own mask! */
                                continue;

                        /* masquerade as local Quad CPU */
                        outb(QIC_CPUID_ENABLE | i, QIC_PROCESSOR_ID);
                        /* enable the startup CPI */
                        outb(QIC_BOOT_CPI_MASK, QIC_MASK_REGISTER1);
                        /* restore cpu id */
                        outb(0, QIC_PROCESSOR_ID);
                }
                local_irq_restore(flags);
        }
}

/* Set up all the basic stuff: read the SMP config and make all the
 * SMP information reflect only the boot cpu.  All others will be
 * brought on-line later. */
void __init
find_smp_config(void)
{
        int i;

        boot_cpu_id = hard_smp_processor_id();

        printk("VOYAGER SMP: Boot cpu is %d\n", boot_cpu_id);

        /* initialize the CPU structures (moved from smp_boot_cpus) */
        for(i=0; i<NR_CPUS; i++) {
                cpu_irq_affinity[i] = ~0;
        }
        cpu_online_map = cpumask_of_cpu(boot_cpu_id);

        /* The boot CPU must be extended */
        voyager_extended_vic_processors = 1<<boot_cpu_id;
        /* initially, all of the first 8 CPUs can boot */
        voyager_allowed_boot_processors = 0xff;
        /* set up everything for just this CPU, we can alter
         * this as we start the other CPUs later */
        /* now get the CPU disposition from the extended CMOS */
        cpus_addr(phys_cpu_present_map)[0] = voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK);
        cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8;
        cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 2) << 16;
        cpus_addr(phys_cpu_present_map)[0] |= voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 3) << 24;
        cpu_possible_map = phys_cpu_present_map;
        printk("VOYAGER SMP: phys_cpu_present_map = 0x%lx\n", cpus_addr(phys_cpu_present_map)[0]);
        /* Here we set up the VIC to enable SMP */
        /* enable the CPIs by writing the base vector to their register */
        outb(VIC_DEFAULT_CPI_BASE, VIC_CPI_BASE_REGISTER);
        outb(1, VIC_REDIRECT_REGISTER_1);
        /* set the claim registers for static routing --- Boot CPU gets
         * all interrupts until all other CPUs started */
        outb(0xff, VIC_CLAIM_REGISTER_0);
        outb(0xff, VIC_CLAIM_REGISTER_1);
        /* Set the Primary and Secondary Microchannel vector
         * bases to be the same as the ordinary interrupts
         *
         * FIXME: This would be more efficient using separate
         * vectors. */
        outb(FIRST_EXTERNAL_VECTOR, VIC_PRIMARY_MC_BASE);
        outb(FIRST_EXTERNAL_VECTOR, VIC_SECONDARY_MC_BASE);

        /* Finally tell the firmware that we're driving */
        outb(inb(VOYAGER_SUS_IN_CONTROL_PORT) | VOYAGER_IN_CONTROL_FLAG,
             VOYAGER_SUS_IN_CONTROL_PORT);

        current_thread_info()->cpu = boot_cpu_id;
        write_pda(cpu_number, boot_cpu_id);
}

/* The bootstrap kernel entry code has set these up.  Save them
 * for a given CPU, id is physical */
void __init
smp_store_cpu_info(int id)
{
        struct cpuinfo_x86 *c = &cpu_data[id];

        *c = boot_cpu_data;

        identify_cpu(c);
}

/* set up the trampoline and return the physical address of the code */
static __u32 __init
setup_trampoline(void)
{
        /* these two are global symbols in trampoline.S */
        extern __u8 trampoline_end[];
        extern __u8 trampoline_data[];

        memcpy((__u8 *)trampoline_base, trampoline_data,
               trampoline_end - trampoline_data);
        return virt_to_phys((__u8 *)trampoline_base);
}

/* Routine initially called when a non-boot CPU is brought online */
static void __init
start_secondary(void *unused)
{
        __u8 cpuid = hard_smp_processor_id();
        /* external functions not defined in the headers */
        extern void calibrate_delay(void);

        secondary_cpu_init();

        /* OK, we're in the routine */
        ack_CPI(VIC_CPU_BOOT_CPI);

        /* setup the 8259 master slave pair belonging to this CPU ---
         * we won't actually receive any until the boot CPU
         * relinquishes its static routing mask */
        vic_setup_pic();

        qic_setup();

        if(is_cpu_quad() && !is_cpu_vic_boot()) {
                /* clear the boot CPI */
                __u8 dummy;

                dummy = voyager_quad_cpi_addr[cpuid]->qic_cpi[VIC_CPU_BOOT_CPI].cpi;
                printk("read dummy %d\n", dummy);
        }

        /* lower the mask to receive CPIs */
        vic_enable_cpi();

        VDEBUG(("VOYAGER SMP: CPU%d, stack at about %p\n", cpuid, &cpuid));

        /* enable interrupts */
        local_irq_enable();

        /* get our bogomips */
        calibrate_delay();

        /* save our processor parameters */
        smp_store_cpu_info(cpuid);

        /* if we're a quad, we may need to bootstrap other CPUs */
        do_quad_bootstrap();

        /* FIXME: this is rather a poor hack to prevent the CPU
         * activating softirqs while it's supposed to be waiting for
         * permission to proceed.  Without this, the new per CPU stuff
         * in the softirqs will fail */
        local_irq_disable();
        cpu_set(cpuid, cpu_callin_map);

        /* signal that we're done */
        cpu_booted_map = 1;

        while (!cpu_isset(cpuid, smp_commenced_mask))
                rep_nop();
        local_irq_enable();

        local_flush_tlb();

        cpu_set(cpuid, cpu_online_map);
        wmb();
        cpu_idle();
}

/* Routine to kick start the given CPU and wait for it to report ready
 * (or timeout in startup).  When this routine returns, the requested
 * CPU is either fully running and configured or known to be dead.
 *
 * We call this routine sequentially 1 CPU at a time, so no need for
 * locking */
static void __init
do_boot_cpu(__u8 cpu)
{
        struct task_struct *idle;
        int timeout;
        unsigned long flags;
        int quad_boot = (1<<cpu) & voyager_quad_processors
                & ~( voyager_extended_vic_processors
                     & voyager_allowed_boot_processors);

        /* This is an area in head.S which was used to set up the
         * initial kernel stack.  We need to alter this to give the
         * booting CPU a new stack (taken from its idle process) */
        extern struct {
                __u8 *esp;
                unsigned short ss;
        } stack_start;
        /* This is the format of the CPI IDT gate (in real mode) which
         * we're hijacking to boot the CPU */
        union IDTFormat {
                struct seg {
                        __u16 Offset;
                        __u16 Segment;
                } idt;
                __u32 val;
        } hijack_source;

        __u32 *hijack_vector;
        __u32 start_phys_address = setup_trampoline();

        /* There's a clever trick to this: The linux trampoline is
         * compiled to begin at absolute location zero, so make the
         * address zero but have the data segment selector compensate
         * for the actual address */
        hijack_source.idt.Offset = start_phys_address & 0x000F;
        hijack_source.idt.Segment = (start_phys_address >> 4) & 0xFFFF;
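        /* Worked example: if setup_trampoline() returned physical
         * address 0x92000, the gate becomes Segment = 0x9200,
         * Offset = 0x0, which real mode resolves as Segment*16 +
         * Offset = 0x92000 again -- but with the segment base at the
         * trampoline itself, matching its link address of zero */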

        cpucount++;
        alternatives_smp_switch(1);

        idle = fork_idle(cpu);
        if(IS_ERR(idle))
                panic("failed fork for CPU%d", cpu);
        idle->thread.eip = (unsigned long) start_secondary;
        /* init_tasks (in sched.c) is indexed logically */
        stack_start.esp = (void *) idle->thread.esp;

        /* Pre-allocate and initialize the CPU's GDT and PDA so it
           doesn't have to do any memory allocation during the
           delicate CPU-bringup phase. */
        if (!init_gdt(cpu, idle)) {
                printk(KERN_INFO "Couldn't allocate GDT/PDA for CPU %d\n", cpu);
                cpucount--;
                return;
        }

        irq_ctx_init(cpu);

        /* Note: Don't modify initial ss override */
        VDEBUG(("VOYAGER SMP: Booting CPU%d at 0x%lx[%x:%x], stack %p\n", cpu,
                (unsigned long)hijack_source.val, hijack_source.idt.Segment,
                hijack_source.idt.Offset, stack_start.esp));

        /* init lowmem identity mapping */
        clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
                        min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS));
        flush_tlb_all();

        if(quad_boot) {
                printk("CPU %d: non extended Quad boot\n", cpu);
                hijack_vector = (__u32 *)phys_to_virt((VIC_CPU_BOOT_CPI + QIC_DEFAULT_CPI_BASE)*4);
                *hijack_vector = hijack_source.val;
        } else {
                printk("CPU%d: extended VIC boot\n", cpu);
                hijack_vector = (__u32 *)phys_to_virt((VIC_CPU_BOOT_CPI + VIC_DEFAULT_CPI_BASE)*4);
                *hijack_vector = hijack_source.val;
                /* VIC errata, may also receive interrupt at this address */
                hijack_vector = (__u32 *)phys_to_virt((VIC_CPU_BOOT_ERRATA_CPI + VIC_DEFAULT_CPI_BASE)*4);
                *hijack_vector = hijack_source.val;
        }
        /* All non-boot CPUs start with interrupts fully masked.  Need
         * to lower the mask of the CPI we're about to send.  We do
         * this in the VIC by masquerading as the processor we're
         * about to boot and lowering its interrupt mask */
        local_irq_save(flags);
        if(quad_boot) {
                send_one_QIC_CPI(cpu, VIC_CPU_BOOT_CPI);
        } else {
                outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID);
                /* here we're altering registers belonging to `cpu' */

                outb(VIC_BOOT_INTERRUPT_MASK, 0x21);
                /* now go back to our original identity */
                outb(boot_cpu_id, VIC_PROCESSOR_ID);

                /* and boot the CPU */
                send_CPI((1<<cpu), VIC_CPU_BOOT_CPI);
        }
        cpu_booted_map = 0;
        local_irq_restore(flags);

        /* now wait for it to become ready (or timeout) */
        for(timeout = 0; timeout < 50000; timeout++) {
                if(cpu_booted_map)
                        break;
                udelay(100);
        }
        /* reset the page table */
        zap_low_mappings();

        if (cpu_booted_map) {
                VDEBUG(("CPU%d: Booted successfully, back in CPU %d\n",
                        cpu, smp_processor_id()));

                printk("CPU%d: ", cpu);
                print_cpu_info(&cpu_data[cpu]);
                wmb();
                cpu_set(cpu, cpu_callout_map);
                cpu_set(cpu, cpu_present_map);
        } else {
                printk("CPU%d FAILED TO BOOT: ", cpu);
                if (*((volatile unsigned char *)phys_to_virt(start_phys_address))==0xA5)
                        printk("Stuck.\n");
                else
                        printk("Not responding.\n");

                cpucount--;
        }
}

void __init
smp_boot_cpus(void)
{
        int i;

        /* CAT BUS initialisation must be done after the memory */
        /* FIXME: The L4 has a catbus too, it just needs to be
         * accessed in a totally different way */
        if(voyager_level == 5) {
                voyager_cat_init();

                /* now that the cat has probed the Voyager System Bus, sanity
                 * check the cpu map */
                if( ((voyager_quad_processors | voyager_extended_vic_processors)
                     & cpus_addr(phys_cpu_present_map)[0]) != cpus_addr(phys_cpu_present_map)[0]) {
                        /* should panic */
                        printk("\n\n***WARNING*** Sanity check of CPU present map FAILED\n");
                }
        } else if(voyager_level == 4)
                voyager_extended_vic_processors = cpus_addr(phys_cpu_present_map)[0];

        /* this sets up the idle task to run on the current cpu */
        voyager_extended_cpus = 1;
        /* Remove the global_irq_holder setting, it triggers a BUG() on
         * schedule at the moment */
        //global_irq_holder = boot_cpu_id;

        /* FIXME: Need to do something about this but currently only works
         * on CPUs with a tsc which none of mine have.
        smp_tune_scheduling();
         */
        smp_store_cpu_info(boot_cpu_id);
        printk("CPU%d: ", boot_cpu_id);
        print_cpu_info(&cpu_data[boot_cpu_id]);

        if(is_cpu_quad()) {
                /* booting on a Quad CPU */
                printk("VOYAGER SMP: Boot CPU is Quad\n");
                qic_setup();
                do_quad_bootstrap();
        }

        /* enable our own CPIs */
        vic_enable_cpi();

        cpu_set(boot_cpu_id, cpu_online_map);
        cpu_set(boot_cpu_id, cpu_callout_map);

        /* loop over all the extended VIC CPUs and boot them.  The
         * Quad CPUs must be bootstrapped by their extended VIC cpu */
        for(i = 0; i < NR_CPUS; i++) {
                if(i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map))
                        continue;
                do_boot_cpu(i);
                /* This udelay seems to be needed for the Quad boots
                 * don't remove unless you know what you're doing */
                udelay(1000);
        }
        /* we could compute the total bogomips here, but why bother?
         * Code added from smpboot.c */
        {
                unsigned long bogosum = 0;
                for (i = 0; i < NR_CPUS; i++)
                        if (cpu_isset(i, cpu_online_map))
                                bogosum += cpu_data[i].loops_per_jiffy;
                printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
                       cpucount+1,
                       bogosum/(500000/HZ),
                       (bogosum/(5000/HZ))%100);
        }
        voyager_extended_cpus = hweight32(voyager_extended_vic_processors);
        printk("VOYAGER: Extended (interrupt handling CPUs): %d, non-extended: %d\n", voyager_extended_cpus, num_booting_cpus() - voyager_extended_cpus);
        /* that's it, switch to symmetric mode */
        outb(0, VIC_PRIORITY_REGISTER);
        outb(0, VIC_CLAIM_REGISTER_0);
        outb(0, VIC_CLAIM_REGISTER_1);

        VDEBUG(("VOYAGER SMP: Booted with %d CPUs\n", num_booting_cpus()));
}

/* Reload the secondary CPUs task structure (this function does not
 * return) */
void __init
initialize_secondary(void)
{
#if 0
        // AC kernels only
        set_current(hard_get_current());
#endif

        /*
         * switch to the per CPU GDT we already set up
         * in do_boot_cpu()
         */
        cpu_set_gdt(current_thread_info()->cpu);

        /*
         * We don't actually need to load the full TSS,
         * basically just the stack pointer and the eip.
         */
        asm volatile(
                "movl %0,%%esp\n\t"
                "jmp *%1"
                :
                :"r" (current->thread.esp),"r" (current->thread.eip));
}

/* handle a Voyager SYS_INT -- If we don't, the base board will
 * panic the system.
 *
 * System interrupts occur because some problem was detected on the
 * various busses.  To find out what you have to probe all the
 * hardware via the CAT bus.  FIXME: At the moment we do nothing. */
fastcall void
smp_vic_sys_interrupt(struct pt_regs *regs)
{
        ack_CPI(VIC_SYS_INT);
        printk("Voyager SYSTEM INTERRUPT\n");
}

/* Handle a voyager CMN_INT; These interrupts occur either because of
 * a system status change or because a single bit memory error
 * occurred.  FIXME: At the moment, ignore all this. */
fastcall void
smp_vic_cmn_interrupt(struct pt_regs *regs)
{
        static __u8 in_cmn_int = 0;
        static DEFINE_SPINLOCK(cmn_int_lock);

        /* common ints are broadcast, so make sure we only do this once */
        _raw_spin_lock(&cmn_int_lock);
        if(in_cmn_int)
                goto unlock_end;

        in_cmn_int++;
        _raw_spin_unlock(&cmn_int_lock);

        VDEBUG(("Voyager COMMON INTERRUPT\n"));

        if(voyager_level == 5)
                voyager_cat_do_common_interrupt();

        _raw_spin_lock(&cmn_int_lock);
        in_cmn_int = 0;
 unlock_end:
        _raw_spin_unlock(&cmn_int_lock);
        ack_CPI(VIC_CMN_INT);
}

/*
 * Reschedule call back. Nothing to do, all the work is done
 * automatically when we return from the interrupt.  */
static void
smp_reschedule_interrupt(void)
{
        /* do nothing */
}

static struct mm_struct * flush_mm;
static unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);
#define FLUSH_ALL       0xffffffff

/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 *
 * We need to reload %cr3 since the page tables may be going
 * away from under us..
 */
static inline void
leave_mm (unsigned long cpu)
{
        if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
                BUG();
        cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
        load_cr3(swapper_pg_dir);
}

/*
 * Invalidate call-back
 */
static void
smp_invalidate_interrupt(void)
{
        __u8 cpu = smp_processor_id();

        if (!test_bit(cpu, &smp_invalidate_needed))
                return;
        /* This will flood messages.  Don't uncomment unless you see
         * Problems with cross cpu invalidation
        VDEBUG(("VOYAGER SMP: CPU%d received INVALIDATE_CPI\n",
                smp_processor_id()));
        */

        if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
                if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
                        if (flush_va == FLUSH_ALL)
                                local_flush_tlb();
                        else
                                __flush_tlb_one(flush_va);
                } else
                        leave_mm(cpu);
        }
        smp_mb__before_clear_bit();
        clear_bit(cpu, &smp_invalidate_needed);
        smp_mb__after_clear_bit();
}

/* All the new flush operations for 2.4 */

/* This routine is called with a physical cpu mask */
static void
flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
                  unsigned long va)
{
        int stuck = 50000;

        if (!cpumask)
                BUG();
        if ((cpumask & cpus_addr(cpu_online_map)[0]) != cpumask)
                BUG();
        if (cpumask & (1 << smp_processor_id()))
                BUG();
        if (!mm)
                BUG();

        spin_lock(&tlbstate_lock);

        flush_mm = mm;
        flush_va = va;
        atomic_set_mask(cpumask, &smp_invalidate_needed);
        /*
         * We have to send the CPI only to
         * CPUs affected.
         */
        send_CPI(cpumask, VIC_INVALIDATE_CPI);

        while (smp_invalidate_needed) {
                mb();
                if(--stuck == 0) {
                        printk("***WARNING*** Stuck doing invalidate CPI (CPU%d)\n", smp_processor_id());
                        break;
                }
        }

        /* Uncomment only to debug invalidation problems
        VDEBUG(("VOYAGER SMP: Completed invalidate CPI (CPU%d)\n", cpu));
        */

        flush_mm = NULL;
        flush_va = 0;
        spin_unlock(&tlbstate_lock);
}

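/* The handshake between the two routines above: flush_tlb_others()
 * sets one bit per target CPU in smp_invalidate_needed before sending
 * VIC_INVALIDATE_CPI, and each smp_invalidate_interrupt() clears its
 * own bit once it has flushed; the sender simply spins (with a
 * bounded retry count) until the whole word reads zero. */
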
void
flush_tlb_current_task(void)
{
        struct mm_struct *mm = current->mm;
        unsigned long cpu_mask;

        preempt_disable();

        cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
        local_flush_tlb();
        if (cpu_mask)
                flush_tlb_others(cpu_mask, mm, FLUSH_ALL);

        preempt_enable();
}

void
flush_tlb_mm (struct mm_struct * mm)
{
        unsigned long cpu_mask;

        preempt_disable();

        cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());

        if (current->active_mm == mm) {
                if (current->mm)
                        local_flush_tlb();
                else
                        leave_mm(smp_processor_id());
        }
        if (cpu_mask)
                flush_tlb_others(cpu_mask, mm, FLUSH_ALL);

        preempt_enable();
}

void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long cpu_mask;

        preempt_disable();

        cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
        if (current->active_mm == mm) {
                if (current->mm)
                        __flush_tlb_one(va);
                else
                        leave_mm(smp_processor_id());
        }

        if (cpu_mask)
                flush_tlb_others(cpu_mask, mm, va);

        preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);

/* enable the requested IRQs */
static void
smp_enable_irq_interrupt(void)
{
        __u8 irq;
        __u8 cpu = get_cpu();

        VDEBUG(("VOYAGER SMP: CPU%d enabling irq mask 0x%x\n", cpu,
                vic_irq_enable_mask[cpu]));

        spin_lock(&vic_irq_lock);
        for(irq = 0; irq < 16; irq++) {
                if(vic_irq_enable_mask[cpu] & (1<<irq))
                        enable_local_vic_irq(irq);
        }
        vic_irq_enable_mask[cpu] = 0;
        spin_unlock(&vic_irq_lock);

        put_cpu_no_resched();
}

/*
 * CPU halt call-back
 */
static void
smp_stop_cpu_function(void *dummy)
{
        VDEBUG(("VOYAGER SMP: CPU%d is STOPPING\n", smp_processor_id()));
        cpu_clear(smp_processor_id(), cpu_online_map);
        local_irq_disable();
        for(;;)
                halt();
}

static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
        void (*func) (void *info);
        void *info;
        volatile unsigned long started;
        volatile unsigned long finished;
        int wait;
};

static struct call_data_struct * call_data;

/* execute a thread on a new CPU.  The function to be called must be
 * previously set up.  This is used to schedule a function for
 * execution on all CPUs - set up the function then broadcast a
 * function_interrupt CPI to come here on each CPU */
static void
smp_call_function_interrupt(void)
{
        void (*func) (void *info) = call_data->func;
        void *info = call_data->info;
        /* must take copy of wait because call_data may be replaced
         * unless the function is waiting for us to finish */
        int wait = call_data->wait;
        __u8 cpu = smp_processor_id();

        /*
         * Notify initiating CPU that I've grabbed the data and am
         * about to execute the function
         */
        mb();
        if(!test_and_clear_bit(cpu, &call_data->started)) {
                /* If the bit wasn't set, this could be a replay */
                printk(KERN_WARNING "VOYAGER SMP: CPU %d received call function with no call pending\n", cpu);
                return;
        }
        /*
         * At this point the info structure may be out of scope unless wait==1
         */
        irq_enter();
        (*func)(info);
        irq_exit();
        if (wait) {
                mb();
                clear_bit(cpu, &call_data->finished);
        }
}

static int
__smp_call_function_mask (void (*func) (void *info), void *info, int retry,
                          int wait, __u32 mask)
{
        struct call_data_struct data;

        mask &= ~(1<<smp_processor_id());

        if (!mask)
                return 0;

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        data.func = func;
        data.info = info;
        data.started = mask;
        data.wait = wait;
        if (wait)
                data.finished = mask;

        spin_lock(&call_lock);
        call_data = &data;
        wmb();
        /* Send a message to all other CPUs and wait for them to respond */
        send_CPI(mask, VIC_CALL_FUNCTION_CPI);

        /* Wait for response */
        while (data.started)
                barrier();

        if (wait)
                while (data.finished)
                        barrier();

        spin_unlock(&call_lock);

        return 0;
}

/* Call this function on all CPUs using the function_interrupt above
 *  <func> The function to run. This must be fast and non-blocking.
 *  <info> An arbitrary pointer to pass to the function.
 *  <retry> If true, keep retrying until ready.
 *  <wait> If true, wait until function has completed on other CPUs.
 *  [RETURNS] 0 on success, else a negative status code. Does not return
 *  until remote CPUs are nearly ready to execute <<func>> or have executed.
 */
int
smp_call_function(void (*func) (void *info), void *info, int retry,
                  int wait)
{
        __u32 mask = cpus_addr(cpu_online_map)[0];

        return __smp_call_function_mask(func, info, retry, wait, mask);
}
EXPORT_SYMBOL(smp_call_function);

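/* Typical usage, as in smp_send_stop() later in this file:
 *
 *      smp_call_function(smp_stop_cpu_function, NULL, 1, 1);
 *
 * i.e. run the handler on every other online CPU and wait for them
 * all to finish before returning. */
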
/*
 * smp_call_function_single - Run a function on another CPU
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: Currently unused.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * Does not return until the remote CPU is nearly ready to execute <func>
 * or is or has executed.
 */
int
smp_call_function_single(int cpu, void (*func) (void *info), void *info,
                         int nonatomic, int wait)
{
        __u32 mask = 1 << cpu;

        return __smp_call_function_mask(func, info, nonatomic, wait, mask);
}
EXPORT_SYMBOL(smp_call_function_single);

/* Sorry about the name.  In an APIC based system, the APICs
 * themselves are programmed to send a timer interrupt.  This is used
 * by linux to reschedule the processor.  Voyager doesn't have this,
 * so we use the system clock to interrupt one processor, which in
 * turn, broadcasts a timer CPI to all the others --- we receive that
 * CPI here.  We don't use this actually for counting so losing
 * ticks doesn't matter
 *
 * FIXME: For those CPUs which actually have a local APIC, we could
 * try to use it to trigger this interrupt instead of having to
 * broadcast the timer tick.  Unfortunately, all my pentium DYADs have
 * no local APIC, so I can't do this
 *
 * This function is currently a placeholder and is unused in the code */
fastcall void
smp_apic_timer_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        wrapper_smp_local_timer_interrupt();
        set_irq_regs(old_regs);
}

/* All of the QUAD interrupt GATES */
fastcall void
smp_qic_timer_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        ack_QIC_CPI(QIC_TIMER_CPI);
        wrapper_smp_local_timer_interrupt();
        set_irq_regs(old_regs);
}

fastcall void
smp_qic_invalidate_interrupt(struct pt_regs *regs)
{
        ack_QIC_CPI(QIC_INVALIDATE_CPI);
        smp_invalidate_interrupt();
}

fastcall void
smp_qic_reschedule_interrupt(struct pt_regs *regs)
{
        ack_QIC_CPI(QIC_RESCHEDULE_CPI);
        smp_reschedule_interrupt();
}

fastcall void
smp_qic_enable_irq_interrupt(struct pt_regs *regs)
{
        ack_QIC_CPI(QIC_ENABLE_IRQ_CPI);
        smp_enable_irq_interrupt();
}

fastcall void
smp_qic_call_function_interrupt(struct pt_regs *regs)
{
        ack_QIC_CPI(QIC_CALL_FUNCTION_CPI);
        smp_call_function_interrupt();
}

fastcall void
smp_vic_cpi_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        __u8 cpu = smp_processor_id();

        if(is_cpu_quad())
                ack_QIC_CPI(VIC_CPI_LEVEL0);
        else
                ack_VIC_CPI(VIC_CPI_LEVEL0);

        if(test_and_clear_bit(VIC_TIMER_CPI, &vic_cpi_mailbox[cpu]))
                wrapper_smp_local_timer_interrupt();
        if(test_and_clear_bit(VIC_INVALIDATE_CPI, &vic_cpi_mailbox[cpu]))
                smp_invalidate_interrupt();
        if(test_and_clear_bit(VIC_RESCHEDULE_CPI, &vic_cpi_mailbox[cpu]))
                smp_reschedule_interrupt();
        if(test_and_clear_bit(VIC_ENABLE_IRQ_CPI, &vic_cpi_mailbox[cpu]))
                smp_enable_irq_interrupt();
        if(test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu]))
                smp_call_function_interrupt();
        set_irq_regs(old_regs);
}

static void
do_flush_tlb_all(void* info)
{
        unsigned long cpu = smp_processor_id();

        __flush_tlb_all();
        if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
                leave_mm(cpu);
}

/* flush the TLB of every active CPU in the system */
void
flush_tlb_all(void)
{
        on_each_cpu(do_flush_tlb_all, 0, 1, 1);
}

/* used to set up the trampoline for other CPUs when the memory manager
 * is sufficiently set up */
void __init
smp_alloc_memory(void)
{
        trampoline_base = (__u32)alloc_bootmem_low_pages(PAGE_SIZE);
        if(__pa(trampoline_base) >= 0x93000)
                BUG();
}

/* send a reschedule CPI to one CPU by physical CPU number */
void
smp_send_reschedule(int cpu)
{
        send_one_CPI(cpu, VIC_RESCHEDULE_CPI);
}

int
hard_smp_processor_id(void)
{
        __u8 i;
        __u8 cpumask = inb(VIC_PROC_WHO_AM_I);
        if((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER)
                return cpumask & 0x1F;

        for(i = 0; i < 8; i++) {
                if(cpumask & (1<<i))
                        return i;
        }
        printk("** WARNING ** Illegal cpuid returned by VIC: %d", cpumask);
        return 0;
}

int
safe_smp_processor_id(void)
{
        return hard_smp_processor_id();
}

/* broadcast a halt to all other CPUs */
void
smp_send_stop(void)
{
        smp_call_function(smp_stop_cpu_function, NULL, 1, 1);
}

/* this function is triggered in time.c when a clock tick fires
 * we need to re-broadcast the tick to all CPUs */
void
smp_vic_timer_interrupt(void)
{
        send_CPI_allbutself(VIC_TIMER_CPI);
        smp_local_timer_interrupt();
}

/* local (per CPU) timer interrupt.  It does both profiling and
 * process statistics/rescheduling.
 *
 * We do profiling in every local tick, statistics/rescheduling
 * happen only every 'profiling multiplier' ticks.  The default
 * multiplier is 1 and it can be changed by writing the new multiplier
 * value into /proc/profile.
 */
void
smp_local_timer_interrupt(void)
{
        int cpu = smp_processor_id();
        long weight;

        profile_tick(CPU_PROFILING);
        if (--per_cpu(prof_counter, cpu) <= 0) {
                /*
                 * The multiplier may have changed since the last time we got
                 * to this point as a result of the user writing to
                 * /proc/profile. In this case we need to adjust the APIC
                 * timer accordingly.
                 *
                 * Interrupts are already masked off at this point.
                 */
                per_cpu(prof_counter,cpu) = per_cpu(prof_multiplier, cpu);
                if (per_cpu(prof_counter, cpu) !=
                    per_cpu(prof_old_multiplier, cpu)) {
                        /* FIXME: need to update the vic timer tick here */
                        per_cpu(prof_old_multiplier, cpu) =
                                per_cpu(prof_counter, cpu);
                }

                update_process_times(user_mode_vm(get_irq_regs()));
        }

        if( ((1<<cpu) & voyager_extended_vic_processors) == 0)
                /* only extended VIC processors participate in
                 * interrupt distribution */
                return;

        /*
         * We take the 'long' return path, and there every subsystem
         * grabs the appropriate locks (kernel lock/ irq lock).
         *
         * we might want to decouple profiling from the 'long path',
         * and do the profiling totally in assembly.
         *
         * Currently this isn't too much of an issue (performance wise),
         * we can take more than 100K local irqs per second on a 100 MHz P5.
         */

        if((++vic_tick[cpu] & 0x7) != 0)
                return;
        /* get here every 8 ticks (the 0x7 mask above) */

        /* Change our priority to give someone else a chance at getting
         * the IRQ. The algorithm goes like this:
         *
         * In the VIC, the dynamically routed interrupt is always
         * handled by the lowest priority eligible (i.e. receiving
         * interrupts) CPU. If >1 eligible CPUs are equal lowest, the
         * lowest processor number gets it.
         *
         * The priority of a CPU is controlled by a special per-CPU
         * VIC priority register which is 3 bits wide 0 being lowest
         * and 7 highest priority.
         *
         * Therefore we subtract the average number of interrupts from
         * the number we've fielded.  If this number is negative, we
         * lower the activity count and if it is positive, we raise
         * it.
         *
         * I'm afraid this still leads to odd looking interrupt counts:
         * the totals are all roughly equal, but the individual ones
         * look rather skewed.
         *
         * FIXME: This algorithm is total crap when mixed with SMP
         * affinity code since we now try to even up the interrupt
         * counts when an affinity binding is keeping them on a
         * particular CPU */
        weight = (vic_intr_count[cpu]*voyager_extended_cpus
                  - vic_intr_total) >> 4;
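        /* Worked example (illustrative numbers): with 4 extended CPUs,
         * if this CPU has fielded 1000 of 3200 total interrupts, then
         * weight = (1000*4 - 3200) >> 4 = +50: we have taken more than
         * the average share, so the biased-and-clamped value written
         * to the 3-bit priority register below saturates high, making
         * other CPUs preferred for the next interrupts */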
        weight += 4;
        if(weight > 7)
                weight = 7;
        if(weight < 0)
                weight = 0;

        outb((__u8)weight, VIC_PRIORITY_REGISTER);

#ifdef VOYAGER_DEBUG
        if((vic_tick[cpu] & 0xFFF) == 0) {
                /* print this message roughly every 25 secs */
                printk("VOYAGER SMP: vic_tick[%d] = %lu, weight = %ld\n",
                       cpu, vic_tick[cpu], weight);
        }
#endif
}

/* setup the profiling timer */
int
setup_profiling_timer(unsigned int multiplier)
{
        int i;

        if (!multiplier)
                return -EINVAL;

        /*
         * Set the new multiplier for each CPU. CPUs don't start using the
         * new values until the next timer interrupt in which they do process
         * accounting.
         */
        for (i = 0; i < NR_CPUS; ++i)
                per_cpu(prof_multiplier, i) = multiplier;

        return 0;
}

/* This is a bit of a mess, but forced on us by the genirq changes
 * there's no genirq handler that really does what voyager wants
 * so hack it up with the simple IRQ handler */
static void fastcall
handle_vic_irq(unsigned int irq, struct irq_desc *desc)
{
        before_handle_vic_irq(irq);
        handle_simple_irq(irq, desc);
        after_handle_vic_irq(irq);
}

/* The CPIs are handled in the per cpu 8259s, so they must be
 * enabled to be received: FIX: enabling the CPIs in the early
 * boot sequence interferes with bug checking; enable them later
 * in smp_intr_init */
#define VIC_SET_GATE(cpi, vector) \
        set_intr_gate((cpi) + VIC_DEFAULT_CPI_BASE, (vector))
#define QIC_SET_GATE(cpi, vector) \
        set_intr_gate((cpi) + QIC_DEFAULT_CPI_BASE, (vector))

void __init
smp_intr_init(void)
{
        int i;

        /* initialize the per cpu irq mask to all disabled */
        for(i = 0; i < NR_CPUS; i++)
                vic_irq_mask[i] = 0xFFFF;

        VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt);

        VIC_SET_GATE(VIC_SYS_INT, vic_sys_interrupt);
        VIC_SET_GATE(VIC_CMN_INT, vic_cmn_interrupt);

        QIC_SET_GATE(QIC_TIMER_CPI, qic_timer_interrupt);
        QIC_SET_GATE(QIC_INVALIDATE_CPI, qic_invalidate_interrupt);
        QIC_SET_GATE(QIC_RESCHEDULE_CPI, qic_reschedule_interrupt);
        QIC_SET_GATE(QIC_ENABLE_IRQ_CPI, qic_enable_irq_interrupt);
        QIC_SET_GATE(QIC_CALL_FUNCTION_CPI, qic_call_function_interrupt);

        /* now put the VIC descriptor into the first 48 IRQs
         *
         * This is for later: first 16 correspond to PC IRQs; next 16
         * are Primary MC IRQs and final 16 are Secondary MC IRQs */
        for(i = 0; i < 48; i++)
                set_irq_chip_and_handler(i, &vic_chip, handle_vic_irq);
}

/* send a CPI at level cpi to a set of cpus in cpuset (set 1 bit per
 * processor to receive CPI) */
static void
send_CPI(__u32 cpuset, __u8 cpi)
{
        int cpu;
        __u32 quad_cpuset = (cpuset & voyager_quad_processors);

        if(cpi < VIC_START_FAKE_CPI) {
                /* fake CPI are only used for booting, so send to the
                 * extended quads as well---Quads must be VIC booted */
                outb((__u8)(cpuset), VIC_CPI_Registers[cpi]);
                return;
        }
        if(quad_cpuset)
                send_QIC_CPI(quad_cpuset, cpi);
        cpuset &= ~quad_cpuset;
        cpuset &= 0xff;         /* only first 8 CPUs valid for VIC CPI */
        if(cpuset == 0)
                return;
        for_each_online_cpu(cpu) {
                if(cpuset & (1<<cpu))
                        set_bit(cpi, &vic_cpi_mailbox[cpu]);
        }
        if(cpuset)
                outb((__u8)cpuset, VIC_CPI_Registers[VIC_CPI_LEVEL0]);
}

/* Acknowledge receipt of CPI in the QIC, clear in QIC hardware and
 * set the cache line to shared by reading it.
 *
 * DON'T make this inline otherwise the cache line read will be
 * optimised away
 */
static int
ack_QIC_CPI(__u8 cpi) {
        __u8 cpu = hard_smp_processor_id();

        cpi &= 7;

        outb(1<<cpi, QIC_INTERRUPT_CLEAR1);
        return voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi;
}

static void
ack_special_QIC_CPI(__u8 cpi)
{
        switch(cpi) {
        case VIC_CMN_INT:
                outb(QIC_CMN_INT, QIC_INTERRUPT_CLEAR0);
                break;
        case VIC_SYS_INT:
                outb(QIC_SYS_INT, QIC_INTERRUPT_CLEAR0);
                break;
        }
        /* also clear at the VIC, just in case (nop for non-extended proc) */
        ack_VIC_CPI(cpi);
}

/* Acknowledge receipt of CPI in the VIC (essentially an EOI) */
static void
ack_VIC_CPI(__u8 cpi)
{
#ifdef VOYAGER_DEBUG
        unsigned long flags;
        __u16 isr;
        __u8 cpu = smp_processor_id();

        local_irq_save(flags);
        isr = vic_read_isr();
        if((isr & (1<<(cpi &7))) == 0) {
                printk("VOYAGER SMP: CPU%d lost CPI%d\n", cpu, cpi);
        }
#endif
        /* send specific EOI; the two system interrupts have
         * bit 4 set for a separate vector but behave as the
         * corresponding 3 bit intr */
        outb_p(0x60|(cpi & 7),0x20);

#ifdef VOYAGER_DEBUG
        if((vic_read_isr() & (1<<(cpi &7))) != 0) {
                printk("VOYAGER SMP: CPU%d still asserting CPI%d\n", cpu, cpi);
        }
        local_irq_restore(flags);
#endif
}

/* cribbed with thanks from irq.c */
#define __byte(x,y)     (((unsigned char *)&(y))[x])
#define cached_21(cpu)  (__byte(0,vic_irq_mask[cpu]))
#define cached_A1(cpu)  (__byte(1,vic_irq_mask[cpu]))

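/* i.e. cached_21(cpu) is the low byte of the 16-bit per-CPU mask (the
 * master 8259 mask, programmed at port 0x21) and cached_A1(cpu) is the
 * high byte (the slave mask, port 0xA1).  A set bit means masked, so
 * for example a vic_irq_mask of 0xFFFE leaves only IRQ0 enabled. */
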
static unsigned int
startup_vic_irq(unsigned int irq)
{
        unmask_vic_irq(irq);

        return 0;
}

/* The enable and disable routines.  This is where we run into
 * conflicting architectural philosophy.  Fundamentally, the voyager
 * architecture does not expect to have to disable interrupts globally
 * (the IRQ controllers belong to each CPU).  The processor masquerade
 * which is used to start the system shouldn't be used in a running OS
 * since it will cause great confusion if two separate CPUs drive to
 * the same IRQ controller (I know, I've tried it).
 *
 * The solution is a variant on the NCR lazy SPL design:
 *
 * 1) To disable an interrupt, do nothing (other than set the
 *    IRQ_DISABLED flag).  This dares the interrupt actually to arrive.
 *
 * 2) If the interrupt dares to come in, raise the local mask against
 *    it (this will result in all the CPU masks being raised
 *    eventually).
 *
 * 3) To enable the interrupt, lower the mask on the local CPU and
 *    broadcast an Interrupt enable CPI which causes all other CPUs to
 *    adjust their masks accordingly.  */

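/* A concrete walk-through of the scheme (referring to the routines
 * below): mask_vic_irq() really is a nop; the genirq core just marks
 * the IRQ IRQ_DISABLED.  If the line fires anyway,
 * before_handle_vic_irq() sees IRQ_DISABLED, raises this CPU's local
 * 8259 mask via disable_local_vic_irq() and flags IRQ_REPLAY, so the
 * event is deferred rather than handled.  unmask_vic_irq() then undoes
 * this everywhere: it lowers the local mask directly and sends
 * VIC_ENABLE_IRQ_CPI to the other CPUs whose masks were raised. */
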
static void
unmask_vic_irq(unsigned int irq)
{
        /* linux doesn't do processor-irq affinity, so enable on
         * all CPUs we know about */
        int cpu = smp_processor_id(), real_cpu;
        __u16 mask = (1<<irq);
        __u32 processorList = 0;
        unsigned long flags;

        VDEBUG(("VOYAGER: unmask_vic_irq(%d) CPU%d affinity 0x%lx\n",
                irq, cpu, cpu_irq_affinity[cpu]));
        spin_lock_irqsave(&vic_irq_lock, flags);
        for_each_online_cpu(real_cpu) {
                if(!(voyager_extended_vic_processors & (1<<real_cpu)))
                        continue;
                if(!(cpu_irq_affinity[real_cpu] & mask)) {
                        /* irq has no affinity for this CPU, ignore */
                        continue;
                }
                if(real_cpu == cpu) {
                        enable_local_vic_irq(irq);
                }
                else if(vic_irq_mask[real_cpu] & mask) {
                        vic_irq_enable_mask[real_cpu] |= mask;
                        processorList |= (1<<real_cpu);
                }
        }
        spin_unlock_irqrestore(&vic_irq_lock, flags);
        if(processorList)
                send_CPI(processorList, VIC_ENABLE_IRQ_CPI);
}

static void
mask_vic_irq(unsigned int irq)
{
        /* lazy disable, do nothing */
}

static void
enable_local_vic_irq(unsigned int irq)
{
        __u8 cpu = smp_processor_id();
        __u16 mask = ~(1 << irq);
        __u16 old_mask = vic_irq_mask[cpu];

        vic_irq_mask[cpu] &= mask;
        if(vic_irq_mask[cpu] == old_mask)
                return;

        VDEBUG(("VOYAGER DEBUG: Enabling irq %d in hardware on CPU %d\n",
                irq, cpu));

        if (irq & 8) {
                outb_p(cached_A1(cpu),0xA1);
                (void)inb_p(0xA1);
        }
        else {
                outb_p(cached_21(cpu),0x21);
                (void)inb_p(0x21);
        }
}

static void
disable_local_vic_irq(unsigned int irq)
{
        __u8 cpu = smp_processor_id();
        __u16 mask = (1 << irq);
        __u16 old_mask = vic_irq_mask[cpu];

        if(irq == 7)
                return;

        vic_irq_mask[cpu] |= mask;
        if(old_mask == vic_irq_mask[cpu])
                return;

        VDEBUG(("VOYAGER DEBUG: Disabling irq %d in hardware on CPU %d\n",
                irq, cpu));

        if (irq & 8) {
                outb_p(cached_A1(cpu),0xA1);
                (void)inb_p(0xA1);
        }
        else {
                outb_p(cached_21(cpu),0x21);
                (void)inb_p(0x21);
        }
}

/* The VIC is level triggered, so the ack can only be issued after the
 * interrupt completes.  However, we do Voyager lazy interrupt
 * handling here: It is an extremely expensive operation to mask an
 * interrupt in the vic, so we merely set a flag (IRQ_DISABLED).  If
 * this interrupt actually comes in, then we mask and ack here to push
 * the interrupt off to another CPU */
static void
before_handle_vic_irq(unsigned int irq)
{
        irq_desc_t *desc = irq_desc + irq;
        __u8 cpu = smp_processor_id();

        _raw_spin_lock(&vic_irq_lock);
        vic_intr_total++;
        vic_intr_count[cpu]++;

        if(!(cpu_irq_affinity[cpu] & (1<<irq))) {
                /* The irq is not in our affinity mask, push it off
                 * onto another CPU */
                VDEBUG(("VOYAGER DEBUG: affinity triggered disable of irq %d on cpu %d\n",
                        irq, cpu));
                disable_local_vic_irq(irq);
                /* set IRQ_INPROGRESS to prevent the handler in irq.c from
                 * actually calling the interrupt routine */
                desc->status |= IRQ_REPLAY | IRQ_INPROGRESS;
        } else if(desc->status & IRQ_DISABLED) {
                /* Damn, the interrupt actually arrived, do the lazy
                 * disable thing. The interrupt routine in irq.c will
                 * not handle a IRQ_DISABLED interrupt, so nothing more
                 * need be done here */
                VDEBUG(("VOYAGER DEBUG: lazy disable of irq %d on CPU %d\n",
                        irq, cpu));
                disable_local_vic_irq(irq);
                desc->status |= IRQ_REPLAY;
        } else {
                desc->status &= ~IRQ_REPLAY;
        }

        _raw_spin_unlock(&vic_irq_lock);
}

/* Finish the VIC interrupt: basically mask */
static void
after_handle_vic_irq(unsigned int irq)
{
        irq_desc_t *desc = irq_desc + irq;

        _raw_spin_lock(&vic_irq_lock);
        {
                unsigned int status = desc->status & ~IRQ_INPROGRESS;
#ifdef VOYAGER_DEBUG
                __u16 isr;
#endif

                desc->status = status;
                if ((status & IRQ_DISABLED))
                        disable_local_vic_irq(irq);
#ifdef VOYAGER_DEBUG
                /* DEBUG: before we ack, check what's in progress */
                isr = vic_read_isr();
                if((isr & (1<<irq) && !(status & IRQ_REPLAY)) == 0) {
                        __u8 cpu = smp_processor_id();
                        __u8 real_cpu;

                        printk("VOYAGER SMP: CPU%d lost interrupt %d\n",
                               cpu, irq);
                        for_each_possible_cpu(real_cpu) {
                                outb(VIC_CPU_MASQUERADE_ENABLE | real_cpu,
                                     VIC_PROCESSOR_ID);
                                isr = vic_read_isr();
                                if(isr & (1<<irq)) {
                                        printk("VOYAGER SMP: CPU%d ack irq %d\n",
                                               real_cpu, irq);
                                        ack_vic_irq(irq);
                                }
                                outb(cpu, VIC_PROCESSOR_ID);
                        }
                }
#endif /* VOYAGER_DEBUG */
                /* as soon as we ack, the interrupt is eligible for
                 * receipt by another CPU so everything must be in
                 * order here  */
                ack_vic_irq(irq);
                if(status & IRQ_REPLAY) {
                        /* replay is set if we disable the interrupt
                         * in the before_handle_vic_irq() routine, so
                         * clear the in progress bit here to allow the
                         * next CPU to handle this correctly */
                        desc->status &= ~(IRQ_REPLAY | IRQ_INPROGRESS);
                }
#ifdef VOYAGER_DEBUG
                isr = vic_read_isr();
                if((isr & (1<<irq)) != 0)
                        printk("VOYAGER SMP: after_handle_vic_irq() after ack irq=%d, isr=0x%x\n",
                               irq, isr);
#endif /* VOYAGER_DEBUG */
        }
        _raw_spin_unlock(&vic_irq_lock);

        /* All code after this point is out of the main path - the IRQ
         * may be intercepted by another CPU if reasserted */
}

/* Linux processor - interrupt affinity manipulations.
 *
 * For each processor, we maintain a 32 bit irq affinity mask.
 * Initially it is set to all 1's so every processor accepts every
 * interrupt.  In this call, we change the processor's affinity mask:
 *
 * Change from enable to disable:
 *
 * If the interrupt ever comes in to the processor, we will disable it
 * and ack it to push it off to another CPU, so just accept the mask here.
 *
 * Change from disable to enable:
 *
 * change the mask and then do an interrupt enable CPI to re-enable on
 * the selected processors */
static void
set_vic_irq_affinity(unsigned int irq, cpumask_t mask)
{
        /* Only extended processors handle interrupts */
        unsigned long real_mask;
        unsigned long irq_mask = 1 << irq;
        int cpu;

        real_mask = cpus_addr(mask)[0] & voyager_extended_vic_processors;

        if(cpus_addr(mask)[0] == 0)
                /* can't have no CPUs to accept the interrupt -- extremely
                 * bad things will happen */
                return;

        if(irq == 0)
                /* can't change the affinity of the timer IRQ.  This
                 * is due to the constraint in the voyager
                 * architecture that the CPI also comes in on an IRQ
                 * line and we have chosen IRQ0 for this.  If you
                 * raise the mask on this interrupt, the processor
                 * will no longer be able to accept VIC CPIs */
                return;

        if(irq >= 32)
                /* You can only have 32 interrupts in a voyager system
                 * (and 32 only if you have a secondary microchannel
                 * bus) */
                return;

        for_each_online_cpu(cpu) {
                unsigned long cpu_mask = 1 << cpu;

                if(cpu_mask & real_mask) {
                        /* enable the interrupt for this cpu */
                        cpu_irq_affinity[cpu] |= irq_mask;
                } else {
                        /* disable the interrupt for this cpu */
                        cpu_irq_affinity[cpu] &= ~irq_mask;
                }
        }
        /* this is magic, we now have the correct affinity maps, so
         * enable the interrupt.  This will send an enable CPI to
         * those CPUs who need to enable it in their local masks,
         * causing them to correct for the new affinity.  If the
         * interrupt is currently globally disabled, it will simply be
         * disabled again as it comes in (voyager lazy disable).  If
         * the affinity map is tightened to disable the interrupt on a
         * cpu, it will be pushed off when it comes in */
        unmask_vic_irq(irq);
}

static void
ack_vic_irq(unsigned int irq)
{
        if (irq & 8) {
                outb(0x62,0x20);        /* Specific EOI to cascade */
                outb(0x60|(irq & 7),0xA0);
        } else {
                outb(0x60 | (irq & 7),0x20);
        }
}

/* enable the CPIs.  In the VIC, the CPIs are delivered by the 8259
 * but are not vectored by it.  This means that the 8259 mask must be
 * lowered to receive them */
static void
vic_enable_cpi(void)
{
        __u8 cpu = smp_processor_id();

        /* just take a copy of the current mask (nop for boot cpu) */
        vic_irq_mask[cpu] = vic_irq_mask[boot_cpu_id];

        enable_local_vic_irq(VIC_CPI_LEVEL0);
        enable_local_vic_irq(VIC_CPI_LEVEL1);
        /* for sys int and cmn int */
        enable_local_vic_irq(7);

        if(is_cpu_quad()) {
                outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0);
                outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1);
                VDEBUG(("VOYAGER SMP: QIC ENABLE CPI: CPU%d: MASK 0x%x\n",
                        cpu, QIC_CPI_ENABLE));
        }

        VDEBUG(("VOYAGER SMP: ENABLE CPI: CPU%d: MASK 0x%x\n",
                cpu, vic_irq_mask[cpu]));
}

void
voyager_smp_dump(void)
{
        int old_cpu = smp_processor_id(), cpu;

        /* dump the interrupt masks of each processor */
        for_each_online_cpu(cpu) {
                __u16 imr, isr, irr;
                unsigned long flags;

                local_irq_save(flags);
                outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID);
                imr = (inb(0xa1) << 8) | inb(0x21);
                outb(0x0a, 0xa0);
                irr = inb(0xa0) << 8;
                outb(0x0a, 0x20);
                irr |= inb(0x20);
                outb(0x0b, 0xa0);
                isr = inb(0xa0) << 8;
                outb(0x0b, 0x20);
                isr |= inb(0x20);
                outb(old_cpu, VIC_PROCESSOR_ID);
                local_irq_restore(flags);
                printk("\tCPU%d: mask=0x%x, IMR=0x%x, IRR=0x%x, ISR=0x%x\n",
                       cpu, vic_irq_mask[cpu], imr, irr, isr);

                /* These lines are put in to try to unstick an un ack'd irq */
                {
                        int irq;
                        for(irq=0; irq<16; irq++) {
                                if(isr & (1<<irq)) {
                                        printk("\tCPU%d: ack irq %d\n",
                                               cpu, irq);
                                        local_irq_save(flags);
                                        outb(VIC_CPU_MASQUERADE_ENABLE | cpu,
                                             VIC_PROCESSOR_ID);
                                        ack_vic_irq(irq);
                                        outb(old_cpu, VIC_PROCESSOR_ID);
                                        local_irq_restore(flags);
                                }
                        }
                }
        }
}

static void
smp_voyager_power_off(void *dummy)
{
        if(smp_processor_id() == boot_cpu_id)
                voyager_power_off();
        else
                smp_stop_cpu_function(NULL);
}

void __init
smp_prepare_cpus(unsigned int max_cpus)
{
        /* FIXME: ignore max_cpus for now */
        smp_boot_cpus();
}

void __devinit smp_prepare_boot_cpu(void)
{
        cpu_set(smp_processor_id(), cpu_online_map);
        cpu_set(smp_processor_id(), cpu_callout_map);
        cpu_set(smp_processor_id(), cpu_possible_map);
        cpu_set(smp_processor_id(), cpu_present_map);
}

int __devinit
__cpu_up(unsigned int cpu)
{
        /* This only works at boot for x86.  See "rewrite" above. */
        if (cpu_isset(cpu, smp_commenced_mask))
                return -ENOSYS;

        /* In case one didn't come up */
        if (!cpu_isset(cpu, cpu_callin_map))
                return -EIO;
        /* Unleash the CPU! */
        cpu_set(cpu, smp_commenced_mask);
        while (!cpu_isset(cpu, cpu_online_map))
                mb();
        return 0;
}

void __init
smp_cpus_done(unsigned int max_cpus)
{
        zap_low_mappings();
}

void __init
smp_setup_processor_id(void)
{
        current_thread_info()->cpu = hard_smp_processor_id();
        write_pda(cpu_number, hard_smp_processor_id());
}