[SPARC64]: Fix VIRQ enabling.
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 3edc18e1b818da7dda4032634c1360b6e1c94072..a862d13fc85748303b84497848046c1ad22a3e1e 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -1,7 +1,6 @@
-/* $Id: irq.c,v 1.114 2002/01/11 08:45:38 davem Exp $
- * irq.c: UltraSparc IRQ handling/init/registry.
+/* irq.c: UltraSparc IRQ handling/init/registry.
  *
- * Copyright (C) 1997  David S. Miller  (davem@caip.rutgers.edu)
+ * Copyright (C) 1997, 2007  David S. Miller  (davem@davemloft.net)
  * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
  * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
  */
@@ -43,6 +42,7 @@
 #include <asm/cpudata.h>
 #include <asm/auxio.h>
 #include <asm/head.h>
+#include <asm/hypervisor.h>
 
 /* UPA nodes send interrupt packet to UltraSparc with first data reg
  * value low 5 (7 on Starfire) bits holding the IRQ identifier being
@@ -171,8 +171,6 @@ skip:
        return 0;
 }
 
-extern unsigned long real_hard_smp_processor_id(void);
-
 static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
 {
        unsigned int tid;
@@ -382,6 +380,82 @@ static void sun4v_irq_end(unsigned int virt_irq)
        }
 }
 
+static void sun4v_virq_enable(unsigned int virt_irq)
+{
+       struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
+       unsigned int ino = bucket - &ivector_table[0];
+
+       if (likely(bucket)) {
+               unsigned long cpuid, dev_handle, dev_ino;
+               int err;
+
+               cpuid = irq_choose_cpu(virt_irq);
+
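+               /* sun4v_build_virq() composes the bucket index as
+                * (devhandle | devino), so masking with IMAP_IGN and
+                * IMAP_INO recovers the device handle and device INO
+                * needed by the sun4v_vintr_*() hypervisor calls.
+                */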
+               dev_handle = ino & IMAP_IGN;
+               dev_ino = ino & IMAP_INO;
+
+               err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
+               if (err != HV_EOK)
+                       printk("sun4v_vintr_set_target(%lx,%lx,%lu): "
+                              "err(%d)\n",
+                              dev_handle, dev_ino, cpuid, err);
+               err = sun4v_vintr_set_state(dev_handle, dev_ino,
+                                           HV_INTR_STATE_IDLE);
+               if (err != HV_EOK)
+                       printk("sun4v_vintr_set_state(%lx,%lx,"
+                               "HV_INTR_STATE_IDLE): err(%d)\n",
+                              dev_handle, dev_ino, err);
+               err = sun4v_vintr_set_valid(dev_handle, dev_ino,
+                                           HV_INTR_ENABLED);
+               if (err != HV_EOK)
+                       printk("sun4v_vintr_set_valid(%lx,%lx,"
+                              "HV_INTR_ENABLED): err(%d)\n",
+                              dev_handle, dev_ino, err);
+       }
+}
+
+static void sun4v_virq_disable(unsigned int virt_irq)
+{
+       struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
+       unsigned int ino = bucket - &ivector_table[0];
+
+       if (likely(bucket)) {
+               unsigned long dev_handle, dev_ino;
+               int err;
+
+               dev_handle = ino & IMAP_IGN;
+               dev_ino = ino & IMAP_INO;
+
+               err = sun4v_vintr_set_valid(dev_handle, dev_ino,
+                                           HV_INTR_DISABLED);
+               if (err != HV_EOK)
+                       printk("sun4v_vintr_set_valid(%lx,%lx,"
+                              "HV_INTR_DISABLED): err(%d)\n",
+                              dev_handle, dev_ino, err);
+       }
+}
+
+static void sun4v_virq_end(unsigned int virt_irq)
+{
+       struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
+       unsigned int ino = bucket - &ivector_table[0];
+
+       if (likely(bucket)) {
+               unsigned long dev_handle, dev_ino;
+               int err;
+
+               dev_handle = ino & IMAP_IGN;
+               dev_ino = ino & IMAP_INO;
+
+               err = sun4v_vintr_set_state(dev_handle, dev_ino,
+                                           HV_INTR_STATE_IDLE);
+               if (err != HV_EOK)
+                       printk("sun4v_vintr_set_state(%lx,%lx,"
+                               "HV_INTR_STATE_IDLE): err(%d)\n",
+                              dev_handle, dev_ino, err);
+       }
+}
+
 static void run_pre_handler(unsigned int virt_irq)
 {
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
@@ -436,6 +510,21 @@ static struct irq_chip sun4v_msi = {
 };
 #endif
 
+static struct irq_chip sun4v_virq = {
+       .typename       = "vsun4v",
+       .enable         = sun4v_virq_enable,
+       .disable        = sun4v_virq_disable,
+       .end            = sun4v_virq_end,
+};
+
+static struct irq_chip sun4v_virq_ack = {
+       .typename       = "vsun4v+ack",
+       .enable         = sun4v_virq_enable,
+       .disable        = sun4v_virq_disable,
+       .ack            = run_pre_handler,
+       .end            = sun4v_virq_end,
+};
+
 void irq_install_pre_handler(int virt_irq,
                             void (*func)(unsigned int, void *, void *),
                             void *arg1, void *arg2)
@@ -449,7 +538,8 @@ void irq_install_pre_handler(int virt_irq,
 
        chip = get_irq_chip(virt_irq);
        if (chip == &sun4u_irq_ack ||
-           chip == &sun4v_irq_ack
+           chip == &sun4v_irq_ack ||
+           chip == &sun4v_virq_ack
 #ifdef CONFIG_PCI_MSI
            || chip == &sun4v_msi
 #endif
@@ -457,7 +547,9 @@ void irq_install_pre_handler(int virt_irq,
                return;
 
        chip = (chip == &sun4u_irq ?
-               &sun4u_irq_ack : &sun4v_irq_ack);
+               &sun4u_irq_ack :
+               (chip == &sun4v_irq ?
+                &sun4v_irq_ack : &sun4v_virq_ack));
        set_irq_chip(virt_irq, chip);
 }
 
@@ -494,19 +586,18 @@ out:
        return bucket->virt_irq;
 }
 
-unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
+static unsigned int sun4v_build_common(unsigned long sysino,
+                                      struct irq_chip *chip)
 {
        struct ino_bucket *bucket;
        struct irq_handler_data *data;
-       unsigned long sysino;
 
        BUG_ON(tlb_type != hypervisor);
 
-       sysino = sun4v_devino_to_sysino(devhandle, devino);
        bucket = &ivector_table[sysino];
        if (!bucket->virt_irq) {
                bucket->virt_irq = virt_irq_alloc(__irq(bucket));
-               set_irq_chip(bucket->virt_irq, &sun4v_irq);
+               set_irq_chip(bucket->virt_irq, chip);
        }
 
        data = get_irq_chip_data(bucket->virt_irq);
@@ -531,6 +622,32 @@ out:
        return bucket->virt_irq;
 }
 
+unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
+{
+       unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);
+
+       return sun4v_build_common(sysino, &sun4v_irq);
+}
+
+unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
+{
+       unsigned long sysino, hv_err;
+
+       BUG_ON(devhandle & ~IMAP_IGN);
+       BUG_ON(devino & ~IMAP_INO);
+
+       sysino = devhandle | devino;
+
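+       /* Register (devhandle | devino) as the hypervisor cookie for this
+        * virtual interrupt; the same value is used as the ivector_table
+        * index by sun4v_build_common() below.
+        */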
+       hv_err = sun4v_vintr_set_cookie(devhandle, devino, sysino);
+       if (hv_err) {
+               prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
+                           "err=%lu\n", devhandle, devino, hv_err);
+               prom_halt();
+       }
+
+       return sun4v_build_common(sysino, &sun4v_virq);
+}
+
 #ifdef CONFIG_PCI_MSI
 unsigned int sun4v_build_msi(u32 devhandle, unsigned int *virt_irq_p,
                             unsigned int msi_start, unsigned int msi_end)
@@ -694,9 +811,20 @@ void init_irqwork_curcpu(void)
        trap_block[cpu].irq_worklist = 0;
 }
 
-static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type)
+/* Please be very careful with register_one_mondo() and
+ * sun4v_register_mondo_queues().
+ *
+ * On SMP this gets invoked from the CPU trampoline before
+ * the cpu has fully taken over the trap table from OBP,
+ * and it's kernel stack + %g6 thread register state is
+ * not fully cooked yet.
+ *
+ * Therefore you cannot make any OBP calls, not even prom_printf,
+ * from these two routines.
+ */
+static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
 {
-       unsigned long num_entries = 128;
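+       /* qmask is the queue size in bytes minus one; each mondo queue
+        * entry is 64 bytes, so convert the mask into the entry count
+        * that sun4v_cpu_qconf() expects.
+        */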
+       unsigned long num_entries = (qmask + 1) / 64;
        unsigned long status;
 
        status = sun4v_cpu_qconf(type, paddr, num_entries);
@@ -711,44 +839,58 @@ static void __cpuinit sun4v_register_mondo_queues(int this_cpu)
 {
        struct trap_per_cpu *tb = &trap_block[this_cpu];
 
-       register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO);
-       register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO);
-       register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR);
-       register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR);
+       register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
+                          tb->cpu_mondo_qmask);
+       register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
+                          tb->dev_mondo_qmask);
+       register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
+                          tb->resum_qmask);
+       register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
+                          tb->nonresum_qmask);
 }
 
-static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, int use_bootmem)
+static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask, int use_bootmem)
 {
-       void *page;
+       unsigned long size = PAGE_ALIGN(qmask + 1);
+       unsigned long order = get_order(size);
+       void *p = NULL;
 
-       if (use_bootmem)
-               page = alloc_bootmem_low_pages(PAGE_SIZE);
-       else
-               page = (void *) get_zeroed_page(GFP_ATOMIC);
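+       /* Allocate the queue aligned to its own size: __alloc_bootmem_low()
+        * takes the alignment as its second argument, and alloc_pages()
+        * returns blocks aligned to the requested order.
+        */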
+       if (use_bootmem) {
+               p = __alloc_bootmem_low(size, size, 0);
+       } else {
+               struct page *page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, order);
+               if (page)
+                       p = page_address(page);
+       }
 
-       if (!page) {
+       if (!p) {
                prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
                prom_halt();
        }
 
-       *pa_ptr = __pa(page);
+       *pa_ptr = __pa(p);
 }
 
-static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, int use_bootmem)
+static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask, int use_bootmem)
 {
-       void *page;
+       unsigned long size = PAGE_ALIGN(qmask + 1);
+       unsigned long order = get_order(size);
+       void *p = NULL;
 
-       if (use_bootmem)
-               page = alloc_bootmem_low_pages(PAGE_SIZE);
-       else
-               page = (void *) get_zeroed_page(GFP_ATOMIC);
+       if (use_bootmem) {
+               p = __alloc_bootmem_low(size, size, 0);
+       } else {
+               struct page *page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, order);
+               if (page)
+                       p = page_address(page);
+       }
 
-       if (!page) {
+       if (!p) {
                prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
                prom_halt();
        }
 
-       *pa_ptr = __pa(page);
+       *pa_ptr = __pa(p);
 }
 
 static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_bootmem)
@@ -779,12 +921,12 @@ void __cpuinit sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int
        struct trap_per_cpu *tb = &trap_block[cpu];
 
        if (alloc) {
-               alloc_one_mondo(&tb->cpu_mondo_pa, use_bootmem);
-               alloc_one_mondo(&tb->dev_mondo_pa, use_bootmem);
-               alloc_one_mondo(&tb->resum_mondo_pa, use_bootmem);
-               alloc_one_kbuf(&tb->resum_kernel_buf_pa, use_bootmem);
-               alloc_one_mondo(&tb->nonresum_mondo_pa, use_bootmem);
-               alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, use_bootmem);
+               alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask, use_bootmem);
+               alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask, use_bootmem);
+               alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask, use_bootmem);
+               alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask, use_bootmem);
+               alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask, use_bootmem);
+               alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, tb->nonresum_qmask, use_bootmem);
 
                init_cpu_send_mondo_info(tb, use_bootmem);
        }