MIPS: SMTC: Cleanup the hook mess and use irq_data
author: Thomas Gleixner <tglx@linutronix.de>
Wed, 23 Mar 2011 21:09:04 +0000 (21:09 +0000)
committer: Ralf Baechle <ralf@linux-mips.org>
Fri, 25 Mar 2011 17:45:18 +0000 (18:45 +0100)
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
To: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/2194/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
arch/mips/include/asm/irq.h
arch/mips/kernel/irq.c
arch/mips/kernel/smtc.c

index bdc8b8c1832847b5d74caa20711b49b39ff30241..0ec01294b063c0cc323e49d45e4d4a46b6fc2a86 100644 (file)
@@ -57,7 +57,7 @@ static inline void smtc_im_ack_irq(unsigned int irq)
 
 extern int plat_set_irq_affinity(struct irq_data *d,
                                 const struct cpumask *affinity, bool force);
-extern void smtc_forward_irq(unsigned int irq);
+extern void smtc_forward_irq(struct irq_data *d);
 
 /*
  * IRQ affinity hook invoked at the beginning of interrupt dispatch
@@ -70,51 +70,53 @@ extern void smtc_forward_irq(unsigned int irq);
  * cpumask implementations, this version is optimistically assuming
  * that cpumask.h macro overhead is reasonable during interrupt dispatch.
  */
-#define IRQ_AFFINITY_HOOK(irq)                                         \
-do {                                                                   \
-    if (!cpumask_test_cpu(smp_processor_id(), irq_desc[irq].affinity)) {\
-       smtc_forward_irq(irq);                                          \
-       irq_exit();                                                     \
-       return;                                                         \
-    }                                                                  \
-} while (0)
+static inline int handle_on_other_cpu(unsigned int irq)
+{
+       struct irq_data *d = irq_get_irq_data(irq);
+
+       if (cpumask_test_cpu(smp_processor_id(), d->affinity))
+               return 0;
+       smtc_forward_irq(d);
+       return 1;
+}
 
 #else /* Not doing SMTC affinity */
 
-#define IRQ_AFFINITY_HOOK(irq) do { } while (0)
+static inline int handle_on_other_cpu(unsigned int irq) { return 0; }
 
 #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
 
 #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
 
+static inline void smtc_im_backstop(unsigned int irq)
+{
+       if (irq_hwmask[irq] & 0x0000ff00)
+               write_c0_tccontext(read_c0_tccontext() &
+                                  ~(irq_hwmask[irq] & 0x0000ff00));
+}
+
 /*
  * Clear interrupt mask handling "backstop" if irq_hwmask
  * entry so indicates. This implies that the ack() or end()
  * functions will take over re-enabling the low-level mask.
  * Otherwise it will be done on return from exception.
  */
-#define __DO_IRQ_SMTC_HOOK(irq)                                                \
-do {                                                                   \
-       IRQ_AFFINITY_HOOK(irq);                                         \
-       if (irq_hwmask[irq] & 0x0000ff00)                               \
-               write_c0_tccontext(read_c0_tccontext() &                \
-                                  ~(irq_hwmask[irq] & 0x0000ff00));    \
-} while (0)
-
-#define __NO_AFFINITY_IRQ_SMTC_HOOK(irq)                               \
-do {                                                                   \
-       if (irq_hwmask[irq] & 0x0000ff00)                               \
-               write_c0_tccontext(read_c0_tccontext() &                \
-                                  ~(irq_hwmask[irq] & 0x0000ff00));    \
-} while (0)
+static inline int smtc_handle_on_other_cpu(unsigned int irq)
+{
+       int ret = handle_on_other_cpu(irq);
+
+       if (!ret)
+               smtc_im_backstop(irq);
+       return ret;
+}
 
 #else
 
-#define __DO_IRQ_SMTC_HOOK(irq)                                                \
-do {                                                                   \
-       IRQ_AFFINITY_HOOK(irq);                                         \
-} while (0)
-#define __NO_AFFINITY_IRQ_SMTC_HOOK(irq) do { } while (0)
+static inline void smtc_im_backstop(unsigned int irq) { }
+static inline int smtc_handle_on_other_cpu(unsigned int irq)
+{
+       return handle_on_other_cpu(irq);
+}
 
 #endif
 
index 4f93db58a79ed9bdbecb1ba53c0c3eed9261d2e6..779b78799ad25859188c1df41729c4d4b77e462b 100644 (file)
@@ -183,8 +183,8 @@ void __irq_entry do_IRQ(unsigned int irq)
 {
        irq_enter();
        check_stack_overflow();
-       __DO_IRQ_SMTC_HOOK(irq);
-       generic_handle_irq(irq);
+       if (!smtc_handle_on_other_cpu(irq))
+               generic_handle_irq(irq);
        irq_exit();
 }
 
@@ -197,7 +197,7 @@ void __irq_entry do_IRQ(unsigned int irq)
 void __irq_entry do_IRQ_no_affinity(unsigned int irq)
 {
        irq_enter();
-       __NO_AFFINITY_IRQ_SMTC_HOOK(irq);
+       smtc_im_backstop(irq);
        generic_handle_irq(irq);
        irq_exit();
 }
index 14c64235a24b1876abb6e9741408d68052fa0c87..f7e2c7807d7ba8932e8ae136e5dce79b83846f51 100644 (file)
@@ -677,9 +677,9 @@ void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity)
         */
 }
 
-void smtc_forward_irq(unsigned int irq)
+void smtc_forward_irq(struct irq_data *d)
 {
-       struct irq_data *d = irq_get_irq_data(irq);
+       unsigned int irq = d->irq;
        int target;
 
        /*
@@ -708,12 +708,10 @@ void smtc_forward_irq(unsigned int irq)
         */
 
        /* If no one is eligible, service locally */
-       if (target >= NR_CPUS) {
+       if (target >= NR_CPUS)
                do_IRQ_no_affinity(irq);
-               return;
-       }
-
-       smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
+       else
+               smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
 }
 
 #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */