/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008, 2009, 2010 Cavium Networks
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/octeon/octeon.h>
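/*
 * On chips without the EN*_W1{S,C} registers the CIU enable bits can
 * only be changed with read-modify-write sequences, and any CPU may
 * update any core's enable register (e.g. in .disable and
 * .set_affinity).  These locks serialize those updates.
 */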
static DEFINE_RAW_SPINLOCK(octeon_irq_ciu0_lock);
static DEFINE_RAW_SPINLOCK(octeon_irq_ciu1_lock);
static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
	return cpu_logical_map(cpu);
#else
	return cvmx_get_core_num();
#endif
}
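/*
 * The "core" irq_chip below handles the eight CP0 interrupt lines
 * (OCTEON_IRQ_SW0 ... OCTEON_IRQ_TIMER).  Enabling and disabling is
 * done by setting or clearing the corresponding IM bit in the CP0
 * Status register; only the two software interrupts also need their
 * Cause bits cleared by hand in .ack.
 */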
static void octeon_irq_core_ack(unsigned int irq)
{
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}
static void octeon_irq_core_eoi(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned int bit = irq - OCTEON_IRQ_SW0;
	/*
	 * If an IRQ is being processed while we are disabling it the
	 * handler will attempt to unmask the interrupt after it has
	 * been disabled.
	 */
	if (unlikely(desc->status & IRQ_DISABLED))
		return;
	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	set_c0_status(0x100 << bit);
}
static void octeon_irq_core_enable(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;

	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	set_c0_status(0x100 << bit);
	local_irq_restore(flags);
}
static void octeon_irq_core_disable_local(unsigned int irq)
{
	unsigned long flags;
	unsigned int bit = irq - OCTEON_IRQ_SW0;

	/*
	 * We need to disable interrupts to make sure our updates are
	 * atomic.
	 */
	local_irq_save(flags);
	clear_c0_status(0x100 << bit);
	local_irq_restore(flags);
}
static void octeon_irq_core_disable(unsigned int irq)
{
#ifdef CONFIG_SMP
	on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local,
		    (void *) (long) irq, 1);
#else
	octeon_irq_core_disable_local(irq);
#endif
}
static struct irq_chip octeon_irq_chip_core = {
	.enable = octeon_irq_core_enable,
	.disable = octeon_irq_core_disable,
	.ack = octeon_irq_core_ack,
	.eoi = octeon_irq_core_eoi,
};
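/*
 * The first 64 CIU sources (CIU_INTX_SUM0/EN0) are delivered to the
 * cores on interrupt line IP2.  As used throughout this file, CSR
 * index "coreid * 2" selects a core's IP2 copy of the SUM0/EN0
 * registers, and "coreid * 2 + 1" selects the IP3 copy used for the
 * second 64 sources.
 */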
static void octeon_irq_ciu0_ack(unsigned int irq)
{
	switch (irq) {
	case OCTEON_IRQ_GMX_DRP0:
	case OCTEON_IRQ_GMX_DRP1:
	case OCTEON_IRQ_IPD_DRP:
	case OCTEON_IRQ_KEY_ZERO:
	case OCTEON_IRQ_TIMER0:
	case OCTEON_IRQ_TIMER1:
	case OCTEON_IRQ_TIMER2:
	case OCTEON_IRQ_TIMER3:
	{
		int index = cvmx_get_core_num() * 2;
		u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
		/*
		 * CIU timer type interrupts must be acknowledged by
		 * writing a '1' bit to their sum0 bit.
		 */
		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
		break;
	}
	default:
		break;
	}

	/*
	 * In order to avoid any locking accessing the CIU, we
	 * acknowledge CIU interrupts by disabling all of them.  This
	 * way we can use a per core register and avoid any out of
	 * core locking requirements.  This has the side effect that
	 * CIU interrupts can't be processed recursively.
	 *
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << 2);
}
static void octeon_irq_ciu0_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again.  We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 2);
}
static int next_coreid_for_irq(struct irq_desc *desc)
{
#ifdef CONFIG_SMP
	int coreid;
	int weight = cpumask_weight(desc->affinity);

	if (weight > 1) {
		int cpu = smp_processor_id();
		for (;;) {
			cpu = cpumask_next(cpu, desc->affinity);
			if (cpu >= nr_cpu_ids) {
				cpu = -1;
				continue;
			} else if (cpumask_test_cpu(cpu, cpu_online_mask)) {
				break;
			}
		}
		coreid = octeon_coreid_for_cpu(cpu);
	} else if (weight == 1) {
		coreid = octeon_coreid_for_cpu(cpumask_first(desc->affinity));
	} else {
		coreid = cvmx_get_core_num();
	}
	return coreid;
#else
	return cvmx_get_core_num();
#endif
}
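/*
 * The .enable handlers (and, for the W1{S,C} chips, .eoi) re-arm a
 * source on the core returned by next_coreid_for_irq(), so an affinity
 * mask with more than one CPU has successive interrupts spread
 * round-robin over that mask.
 */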
static void octeon_irq_ciu0_enable(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int coreid = next_coreid_for_irq(desc);
	unsigned long flags;
	uint64_t en0;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	en0 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
}
static void octeon_irq_ciu0_enable_mbox(unsigned int irq)
{
	int coreid = cvmx_get_core_num();
	unsigned long flags;
	uint64_t en0;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	en0 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
}
static void octeon_irq_ciu0_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
	unsigned long flags;
	uint64_t en0;
	int cpu;

	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		en0 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
}
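/*
 * CN58XX pass 2, CN56XX pass 2 and CN52XX pass 2 (see arch_init_irq())
 * provide write-1-to-set/write-1-to-clear views of the enable
 * registers (EN*_W1S/EN*_W1C).  Individual bits can then be flipped
 * with a single store, so the "_v2" handlers below need no spinlock.
 */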
/*
 * Enable the irq on the next core in the affinity set for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu0_enable_v2(unsigned int irq)
{
	int index;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
	struct irq_desc *desc = irq_to_desc(irq);

	if ((desc->status & IRQ_DISABLED) == 0) {
		index = next_coreid_for_irq(desc) * 2;
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	}
}
/*
 * Enable the irq on the current CPU for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu0_enable_mbox_v2(unsigned int irq)
{
	int index;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	index = cvmx_get_core_num() * 2;
	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}
/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_ack_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	switch (irq) {
	case OCTEON_IRQ_GMX_DRP0:
	case OCTEON_IRQ_GMX_DRP1:
	case OCTEON_IRQ_IPD_DRP:
	case OCTEON_IRQ_KEY_ZERO:
	case OCTEON_IRQ_TIMER0:
	case OCTEON_IRQ_TIMER1:
	case OCTEON_IRQ_TIMER2:
	case OCTEON_IRQ_TIMER3:
		/*
		 * CIU timer type interrupts must be acknowledged by
		 * writing a '1' bit to their sum0 bit.
		 */
		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
		break;
	default:
		break;
	}

	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
}
/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_eoi_mbox_v2(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int index = cvmx_get_core_num() * 2;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	if (likely((desc->status & IRQ_DISABLED) == 0))
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}
/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_disable_all_v2(unsigned int irq)
{
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
	int index;
	int cpu;

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2;
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	}
}
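/*
 * .set_affinity enables the source on exactly one CPU of the requested
 * mask (the first online one) and disables it on all the others.  The
 * pre-W1{S,C} version additionally rejects masks with more than one
 * CPU so that .ack/.eoi can stay lock free.
 */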
#ifdef CONFIG_SMP
static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest)
{
	int cpu;
	struct irq_desc *desc = irq_to_desc(irq);
	int enable_one = (desc->status & IRQ_DISABLED) == 0;
	unsigned long flags;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */

	/*
	 * For non-v2 CIU, we will allow only single CPU affinity.
	 * This removes the need to do locking in the .ack/.eoi
	 * functions.
	 */
	if (cpumask_weight(dest) != 1)
		return -EINVAL;

	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		uint64_t en0 =
			cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = 0;
			en0 |= 1ull << bit;
		} else {
			en0 &= ~(1ull << bit);
		}
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);

	return 0;
}
/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq,
					   const struct cpumask *dest)
{
	int cpu;
	int index;
	struct irq_desc *desc = irq_to_desc(irq);
	int enable_one = (desc->status & IRQ_DISABLED) == 0;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2;
		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = 0;
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
		} else {
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
		}
	}
	return 0;
}
#endif /* CONFIG_SMP */
/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu0_v2 = {
	.enable = octeon_irq_ciu0_enable_v2,
	.disable = octeon_irq_ciu0_disable_all_v2,
	.eoi = octeon_irq_ciu0_enable_v2,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu0 = {
	.enable = octeon_irq_ciu0_enable,
	.disable = octeon_irq_ciu0_disable,
	.eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};
/* The mbox versions don't do any affinity or round-robin. */
static struct irq_chip octeon_irq_chip_ciu0_mbox_v2 = {
	.enable = octeon_irq_ciu0_enable_mbox_v2,
	.disable = octeon_irq_ciu0_disable,
	.eoi = octeon_irq_ciu0_eoi_mbox_v2,
};

static struct irq_chip octeon_irq_chip_ciu0_mbox = {
	.enable = octeon_irq_ciu0_enable_mbox,
	.disable = octeon_irq_ciu0_disable,
	.eoi = octeon_irq_ciu0_eoi,
};
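/*
 * The second 64 CIU sources (the per-core watchdogs and the rest of
 * CIU_INT_SUM1/EN1) are delivered on interrupt line IP3.  The CIU1
 * handlers below mirror their CIU0 counterparts, using EN1 and the
 * "coreid * 2 + 1" register index.
 */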
static void octeon_irq_ciu1_ack(unsigned int irq)
{
	/*
	 * In order to avoid any locking accessing the CIU, we
	 * acknowledge CIU interrupts by disabling all of them.  This
	 * way we can use a per core register and avoid any out of
	 * core locking requirements.  This has the side effect that
	 * CIU interrupts can't be processed recursively.  We don't
	 * need to disable IRQs to make these atomic since they are
	 * already disabled earlier in the low level interrupt code.
	 */
	clear_c0_status(0x100 << 3);
}
static void octeon_irq_ciu1_eoi(unsigned int irq)
{
	/*
	 * Enable all CIU interrupts again.  We don't need to disable
	 * IRQs to make these atomic since they are already disabled
	 * earlier in the low level interrupt code.
	 */
	set_c0_status(0x100 << 3);
}
static void octeon_irq_ciu1_enable(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int coreid = next_coreid_for_irq(desc);
	unsigned long flags;
	uint64_t en1;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	en1 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}
/*
 * Watchdog interrupts are special.  They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu1_wd_enable(unsigned int irq)
{
	unsigned long flags;
	uint64_t en1;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	int coreid = bit;

	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	en1 |= 1ull << bit;
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}
static void octeon_irq_ciu1_disable(unsigned int irq)
{
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	unsigned long flags;
	uint64_t en1;
	int cpu;

	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		en1 &= ~(1ull << bit);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}
/*
 * Enable the irq on the next core in the affinity set for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu1_enable_v2(unsigned int irq)
{
	int index;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
	struct irq_desc *desc = irq_to_desc(irq);

	if ((desc->status & IRQ_DISABLED) == 0) {
		index = next_coreid_for_irq(desc) * 2 + 1;
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}
/*
 * Watchdog interrupts are special.  They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu1_wd_enable_v2(unsigned int irq)
{
	int index;
	int coreid = irq - OCTEON_IRQ_WDOG0;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
	struct irq_desc *desc = irq_to_desc(irq);

	if ((desc->status & IRQ_DISABLED) == 0) {
		index = coreid * 2 + 1;
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}
/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_ack_v2(unsigned int irq)
{
	int index = cvmx_get_core_num() * 2 + 1;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
}
/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_disable_all_v2(unsigned int irq)
{
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
	int index;
	int cpu;

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
}
#ifdef CONFIG_SMP
static int octeon_irq_ciu1_set_affinity(unsigned int irq,
					const struct cpumask *dest)
{
	int cpu;
	struct irq_desc *desc = irq_to_desc(irq);
	int enable_one = (desc->status & IRQ_DISABLED) == 0;
	unsigned long flags;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */

	/*
	 * For non-v2 CIU, we will allow only single CPU affinity.
	 * This removes the need to do locking in the .ack/.eoi
	 * functions.
	 */
	if (cpumask_weight(dest) != 1)
		return -EINVAL;

	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	for_each_online_cpu(cpu) {
		int coreid = octeon_coreid_for_cpu(cpu);
		uint64_t en1 =
			cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = 0;
			en1 |= 1ull << bit;
		} else {
			en1 &= ~(1ull << bit);
		}
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
	}
	/*
	 * We need to do a read after the last update to make sure all
	 * of them are done.
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);

	return 0;
}
/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq,
					   const struct cpumask *dest)
{
	int cpu;
	int index;
	struct irq_desc *desc = irq_to_desc(irq);
	int enable_one = (desc->status & IRQ_DISABLED) == 0;
	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

	for_each_online_cpu(cpu) {
		index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		if (cpumask_test_cpu(cpu, dest) && enable_one) {
			enable_one = 0;
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
		} else {
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
		}
	}
	return 0;
}
#endif /* CONFIG_SMP */
/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu1_v2 = {
	.enable = octeon_irq_ciu1_enable_v2,
	.disable = octeon_irq_ciu1_disable_all_v2,
	.eoi = octeon_irq_ciu1_enable_v2,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu1_set_affinity_v2,
#endif
};

static struct irq_chip octeon_irq_chip_ciu1 = {
	.enable = octeon_irq_ciu1_enable,
	.disable = octeon_irq_ciu1_disable,
	.eoi = octeon_irq_ciu1_eoi,
#ifdef CONFIG_SMP
	.set_affinity = octeon_irq_ciu1_set_affinity,
#endif
};
static struct irq_chip octeon_irq_chip_ciu1_wd_v2 = {
	.enable = octeon_irq_ciu1_wd_enable_v2,
	.disable = octeon_irq_ciu1_disable_all_v2,
	.eoi = octeon_irq_ciu1_wd_enable_v2,
};

static struct irq_chip octeon_irq_chip_ciu1_wd = {
	.enable = octeon_irq_ciu1_wd_enable,
	.disable = octeon_irq_ciu1_disable,
	.eoi = octeon_irq_ciu1_eoi,
};
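/*
 * .ack hooks for the CIU chips.  handle_fasteoi_irq() never calls
 * chip->ack, so plat_irq_dispatch() invokes one of these directly
 * (selected in arch_init_irq()) before calling do_IRQ().
 */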
static void (*octeon_ciu0_ack)(unsigned int);
static void (*octeon_ciu1_ack)(unsigned int);
void __init arch_init_irq(void)
{
	unsigned int irq;
	struct irq_chip *chip0;
	struct irq_chip *chip0_mbox;
	struct irq_chip *chip1;
	struct irq_chip *chip1_wd;

#ifdef CONFIG_SMP
	/* Set the default affinity to the boot cpu. */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif

	if (NR_IRQS < OCTEON_IRQ_LAST)
		pr_err("octeon_irq_init: NR_IRQS is set too low\n");

	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) {
		/* These models have the lockless EN*_W1{S,C} registers. */
		octeon_ciu0_ack = octeon_irq_ciu0_ack_v2;
		octeon_ciu1_ack = octeon_irq_ciu1_ack_v2;
		chip0 = &octeon_irq_chip_ciu0_v2;
		chip0_mbox = &octeon_irq_chip_ciu0_mbox_v2;
		chip1 = &octeon_irq_chip_ciu1_v2;
		chip1_wd = &octeon_irq_chip_ciu1_wd_v2;
	} else {
		octeon_ciu0_ack = octeon_irq_ciu0_ack;
		octeon_ciu1_ack = octeon_irq_ciu1_ack;
		chip0 = &octeon_irq_chip_ciu0;
		chip0_mbox = &octeon_irq_chip_ciu0_mbox;
		chip1 = &octeon_irq_chip_ciu1;
		chip1_wd = &octeon_irq_chip_ciu1_wd;
	}
	/* 0 - 15 reserved for i8259 master and slave controller. */

	/* 17 - 23 Mips internal */
	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
		set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
					 handle_percpu_irq);
	}

	/* 24 - 87 CIU_INT_SUM0 */
	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
		switch (irq) {
		case OCTEON_IRQ_MBOX0:
		case OCTEON_IRQ_MBOX1:
			set_irq_chip_and_handler(irq, chip0_mbox, handle_percpu_irq);
			break;
		default:
			set_irq_chip_and_handler(irq, chip0, handle_fasteoi_irq);
			break;
		}
	}

	/* 88 - 151 CIU_INT_SUM1 */
	for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_WDOG15; irq++)
		set_irq_chip_and_handler(irq, chip1_wd, handle_fasteoi_irq);

	for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED151; irq++)
		set_irq_chip_and_handler(irq, chip1, handle_fasteoi_irq);

	/* Enable the CIU lines (IP2 and IP3) in the CP0 interrupt mask. */
	set_c0_status(0x300 << 2);
}
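/*
 * Low level dispatch: check IP2 (CIU0) first, then IP3 (CIU1), then the
 * remaining CP0 interrupt lines.  For the CIU lines the highest numbered
 * pending-and-enabled source (fls64) is acked and handed to do_IRQ();
 * the loop repeats until nothing is left pending.
 */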
asmlinkage void plat_irq_dispatch(void)
{
	const unsigned long core_id = cvmx_get_core_num();
	const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
	const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
	const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
	const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
	unsigned long cop0_cause;
	unsigned long cop0_status;
	uint64_t ciu_en;
	uint64_t ciu_sum;
	int irq;

	while (1) {
		cop0_cause = read_c0_cause();
		cop0_status = read_c0_status();
		cop0_cause &= cop0_status;
		cop0_cause &= ST0_IM;

		if (unlikely(cop0_cause & STATUSF_IP2)) {
			ciu_sum = cvmx_read_csr(ciu_sum0_address);
			ciu_en = cvmx_read_csr(ciu_en0_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum)) {
				irq = fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1;
				octeon_ciu0_ack(irq);
				do_IRQ(irq);
			} else {
				spurious_interrupt();
			}
		} else if (unlikely(cop0_cause & STATUSF_IP3)) {
			ciu_sum = cvmx_read_csr(ciu_sum1_address);
			ciu_en = cvmx_read_csr(ciu_en1_address);
			ciu_sum &= ciu_en;
			if (likely(ciu_sum)) {
				irq = fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1;
				octeon_ciu1_ack(irq);
				do_IRQ(irq);
			} else {
				spurious_interrupt();
			}
		} else if (likely(cop0_cause)) {
			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
		} else {
			break;
		}
	}
}
#ifdef CONFIG_HOTPLUG_CPU
static int is_irq_enabled_on_cpu(unsigned int irq, unsigned int cpu)
{
	unsigned int isset;
	int coreid = octeon_coreid_for_cpu(cpu);
	int bit = (irq < OCTEON_IRQ_WDOG0) ?
		irq - OCTEON_IRQ_WORKQ0 : irq - OCTEON_IRQ_WDOG0;

	if (irq < OCTEON_IRQ_WDOG0) {
		isset = (cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)) &
			(1ull << bit)) >> bit;
	} else {
		isset = (cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)) &
			(1ull << bit)) >> bit;
	}
	return isset;
}
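/*
 * Called on a CPU that is going offline: any CIU interrupt that is
 * currently enabled on this CPU is disabled and its affinity pushed
 * back to the set of remaining online CPUs.
 */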
void fixup_irqs(void)
{
	int irq;

	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++)
		octeon_irq_core_disable_local(irq);

	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_GPIO15; irq++) {
		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
			/* ciu irq migrates to next cpu */
			octeon_irq_chip_ciu0.disable(irq);
			octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
		}
	}
#if 0
	for (irq = OCTEON_IRQ_MBOX0; irq <= OCTEON_IRQ_MBOX1; irq++)
		octeon_irq_mailbox_mask(irq);
#endif
	for (irq = OCTEON_IRQ_UART0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
			/* ciu irq migrates to next cpu */
			octeon_irq_chip_ciu0.disable(irq);
			octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
		}
	}

	for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED135; irq++) {
		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
			/* ciu irq migrates to next cpu */
			octeon_irq_chip_ciu1.disable(irq);
			octeon_irq_ciu1_set_affinity(irq, &cpu_online_map);
		}
	}
}
#endif /* CONFIG_HOTPLUG_CPU */