/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008 Cavium Networks
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/octeon/octeon.h>
static DEFINE_RAW_SPINLOCK(octeon_irq_ciu0_lock);
static DEFINE_RAW_SPINLOCK(octeon_irq_ciu1_lock);
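
/*
 * Map a Linux CPU number to the Octeon hardware core number.  With
 * CONFIG_SMP this uses the logical map set up at boot; on UP kernels
 * only the current core can be meant.
 */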
static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
        return cpu_logical_map(cpu);
#else
        return cvmx_get_core_num();
#endif
}
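
/*
 * The OCTEON_IRQ_SW0..OCTEON_IRQ_TIMER interrupts are the MIPS CP0
 * interrupt lines of each core; they are controlled through the IM
 * bits of the CP0 Status register.
 */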
static void octeon_irq_core_ack(unsigned int irq)
{
        unsigned int bit = irq - OCTEON_IRQ_SW0;
        /*
         * We don't need to disable IRQs to make these atomic since
         * they are already disabled earlier in the low level
         * interrupt code.
         */
        clear_c0_status(0x100 << bit);
        /* The two user interrupts must be cleared manually. */
        if (bit < 2)
                clear_c0_cause(0x100 << bit);
}
static void octeon_irq_core_eoi(unsigned int irq)
{
        struct irq_desc *desc = irq_desc + irq;
        unsigned int bit = irq - OCTEON_IRQ_SW0;
        /*
         * If an IRQ is being processed while we are disabling it the
         * handler will attempt to unmask the interrupt after it has
         * been disabled.
         */
        if (desc->status & IRQ_DISABLED)
                return;
        /*
         * We don't need to disable IRQs to make these atomic since
         * they are already disabled earlier in the low level
         * interrupt code.
         */
        set_c0_status(0x100 << bit);
}
static void octeon_irq_core_enable(unsigned int irq)
{
        unsigned long flags;
        unsigned int bit = irq - OCTEON_IRQ_SW0;

        /*
         * We need to disable interrupts to make sure our updates are
         * atomic.
         */
        local_irq_save(flags);
        set_c0_status(0x100 << bit);
        local_irq_restore(flags);
}
static void octeon_irq_core_disable_local(unsigned int irq)
{
        unsigned long flags;
        unsigned int bit = irq - OCTEON_IRQ_SW0;
        /*
         * We need to disable interrupts to make sure our updates are
         * atomic.
         */
        local_irq_save(flags);
        clear_c0_status(0x100 << bit);
        local_irq_restore(flags);
}
static void octeon_irq_core_disable(unsigned int irq)
{
#ifdef CONFIG_SMP
        on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local,
                    (void *) (long) irq, 1);
#else
        octeon_irq_core_disable_local(irq);
#endif
}
static struct irq_chip octeon_irq_chip_core = {
        .enable = octeon_irq_core_enable,
        .disable = octeon_irq_core_disable,
        .ack = octeon_irq_core_ack,
        .eoi = octeon_irq_core_eoi,
};
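
/*
 * Interrupts in the CIU_INT_SUM0 register are signalled to the core
 * through CP0 interrupt line 2 (IP2) and masked per core via the
 * CIU_INTX_EN0 registers.
 */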
static void octeon_irq_ciu0_ack(unsigned int irq)
{
        /*
         * In order to avoid any locking when accessing the CIU, we
         * acknowledge CIU interrupts by disabling all of them.  This
         * way we can use a per core register and avoid any out of
         * core locking requirements.  This has the side effect that
         * CIU interrupts can't be processed recursively.
         *
         * We don't need to disable IRQs to make these atomic since
         * they are already disabled earlier in the low level
         * interrupt code.
         */
        clear_c0_status(0x100 << 2);
}
static void octeon_irq_ciu0_eoi(unsigned int irq)
{
        /*
         * Enable all CIU interrupts again.  We don't need to disable
         * IRQs to make these atomic since they are already disabled
         * earlier in the low level interrupt code.
         */
        set_c0_status(0x100 << 2);
}
static void octeon_irq_ciu0_enable(unsigned int irq)
{
        int coreid = cvmx_get_core_num();
        unsigned long flags;
        uint64_t en0;
        int bit = irq - OCTEON_IRQ_WORKQ0;      /* Bit 0-63 of EN0 */

        raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
        en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
        en0 |= 1ull << bit;
        cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
        cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
        raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
}
static void octeon_irq_ciu0_disable(unsigned int irq)
{
        int bit = irq - OCTEON_IRQ_WORKQ0;      /* Bit 0-63 of EN0 */
        unsigned long flags;
        uint64_t en0;
        int cpu;

        raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
                en0 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
        raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
}
/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_enable_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}
/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_ack_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
}
/*
 * CIU timer type interrupts must be acknowledged by writing a '1' bit
 * to their sum0 bit.
 */
static void octeon_irq_ciu0_timer_ack(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2;
        uint64_t mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
        cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
}
static void octeon_irq_ciu0_timer_ack_v1(unsigned int irq)
{
        octeon_irq_ciu0_timer_ack(irq);
        octeon_irq_ciu0_ack(irq);
}
static void octeon_irq_ciu0_timer_ack_v2(unsigned int irq)
{
        octeon_irq_ciu0_timer_ack(irq);
        octeon_irq_ciu0_ack_v2(irq);
}
/*
 * Re-enable the irq on the current core for chips that have the
 * EN*_W1{S,C} registers, unless the irq has been disabled.
 */
static void octeon_irq_ciu0_eoi_v2(unsigned int irq)
{
        struct irq_desc *desc = irq_desc + irq;
        int index = cvmx_get_core_num() * 2;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);

        if ((desc->status & IRQ_DISABLED) == 0)
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
}
/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu0_disable_all_v2(unsigned int irq)
{
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
        int index;
        int cpu;

        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2;
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
        }
}
#ifdef CONFIG_SMP
static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest)
{
        int cpu;
        unsigned long flags;
        int bit = irq - OCTEON_IRQ_WORKQ0;      /* Bit 0-63 of EN0 */

        raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                uint64_t en0 =
                        cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
                if (cpumask_test_cpu(cpu, dest))
                        en0 |= 1ull << bit;
                else
                        en0 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
        raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);

        return 0;
}
/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq,
                                           const struct cpumask *dest)
{
        int cpu;
        int index;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2;
                if (cpumask_test_cpu(cpu, dest))
                        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
                else
                        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
        }
        return 0;
}
#endif
/*
 * Newer Octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu0_v2 = {
        .enable = octeon_irq_ciu0_enable_v2,
        .disable = octeon_irq_ciu0_disable_all_v2,
        .ack = octeon_irq_ciu0_ack_v2,
        .eoi = octeon_irq_ciu0_eoi_v2,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu0_set_affinity_v2,
#endif
};
static struct irq_chip octeon_irq_chip_ciu0 = {
        .enable = octeon_irq_ciu0_enable,
        .disable = octeon_irq_ciu0_disable,
        .ack = octeon_irq_ciu0_ack,
        .eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};
static struct irq_chip octeon_irq_chip_ciu0_timer_v2 = {
        .enable = octeon_irq_ciu0_enable_v2,
        .disable = octeon_irq_ciu0_disable_all_v2,
        .ack = octeon_irq_ciu0_timer_ack_v2,
        .eoi = octeon_irq_ciu0_eoi_v2,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu0_set_affinity_v2,
#endif
};
static struct irq_chip octeon_irq_chip_ciu0_timer = {
        .enable = octeon_irq_ciu0_enable,
        .disable = octeon_irq_ciu0_disable,
        .ack = octeon_irq_ciu0_timer_ack_v1,
        .eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};
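
/*
 * Interrupts in the CIU_INT_SUM1 register are signalled to the core
 * through CP0 interrupt line 3 (IP3) and masked per core via the
 * CIU_INTX_EN1 registers.
 */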
static void octeon_irq_ciu1_ack(unsigned int irq)
{
        /*
         * In order to avoid any locking when accessing the CIU, we
         * acknowledge CIU interrupts by disabling all of them.  This
         * way we can use a per core register and avoid any out of
         * core locking requirements.  This has the side effect that
         * CIU interrupts can't be processed recursively.  We don't
         * need to disable IRQs to make these atomic since they are
         * already disabled earlier in the low level interrupt code.
         */
        clear_c0_status(0x100 << 3);
}
static void octeon_irq_ciu1_eoi(unsigned int irq)
{
        /*
         * Enable all CIU interrupts again.  We don't need to disable
         * IRQs to make these atomic since they are already disabled
         * earlier in the low level interrupt code.
         */
        set_c0_status(0x100 << 3);
}
static void octeon_irq_ciu1_enable(unsigned int irq)
{
        int coreid = cvmx_get_core_num();
        unsigned long flags;
        uint64_t en1;
        int bit = irq - OCTEON_IRQ_WDOG0;       /* Bit 0-63 of EN1 */

        raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
        en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
        en1 |= 1ull << bit;
        cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
        cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
        raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}
static void octeon_irq_ciu1_disable(unsigned int irq)
{
        int bit = irq - OCTEON_IRQ_WDOG0;       /* Bit 0-63 of EN1 */
        unsigned long flags;
        uint64_t en1;
        int cpu;

        raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
                en1 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
        raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}
/*
 * Enable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_enable_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2 + 1;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}
/*
 * Disable the irq on the current core for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_ack_v2(unsigned int irq)
{
        int index = cvmx_get_core_num() * 2 + 1;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
}
/*
 * Re-enable the irq on the current core for chips that have the
 * EN*_W1{S,C} registers, unless the irq has been disabled.
 */
static void octeon_irq_ciu1_eoi_v2(unsigned int irq)
{
        struct irq_desc *desc = irq_desc + irq;
        int index = cvmx_get_core_num() * 2 + 1;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);

        if ((desc->status & IRQ_DISABLED) == 0)
                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}
/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu1_disable_all_v2(unsigned int irq)
{
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
        int index;
        int cpu;

        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2 + 1;
                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
        }
}
#ifdef CONFIG_SMP
static int octeon_irq_ciu1_set_affinity(unsigned int irq,
                                        const struct cpumask *dest)
{
        int cpu;
        unsigned long flags;
        int bit = irq - OCTEON_IRQ_WDOG0;       /* Bit 0-63 of EN1 */

        raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                uint64_t en1 =
                        cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
                if (cpumask_test_cpu(cpu, dest))
                        en1 |= 1ull << bit;
                else
                        en1 &= ~(1ull << bit);
                cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
        }
        /*
         * We need to do a read after the last update to make sure all
         * of them are done.
         */
        cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
        raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);

        return 0;
}
/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq,
                                           const struct cpumask *dest)
{
        int cpu;
        int index;
        u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
        for_each_online_cpu(cpu) {
                index = octeon_coreid_for_cpu(cpu) * 2 + 1;
                if (cpumask_test_cpu(cpu, dest))
                        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
                else
                        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
        }
        return 0;
}
#endif
/*
 * Newer Octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu1_v2 = {
        .enable = octeon_irq_ciu1_enable_v2,
        .disable = octeon_irq_ciu1_disable_all_v2,
        .ack = octeon_irq_ciu1_ack_v2,
        .eoi = octeon_irq_ciu1_eoi_v2,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu1_set_affinity_v2,
#endif
};
static struct irq_chip octeon_irq_chip_ciu1 = {
        .enable = octeon_irq_ciu1_enable,
        .disable = octeon_irq_ciu1_disable,
        .ack = octeon_irq_ciu1_ack,
        .eoi = octeon_irq_ciu1_eoi,
#ifdef CONFIG_SMP
        .set_affinity = octeon_irq_ciu1_set_affinity,
#endif
};
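
/*
 * Register an irq_chip for every Octeon interrupt source and unmask
 * the two CIU summary lines (IP2 and IP3) in the CP0 Status register.
 */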
void __init arch_init_irq(void)
{
        int irq;
        struct irq_chip *chip0;
        struct irq_chip *chip0_timer;
        struct irq_chip *chip1;

#ifdef CONFIG_SMP
        /* Set the default affinity to the boot cpu. */
        cpumask_clear(irq_default_affinity);
        cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif

        if (NR_IRQS < OCTEON_IRQ_LAST)
                pr_err("octeon_irq_init: NR_IRQS is set too low\n");

        if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
            OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
            OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) {
                chip0 = &octeon_irq_chip_ciu0_v2;
                chip0_timer = &octeon_irq_chip_ciu0_timer_v2;
                chip1 = &octeon_irq_chip_ciu1_v2;
        } else {
                chip0 = &octeon_irq_chip_ciu0;
                chip0_timer = &octeon_irq_chip_ciu0_timer;
                chip1 = &octeon_irq_chip_ciu1;
        }

        /* 0 - 15 reserved for i8259 master and slave controller. */

        /* 17 - 23 MIPS internal */
        for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
                set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
                                         handle_percpu_irq);
        }

        /* 24 - 87 CIU_INT_SUM0 */
        for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
                switch (irq) {
                case OCTEON_IRQ_GMX_DRP0:
                case OCTEON_IRQ_GMX_DRP1:
                case OCTEON_IRQ_IPD_DRP:
                case OCTEON_IRQ_KEY_ZERO:
                case OCTEON_IRQ_TIMER0:
                case OCTEON_IRQ_TIMER1:
                case OCTEON_IRQ_TIMER2:
                case OCTEON_IRQ_TIMER3:
                        set_irq_chip_and_handler(irq, chip0_timer, handle_percpu_irq);
                        break;
                default:
                        set_irq_chip_and_handler(irq, chip0, handle_percpu_irq);
                        break;
                }
        }

        /* 88 - 151 CIU_INT_SUM1 */
        for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++) {
                set_irq_chip_and_handler(irq, chip1, handle_percpu_irq);
        }

        /* Unmask the CIU summary lines IP2 and IP3. */
        set_c0_status(0x300 << 2);
}
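
/*
 * Low level interrupt dispatch: read the pending CP0 cause bits and
 * hand CIU0 (IP2) and CIU1 (IP3) sources, or the remaining core
 * interrupts, to do_IRQ() until nothing more is pending.
 */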
asmlinkage void plat_irq_dispatch(void)
{
        const unsigned long core_id = cvmx_get_core_num();
        const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
        const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
        const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
        const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
        unsigned long cop0_cause;
        unsigned long cop0_status;
        uint64_t ciu_en;
        uint64_t ciu_sum;

        while (1) {
                cop0_cause = read_c0_cause();
                cop0_status = read_c0_status();
                cop0_cause &= cop0_status;
                cop0_cause &= ST0_IM;

                if (unlikely(cop0_cause & STATUSF_IP2)) {
                        ciu_sum = cvmx_read_csr(ciu_sum0_address);
                        ciu_en = cvmx_read_csr(ciu_en0_address);
                        ciu_sum &= ciu_en;
                        if (likely(ciu_sum))
                                do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1);
                        else
                                spurious_interrupt();
                } else if (unlikely(cop0_cause & STATUSF_IP3)) {
                        ciu_sum = cvmx_read_csr(ciu_sum1_address);
                        ciu_en = cvmx_read_csr(ciu_en1_address);
                        ciu_sum &= ciu_en;
                        if (likely(ciu_sum))
                                do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1);
                        else
                                spurious_interrupt();
                } else if (likely(cop0_cause)) {
                        do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
                } else {
                        break;
                }
        }
}
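
/*
 * CPU hotplug support: check whether an irq is enabled on a given
 * CPU and, in fixup_irqs(), move CIU irqs away from the CPU that is
 * going offline.
 */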
#ifdef CONFIG_HOTPLUG_CPU
static int is_irq_enabled_on_cpu(unsigned int irq, unsigned int cpu)
{
        unsigned int isset;
        int coreid = octeon_coreid_for_cpu(cpu);
        int bit = (irq < OCTEON_IRQ_WDOG0) ?
                irq - OCTEON_IRQ_WORKQ0 : irq - OCTEON_IRQ_WDOG0;

        if (irq < OCTEON_IRQ_WDOG0) {
                isset = (cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)) &
                        (1ull << bit)) >> bit;
        } else {
                isset = (cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)) &
                        (1ull << bit)) >> bit;
        }
        return isset;
}
void fixup_irqs(void)
{
        int irq;

        for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++)
                octeon_irq_core_disable_local(irq);

        for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_GPIO15; irq++) {
                if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
                        /* ciu irq migrates to next cpu */
                        octeon_irq_chip_ciu0.disable(irq);
                        octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
                }
        }

#if 0
        for (irq = OCTEON_IRQ_MBOX0; irq <= OCTEON_IRQ_MBOX1; irq++)
                octeon_irq_mailbox_mask(irq);
#endif

        for (irq = OCTEON_IRQ_UART0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
                if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
                        /* ciu irq migrates to next cpu */
                        octeon_irq_chip_ciu0.disable(irq);
                        octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
                }
        }

        for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED135; irq++) {
                if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
                        /* ciu irq migrates to next cpu */
                        octeon_irq_chip_ciu1.disable(irq);
                        octeon_irq_ciu1_set_affinity(irq, &cpu_online_map);
                }
        }
}
#endif /* CONFIG_HOTPLUG_CPU */