// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/ia64/kernel/irq_ia64.c
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 *  6/10/99: Updated to bring in sync with x86 version to facilitate
 *	     support for SMP and different interrupt controllers.
 *
 * 09/15/00 Goutham Rao <goutham.rao@intel.com> Implemented pci_irq_to_vector
 *			PCI to vector allocation routine.
 * 04/14/2004 Ashok Raj <ashok.raj@intel.com>
 *			Added CPU Hotplug handling for IPF.
 */
#include <linux/module.h>

#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel_stat.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/acpi.h>
#include <linux/sched.h>

#include <asm/delay.h>
#include <asm/intrinsics.h>
#include <asm/io.h>
#include <asm/hw_irq.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PERFMON
# include <asm/perfmon.h>
#endif
#define IRQ_DEBUG	0

#define IRQ_VECTOR_UNASSIGNED	(0)

#define IRQ_UNUSED		(0)
#define IRQ_USED		(1)
#define IRQ_RSVD		(2)
int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;

/* default base addr of IPI table */
void __iomem *ipi_base_addr = ((void __iomem *)
			       (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));

static cpumask_t vector_allocation_domain(int cpu);
/*
 * Legacy IRQ to IA-64 vector translation table.
 */
__u8 isa_irq_to_vector_map[16] = {
	/* 8259 IRQ translation, first 16 entries */
	0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
};
EXPORT_SYMBOL(isa_irq_to_vector_map);
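
/*
 * Illustrative note on the table above: IA-64 prioritizes external
 * interrupts by vector number (each block of 16 vectors forms one
 * priority class, higher vectors win), so ISA IRQ 0 -- the timer --
 * gets the highest entry, 0x2f, the keyboard (IRQ 1) gets the lowest,
 * 0x20, and the remaining legacy IRQs descend from 0x2e to 0x21.
 */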
DEFINE_SPINLOCK(vector_lock);

struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
	[0 ... NR_IRQS - 1] = {
		.vector = IRQ_VECTOR_UNASSIGNED,
		.domain = CPU_MASK_NONE
	}
};

DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
	[0 ... IA64_NUM_VECTORS - 1] = -1
};

static cpumask_t vector_table[IA64_NUM_VECTORS] = {
	[0 ... IA64_NUM_VECTORS - 1] = CPU_MASK_NONE
};

static int irq_status[NR_IRQS] = {
	[0 ... NR_IRQS - 1] = IRQ_UNUSED
};
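
/*
 * How the bookkeeping above fits together (summary, inferred from the
 * code below): irq_cfg[irq] records the vector and CPU domain bound to
 * an IRQ; the per-CPU vector_irq[] is the reverse map from vector to
 * IRQ used at interrupt time; vector_table[vector] tracks which CPUs
 * already use a vector so allocation can avoid collisions; and
 * irq_status[] marks each IRQ unused, used, or reserved. All four are
 * updated together under vector_lock.
 */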
static inline int find_unassigned_irq(void)
{
	int irq;

	for (irq = IA64_FIRST_DEVICE_VECTOR; irq < NR_IRQS; irq++)
		if (irq_status[irq] == IRQ_UNUSED)
			return irq;
	return -ENOSPC;
}
static inline int find_unassigned_vector(cpumask_t domain)
{
	cpumask_t mask;
	int pos, vector;

	cpumask_and(&mask, &domain, cpu_online_mask);
	if (cpumask_empty(&mask))
		return -EINVAL;

	for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
		vector = IA64_FIRST_DEVICE_VECTOR + pos;
		cpumask_and(&mask, &domain, &vector_table[vector]);
		if (!cpumask_empty(&mask))
			continue;
		return vector;
	}
	return -ENOSPC;
}
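
/*
 * A vector is considered free for a domain when no CPU in that domain
 * already has it in vector_table[]; the same vector number may thus be
 * in use on disjoint domains. __bind_irq_vector() below commits such an
 * allocation, updating all four bookkeeping structures consistently
 * (callers hold vector_lock).
 */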
static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	cpumask_t mask;
	int cpu;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);

	cpumask_and(&mask, &domain, cpu_online_mask);
	if (cpumask_empty(&mask))
		return -EINVAL;
	if ((cfg->vector == vector) && cpumask_equal(&cfg->domain, &domain))
		return 0;
	if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
		return -EBUSY;
	for_each_cpu(cpu, &mask)
		per_cpu(vector_irq, cpu)[vector] = irq;
	cfg->vector = vector;
	cfg->domain = domain;
	irq_status[irq] = IRQ_USED;
	cpumask_or(&vector_table[vector], &vector_table[vector], &domain);
	return 0;
}
int bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __bind_irq_vector(irq, vector, domain);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}
static void __clear_irq_vector(int irq)
{
	int vector, cpu;
	cpumask_t domain;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
	vector = cfg->vector;
	domain = cfg->domain;
	for_each_cpu_and(cpu, &cfg->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = -1;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	irq_status[irq] = IRQ_UNUSED;
	cpumask_andnot(&vector_table[vector], &vector_table[vector], &domain);
}
static void clear_irq_vector(int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	spin_unlock_irqrestore(&vector_lock, flags);
}
int
ia64_native_assign_irq_vector (int irq)
{
	unsigned long flags;
	int vector, cpu;
	cpumask_t domain = CPU_MASK_NONE;

	vector = -ENOSPC;

	spin_lock_irqsave(&vector_lock, flags);
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
		if (vector >= 0)
			break;
	}
	if (vector < 0)
		goto out;
	if (irq == AUTO_ASSIGN)
		irq = vector;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
	return vector;
}
void
ia64_native_free_irq_vector (int vector)
{
	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return;
	clear_irq_vector(vector);
}
int
reserve_irq_vector (int vector)
{
	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return -EINVAL;
	/* normalize bind_irq_vector()'s 0/-errno result to 0 or 1 */
	return !!bind_irq_vector(vector, vector, CPU_MASK_ALL);
}
/*
 * Initialize vector_irq on a new cpu. This function must be called
 * with vector_lock held.
 */
void __setup_vector_irq(int cpu)
{
	int irq, vector;

	/* Clear vector_irq */
	for (vector = 0; vector < IA64_NUM_VECTORS; ++vector)
		per_cpu(vector_irq, cpu)[vector] = -1;
	/* Mark the inuse vectors */
	for (irq = 0; irq < NR_IRQS; ++irq) {
		if (!cpumask_test_cpu(cpu, &irq_cfg[irq].domain))
			continue;
		vector = irq_to_vector(irq);
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
}
#ifdef CONFIG_SMP

static enum vector_domain_type {
	VECTOR_DOMAIN_NONE,
	VECTOR_DOMAIN_PERCPU
} vector_domain_type = VECTOR_DOMAIN_NONE;

static cpumask_t vector_allocation_domain(int cpu)
{
	if (vector_domain_type == VECTOR_DOMAIN_PERCPU)
		return *cpumask_of(cpu);
	return CPU_MASK_ALL;
}
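
/*
 * Note (summarizing the behaviour above): in the default global mode a
 * vector is allocated once across all CPUs (CPU_MASK_ALL), so at most
 * IA64_NUM_DEVICE_VECTORS device vectors exist system-wide; in per-CPU
 * mode each CPU is its own allocation domain, so the same vector number
 * can be reused on every CPU. The mode is chosen by the "vector=" boot
 * parameter handled further down.
 */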
static int __irq_prepare_move(int irq, int cpu)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	int vector;
	cpumask_t domain;

	if (cfg->move_in_progress || cfg->move_cleanup_count)
		return -EBUSY;
	if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
		return -EINVAL;
	if (cpumask_test_cpu(cpu, &cfg->domain))
		return 0;
	domain = vector_allocation_domain(cpu);
	vector = find_unassigned_vector(domain);
	if (vector < 0)
		return -ENOSPC;
	cfg->move_in_progress = 1;
	cfg->old_domain = cfg->domain;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
	return 0;
}
int irq_prepare_move(int irq, int cpu)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __irq_prepare_move(irq, cpu);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}
void irq_complete_move(unsigned irq)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	cpumask_t cleanup_mask;
	int i;

	if (likely(!cfg->move_in_progress))
		return;

	if (unlikely(cpumask_test_cpu(smp_processor_id(), &cfg->old_domain)))
		return;

	cpumask_and(&cleanup_mask, &cfg->old_domain, cpu_online_mask);
	cfg->move_cleanup_count = cpumask_weight(&cleanup_mask);
	for_each_cpu(i, &cleanup_mask)
		ia64_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
	cfg->move_in_progress = 0;
}
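
/*
 * The migration protocol, as implemented above: irq_prepare_move()
 * binds the IRQ to a new vector/domain while remembering old_domain;
 * irq_complete_move() runs when the first interrupt arrives on the new
 * CPU and sends IA64_IRQ_MOVE_VECTOR IPIs to the old CPUs; each old CPU
 * then clears its stale vector_irq/vector_table entries in the cleanup
 * handler below, decrementing move_cleanup_count until the move is
 * fully retired.
 */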
static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
{
	int me = smp_processor_id();
	ia64_vector vector;
	unsigned long flags;

	for (vector = IA64_FIRST_DEVICE_VECTOR;
	     vector < IA64_LAST_DEVICE_VECTOR; vector++) {
		int irq;
		struct irq_desc *desc;
		struct irq_cfg *cfg;

		irq = __this_cpu_read(vector_irq[vector]);
		if (irq < 0)
			continue;

		desc = irq_to_desc(irq);
		cfg = irq_cfg + irq;
		raw_spin_lock(&desc->lock);
		if (!cfg->move_cleanup_count)
			goto unlock;

		if (!cpumask_test_cpu(me, &cfg->old_domain))
			goto unlock;

		spin_lock_irqsave(&vector_lock, flags);
		__this_cpu_write(vector_irq[vector], -1);
		cpumask_clear_cpu(me, &vector_table[vector]);
		spin_unlock_irqrestore(&vector_lock, flags);
		cfg->move_cleanup_count--;
	unlock:
		raw_spin_unlock(&desc->lock);
	}
	return IRQ_HANDLED;
}
static int __init parse_vector_domain(char *arg)
{
	if (!arg)
		return -EINVAL;
	if (!strcmp(arg, "percpu")) {
		vector_domain_type = VECTOR_DOMAIN_PERCPU;
		no_int_routing = 1;
	}
	return 0;
}
early_param("vector", parse_vector_domain);
#else
static cpumask_t vector_allocation_domain(int cpu)
{
	return CPU_MASK_ALL;
}
#endif
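
/*
 * Usage sketch: booting an SMP kernel with "vector=percpu" selects
 * per-CPU vector domains (setting no_int_routing as a side effect);
 * without it, vectors come from the single global domain. On UP
 * kernels the allocation domain is always the full (one-CPU) mask.
 */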
void destroy_and_reserve_irq(unsigned int irq)
{
	unsigned long flags;

	irq_init_desc(irq);
	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	irq_status[irq] = IRQ_RSVD;
	spin_unlock_irqrestore(&vector_lock, flags);
}
/*
 * Dynamic irq allocate and deallocation for MSI
 */
int create_irq(void)
{
	unsigned long flags;
	int irq, vector, cpu;
	cpumask_t domain = CPU_MASK_NONE;

	irq = vector = -ENOSPC;
	spin_lock_irqsave(&vector_lock, flags);
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
		if (vector >= 0)
			break;
	}
	if (vector < 0)
		goto out;
	irq = find_unassigned_irq();
	if (irq < 0)
		goto out;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
	if (irq >= 0)
		irq_init_desc(irq);
	return irq;
}
void destroy_irq(unsigned int irq)
{
	irq_init_desc(irq);
	clear_irq_vector(irq);
}
#ifdef CONFIG_SMP
#	define IS_RESCHEDULE(vec)	(vec == IA64_IPI_RESCHEDULE)
#	define IS_LOCAL_TLB_FLUSH(vec)	(vec == IA64_IPI_LOCAL_TLB_FLUSH)
#else
#	define IS_RESCHEDULE(vec)	(0)
#	define IS_LOCAL_TLB_FLUSH(vec)	(0)
#endif
/*
 * That's where the IVT branches when we get an external
 * interrupt. This branches to the correct hardware IRQ handler via
 * function ptr.
 */
void
ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned long saved_tpr;

#if IRQ_DEBUG
	{
		unsigned long bsp, sp;

		/*
		 * Note: if the interrupt happened while executing in
		 * the context switch routine (ia64_switch_to), we may
		 * get a spurious stack overflow here. This is
		 * because the register and the memory stack are not
		 * switched atomically.
		 */
		bsp = ia64_getreg(_IA64_REG_AR_BSP);
		sp = ia64_getreg(_IA64_REG_SP);

		if ((sp - bsp) < 1024) {
			static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

			if (__ratelimit(&ratelimit)) {
				printk("ia64_handle_irq: DANGER: less than "
				       "1KB of free stack space!!\n"
				       "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
			}
		}
	}
#endif /* IRQ_DEBUG */
	/*
	 * Always set TPR to limit maximum interrupt nesting depth to
	 * 16 (without this, it would be ~240, which could easily lead
	 * to kernel stack overflows).
	 */
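	/*
	 * Why 16 (illustrative arithmetic): TPR masks whole priority
	 * classes, and a vector's class is vector/16, so the 256 vectors
	 * fall into 16 classes. Writing the in-service vector to TPR
	 * below allows at most one nested interrupt per class; without
	 * the write, nearly every higher vector (~240 of them) could
	 * nest.
	 */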
	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irq_this_cpu(irq);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			scheduler_ipi();
			kstat_incr_irq_this_cpu(irq);
		} else {
			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d is not mapped "
				       "to any IRQ!\n", __func__, vector,
				       smp_processor_id());
			} else
				generic_handle_irq(irq);

			/*
			 * Disable interrupts and send EOI:
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	/*
	 * This must be done *after* the ia64_eoi(). For example, the keyboard softirq
	 * handler needs to be able to wait for further keyboard interrupts, which can't
	 * come through until ia64_eoi() has been done.
	 */
	irq_exit();
	set_irq_regs(old_regs);
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * This function emulates interrupt processing when a cpu is about to
 * be brought down.
 */
void ia64_process_pending_intr(void)
{
	ia64_vector vector;
	unsigned long saved_tpr;
	extern unsigned int vectors_in_migration[NR_IRQS];

	vector = ia64_get_ivr();

	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();

	/*
	 * Perform normal interrupt style processing
	 */
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irq_this_cpu(irq);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			kstat_incr_irq_this_cpu(irq);
		} else {
			struct pt_regs *old_regs = set_irq_regs(NULL);

			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			/*
			 * Now call the normal handling path as it would have
			 * been called from a real interrupt handler, passing
			 * NULL for pt_regs and hoping that works. This could
			 * probably share code with ia64_handle_irq().
			 */
			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d not being mapped "
				       "to any IRQ!!\n", __func__, vector,
				       smp_processor_id());
			} else {
				vectors_in_migration[irq] = 0;
				generic_handle_irq(irq);
			}
			set_irq_regs(old_regs);

			/*
			 * Disable interrupts and send EOI
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	irq_exit();
}
#endif
#ifdef CONFIG_SMP

static irqreturn_t dummy_handler (int irq, void *dev_id)
{
	BUG();
	return IRQ_NONE;
}

/*
 * KVM uses this interrupt to force a cpu out of guest mode
 */

#endif
void
register_percpu_irq(ia64_vector vec, irq_handler_t handler, unsigned long flags,
		    const char *name)
{
	unsigned int irq;

	irq = vec;
	BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
	irq_set_status_flags(irq, IRQ_PER_CPU);
	irq_set_chip(irq, &irq_type_ia64_lsapic);
	if (handler)
		if (request_irq(irq, handler, flags, name, NULL))
			pr_err("Failed to request irq %u (%s)\n", irq, name);
	irq_set_handler(irq, handle_percpu_irq);
}
#ifdef CONFIG_SMP
void __init
ia64_native_register_ipi(void)
{
	register_percpu_irq(IA64_IPI_VECTOR, handle_IPI, 0, "IPI");
	register_percpu_irq(IA64_IPI_RESCHEDULE, dummy_handler, 0, "resched");
	register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, dummy_handler, 0,
			    "tlb_flush");
}
#endif
void __init
init_IRQ (void)
{
	acpi_boot_init();
	ia64_register_ipi();
	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL, 0, NULL);
#ifdef CONFIG_SMP
	if (vector_domain_type != VECTOR_DOMAIN_NONE) {
		register_percpu_irq(IA64_IRQ_MOVE_VECTOR,
				    smp_irq_move_cleanup_interrupt, 0,
				    "irq_move");
	}
#endif
#ifdef CONFIG_PERFMON
	pfm_init_percpu();
#endif
}
void
ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
{
	void __iomem *ipi_addr;
	unsigned long ipi_data;
	unsigned long phys_cpu_id;

	phys_cpu_id = cpu_physical_id(cpu);

	/*
	 * cpu number is in 8bit ID and 8bit EID
	 */

	ipi_data = (delivery_mode << 8) | (vector & 0xff);
	ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));
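	/*
	 * Worked example (illustrative): with delivery_mode
	 * IA64_IPI_DM_INT (0) and vector 0xd1, ipi_data is 0x0d1. For a
	 * physical id of 0x0203 (ID=2, EID=3) and no redirection, the
	 * write lands at ipi_base_addr + (0x0203 << 4): each processor's
	 * interrupt command slot is 16 bytes apart, and bit 3 selects
	 * redirectable delivery.
	 */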
	writeq(ipi_data, ipi_addr);
}