/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
#include <linux/jump_label.h>

#include <linux/atomic.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/sections.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour. When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010

/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend. Does not guarantee
 *                   that this interrupt will wake the system from a suspended
 *                   state. See Documentation/power/suspend-and-interrupts.rst
 * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 * IRQF_NO_THREAD - Interrupt cannot be threaded
 * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
 *                     resume time.
 * IRQF_COND_SUSPEND - If the IRQ is shared with a NO_SUSPEND user, execute this
 *                     interrupt handler after suspending interrupts. For system
 *                     wakeup devices users need to implement wakeup detection in
 *                     their interrupt handlers.
 * IRQF_NO_AUTOEN - Don't enable IRQ or NMI automatically when users request it.
 *                  Users will enable it explicitly by enable_irq() or enable_nmi()
 *                  later.
 * IRQF_NO_DEBUG - Exclude from runaway detection for IPI and similar handlers,
 *                 depends on IRQF_PERCPU.
 * IRQF_COND_ONESHOT - Agree to do IRQF_ONESHOT if already set for a shared
 *                     interrupt.
 */
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define __IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
#define IRQF_ONESHOT		0x00002000
#define IRQF_NO_SUSPEND		0x00004000
#define IRQF_FORCE_RESUME	0x00008000
#define IRQF_NO_THREAD		0x00010000
#define IRQF_EARLY_RESUME	0x00020000
#define IRQF_COND_SUSPEND	0x00040000
#define IRQF_NO_AUTOEN		0x00080000
#define IRQF_NO_DEBUG		0x00100000
#define IRQF_COND_ONESHOT	0x00200000

#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)

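/*
 * Illustrative sketch (not part of this header): a driver requesting a
 * threaded, shared interrupt with the flags above. The handler names and
 * the "my_dev" cookie are hypothetical.
 *
 *	ret = request_threaded_irq(irq, my_quick_check_handler,
 *				   my_thread_fn,
 *				   IRQF_SHARED | IRQF_ONESHOT,
 *				   "my_dev", my_dev);
 *	if (ret)
 *		return ret;
 *
 * IRQF_ONESHOT keeps the line masked until my_thread_fn() has run;
 * IRQF_SHARED requires a non-NULL cookie so the line can be shared.
 */
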
/*
 * These values can be returned by request_any_context_irq() and
 * describe the context the interrupt will be run in.
 *
 * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 */
enum {
	IRQC_IS_HARDIRQ	= 0,
	IRQC_IS_NESTED,
};

typedef irqreturn_t (*irq_handler_t)(int, void *);

/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @percpu_dev_id:	cookie to identify the device
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @flags:	flags (see IRQF_* above)
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @secondary:	pointer to secondary irqaction (force threading)
 * @thread_flags:	flags related to @thread
 * @thread_mask:	bitmask for keeping track of @thread activity
 * @dir:	pointer to the proc/irq/NN/name entry
 */
struct irqaction {
	irq_handler_t		handler;
	void			*dev_id;
	void __percpu		*percpu_dev_id;
	struct irqaction	*next;
	irq_handler_t		thread_fn;
	struct task_struct	*thread;
	struct irqaction	*secondary;
	unsigned int		irq;
	unsigned int		flags;
	unsigned long		thread_flags;
	unsigned long		thread_mask;
	const char		*name;
	struct proc_dir_entry	*dir;
} ____cacheline_internodealigned_in_smp;

extern irqreturn_t no_action(int cpl, void *dev_id);

/*
 * If a (PCI) device interrupt is not connected we set dev->irq to
 * IRQ_NOTCONNECTED. This causes request_irq() to fail with -ENOTCONN, so we
 * can distinguish that case from other error returns.
 *
 * 0x80000000 is guaranteed to be outside the available range of interrupts
 * and easy to distinguish from other possible incorrect values.
 */
#define IRQ_NOTCONNECTED	(1U << 31)

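/*
 * Illustrative sketch (hypothetical driver code): a caller can tell an
 * unwired interrupt line apart from other failures by checking for
 * -ENOTCONN.
 *
 *	ret = request_irq(pdev->irq, my_handler, 0, "my_dev", my_dev);
 *	if (ret == -ENOTCONN)
 *		dev_info(&pdev->dev, "IRQ line not connected\n");
 *	else if (ret)
 *		return ret;
 */
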
extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);

/**
 * request_irq - Add a handler for an interrupt line
 * @irq:	The interrupt line to allocate
 * @handler:	Function to be called when the IRQ occurs.
 *		Primary handler for threaded interrupts.
 *		If NULL, the default primary handler is installed.
 * @flags:	Handling flags
 * @name:	Name of the device generating this interrupt
 * @dev:	A cookie passed to the handler function
 *
 * This call allocates an interrupt and establishes a handler; see
 * the documentation for request_threaded_irq() for details.
 */
static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}

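/*
 * Illustrative sketch (hypothetical names): a minimal non-threaded handler
 * paired with request_irq()/free_irq(). The cookie passed to request_irq()
 * must be the one later given to free_irq().
 *
 *	static irqreturn_t my_handler(int irq, void *dev_id)
 *	{
 *		struct my_dev *md = dev_id;
 *
 *		if (!my_dev_irq_pending(md))
 *			return IRQ_NONE;	// not ours (shared line)
 *		my_dev_ack_irq(md);
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_irq(irq, my_handler, IRQF_SHARED, "my_dev", md);
 *	...
 *	free_irq(irq, md);
 */
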
extern int __must_check
request_any_context_irq(unsigned int irq, irq_handler_t handler,
			unsigned long flags, const char *name, void *dev_id);

extern int __must_check
__request_percpu_irq(unsigned int irq, irq_handler_t handler,
		     unsigned long flags, const char *devname,
		     void __percpu *percpu_dev_id);

extern int __must_check
request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev);

static inline int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *percpu_dev_id)
{
	return __request_percpu_irq(irq, handler, 0,
				    devname, percpu_dev_id);
}

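/*
 * Illustrative sketch (hypothetical names): per-CPU interrupts take a
 * __percpu cookie and must additionally be enabled on each CPU that
 * should receive them.
 *
 *	static DEFINE_PER_CPU(struct my_percpu_state, my_state);
 *
 *	ret = request_percpu_irq(irq, my_percpu_handler, "my_timer",
 *				 &my_state);
 *	...
 *	// on each target CPU, e.g. from a CPU hotplug callback:
 *	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 */
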
extern int __must_check
request_percpu_nmi(unsigned int irq, irq_handler_t handler,
		   const char *devname, void __percpu *dev);

extern const void *free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);

extern const void *free_nmi(unsigned int irq, void *dev_id);
extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id);

struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}

extern int __must_check
devm_request_any_context_irq(struct device *dev, unsigned int irq,
			     irq_handler_t handler, unsigned long irqflags,
			     const char *devname, void *dev_id);

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);

bool irq_has_action(unsigned int irq);
extern void disable_irq_nosync(unsigned int irq);
extern bool disable_hardirq(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void disable_percpu_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
extern void enable_percpu_irq(unsigned int irq, unsigned int type);
extern bool irq_percpu_is_enabled(unsigned int irq);
extern void irq_wake_thread(unsigned int irq, void *dev_id);

extern void disable_nmi_nosync(unsigned int irq);
extern void disable_percpu_nmi(unsigned int irq);
extern void enable_nmi(unsigned int irq);
extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
extern int prepare_percpu_nmi(unsigned int irq);
extern void teardown_percpu_nmi(unsigned int irq);

extern int irq_inject_interrupt(unsigned int irq);

/* The following three functions are for the core kernel use only. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
extern void rearm_wake_irq(unsigned int irq);

/**
 * struct irq_affinity_notify - context for notification of IRQ affinity changes
 * @irq:	Interrupt to which notification applies
 * @kref:	Reference count, for internal use
 * @work:	Work item, for internal use
 * @notify:	Function to be called on change. This will be
 *		called in process context.
 * @release:	Function to be called on release. This will be
 *		called in process context. Once registered, the
 *		structure must only be freed when this function is
 *		called.
 */
struct irq_affinity_notify {
	unsigned int irq;
	struct kref kref;
	struct work_struct work;
	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
	void (*release)(struct kref *ref);
};

#define	IRQ_AFFINITY_MAX_SETS  4

/**
 * struct irq_affinity - Description for automatic irq affinity assignments
 * @pre_vectors:	Don't apply affinity to @pre_vectors at beginning of
 *			the MSI(-X) vector space
 * @post_vectors:	Don't apply affinity to @post_vectors at end of
 *			the MSI(-X) vector space
 * @nr_sets:		The number of interrupt sets for which affinity
 *			spreading is required
 * @set_size:		Array holding the size of each interrupt set
 * @calc_sets:		Callback for calculating the number and size
 *			of interrupt sets
 * @priv:		Private data for usage by @calc_sets, usually a
 *			pointer to driver/device specific data.
 */
struct irq_affinity {
	unsigned int	pre_vectors;
	unsigned int	post_vectors;
	unsigned int	nr_sets;
	unsigned int	set_size[IRQ_AFFINITY_MAX_SETS];
	void		(*calc_sets)(struct irq_affinity *, unsigned int nvecs);
	void		*priv;
};

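/*
 * Illustrative sketch (hypothetical driver): describing two interrupt sets
 * (e.g. read and write queues) whose sizes are computed once the number of
 * vectors is known. my_calc_sets() and its split policy are assumptions.
 *
 *	static void my_calc_sets(struct irq_affinity *affd, unsigned int nvecs)
 *	{
 *		affd->nr_sets = 2;
 *		affd->set_size[0] = nvecs / 2;		// read queues
 *		affd->set_size[1] = nvecs - nvecs / 2;	// write queues
 *	}
 *
 *	struct irq_affinity affd = {
 *		.pre_vectors	= 1,		// admin vector, not spread
 *		.calc_sets	= my_calc_sets,
 *	};
 */
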
/**
 * struct irq_affinity_desc - Interrupt affinity descriptor
 * @mask:	cpumask to hold the affinity assignment
 * @is_managed: 1 if the interrupt is managed internally
 */
struct irq_affinity_desc {
	struct cpumask	mask;
	unsigned int	is_managed : 1;
};

#if defined(CONFIG_SMP)

extern cpumask_var_t irq_default_affinity;

extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask);

extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

extern int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
				     bool setaffinity);

/**
 * irq_update_affinity_hint - Update the affinity hint
 * @irq:	Interrupt to update
 * @m:		cpumask pointer (NULL to clear the hint)
 *
 * Updates the affinity hint, but does not change the affinity of the interrupt.
 */
static inline int
irq_update_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	return __irq_apply_affinity_hint(irq, m, false);
}

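/*
 * Illustrative sketch (hypothetical names): publish a per-queue hint that
 * userspace (e.g. irqbalance) can read from /proc/irq/<n>/affinity_hint,
 * without forcing the affinity in the kernel.
 *
 *	irq_update_affinity_hint(queue->irq, cpumask_of(queue->cpu));
 *	...
 *	irq_update_affinity_hint(queue->irq, NULL);	// clear on teardown
 */
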
/**
 * irq_set_affinity_and_hint - Update the affinity hint and apply the provided
 *			       cpumask to the interrupt
 * @irq:	Interrupt to update
 * @m:		cpumask pointer (NULL to clear the hint)
 *
 * Updates the affinity hint and if @m is not NULL it applies it as the
 * affinity of that interrupt.
 */
static inline int
irq_set_affinity_and_hint(unsigned int irq, const struct cpumask *m)
{
	return __irq_apply_affinity_hint(irq, m, true);
}

/*
 * Deprecated. Use irq_update_affinity_hint() or irq_set_affinity_and_hint()
 * instead.
 */
static inline int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	return irq_set_affinity_and_hint(irq, m);
}

extern int irq_update_affinity_desc(unsigned int irq,
				    struct irq_affinity_desc *affinity);

extern int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);

struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd);

unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
				       const struct irq_affinity *affd);

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	return 0;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq)  { return 0; }

static inline int irq_update_affinity_hint(unsigned int irq,
					   const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_set_affinity_and_hint(unsigned int irq,
					    const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_set_affinity_hint(unsigned int irq,
					const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_update_affinity_desc(unsigned int irq,
					   struct irq_affinity_desc *affinity)
{
	return -EINVAL;
}

static inline int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	return 0;
}

static inline struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd)
{
	return NULL;
}

static inline unsigned int
irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
			  const struct irq_affinity *affd)
{
	return maxvec;
}

#endif /* CONFIG_SMP */

/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that
 * know that a particular irq context is disabled,
 * and which are the only irq-context users of a lock,
 * so that it's safe to take the lock in the irq-disabled
 * section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}

/* IRQ wakeup (PM) control: */
extern int irq_set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return irq_set_irq_wake(irq, 0);
}

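/*
 * Illustrative sketch (hypothetical driver callbacks): mark an IRQ as a
 * system wakeup source across suspend. Calls must be balanced.
 *
 *	static int my_suspend(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(my_irq);
 *		return 0;
 *	}
 *
 *	static int my_resume(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(my_irq);
 *		return 0;
 *	}
 */
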
/*
 * irq_get_irqchip_state/irq_set_irqchip_state specific flags
 */
enum irqchip_irq_state {
	IRQCHIP_STATE_PENDING,		/* Is interrupt pending? */
	IRQCHIP_STATE_ACTIVE,		/* Is interrupt in progress? */
	IRQCHIP_STATE_MASKED,		/* Is interrupt masked? */
	IRQCHIP_STATE_LINE_LEVEL,	/* Is IRQ line high? */
};

extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool *state);
extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
				 bool state);

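/*
 * Illustrative sketch: query whether an interrupt is pending at the
 * irqchip level, e.g. before forwarding it to a guest.
 *
 *	bool pending;
 *	int err = irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending);
 *
 *	if (!err && pending)
 *		...	// line is pending in hardware
 */
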
#ifdef CONFIG_IRQ_FORCED_THREADING
# ifdef CONFIG_PREEMPT_RT
# define force_irqthreads()	(true)
# else
DECLARE_STATIC_KEY_FALSE(force_irqthreads_key);
# define force_irqthreads()	(static_branch_unlikely(&force_irqthreads_key))
# endif
#else
#define force_irqthreads()	(false)
#endif

#ifndef local_softirq_pending

#ifndef local_softirq_pending_ref
#define local_softirq_pending_ref irq_stat.__softirq_pending
#endif

#define local_softirq_pending()	(__this_cpu_read(local_softirq_pending_ref))
#define set_softirq_pending(x)	(__this_cpu_write(local_softirq_pending_ref, (x)))
#define or_softirq_pending(x)	(__this_cpu_or(local_softirq_pending_ref, (x)))

#endif /* local_softirq_pending */

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while(0)
#endif

/* PLEASE, avoid allocating new softirqs unless you really need _really_
   high frequency threaded job scheduling. For almost all purposes
   tasklets are more than enough. E.g. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	IRQ_POLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,	/* Preferably RCU should always be the last softirq */

	NR_SOFTIRQS
};

/*
 * The following vectors can be safely ignored after ksoftirqd is parked:
 *
 * _ RCU:
 *	1) rcutree_migrate_callbacks() migrates the queue.
 *	2) rcutree_report_cpu_dead() reports the final quiescent states.
 *
 * _ IRQ_POLL: irq_poll_cpu_dead() migrates the queue
 *
 * _ (HR)TIMER_SOFTIRQ: (hr)timers_dead_cpu() migrates the queue
 */
#define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(TIMER_SOFTIRQ) | BIT(IRQ_POLL_SOFTIRQ) |\
				   BIT(HRTIMER_SOFTIRQ) | BIT(RCU_SOFTIRQ))

/* Map softirq index to softirq name. Update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern const char * const softirq_to_name[NR_SOFTIRQS];

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage. KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);

#ifdef CONFIG_PREEMPT_RT
extern void do_softirq_post_smp_call_flush(unsigned int was_pending);
#else
static inline void do_softirq_post_smp_call_flush(unsigned int unused)
{
	flush_smp_call_function_queue();
}
#endif

extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);

extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);

DECLARE_PER_CPU(struct task_struct *, ksoftirqd);

static inline struct task_struct *this_cpu_ksoftirqd(void)
{
	return this_cpu_read(ksoftirqd);
}

/* Tasklets --- multithreaded analogue of BHs.

   This API is deprecated. Please consider using threaded IRQs instead:
   https://lore.kernel.org/lkml/20200716081538.2sivhkj4hcyrusem@linutronix.de

   Main feature differentiating them from generic softirqs: a tasklet
   runs on only one CPU at a time.

   Main feature differentiating them from BHs: different tasklets
   may be run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution is still not
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized wrt itself, but not
     wrt other tasklets. If the client needs some intertask synchronization,
     it must provide it with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	bool use_callback;
	union {
		void (*func)(unsigned long data);
		void (*callback)(struct tasklet_struct *t);
	};
	unsigned long data;
};

#define DECLARE_TASKLET(name, _callback)		\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(0),			\
	.callback = _callback,				\
	.use_callback = true,				\
}

#define DECLARE_TASKLET_DISABLED(name, _callback)	\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(1),			\
	.callback = _callback,				\
	.use_callback = true,				\
}

#define from_tasklet(var, callback_tasklet, tasklet_fieldname)	\
	container_of(callback_tasklet, typeof(*var), tasklet_fieldname)

#define DECLARE_TASKLET_OLD(name, _func)		\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(0),			\
	.func = _func,					\
}

#define DECLARE_TASKLET_DISABLED_OLD(name, _func)	\
struct tasklet_struct name = {				\
	.count = ATOMIC_INIT(1),			\
	.func = _func,					\
}

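/*
 * Illustrative sketch (hypothetical names): the modern callback-style API
 * with from_tasklet() to recover the containing object.
 *
 *	struct my_dev {
 *		struct tasklet_struct tasklet;
 *		...
 *	};
 *
 *	static void my_tasklet_fn(struct tasklet_struct *t)
 *	{
 *		struct my_dev *md = from_tasklet(md, t, tasklet);
 *		...
 *	}
 *
 *	tasklet_setup(&md->tasklet, my_tasklet_fn);
 *	tasklet_schedule(&md->tasklet);		// e.g. from the IRQ handler
 *	...
 *	tasklet_kill(&md->tasklet);		// before freeing md
 */
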
enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

void tasklet_unlock(struct tasklet_struct *t);
void tasklet_unlock_wait(struct tasklet_struct *t);
void tasklet_unlock_spin_wait(struct tasklet_struct *t);
#else
static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
static inline void tasklet_unlock(struct tasklet_struct *t) { }
static inline void tasklet_unlock_wait(struct tasklet_struct *t) { }
static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t) { }
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic();
}

/*
 * Do not use in new code. Disabling tasklets from atomic contexts is
 * error prone and should be avoided.
 */
static inline void tasklet_disable_in_atomic(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_spin_wait(t);
	smp_mb();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);
extern void tasklet_setup(struct tasklet_struct *t,
			  void (*callback)(struct tasklet_struct *));

/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization. They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irq's.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */

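/*
 * Illustrative sketch (hypothetical device helpers) of the sequence above:
 *
 *	unsigned long irqs;
 *	int irq;
 *
 *	my_dev_mask_irq(md);			// step 1
 *	irqs = probe_irq_on();			// step 3
 *	my_dev_trigger_irq(md);			// step 4
 *	mdelay(10);				// step 5
 *	irq = probe_irq_off(irqs);		// step 6
 *	my_dev_ack_irq(md);			// step 7
 *	if (irq <= 0)
 *		...				// none or multiple: retry
 */
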
#if !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif

#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

#ifdef CONFIG_IRQ_TIMINGS
void irq_timings_enable(void);
void irq_timings_disable(void);
u64 irq_timings_next_event(u64 now);
#endif

struct seq_file;
int show_interrupts(struct seq_file *p, void *v);
int arch_show_interrupts(struct seq_file *p, int prec);

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);

/*
 * We want to know which function is an entrypoint of a hardirq or a softirq.
 */
#ifndef __irq_entry
# define __irq_entry __section(".irqentry.text")
#endif

#define __softirq_entry  __section(".softirqentry.text")

#endif /* _LINUX_INTERRUPT_H */