/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
#include <linux/irqflags.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>

#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour. When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010

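/*
 * Illustrative sketch (not part of the original header): selecting an
 * explicit trigger type when requesting a line. "foo_handler" and "foo"
 * are hypothetical driver names; request_irq() is declared further below.
 *
 *	err = request_irq(irq, foo_handler, IRQF_TRIGGER_FALLING,
 *			  "foo", foo);
 *	if (err)
 *		return err;
 */
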
/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_DISABLED - keep irqs disabled when calling the action handler
 * IRQF_SAMPLE_RANDOM - irq is used to feed the random generator
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 */
#define IRQF_DISABLED		0x00000020
#define IRQF_SAMPLE_RANDOM	0x00000040
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
#define IRQF_ONESHOT		0x00002000

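/*
 * Illustrative sketch: a handler on a line requested with IRQF_SHARED
 * must check whether its own device raised the interrupt and return
 * IRQ_NONE if not, so the other handlers on the line get a chance to
 * run. All "foo"/"FOO" names below are hypothetical.
 *
 *	static irqreturn_t foo_handler(int irq, void *dev_id)
 *	{
 *		struct foo_device *foo = dev_id;
 *
 *		if (!(readl(foo->regs + FOO_STAT) & FOO_IRQ_PENDING))
 *			return IRQ_NONE;	-- some other device's irq
 *
 *		writel(FOO_IRQ_PENDING, foo->regs + FOO_ACK);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_irq(irq, foo_handler, IRQF_SHARED | IRQF_TRIGGER_LOW,
 *			  "foo", foo);
 */
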
/*
 * Bits used by threaded handlers:
 * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
 * IRQTF_DIED      - handler thread died
 * IRQTF_WARNED    - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
 * IRQTF_AFFINITY  - irq thread is requested to adjust affinity
 */
enum {
	IRQTF_RUNTHREAD,
	IRQTF_DIED,
	IRQTF_WARNED,
	IRQTF_AFFINITY,
};

typedef irqreturn_t (*irq_handler_t)(int, void *);

/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @flags:	flags (see IRQF_* above)
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @dir:	pointer to the proc/irq/NN/name entry
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @thread_flags:	flags related to @thread
 */
struct irqaction {
	irq_handler_t handler;
	unsigned long flags;
	const char *name;
	void *dev_id;
	struct irqaction *next;
	int irq;
	struct proc_dir_entry *dir;
	irq_handler_t thread_fn;
	struct task_struct *thread;
	unsigned long thread_flags;
};

extern irqreturn_t no_action(int cpl, void *dev_id);

#ifdef CONFIG_GENERIC_HARDIRQS
extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);

static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}

extern void exit_irq_thread(void);
#else

extern int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev);

/*
 * Special function to avoid ifdeffery in kernel/irq/devres.c which
 * gets magically built by GENERIC_HARDIRQS=n architectures (sparc,
 * m68k). I really love these $@%#!* obvious Makefile references:
 * ../../../kernel/irq/devres.o
 */
static inline int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev)
{
	return request_irq(irq, handler, flags, name, dev);
}

static inline void exit_irq_thread(void) { }
#endif

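/*
 * Illustrative sketch (hypothetical "foo" driver): request_threaded_irq()
 * splits the work into a minimal hardirq check and a sleepable thread
 * function. Returning IRQ_WAKE_THREAD from the hardirq part wakes the
 * thread; IRQF_ONESHOT keeps the line masked until the thread is done.
 *
 *	static irqreturn_t foo_hard_check(int irq, void *dev_id)
 *	{
 *		return foo_irq_is_ours(dev_id) ? IRQ_WAKE_THREAD : IRQ_NONE;
 *	}
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		foo_handle_event(dev_id);	-- may sleep, e.g. i2c access
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_threaded_irq(irq, foo_hard_check, foo_thread_fn,
 *				   IRQF_ONESHOT, "foo", foo);
 */
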
extern void free_irq(unsigned int, void *);

struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);

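/*
 * Illustrative sketch (hypothetical "foo" platform driver): the devm_*
 * variants tie the IRQ's lifetime to the struct device, so no explicit
 * free_irq() is needed on the error and removal paths.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		...
 *		err = devm_request_irq(&pdev->dev, foo->irq, foo_handler,
 *				       0, dev_name(&pdev->dev), foo);
 *		if (err)
 *			return err;	-- the IRQ is released automatically
 *		...
 *	}
 */
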
/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif

extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);

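/*
 * Illustrative sketch: disable_irq() waits for any handler currently
 * running on the line to complete, so it can fence hardware
 * reconfiguration against the handler ("foo" names are hypothetical):
 *
 *	disable_irq(foo->irq);		-- no handler runs past this point
 *	foo_reprogram_hardware(foo);
 *	enable_irq(foo->irq);
 */
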
/* The following three functions are for core kernel use only. */
#ifdef CONFIG_GENERIC_HARDIRQS
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
#ifdef CONFIG_PM_SLEEP
extern int check_wakeup_irqs(void);
#else
static inline int check_wakeup_irqs(void) { return 0; }
#endif
#else
static inline void suspend_device_irqs(void) { };
static inline void resume_device_irqs(void) { };
static inline int check_wakeup_irqs(void) { return 0; }
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)

extern cpumask_var_t irq_default_affinity;

extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq) { return 0; }

#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */

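/*
 * Illustrative sketch: pinning an interrupt to CPU 0, assuming the
 * architecture supports affinity changes (the "foo_irq" number is
 * hypothetical):
 *
 *	if (irq_can_set_affinity(foo_irq))
 *		irq_set_affinity(foo_irq, cpumask_of(0));
 */
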
#ifdef CONFIG_GENERIC_HARDIRQS
/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs which know
 * that a particular irq context is disabled, and which are
 * the only irq-context users of a lock, so that it is safe
 * to take the lock in the irq-disabled section without
 * disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}

/* IRQ wakeup (PM) control: */
extern int set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return set_irq_wake(irq, 0);
}

#else /* !CONFIG_GENERIC_HARDIRQS */
/*
 * NOTE: non-genirq architectures, if they want to support the lock
 * validator need to define the methods below in their asm/irq.h
 * files, under an #ifdef CONFIG_LOCKDEP section.
 */
#ifndef CONFIG_LOCKDEP
# define disable_irq_nosync_lockdep(irq)	disable_irq_nosync(irq)
# define disable_irq_nosync_lockdep_irqsave(irq, flags) \
						disable_irq_nosync(irq)
# define disable_irq_lockdep(irq)		disable_irq(irq)
# define enable_irq_lockdep(irq)		enable_irq(irq)
# define enable_irq_lockdep_irqrestore(irq, flags) \
						enable_irq(irq)
# endif

static inline int enable_irq_wake(unsigned int irq)
{
	return 0;
}

static inline int disable_irq_wake(unsigned int irq)
{
	return 0;
}
#endif /* CONFIG_GENERIC_HARDIRQS */

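/*
 * Illustrative sketch (hypothetical "foo" driver): marking the device
 * interrupt as a wakeup source across suspend:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(foo->irq);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(foo->irq);
 *		return 0;
 *	}
 */
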
#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x)  (local_softirq_pending() |= (x))
#endif

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while(0)
#endif

/* PLEASE, avoid allocating new softirqs unless you _really_ need
   high frequency threaded job scheduling. For almost all purposes
   tasklets are more than enough. E.g. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	BLOCK_IOPOLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,	/* Preferably RCU should always be the last softirq */

	NR_SOFTIRQS
};

/* Map softirq index to softirq name. Update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern char *softirq_to_name[NR_SOFTIRQS];

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage. KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
extern void wakeup_softirqd(void);

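/*
 * Illustrative sketch of how a core subsystem uses this API (new
 * softirqs should not be added, per the comment above the enum).
 * "FOO_SOFTIRQ" and the action are hypothetical; open_softirq() is
 * typically called once at init time, raise_softirq() from irq context:
 *
 *	static void foo_softirq_action(struct softirq_action *h)
 *	{
 *		-- drain this cpu's pending work, irqs enabled
 *	}
 *
 *	open_softirq(FOO_SOFTIRQ, foo_softirq_action);
 *	...
 *	raise_softirq(FOO_SOFTIRQ);
 */
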
/* This is the worklist that queues up per-cpu softirq work.
 *
 * send_remote_softirq() adds work to these lists, and
 * the softirq handler itself dequeues from them. The queues
 * are protected by disabling local cpu interrupts and they must
 * only be accessed by the local cpu that they are for.
 */
DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);

/* Try to send a softirq to a remote cpu. If this cannot be done, the
 * work will be queued to the local cpu.
 */
extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);

/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
 * and compute the current cpu, passed in as 'this_cpu'.
 */
extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
				  int this_cpu, int softirq);

/* Tasklets --- multithreaded analogue of BHs.

   Main feature differing them from generic softirqs: a tasklet
   runs on only one CPU at a time.

   Main feature differing them from BHs: different tasklets
   may be run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution is still not
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized wrt itself, but not
     wrt other tasklets. If the client needs some intertask synchronization,
     he makes it with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }


enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_clear_bit();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

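/*
 * Illustrative sketch (hypothetical "foo" driver): declaring a tasklet
 * and deferring work to it from the hardirq handler:
 *
 *	static void foo_do_tasklet(unsigned long data)
 *	{
 *		struct foo_device *foo = (struct foo_device *)data;
 *		-- bottom-half work, runs in softirq context
 *	}
 *
 *	DECLARE_TASKLET(foo_tasklet, foo_do_tasklet, 0);
 *
 *	static irqreturn_t foo_handler(int irq, void *dev_id)
 *	{
 *		foo_ack_irq(dev_id);
 *		tasklet_schedule(&foo_tasklet);
 *		return IRQ_HANDLED;
 *	}
 *
 * Dynamically allocated tasklets use tasklet_init() (declared below)
 * instead, and must be flushed with tasklet_kill() before their memory
 * is freed.
 */
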
extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);

/*
 * This version avoids touching any other tasklets. Needed for kmemcheck
 * in order not to take any page faults while enqueueing this tasklet;
 * consider VERY carefully whether you really need this or
 * tasklet_hi_schedule()...
 */
static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule_first(t);
}

static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic_inc();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);

struct tasklet_hrtimer {
	struct hrtimer		timer;
	struct tasklet_struct	tasklet;
	enum hrtimer_restart	(*function)(struct hrtimer *);
};

extern void
tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
		     enum hrtimer_restart (*function)(struct hrtimer *),
		     clockid_t which_clock, enum hrtimer_mode mode);

static inline
int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
			  const enum hrtimer_mode mode)
{
	return hrtimer_start(&ttimer->timer, time, mode);
}

static inline
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
	hrtimer_cancel(&ttimer->timer);
	tasklet_kill(&ttimer->tasklet);
}

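/*
 * Illustrative sketch: a tasklet_hrtimer fires its callback from a
 * tasklet, i.e. in softirq context instead of hard interrupt context
 * ("foo" names are hypothetical):
 *
 *	static enum hrtimer_restart foo_timeout(struct hrtimer *t)
 *	{
 *		-- runs from the tasklet, softirq context
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	tasklet_hrtimer_init(&foo->ttimer, foo_timeout,
 *			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&foo->ttimer, ktime_set(0, NSEC_PER_MSEC),
 *			      HRTIMER_MODE_REL);
 */
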
/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization. They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();	// "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);	// get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irqs.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */

#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif

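/*
 * Illustrative sketch of the probing recipe above ("foo" helpers are
 * hypothetical):
 *
 *	unsigned long mask;
 *	int irq;
 *
 *	foo_mask_device_irq(foo);
 *	mask = probe_irq_on();
 *	foo_unmask_and_trigger_irq(foo);
 *	mdelay(10);			-- give the interrupt time to arrive
 *	irq = probe_irq_off(mask);	-- 0: none seen, <0: several seen
 *	if (irq > 0)
 *		foo->irq = irq;
 */
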
#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

#if defined(CONFIG_GENERIC_HARDIRQS) && defined(CONFIG_DEBUG_SHIRQ)
extern void debug_poll_all_shared_irqs(void);
#else
static inline void debug_poll_all_shared_irqs(void) { }
#endif

int show_interrupts(struct seq_file *p, void *v);

struct irq_desc;

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);
extern int arch_init_chip_data(struct irq_desc *desc, int node);

#endif