/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>

#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour. When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE	0x00000000
#define IRQF_TRIGGER_RISING	0x00000001
#define IRQF_TRIGGER_FALLING	0x00000002
#define IRQF_TRIGGER_HIGH	0x00000004
#define IRQF_TRIGGER_LOW	0x00000008
#define IRQF_TRIGGER_MASK	(IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
				 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE	0x00000010

/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_DISABLED - keep irqs disabled when calling the action handler
 * IRQF_SAMPLE_RANDOM - irq is used to feed the random generator
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not re-enabled after the hardirq handler finished.
 *                Used by threaded interrupts which need to keep the
 *                irq line disabled until the threaded handler has been run.
 */
#define IRQF_DISABLED		0x00000020
#define IRQF_SAMPLE_RANDOM	0x00000040
#define IRQF_SHARED		0x00000080
#define IRQF_PROBE_SHARED	0x00000100
#define IRQF_TIMER		0x00000200
#define IRQF_PERCPU		0x00000400
#define IRQF_NOBALANCING	0x00000800
#define IRQF_IRQPOLL		0x00001000
#define IRQF_ONESHOT		0x00002000
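
/*
 * Illustrative sketch (not part of this header): how a driver might
 * combine the flags above when requesting a line. The names below
 * ("my_handler", "my_dev") are hypothetical.
 *
 *	err = request_irq(irq, my_handler,
 *			  IRQF_SHARED | IRQF_TRIGGER_FALLING,
 *			  "my_device", my_dev);
 *	if (err)
 *		return err;
 *
 * A shared line requires a non-NULL dev_id so that free_irq() can later
 * identify which handler to remove.
 */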

/*
 * Bits used by threaded handlers:
 * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
 * IRQTF_DIED - handler thread died
 * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
 * IRQTF_AFFINITY - irq thread is requested to adjust affinity
 */
enum {
	IRQTF_RUNTHREAD,
	IRQTF_DIED,
	IRQTF_WARNED,
	IRQTF_AFFINITY,
};

typedef irqreturn_t (*irq_handler_t)(int, void *);

/**
 * struct irqaction - per interrupt action descriptor
 * @handler:	interrupt handler function
 * @flags:	flags (see IRQF_* above)
 * @name:	name of the device
 * @dev_id:	cookie to identify the device
 * @next:	pointer to the next irqaction for shared interrupts
 * @irq:	interrupt number
 * @dir:	pointer to the proc/irq/NN/name entry
 * @thread_fn:	interrupt handler function for threaded interrupts
 * @thread:	thread pointer for threaded interrupts
 * @thread_flags:	flags related to @thread
 */
struct irqaction {
	irq_handler_t handler;
	unsigned long flags;
	const char *name;
	void *dev_id;
	struct irqaction *next;
	int irq;
	struct proc_dir_entry *dir;
	irq_handler_t thread_fn;
	struct task_struct *thread;
	unsigned long thread_flags;
};

extern irqreturn_t no_action(int cpl, void *dev_id);

#ifdef CONFIG_GENERIC_HARDIRQS
extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev);

static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev)
{
	return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}
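
/*
 * Illustrative sketch (not part of this header): a threaded request,
 * where the hardirq handler only checks and acknowledges the device
 * and the slow work runs in process context. All names below are
 * hypothetical.
 *
 *	static irqreturn_t my_quick_check(int irq, void *dev_id)
 *	{
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t my_thread_fn(int irq, void *dev_id)
 *	{
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_threaded_irq(irq, my_quick_check, my_thread_fn,
 *				   IRQF_ONESHOT, "my_device", my_dev);
 *
 * IRQF_ONESHOT keeps the line masked until my_thread_fn() completes.
 */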

extern void exit_irq_thread(void);
#else

extern int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
	    const char *name, void *dev);

/*
 * Special function to avoid ifdeffery in kernel/irq/devres.c which
 * gets magically built by GENERIC_HARDIRQS=n architectures (sparc,
 * m68k). I really love these $@%#!* obvious Makefile references:
 * ../../../kernel/irq/devres.o
 */
static inline int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
		     irq_handler_t thread_fn,
		     unsigned long flags, const char *name, void *dev)
{
	return request_irq(irq, handler, flags, name, dev);
}

static inline void exit_irq_thread(void) { }
#endif

extern void free_irq(unsigned int, void *);

struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
			  irq_handler_t handler, irq_handler_t thread_fn,
			  unsigned long irqflags, const char *devname,
			  void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
		 unsigned long irqflags, const char *devname, void *dev_id)
{
	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
					 devname, dev_id);
}
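
/*
 * Illustrative sketch (not part of this header): the managed variant
 * ties the irq's lifetime to the device, so a driver's probe() needs
 * no matching free_irq() in its error paths. All names below are
 * hypothetical.
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		int err;
 *
 *		err = devm_request_irq(&pdev->dev, my_irq, my_handler,
 *				       0, "my_device", my_dev);
 *		if (err)
 *			return err;
 *		return 0;
 *	}
 */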

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);

/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()	do { } while (0)
#else
# define local_irq_enable_in_hardirq()	local_irq_enable()
#endif

extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);

/* The following three functions are for the core kernel use only. */
#ifdef CONFIG_GENERIC_HARDIRQS
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
#ifdef CONFIG_PM_SLEEP
extern int check_wakeup_irqs(void);
#else
static inline int check_wakeup_irqs(void) { return 0; }
#endif
#else
static inline void suspend_device_irqs(void) { }
static inline void resume_device_irqs(void) { }
static inline int check_wakeup_irqs(void) { return 0; }
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)

extern cpumask_var_t irq_default_affinity;

extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
	return -EINVAL;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

static inline int irq_select_affinity(unsigned int irq) { return 0; }

#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */

#ifdef CONFIG_GENERIC_HARDIRQS
/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs that know that a
 * particular irq context is disabled and is the only irq-context
 * user of a lock, so that it is safe to take the lock in the
 * irq-disabled section without disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
	disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
	disable_irq(irq);
#ifdef CONFIG_LOCKDEP
	local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
	local_irq_enable();
#endif
	enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
	local_irq_restore(*flags);
#endif
	enable_irq(irq);
}
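
/*
 * Illustrative sketch (not part of this header): the save/restore pair
 * around a lock that is otherwise taken only from this irq's handler.
 * "my_irq", "my_lock" and "my_counter" are hypothetical.
 *
 *	unsigned long flags;
 *
 *	disable_irq_nosync_lockdep_irqsave(my_irq, &flags);
 *	spin_lock(&my_lock);
 *	my_counter++;
 *	spin_unlock(&my_lock);
 *	enable_irq_lockdep_irqrestore(my_irq, &flags);
 */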

/* IRQ wakeup (PM) control: */
extern int set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
	return set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
	return set_irq_wake(irq, 0);
}
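
/*
 * Illustrative sketch (not part of this header): a driver that wants
 * its interrupt to wake the system from suspend would typically do, in
 * its suspend callback ("pdev" and "my_irq" are hypothetical):
 *
 *	if (device_may_wakeup(&pdev->dev))
 *		enable_irq_wake(my_irq);
 *
 * and undo it with disable_irq_wake(my_irq) on resume.
 */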

#else /* !CONFIG_GENERIC_HARDIRQS */
/*
 * NOTE: non-genirq architectures, if they want to support the lock
 * validator need to define the methods below in their asm/irq.h
 * files, under an #ifdef CONFIG_LOCKDEP section.
 */
#ifndef CONFIG_LOCKDEP
# define disable_irq_nosync_lockdep(irq)	disable_irq_nosync(irq)
# define disable_irq_nosync_lockdep_irqsave(irq, flags) \
						disable_irq_nosync(irq)
# define disable_irq_lockdep(irq)		disable_irq(irq)
# define enable_irq_lockdep(irq)		enable_irq(irq)
# define enable_irq_lockdep_irqrestore(irq, flags) \
						enable_irq(irq)
# endif

static inline int enable_irq_wake(unsigned int irq)
{
	return 0;
}

static inline int disable_irq_wake(unsigned int irq)
{
	return 0;
}
#endif /* CONFIG_GENERIC_HARDIRQS */

#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x)  (local_softirq_pending() |= (x))
#endif

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()	do { } while (0)
#endif

/* PLEASE, avoid allocating new softirqs unless you really need
   extremely high frequency threaded job scheduling. For almost all
   purposes tasklets are more than enough. E.g. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	BLOCK_SOFTIRQ,
	BLOCK_IOPOLL_SOFTIRQ,
	TASKLET_SOFTIRQ,
	SCHED_SOFTIRQ,
	HRTIMER_SOFTIRQ,
	RCU_SOFTIRQ,	/* Preferably RCU should always be the last softirq */

	NR_SOFTIRQS
};

/* map softirq index to softirq name. update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern char *softirq_to_name[NR_SOFTIRQS];

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage. KAO
 */

struct softirq_action
{
	void (*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
extern void wakeup_softirqd(void);
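
/*
 * Illustrative sketch (not part of this header): a core subsystem that
 * owns a slot in the enum above registers its action once at boot and
 * raises it from irq context. MY_SOFTIRQ, my_action and my_events are
 * hypothetical stand-ins, not real slots or symbols.
 *
 *	static unsigned long my_events;
 *
 *	static void my_action(struct softirq_action *a)
 *	{
 *		my_events++;
 *	}
 *
 *	open_softirq(MY_SOFTIRQ, my_action);
 *	raise_softirq(MY_SOFTIRQ);
 */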

/* This is the worklist that queues up per-cpu softirq work.
 *
 * send_remote_softirq() adds work to these lists, and
 * the softirq handler itself dequeues from them. The queues
 * are protected by disabling local cpu interrupts and they must
 * only be accessed by the local cpu that they are for.
 */
DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);

/* Try to send a softirq to a remote cpu. If this cannot be done, the
 * work will be queued to the local cpu.
 */
extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);

/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
 * and compute the current cpu, passed in as 'this_cpu'.
 */
extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
				  int this_cpu, int softirq);

/* Tasklets --- multithreaded analogue of BHs.

   Main feature distinguishing them from generic softirqs: a tasklet
   runs on only one CPU at a time.

   Main feature distinguishing them from BHs: different tasklets
   may be run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution is still not
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized wrt itself, but not
     wrt other tasklets. If a client needs some intertask synchronization,
     it must provide it with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }


enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_clear_bit();
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}
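
/*
 * Illustrative sketch (not part of this header): declaring a tasklet
 * and scheduling it from a hardirq handler to defer the heavier work.
 * All names below, including process_pending_events(), are
 * hypothetical.
 *
 *	static void my_tasklet_fn(unsigned long data)
 *	{
 *		struct my_device *dev = (struct my_device *)data;
 *
 *		process_pending_events(dev);
 *	}
 *
 *	DECLARE_TASKLET(my_tasklet, my_tasklet_fn, (unsigned long)&my_device);
 *
 *	static irqreturn_t my_irq_handler(int irq, void *dev_id)
 *	{
 *		tasklet_schedule(&my_tasklet);
 *		return IRQ_HANDLED;
 *	}
 */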

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}

extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);

/*
 * This version avoids touching any other tasklets. Needed for kmemcheck
 * in order not to take any page faults while enqueueing this tasklet;
 * consider VERY carefully whether you really need this or
 * tasklet_hi_schedule()...
 */
static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule_first(t);
}


static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic_inc();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);

struct tasklet_hrtimer {
	struct hrtimer		timer;
	struct tasklet_struct	tasklet;
	enum hrtimer_restart	(*function)(struct hrtimer *);
};

extern void
tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
		     enum hrtimer_restart (*function)(struct hrtimer *),
		     clockid_t which_clock, enum hrtimer_mode mode);

static inline
int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
			  const enum hrtimer_mode mode)
{
	return hrtimer_start(&ttimer->timer, time, mode);
}

static inline
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
	hrtimer_cancel(&ttimer->timer);
	tasklet_kill(&ttimer->tasklet);
}
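
/*
 * Illustrative sketch (not part of this header): arming a tasklet
 * hrtimer so the callback's follow-up work runs in softirq rather than
 * hard irq context. "my_timer_fn" and "my_ttimer" are hypothetical.
 *
 *	static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
 *	{
 *		return HRTIMER_NORESTART;
 *	}
 *
 *	tasklet_hrtimer_init(&my_ttimer, my_timer_fn,
 *			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	tasklet_hrtimer_start(&my_ttimer, ktime_set(0, 1000000),
 *			      HRTIMER_MODE_REL);
 */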

/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization. They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. sti();
 * 3. irqs = probe_irq_on();	// "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);	// get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irq's.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */
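
/*
 * Illustrative sketch (not part of this header) of steps 3-6 above,
 * with the hypothetical my_device_trigger_interrupt() standing in for
 * real driver code:
 *
 *	unsigned long irqs;
 *	int irq;
 *
 *	irqs = probe_irq_on();
 *	my_device_trigger_interrupt();
 *	mdelay(20);
 *	irq = probe_irq_off(irqs);
 *	if (irq <= 0)
 *		return -ENODEV;
 */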

#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
	return 0;
}
static inline int probe_irq_off(unsigned long val)
{
	return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}
#else
extern unsigned long probe_irq_on(void);	/* returns 0 on failure */
extern int probe_irq_off(unsigned long);	/* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);	/* returns mask of ISA interrupts */
#endif

#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

struct seq_file;
int show_interrupts(struct seq_file *p, void *v);

struct irq_desc;

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);
extern int arch_init_chip_data(struct irq_desc *desc, int node);

#endif