]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* irq.c: FRV IRQ handling |
2 | * | |
3 | * Copyright (C) 2003, 2004 Red Hat, Inc. All Rights Reserved. | |
4 | * Written by David Howells (dhowells@redhat.com) | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or | |
7 | * modify it under the terms of the GNU General Public License | |
8 | * as published by the Free Software Foundation; either version | |
9 | * 2 of the License, or (at your option) any later version. | |
10 | */ | |
11 | ||
12 | /* | |
13 | * (mostly architecture independent, will move to kernel/irq.c in 2.5.) | |
14 | * | |
15 | * IRQs are in fact implemented a bit like signal handlers for the kernel. | |
16 | * Naturally it's not a 1:1 relation, but there are similarities. | |
17 | */ | |
18 | ||
19 | #include <linux/config.h> | |
20 | #include <linux/ptrace.h> | |
21 | #include <linux/errno.h> | |
22 | #include <linux/signal.h> | |
23 | #include <linux/sched.h> | |
24 | #include <linux/ioport.h> | |
25 | #include <linux/interrupt.h> | |
26 | #include <linux/timex.h> | |
27 | #include <linux/slab.h> | |
28 | #include <linux/random.h> | |
29 | #include <linux/smp_lock.h> | |
30 | #include <linux/init.h> | |
31 | #include <linux/kernel_stat.h> | |
32 | #include <linux/irq.h> | |
33 | #include <linux/proc_fs.h> | |
34 | #include <linux/seq_file.h> | |
40234401 | 35 | #include <linux/module.h> |
1da177e4 LT |
36 | |
37 | #include <asm/atomic.h> | |
38 | #include <asm/io.h> | |
39 | #include <asm/smp.h> | |
40 | #include <asm/system.h> | |
41 | #include <asm/bitops.h> | |
42 | #include <asm/uaccess.h> | |
43 | #include <asm/pgalloc.h> | |
44 | #include <asm/delay.h> | |
45 | #include <asm/irq.h> | |
46 | #include <asm/irc-regs.h> | |
47 | #include <asm/irq-routing.h> | |
48 | #include <asm/gdb-stub.h> | |
49 | ||
50 | extern void __init fpga_init(void); | |
51 | extern void __init route_mb93493_irqs(void); | |
52 | ||
53 | static void register_irq_proc (unsigned int irq); | |
54 | ||
55 | /* | |
56 | * Special irq handlers. | |
57 | */ | |
58 | ||
59 | irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs) { return IRQ_HANDLED; } | |
60 | ||
61 | atomic_t irq_err_count; | |
62 | ||
63 | /* | |
64 | * Generic, controller-independent functions: | |
65 | */ | |
/*
 * Generate the /proc/interrupts listing for the seq_file iterator.
 *
 * Record 0 is the per-CPU header row; records 1..(NR_IRQ_GROUPS *
 * NR_IRQ_ACTIONS_PER_GROUP) map 1:1 onto group/action slots; the record
 * after that prints the spurious-interrupt error counter.
 * Always returns 0 (seq_file convention).
 */
int show_interrupts(struct seq_file *p, void *v)
{
	struct irqaction *action;
	struct irq_group *group;
	unsigned long flags;
	int level, grp, ix, i, j;

	i = *(loff_t *) v;

	switch (i) {
	case 0:
		/* header row: one column per online CPU */
		seq_printf(p, " ");
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "CPU%d ",j);

		seq_putc(p, '\n');
		break;

	case 1 ... NR_IRQ_GROUPS * NR_IRQ_ACTIONS_PER_GROUP:
		local_irq_save(flags);

		/* decompose the record number into group and action-slot indices */
		grp = (i - 1) / NR_IRQ_ACTIONS_PER_GROUP;
		group = irq_groups[grp];
		if (!group)
			goto skip;

		ix = (i - 1) % NR_IRQ_ACTIONS_PER_GROUP;
		action = group->actions[ix];
		if (!action)
			goto skip;

		seq_printf(p, "%3d: ", i - 1);

#ifndef CONFIG_SMP
		/* NOTE(review): this path indexes kstat with i while the SMP
		 * path and the printed label use i - 1 — looks like an
		 * off-by-one; confirm against the kstat accounting in do_IRQ */
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i - 1]);
#endif

		/* convert the level pointer back into a level index for display */
		level = group->sources[ix]->level - frv_irq_levels;

		seq_printf(p, " %12s@%x", group->sources[ix]->muxname, level);
		seq_printf(p, " %s", action->name);

		/* list any further handlers sharing this slot */
		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
	skip:
		local_irq_restore(flags);
		break;

	case NR_IRQ_GROUPS * NR_IRQ_ACTIONS_PER_GROUP + 1:
		/* trailing record: spurious/error interrupt count */
		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
		break;

	default:
		break;
	}

	return 0;
}
131 | ||
132 | ||
133 | /* | |
134 | * Generic enable/disable code: this just calls | |
135 | * down into the PIC-specific version for the actual | |
136 | * hardware disable after having gotten the irq | |
137 | * controller lock. | |
138 | */ | |
139 | ||
140 | /** | |
141 | * disable_irq_nosync - disable an irq without waiting | |
142 | * @irq: Interrupt to disable | |
143 | * | |
144 | * Disable the selected interrupt line. Disables and Enables are | |
145 | * nested. | |
146 | * Unlike disable_irq(), this function does not ensure existing | |
147 | * instances of the IRQ handler have completed before returning. | |
148 | * | |
149 | * This function may be called from IRQ context. | |
150 | */ | |
151 | ||
152 | void disable_irq_nosync(unsigned int irq) | |
153 | { | |
154 | struct irq_source *source; | |
155 | struct irq_group *group; | |
156 | struct irq_level *level; | |
157 | unsigned long flags; | |
158 | int idx = irq & (NR_IRQ_ACTIONS_PER_GROUP - 1); | |
159 | ||
160 | group = irq_groups[irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP]; | |
161 | if (!group) | |
162 | BUG(); | |
163 | ||
164 | source = group->sources[idx]; | |
165 | if (!source) | |
166 | BUG(); | |
167 | ||
168 | level = source->level; | |
169 | ||
170 | spin_lock_irqsave(&level->lock, flags); | |
171 | ||
172 | if (group->control) { | |
173 | if (!group->disable_cnt[idx]++) | |
174 | group->control(group, idx, 0); | |
175 | } else if (!level->disable_count++) { | |
176 | __set_MASK(level - frv_irq_levels); | |
177 | } | |
178 | ||
179 | spin_unlock_irqrestore(&level->lock, flags); | |
180 | } | |
181 | ||
40234401 DH |
182 | EXPORT_SYMBOL(disable_irq_nosync); |
183 | ||
1da177e4 LT |
184 | /** |
185 | * disable_irq - disable an irq and wait for completion | |
186 | * @irq: Interrupt to disable | |
187 | * | |
188 | * Disable the selected interrupt line. Enables and Disables are | |
189 | * nested. | |
190 | * This function waits for any pending IRQ handlers for this interrupt | |
191 | * to complete before returning. If you use this function while | |
192 | * holding a resource the IRQ handler may need you will deadlock. | |
193 | * | |
194 | * This function may be called - with care - from IRQ context. | |
195 | */ | |
196 | ||
/*
 * Mask the interrupt line and, on SMP, spin until any handler currently
 * running on another CPU has completed (unless we're already inside an
 * interrupt on this CPU, where waiting could deadlock).
 */
void disable_irq(unsigned int irq)
{
	disable_irq_nosync(irq);

#ifdef CONFIG_SMP
	if (local_irq_count(smp_processor_id()))
		return;

	do {
		barrier();
	} while (irq_desc[irq].status & IRQ_INPROGRESS);
#endif
}

EXPORT_SYMBOL(disable_irq);
211 | ||
1da177e4 LT |
212 | /** |
213 | * enable_irq - enable handling of an irq | |
214 | * @irq: Interrupt to enable | |
215 | * | |
216 | * Undoes the effect of one call to disable_irq(). If this | |
217 | * matches the last disable, processing of interrupts on this | |
218 | * IRQ line is re-enabled. | |
219 | * | |
220 | * This function may be called from IRQ context. | |
221 | */ | |
222 | ||
223 | void enable_irq(unsigned int irq) | |
224 | { | |
225 | struct irq_source *source; | |
226 | struct irq_group *group; | |
227 | struct irq_level *level; | |
228 | unsigned long flags; | |
229 | int idx = irq & (NR_IRQ_ACTIONS_PER_GROUP - 1); | |
230 | int count; | |
231 | ||
232 | group = irq_groups[irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP]; | |
233 | if (!group) | |
234 | BUG(); | |
235 | ||
236 | source = group->sources[idx]; | |
237 | if (!source) | |
238 | BUG(); | |
239 | ||
240 | level = source->level; | |
241 | ||
242 | spin_lock_irqsave(&level->lock, flags); | |
243 | ||
244 | if (group->control) | |
245 | count = group->disable_cnt[idx]; | |
246 | else | |
247 | count = level->disable_count; | |
248 | ||
249 | switch (count) { | |
250 | case 1: | |
251 | if (group->control) { | |
252 | if (group->actions[idx]) | |
253 | group->control(group, idx, 1); | |
254 | } else { | |
255 | if (level->usage) | |
256 | __clr_MASK(level - frv_irq_levels); | |
257 | } | |
258 | /* fall-through */ | |
259 | ||
260 | default: | |
261 | count--; | |
262 | break; | |
263 | ||
264 | case 0: | |
265 | printk("enable_irq(%u) unbalanced from %p\n", irq, __builtin_return_address(0)); | |
266 | } | |
267 | ||
268 | if (group->control) | |
269 | group->disable_cnt[idx] = count; | |
270 | else | |
271 | level->disable_count = count; | |
272 | ||
273 | spin_unlock_irqrestore(&level->lock, flags); | |
274 | } | |
275 | ||
40234401 DH |
276 | EXPORT_SYMBOL(enable_irq); |
277 | ||
1da177e4 LT |
278 | /*****************************************************************************/ |
279 | /* | |
280 | * handles all normal device IRQ's | |
281 | * - registers are referred to by the __frame variable (GR28) | |
282 | * - IRQ distribution is complicated in this arch because of the many PICs, the | |
283 | * way they work and the way they cascade | |
284 | */ | |
asmlinkage void do_IRQ(void)
{
	struct irq_source *source;
	int level, cpu;

	/* the priority level of the interrupt being taken is encoded in the
	 * trap base register */
	level = (__frame->tbr >> 4) & 0xf;
	cpu = smp_processor_id();

#if 0
	/* disabled debug scaffolding: mirror the irq count and level to
	 * fixed I/O addresses */
	{
		static u32 irqcount;
		*(volatile u32 *) 0xe1200004 = ~((irqcount++ << 8) | level);
		*(volatile u16 *) 0xffc00100 = (u16) ~0x9999;
		mb();
	}
#endif

	/* catch the exception frame landing dangerously close to the top of
	 * the current task's stack */
	if ((unsigned long) __frame - (unsigned long) (current + 1) < 512)
		BUG();

	/* mask this level, clear its request latch and drop the IRQ priority
	 * so higher-priority interrupts may still be taken while we dispatch */
	__set_MASK(level);
	__clr_RC(level);
	__clr_IRL();

	kstat_this_cpu.irqs[level]++;

	irq_enter();

	/* a level may be shared by several cascaded sources; give each one a
	 * chance to service the interrupt */
	for (source = frv_irq_levels[level].sources; source; source = source->next)
		source->doirq(source);

	irq_exit();

	/* dispatch complete - unmask the level again */
	__clr_MASK(level);

	/* only process softirqs if we didn't interrupt another interrupt handler */
	if ((__frame->psr & PSR_PIL) == PSR_PIL_0)
		if (local_softirq_pending())
			do_softirq();

#ifdef CONFIG_PREEMPT
	/* NOTE(review): this preemption path adjusts preempt_count by hand
	 * and reads current->need_resched — presumably predates the
	 * TIF_NEED_RESCHED conversion; confirm it still builds with
	 * CONFIG_PREEMPT enabled */
	local_irq_disable();
	while (--current->preempt_count == 0) {
		if (!(__frame->psr & PSR_S) ||
		    current->need_resched == 0 ||
		    in_interrupt())
			break;
		current->preempt_count++;
		local_irq_enable();
		preempt_schedule();
		local_irq_disable();
	}
#endif

#if 0
	/* more disabled debug scaffolding */
	{
		*(volatile u16 *) 0xffc00100 = (u16) ~0x6666;
		mb();
	}
#endif

} /* end do_IRQ() */
347 | ||
348 | /*****************************************************************************/ | |
349 | /* | |
350 | * handles all NMIs when not co-opted by the debugger | |
351 | * - registers are referred to by the __frame variable (GR28) | |
352 | */ | |
asmlinkage void do_NMI(void)
{
	/* deliberately empty - NMIs need no servicing here when the debugger
	 * hasn't claimed them (see comment above) */
} /* end do_NMI() */
356 | ||
357 | /*****************************************************************************/ | |
358 | /** | |
359 | * request_irq - allocate an interrupt line | |
360 | * @irq: Interrupt line to allocate | |
361 | * @handler: Function to be called when the IRQ occurs | |
362 | * @irqflags: Interrupt type flags | |
363 | * @devname: An ascii name for the claiming device | |
364 | * @dev_id: A cookie passed back to the handler function | |
365 | * | |
366 | * This call allocates interrupt resources and enables the | |
367 | * interrupt line and IRQ handling. From the point this | |
368 | * call is made your handler function may be invoked. Since | |
369 | * your handler function must clear any interrupt the board | |
370 | * raises, you must take care both to initialise your hardware | |
371 | * and to set up the interrupt handler in the right order. | |
372 | * | |
373 | * Dev_id must be globally unique. Normally the address of the | |
374 | * device data structure is used as the cookie. Since the handler | |
375 | * receives this value it makes sense to use it. | |
376 | * | |
377 | * If your interrupt is shared you must pass a non NULL dev_id | |
378 | * as this is required when freeing the interrupt. | |
379 | * | |
380 | * Flags: | |
381 | * | |
382 | * SA_SHIRQ Interrupt is shared | |
383 | * | |
384 | * SA_INTERRUPT Disable local interrupts while processing | |
385 | * | |
386 | * SA_SAMPLE_RANDOM The interrupt can be used for entropy | |
387 | * | |
388 | */ | |
389 | ||
390 | int request_irq(unsigned int irq, | |
391 | irqreturn_t (*handler)(int, void *, struct pt_regs *), | |
392 | unsigned long irqflags, | |
393 | const char * devname, | |
394 | void *dev_id) | |
395 | { | |
396 | int retval; | |
397 | struct irqaction *action; | |
398 | ||
399 | #if 1 | |
400 | /* | |
401 | * Sanity-check: shared interrupts should REALLY pass in | |
402 | * a real dev-ID, otherwise we'll have trouble later trying | |
403 | * to figure out which interrupt is which (messes up the | |
404 | * interrupt freeing logic etc). | |
405 | */ | |
406 | if (irqflags & SA_SHIRQ) { | |
407 | if (!dev_id) | |
408 | printk("Bad boy: %s (at 0x%x) called us without a dev_id!\n", | |
409 | devname, (&irq)[-1]); | |
410 | } | |
411 | #endif | |
412 | ||
413 | if ((irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP) >= NR_IRQ_GROUPS) | |
414 | return -EINVAL; | |
415 | if (!handler) | |
416 | return -EINVAL; | |
417 | ||
418 | action = (struct irqaction *) kmalloc(sizeof(struct irqaction), GFP_KERNEL); | |
419 | if (!action) | |
420 | return -ENOMEM; | |
421 | ||
422 | action->handler = handler; | |
423 | action->flags = irqflags; | |
424 | action->mask = CPU_MASK_NONE; | |
425 | action->name = devname; | |
426 | action->next = NULL; | |
427 | action->dev_id = dev_id; | |
428 | ||
429 | retval = setup_irq(irq, action); | |
430 | if (retval) | |
431 | kfree(action); | |
432 | return retval; | |
433 | } | |
434 | ||
40234401 DH |
435 | EXPORT_SYMBOL(request_irq); |
436 | ||
1da177e4 LT |
437 | /** |
438 | * free_irq - free an interrupt | |
439 | * @irq: Interrupt line to free | |
440 | * @dev_id: Device identity to free | |
441 | * | |
442 | * Remove an interrupt handler. The handler is removed and if the | |
443 | * interrupt line is no longer in use by any driver it is disabled. | |
444 | * On a shared IRQ the caller must ensure the interrupt is disabled | |
445 | * on the card it drives before calling this function. The function | |
446 | * does not return until any executing interrupts for this IRQ | |
447 | * have completed. | |
448 | * | |
449 | * This function may be called from interrupt context. | |
450 | * | |
451 | * Bugs: Attempting to free an irq in a handler for the same irq hangs | |
452 | * the machine. | |
453 | */ | |
454 | ||
void free_irq(unsigned int irq, void *dev_id)
{
	struct irq_source *source;
	struct irq_group *group;
	struct irq_level *level;
	struct irqaction **p, **pp;
	unsigned long flags;

	/* silently ignore out-of-range interrupt numbers */
	if ((irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP) >= NR_IRQ_GROUPS)
		return;

	group = irq_groups[irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP];
	if (!group)
		BUG();

	source = group->sources[irq & (NR_IRQ_ACTIONS_PER_GROUP - 1)];
	if (!source)
		BUG();

	level = source->level;
	/* p is the head pointer of this slot's handler chain */
	p = &group->actions[irq & (NR_IRQ_ACTIONS_PER_GROUP - 1)];

	spin_lock_irqsave(&level->lock, flags);

	/* walk the chain of (possibly shared) handlers for a matching dev_id */
	for (pp = p; *pp; pp = &(*pp)->next) {
		struct irqaction *action = *pp;

		if (action->dev_id != dev_id)
			continue;

		/* found it - remove from the list of entries */
		*pp = action->next;

		level->usage--;

		/* if we removed the head of the chain, ask the group's
		 * controller to mask this input */
		if (p == pp && group->control)
			group->control(group, irq & (NR_IRQ_ACTIONS_PER_GROUP - 1), 0);

		/* mask the whole level once nothing is using it */
		if (level->usage == 0)
			__set_MASK(level - frv_irq_levels);

		spin_unlock_irqrestore(&level->lock,flags);

#ifdef CONFIG_SMP
		/* Wait to make sure it's not being used on another CPU */
		/* NOTE(review): 'desc' is not declared anywhere in this
		 * function, so this branch cannot compile with CONFIG_SMP —
		 * presumably dead code since FRV is UP-only; confirm */
		while (desc->status & IRQ_INPROGRESS)
			barrier();
#endif
		kfree(action);
		return;
	}
}

EXPORT_SYMBOL(free_irq);
509 | ||
1da177e4 LT |
510 | /* |
511 | * IRQ autodetection code.. | |
512 | * | |
513 | * This depends on the fact that any interrupt that comes in on to an | |
514 | * unassigned IRQ will cause GxICR_DETECT to be set | |
515 | */ | |
516 | ||
517 | static DECLARE_MUTEX(probe_sem); | |
518 | ||
519 | /** | |
520 | * probe_irq_on - begin an interrupt autodetect | |
521 | * | |
522 | * Commence probing for an interrupt. The interrupts are scanned | |
523 | * and a mask of potential interrupt lines is returned. | |
524 | * | |
525 | */ | |
526 | ||
unsigned long probe_irq_on(void)
{
	/* IRQ probing is not really implemented on this arch: just take the
	 * probe mutex to serialise probe sequences and report that no
	 * interrupt lines are candidates */
	down(&probe_sem);
	return 0;
}

EXPORT_SYMBOL(probe_irq_on);
534 | ||
1da177e4 LT |
535 | /* |
536 | * Return a mask of triggered interrupts (this | |
537 | * can handle only legacy ISA interrupts). | |
538 | */ | |
539 | ||
540 | /** | |
541 | * probe_irq_mask - scan a bitmap of interrupt lines | |
542 | * @val: mask of interrupts to consider | |
543 | * | |
544 | * Scan the ISA bus interrupt lines and return a bitmap of | |
545 | * active interrupts. The interrupt probe logic state is then | |
546 | * returned to its previous value. | |
547 | * | |
548 | * Note: we need to scan all the irq's even though we will | |
549 | * only return ISA irq numbers - just so that we reset them | |
550 | * all to a known state. | |
551 | */ | |
unsigned int probe_irq_mask(unsigned long xmask)
{
	/* stub: probing is unsupported here, so just release the mutex taken
	 * by probe_irq_on() and report no triggered interrupts */
	up(&probe_sem);
	return 0;
}

EXPORT_SYMBOL(probe_irq_mask);
559 | ||
1da177e4 LT |
560 | /* |
561 | * Return the one interrupt that triggered (this can | |
562 | * handle any interrupt source). | |
563 | */ | |
564 | ||
565 | /** | |
566 | * probe_irq_off - end an interrupt autodetect | |
567 | * @xmask: mask of potential interrupts (unused) | |
568 | * | |
569 | * Scans the unused interrupt lines and returns the line which | |
570 | * appears to have triggered the interrupt. If no interrupt was | |
571 | * found then zero is returned. If more than one interrupt is | |
572 | * found then minus the first candidate is returned to indicate | |
573 | * their is doubt. | |
574 | * | |
575 | * The interrupt probe logic state is returned to its previous | |
576 | * value. | |
577 | * | |
578 | * BUGS: When used in a module (which arguably shouldnt happen) | |
579 | * nothing prevents two IRQ probe callers from overlapping. The | |
580 | * results of this are non-optimal. | |
581 | */ | |
582 | ||
int probe_irq_off(unsigned long xmask)
{
	/* stub: release the probe mutex and report "in doubt" (-1), since no
	 * probing was actually performed */
	up(&probe_sem);
	return -1;
}

EXPORT_SYMBOL(probe_irq_off);
590 | ||
1da177e4 LT |
591 | /* this was setup_x86_irq but it seems pretty generic */ |
/*
 * Install an interrupt handler on the given line, honouring SA_SHIRQ
 * sharing rules, and unmask the input/level as needed.
 * Returns 0 on success or -EBUSY if the level is in use and sharing was
 * not agreed to by all parties.
 */
int setup_irq(unsigned int irq, struct irqaction *new)
{
	struct irq_source *source;
	struct irq_group *group;
	struct irq_level *level;
	struct irqaction **p, **pp;
	unsigned long flags;

	group = irq_groups[irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP];
	if (!group)
		BUG();

	source = group->sources[irq & (NR_IRQ_ACTIONS_PER_GROUP - 1)];
	if (!source)
		BUG();

	level = source->level;

	/* p is the head pointer of this slot's handler chain */
	p = &group->actions[irq & (NR_IRQ_ACTIONS_PER_GROUP - 1)];

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem,
		 * only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/* must juggle the interrupt processing stuff with interrupts disabled */
	spin_lock_irqsave(&level->lock, flags);

	/* can't share interrupts unless all parties agree to */
	if (level->usage != 0 && !(level->flags & new->flags & SA_SHIRQ)) {
		spin_unlock_irqrestore(&level->lock,flags);
		return -EBUSY;
	}

	/* add new interrupt at end of irq queue */
	pp = p;
	while (*pp)
		pp = &(*pp)->next;

	*pp = new;

	level->usage++;
	/* NOTE(review): this overwrites the level's flags with the newest
	 * handler's rather than combining them — presumably relies on the
	 * SA_SHIRQ agreement check above; confirm */
	level->flags = new->flags;

	/* turn the interrupts on */
	if (level->usage == 1)
		__clr_MASK(level - frv_irq_levels);

	/* p == pp means the chain was empty, i.e. this is the slot's first
	 * handler: ask the group's controller to unmask the input */
	if (p == pp && group->control)
		group->control(group, irq & (NR_IRQ_ACTIONS_PER_GROUP - 1), 1);

	spin_unlock_irqrestore(&level->lock, flags);
	register_irq_proc(irq);
	return 0;
}
659 | ||
660 | static struct proc_dir_entry * root_irq_dir; | |
661 | static struct proc_dir_entry * irq_dir [NR_IRQS]; | |
662 | ||
663 | #define HEX_DIGITS 8 | |
664 | ||
665 | static unsigned int parse_hex_value (const char *buffer, | |
666 | unsigned long count, unsigned long *ret) | |
667 | { | |
668 | unsigned char hexnum [HEX_DIGITS]; | |
669 | unsigned long value; | |
670 | int i; | |
671 | ||
672 | if (!count) | |
673 | return -EINVAL; | |
674 | if (count > HEX_DIGITS) | |
675 | count = HEX_DIGITS; | |
676 | if (copy_from_user(hexnum, buffer, count)) | |
677 | return -EFAULT; | |
678 | ||
679 | /* | |
680 | * Parse the first 8 characters as a hex string, any non-hex char | |
681 | * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same. | |
682 | */ | |
683 | value = 0; | |
684 | ||
685 | for (i = 0; i < count; i++) { | |
686 | unsigned int c = hexnum[i]; | |
687 | ||
688 | switch (c) { | |
689 | case '0' ... '9': c -= '0'; break; | |
690 | case 'a' ... 'f': c -= 'a'-10; break; | |
691 | case 'A' ... 'F': c -= 'A'-10; break; | |
692 | default: | |
693 | goto out; | |
694 | } | |
695 | value = (value << 4) | c; | |
696 | } | |
697 | out: | |
698 | *ret = value; | |
699 | return 0; | |
700 | } | |
701 | ||
702 | ||
703 | static int prof_cpu_mask_read_proc (char *page, char **start, off_t off, | |
704 | int count, int *eof, void *data) | |
705 | { | |
706 | unsigned long *mask = (unsigned long *) data; | |
707 | if (count < HEX_DIGITS+1) | |
708 | return -EINVAL; | |
709 | return sprintf (page, "%08lx\n", *mask); | |
710 | } | |
711 | ||
/*
 * /proc write handler for prof_cpu_mask: parse a hex value from userspace
 * and install it as the new profiling CPU mask.
 *
 * Returns the number of bytes consumed on success or a negative errno.
 * A stray debugging show_state() call (which dumped every task's state on
 * each write to this file) has been removed, and err is now a plain int so
 * negative errnos survive the round trip.
 */
static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
				     unsigned long count, void *data)
{
	unsigned long *mask = (unsigned long *) data, full_count = count;
	unsigned long new_value;
	int err;

	err = parse_hex_value(buffer, count, &new_value);
	if (err)
		return err;

	*mask = new_value;
	return full_count;
}
726 | ||
727 | #define MAX_NAMELEN 10 | |
728 | ||
729 | static void register_irq_proc (unsigned int irq) | |
730 | { | |
731 | char name [MAX_NAMELEN]; | |
732 | ||
733 | if (!root_irq_dir || irq_dir[irq]) | |
734 | return; | |
735 | ||
736 | memset(name, 0, MAX_NAMELEN); | |
737 | sprintf(name, "%d", irq); | |
738 | ||
739 | /* create /proc/irq/1234 */ | |
740 | irq_dir[irq] = proc_mkdir(name, root_irq_dir); | |
741 | } | |
742 | ||
743 | unsigned long prof_cpu_mask = -1; | |
744 | ||
745 | void init_irq_proc (void) | |
746 | { | |
747 | struct proc_dir_entry *entry; | |
748 | int i; | |
749 | ||
750 | /* create /proc/irq */ | |
751 | root_irq_dir = proc_mkdir("irq", 0); | |
752 | ||
753 | /* create /proc/irq/prof_cpu_mask */ | |
754 | entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir); | |
755 | if (!entry) | |
756 | return; | |
757 | ||
758 | entry->nlink = 1; | |
759 | entry->data = (void *)&prof_cpu_mask; | |
760 | entry->read_proc = prof_cpu_mask_read_proc; | |
761 | entry->write_proc = prof_cpu_mask_write_proc; | |
762 | ||
763 | /* | |
764 | * Create entries for all existing IRQs. | |
765 | */ | |
766 | for (i = 0; i < NR_IRQS; i++) | |
767 | register_irq_proc(i); | |
768 | } | |
769 | ||
770 | /*****************************************************************************/ | |
771 | /* | |
772 | * initialise the interrupt system | |
773 | */ | |
void __init init_IRQ(void)
{
	/* route the CPU's own interrupt sources, then bring up the on-board
	 * FPGA's interrupt handling */
	route_cpu_irqs();
	fpga_init();
#ifdef CONFIG_FUJITSU_MB93493
	/* companion-chip (MB93493) interrupt routing, when configured in */
	route_mb93493_irqs();
#endif
} /* end init_IRQ() */