]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* irq.c: FRV IRQ handling |
2 | * | |
3 | * Copyright (C) 2003, 2004 Red Hat, Inc. All Rights Reserved. | |
4 | * Written by David Howells (dhowells@redhat.com) | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or | |
7 | * modify it under the terms of the GNU General Public License | |
8 | * as published by the Free Software Foundation; either version | |
9 | * 2 of the License, or (at your option) any later version. | |
10 | */ | |
11 | ||
12 | /* | |
13 | * (mostly architecture independent, will move to kernel/irq.c in 2.5.) | |
14 | * | |
15 | * IRQs are in fact implemented a bit like signal handlers for the kernel. | |
16 | * Naturally it's not a 1:1 relation, but there are similarities. | |
17 | */ | |
18 | ||
19 | #include <linux/config.h> | |
20 | #include <linux/ptrace.h> | |
21 | #include <linux/errno.h> | |
22 | #include <linux/signal.h> | |
23 | #include <linux/sched.h> | |
24 | #include <linux/ioport.h> | |
25 | #include <linux/interrupt.h> | |
26 | #include <linux/timex.h> | |
27 | #include <linux/slab.h> | |
28 | #include <linux/random.h> | |
29 | #include <linux/smp_lock.h> | |
30 | #include <linux/init.h> | |
31 | #include <linux/kernel_stat.h> | |
32 | #include <linux/irq.h> | |
33 | #include <linux/proc_fs.h> | |
34 | #include <linux/seq_file.h> | |
40234401 | 35 | #include <linux/module.h> |
1da177e4 LT |
36 | |
37 | #include <asm/atomic.h> | |
38 | #include <asm/io.h> | |
39 | #include <asm/smp.h> | |
40 | #include <asm/system.h> | |
41 | #include <asm/bitops.h> | |
42 | #include <asm/uaccess.h> | |
43 | #include <asm/pgalloc.h> | |
44 | #include <asm/delay.h> | |
45 | #include <asm/irq.h> | |
46 | #include <asm/irc-regs.h> | |
47 | #include <asm/irq-routing.h> | |
48 | #include <asm/gdb-stub.h> | |
49 | ||
50 | extern void __init fpga_init(void); | |
51 | extern void __init route_mb93493_irqs(void); | |
52 | ||
53 | static void register_irq_proc (unsigned int irq); | |
54 | ||
55 | /* | |
56 | * Special irq handlers. | |
57 | */ | |
58 | ||
59 | irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs) { return IRQ_HANDLED; } | |
60 | ||
61 | atomic_t irq_err_count; | |
62 | ||
63 | /* | |
64 | * Generic, controller-independent functions: | |
65 | */ | |
/*
 * /proc/interrupts seq_file show routine
 * - the iterator index selects what to print:
 *     0                      -> the per-CPU column header line
 *     1..NR_IRQ_GROUPS*NR_IRQ_ACTIONS_PER_GROUP
 *                            -> one possible IRQ action slot (blank slots skipped)
 *     last index             -> the spurious-interrupt error counter
 * - NOTE(review): the padding inside the literal strings below may have been
 *   collapsed by the extraction this file went through - verify column
 *   alignment against the original source
 */
int show_interrupts(struct seq_file *p, void *v)
{
	struct irqaction *action;
	struct irq_group *group;
	unsigned long flags;
	int level, grp, ix, i, j;

	i = *(loff_t *) v;

	switch (i) {
	case 0:
		/* header line: one column per online CPU */
		seq_printf(p, " ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d ",j);

		seq_putc(p, '\n');
		break;

	case 1 ... NR_IRQ_GROUPS * NR_IRQ_ACTIONS_PER_GROUP:
		/* IRQs disabled while we walk the action chains */
		local_irq_save(flags);

		grp = (i - 1) / NR_IRQ_ACTIONS_PER_GROUP;
		group = irq_groups[grp];
		if (!group)
			goto skip;

		ix = (i - 1) % NR_IRQ_ACTIONS_PER_GROUP;
		action = group->actions[ix];
		if (!action)
			goto skip;

		seq_printf(p, "%3d: ", i - 1);

#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		/* one count column per online CPU */
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i - 1]);
#endif

		/* CPU IRQ level this source is cascaded onto */
		level = group->sources[ix]->level - frv_irq_levels;

		seq_printf(p, " %12s@%x", group->sources[ix]->muxname, level);
		seq_printf(p, " %s", action->name);

		/* list any further handlers sharing this slot */
		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
	skip:
		local_irq_restore(flags);
		break;

	case NR_IRQ_GROUPS * NR_IRQ_ACTIONS_PER_GROUP + 1:
		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
		break;

	default:
		break;
	}

	return 0;
}
129 | ||
130 | ||
131 | /* | |
132 | * Generic enable/disable code: this just calls | |
133 | * down into the PIC-specific version for the actual | |
134 | * hardware disable after having gotten the irq | |
135 | * controller lock. | |
136 | */ | |
137 | ||
138 | /** | |
139 | * disable_irq_nosync - disable an irq without waiting | |
140 | * @irq: Interrupt to disable | |
141 | * | |
142 | * Disable the selected interrupt line. Disables and Enables are | |
143 | * nested. | |
144 | * Unlike disable_irq(), this function does not ensure existing | |
145 | * instances of the IRQ handler have completed before returning. | |
146 | * | |
147 | * This function may be called from IRQ context. | |
148 | */ | |
149 | ||
void disable_irq_nosync(unsigned int irq)
{
	struct irq_source *source;
	struct irq_group *group;
	struct irq_level *level;
	unsigned long flags;
	int idx = irq & (NR_IRQ_ACTIONS_PER_GROUP - 1);	/* action slot within group */

	/* map the IRQ number to its group, source and CPU interrupt level */
	group = irq_groups[irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP];
	if (!group)
		BUG();

	source = group->sources[idx];
	if (!source)
		BUG();

	level = source->level;

	spin_lock_irqsave(&level->lock, flags);

	if (group->control) {
		/* subsidiary PIC: mask just this source on the first disable */
		if (!group->disable_cnt[idx]++)
			group->control(group, idx, 0);
	} else if (!level->disable_count++) {
		/* no per-group controller: mask the whole CPU IRQ level */
		__set_MASK(level - frv_irq_levels);
	}

	spin_unlock_irqrestore(&level->lock, flags);
}
179 | ||
40234401 DH |
180 | EXPORT_SYMBOL(disable_irq_nosync); |
181 | ||
1da177e4 LT |
182 | /** |
183 | * disable_irq - disable an irq and wait for completion | |
184 | * @irq: Interrupt to disable | |
185 | * | |
186 | * Disable the selected interrupt line. Enables and Disables are | |
187 | * nested. | |
188 | * This function waits for any pending IRQ handlers for this interrupt | |
189 | * to complete before returning. If you use this function while | |
190 | * holding a resource the IRQ handler may need you will deadlock. | |
191 | * | |
192 | * This function may be called - with care - from IRQ context. | |
193 | */ | |
194 | ||
void disable_irq(unsigned int irq)
{
	disable_irq_nosync(irq);

#ifdef CONFIG_SMP
	/* unless we're already in interrupt context, spin until any handler
	 * running on another CPU has finished */
	if (!local_irq_count(smp_processor_id())) {
		do {
			barrier();
		} while (irq_desc[irq].status & IRQ_INPROGRESS);
	}
#endif
}
207 | ||
40234401 DH |
208 | EXPORT_SYMBOL(disable_irq); |
209 | ||
1da177e4 LT |
210 | /** |
211 | * enable_irq - enable handling of an irq | |
212 | * @irq: Interrupt to enable | |
213 | * | |
214 | * Undoes the effect of one call to disable_irq(). If this | |
215 | * matches the last disable, processing of interrupts on this | |
216 | * IRQ line is re-enabled. | |
217 | * | |
218 | * This function may be called from IRQ context. | |
219 | */ | |
220 | ||
void enable_irq(unsigned int irq)
{
	struct irq_source *source;
	struct irq_group *group;
	struct irq_level *level;
	unsigned long flags;
	int idx = irq & (NR_IRQ_ACTIONS_PER_GROUP - 1);	/* action slot within group */
	int count;

	group = irq_groups[irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP];
	if (!group)
		BUG();

	source = group->sources[idx];
	if (!source)
		BUG();

	level = source->level;

	spin_lock_irqsave(&level->lock, flags);

	/* read whichever disable-nesting counter applies to this IRQ */
	if (group->control)
		count = group->disable_cnt[idx];
	else
		count = level->disable_count;

	switch (count) {
	case 1:
		/* last outstanding disable is being undone: unmask the
		 * hardware again, then fall through to decrement the count */
		if (group->control) {
			if (group->actions[idx])
				group->control(group, idx, 1);
		} else {
			if (level->usage)
				__clr_MASK(level - frv_irq_levels);
		}
		/* fall-through */

	default:
		count--;
		break;

	case 0:
		/* enable without matching disable: warn and leave count at 0 */
		printk("enable_irq(%u) unbalanced from %p\n", irq, __builtin_return_address(0));
	}

	/* write the updated nesting count back to the right place */
	if (group->control)
		group->disable_cnt[idx] = count;
	else
		level->disable_count = count;

	spin_unlock_irqrestore(&level->lock, flags);
}
273 | ||
40234401 DH |
274 | EXPORT_SYMBOL(enable_irq); |
275 | ||
1da177e4 LT |
276 | /*****************************************************************************/ |
277 | /* | |
278 | * handles all normal device IRQ's | |
279 | * - registers are referred to by the __frame variable (GR28) | |
280 | * - IRQ distribution is complicated in this arch because of the many PICs, the | |
281 | * way they work and the way they cascade | |
282 | */ | |
asmlinkage void do_IRQ(void)
{
	struct irq_source *source;
	int level, cpu;

	irq_enter();

	/* the trap base register encodes the level of the interrupt taken */
	level = (__frame->tbr >> 4) & 0xf;
	cpu = smp_processor_id();

	/* appears to guard against the exception frame landing within 512
	 * bytes of the task structure, i.e. imminent kernel stack overflow
	 * - NOTE(review): confirm against the FRV stack layout */
	if ((unsigned long) __frame - (unsigned long) (current + 1) < 512)
		BUG();

	/* mask this level and acknowledge the interrupt before dispatching */
	__set_MASK(level);
	__clr_RC(level);
	__clr_IRL();

	kstat_this_cpu.irqs[level]++;

	/* poll every source cascaded onto this CPU interrupt level */
	for (source = frv_irq_levels[level].sources; source; source = source->next)
		source->doirq(source);

	/* re-enable the level now all its sources have been serviced */
	__clr_MASK(level);

	irq_exit();

} /* end do_IRQ() */
310 | ||
311 | /*****************************************************************************/ | |
312 | /* | |
313 | * handles all NMIs when not co-opted by the debugger | |
314 | * - registers are referred to by the __frame variable (GR28) | |
315 | */ | |
asmlinkage void do_NMI(void)
{
	/* deliberately empty: NMIs are ignored here (the GDB stub handles
	 * them itself when it has co-opted the NMI vector) */
} /* end do_NMI() */
319 | ||
320 | /*****************************************************************************/ | |
321 | /** | |
322 | * request_irq - allocate an interrupt line | |
323 | * @irq: Interrupt line to allocate | |
324 | * @handler: Function to be called when the IRQ occurs | |
325 | * @irqflags: Interrupt type flags | |
326 | * @devname: An ascii name for the claiming device | |
327 | * @dev_id: A cookie passed back to the handler function | |
328 | * | |
329 | * This call allocates interrupt resources and enables the | |
330 | * interrupt line and IRQ handling. From the point this | |
331 | * call is made your handler function may be invoked. Since | |
332 | * your handler function must clear any interrupt the board | |
333 | * raises, you must take care both to initialise your hardware | |
334 | * and to set up the interrupt handler in the right order. | |
335 | * | |
336 | * Dev_id must be globally unique. Normally the address of the | |
337 | * device data structure is used as the cookie. Since the handler | |
338 | * receives this value it makes sense to use it. | |
339 | * | |
340 | * If your interrupt is shared you must pass a non NULL dev_id | |
341 | * as this is required when freeing the interrupt. | |
342 | * | |
343 | * Flags: | |
344 | * | |
345 | * SA_SHIRQ Interrupt is shared | |
346 | * | |
347 | * SA_INTERRUPT Disable local interrupts while processing | |
348 | * | |
349 | * SA_SAMPLE_RANDOM The interrupt can be used for entropy | |
350 | * | |
351 | */ | |
352 | ||
353 | int request_irq(unsigned int irq, | |
354 | irqreturn_t (*handler)(int, void *, struct pt_regs *), | |
355 | unsigned long irqflags, | |
356 | const char * devname, | |
357 | void *dev_id) | |
358 | { | |
359 | int retval; | |
360 | struct irqaction *action; | |
361 | ||
362 | #if 1 | |
363 | /* | |
364 | * Sanity-check: shared interrupts should REALLY pass in | |
365 | * a real dev-ID, otherwise we'll have trouble later trying | |
366 | * to figure out which interrupt is which (messes up the | |
367 | * interrupt freeing logic etc). | |
368 | */ | |
369 | if (irqflags & SA_SHIRQ) { | |
370 | if (!dev_id) | |
371 | printk("Bad boy: %s (at 0x%x) called us without a dev_id!\n", | |
372 | devname, (&irq)[-1]); | |
373 | } | |
374 | #endif | |
375 | ||
376 | if ((irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP) >= NR_IRQ_GROUPS) | |
377 | return -EINVAL; | |
378 | if (!handler) | |
379 | return -EINVAL; | |
380 | ||
381 | action = (struct irqaction *) kmalloc(sizeof(struct irqaction), GFP_KERNEL); | |
382 | if (!action) | |
383 | return -ENOMEM; | |
384 | ||
385 | action->handler = handler; | |
386 | action->flags = irqflags; | |
387 | action->mask = CPU_MASK_NONE; | |
388 | action->name = devname; | |
389 | action->next = NULL; | |
390 | action->dev_id = dev_id; | |
391 | ||
392 | retval = setup_irq(irq, action); | |
393 | if (retval) | |
394 | kfree(action); | |
395 | return retval; | |
396 | } | |
397 | ||
40234401 DH |
398 | EXPORT_SYMBOL(request_irq); |
399 | ||
1da177e4 LT |
400 | /** |
401 | * free_irq - free an interrupt | |
402 | * @irq: Interrupt line to free | |
403 | * @dev_id: Device identity to free | |
404 | * | |
405 | * Remove an interrupt handler. The handler is removed and if the | |
406 | * interrupt line is no longer in use by any driver it is disabled. | |
407 | * On a shared IRQ the caller must ensure the interrupt is disabled | |
408 | * on the card it drives before calling this function. The function | |
409 | * does not return until any executing interrupts for this IRQ | |
410 | * have completed. | |
411 | * | |
412 | * This function may be called from interrupt context. | |
413 | * | |
414 | * Bugs: Attempting to free an irq in a handler for the same irq hangs | |
415 | * the machine. | |
416 | */ | |
417 | ||
418 | void free_irq(unsigned int irq, void *dev_id) | |
419 | { | |
420 | struct irq_source *source; | |
421 | struct irq_group *group; | |
422 | struct irq_level *level; | |
423 | struct irqaction **p, **pp; | |
424 | unsigned long flags; | |
425 | ||
426 | if ((irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP) >= NR_IRQ_GROUPS) | |
427 | return; | |
428 | ||
429 | group = irq_groups[irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP]; | |
430 | if (!group) | |
431 | BUG(); | |
432 | ||
433 | source = group->sources[irq & (NR_IRQ_ACTIONS_PER_GROUP - 1)]; | |
434 | if (!source) | |
435 | BUG(); | |
436 | ||
437 | level = source->level; | |
438 | p = &group->actions[irq & (NR_IRQ_ACTIONS_PER_GROUP - 1)]; | |
439 | ||
440 | spin_lock_irqsave(&level->lock, flags); | |
441 | ||
442 | for (pp = p; *pp; pp = &(*pp)->next) { | |
443 | struct irqaction *action = *pp; | |
444 | ||
445 | if (action->dev_id != dev_id) | |
446 | continue; | |
447 | ||
448 | /* found it - remove from the list of entries */ | |
449 | *pp = action->next; | |
450 | ||
451 | level->usage--; | |
452 | ||
453 | if (p == pp && group->control) | |
454 | group->control(group, irq & (NR_IRQ_ACTIONS_PER_GROUP - 1), 0); | |
455 | ||
456 | if (level->usage == 0) | |
457 | __set_MASK(level - frv_irq_levels); | |
458 | ||
459 | spin_unlock_irqrestore(&level->lock,flags); | |
460 | ||
461 | #ifdef CONFIG_SMP | |
462 | /* Wait to make sure it's not being used on another CPU */ | |
463 | while (desc->status & IRQ_INPROGRESS) | |
464 | barrier(); | |
465 | #endif | |
466 | kfree(action); | |
467 | return; | |
468 | } | |
469 | } | |
470 | ||
40234401 DH |
471 | EXPORT_SYMBOL(free_irq); |
472 | ||
1da177e4 LT |
473 | /* |
474 | * IRQ autodetection code.. | |
475 | * | |
476 | * This depends on the fact that any interrupt that comes in on to an | |
477 | * unassigned IRQ will cause GxICR_DETECT to be set | |
478 | */ | |
479 | ||
480 | static DECLARE_MUTEX(probe_sem); | |
481 | ||
482 | /** | |
483 | * probe_irq_on - begin an interrupt autodetect | |
484 | * | |
485 | * Commence probing for an interrupt. The interrupts are scanned | |
486 | * and a mask of potential interrupt lines is returned. | |
487 | * | |
488 | */ | |
489 | ||
490 | unsigned long probe_irq_on(void) | |
491 | { | |
492 | down(&probe_sem); | |
493 | return 0; | |
494 | } | |
495 | ||
40234401 DH |
496 | EXPORT_SYMBOL(probe_irq_on); |
497 | ||
1da177e4 LT |
498 | /* |
499 | * Return a mask of triggered interrupts (this | |
500 | * can handle only legacy ISA interrupts). | |
501 | */ | |
502 | ||
503 | /** | |
504 | * probe_irq_mask - scan a bitmap of interrupt lines | |
505 | * @val: mask of interrupts to consider | |
506 | * | |
507 | * Scan the ISA bus interrupt lines and return a bitmap of | |
508 | * active interrupts. The interrupt probe logic state is then | |
509 | * returned to its previous value. | |
510 | * | |
511 | * Note: we need to scan all the irq's even though we will | |
512 | * only return ISA irq numbers - just so that we reset them | |
513 | * all to a known state. | |
514 | */ | |
515 | unsigned int probe_irq_mask(unsigned long xmask) | |
516 | { | |
517 | up(&probe_sem); | |
518 | return 0; | |
519 | } | |
520 | ||
40234401 DH |
521 | EXPORT_SYMBOL(probe_irq_mask); |
522 | ||
1da177e4 LT |
523 | /* |
524 | * Return the one interrupt that triggered (this can | |
525 | * handle any interrupt source). | |
526 | */ | |
527 | ||
528 | /** | |
529 | * probe_irq_off - end an interrupt autodetect | |
530 | * @xmask: mask of potential interrupts (unused) | |
531 | * | |
532 | * Scans the unused interrupt lines and returns the line which | |
533 | * appears to have triggered the interrupt. If no interrupt was | |
534 | * found then zero is returned. If more than one interrupt is | |
535 | * found then minus the first candidate is returned to indicate | |
536 | * their is doubt. | |
537 | * | |
538 | * The interrupt probe logic state is returned to its previous | |
539 | * value. | |
540 | * | |
541 | * BUGS: When used in a module (which arguably shouldnt happen) | |
542 | * nothing prevents two IRQ probe callers from overlapping. The | |
543 | * results of this are non-optimal. | |
544 | */ | |
545 | ||
546 | int probe_irq_off(unsigned long xmask) | |
547 | { | |
548 | up(&probe_sem); | |
549 | return -1; | |
550 | } | |
551 | ||
40234401 DH |
552 | EXPORT_SYMBOL(probe_irq_off); |
553 | ||
1da177e4 LT |
554 | /* this was setup_x86_irq but it seems pretty generic */ |
/* this was setup_x86_irq but it seems pretty generic */
/*
 * install an irqaction on an IRQ line
 * - returns 0 on success or -EBUSY if the line is in use and either party
 *   did not agree to share (SA_SHIRQ)
 * - on success the action is appended to the line's handler chain and the
 *   hardware is unmasked if this is the first handler
 */
int setup_irq(unsigned int irq, struct irqaction *new)
{
	struct irq_source *source;
	struct irq_group *group;
	struct irq_level *level;
	struct irqaction **p, **pp;
	unsigned long flags;

	group = irq_groups[irq >> NR_IRQ_LOG2_ACTIONS_PER_GROUP];
	if (!group)
		BUG();

	source = group->sources[irq & (NR_IRQ_ACTIONS_PER_GROUP - 1)];
	if (!source)
		BUG();

	level = source->level;

	p = &group->actions[irq & (NR_IRQ_ACTIONS_PER_GROUP - 1)];

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem,
		 * only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/* must juggle the interrupt processing stuff with interrupts disabled */
	spin_lock_irqsave(&level->lock, flags);

	/* can't share interrupts unless all parties agree to */
	if (level->usage != 0 && !(level->flags & new->flags & SA_SHIRQ)) {
		spin_unlock_irqrestore(&level->lock,flags);
		return -EBUSY;
	}

	/* add new interrupt at end of irq queue */
	pp = p;
	while (*pp)
		pp = &(*pp)->next;

	*pp = new;

	level->usage++;
	level->flags = new->flags;

	/* turn the interrupts on */
	if (level->usage == 1)
		__clr_MASK(level - frv_irq_levels);

	/* first action on a controlled group: unmask in the subsidiary PIC */
	if (p == pp && group->control)
		group->control(group, irq & (NR_IRQ_ACTIONS_PER_GROUP - 1), 1);

	spin_unlock_irqrestore(&level->lock, flags);
	register_irq_proc(irq);
	return 0;
}
622 | ||
623 | static struct proc_dir_entry * root_irq_dir; | |
624 | static struct proc_dir_entry * irq_dir [NR_IRQS]; | |
625 | ||
626 | #define HEX_DIGITS 8 | |
627 | ||
628 | static unsigned int parse_hex_value (const char *buffer, | |
629 | unsigned long count, unsigned long *ret) | |
630 | { | |
631 | unsigned char hexnum [HEX_DIGITS]; | |
632 | unsigned long value; | |
633 | int i; | |
634 | ||
635 | if (!count) | |
636 | return -EINVAL; | |
637 | if (count > HEX_DIGITS) | |
638 | count = HEX_DIGITS; | |
639 | if (copy_from_user(hexnum, buffer, count)) | |
640 | return -EFAULT; | |
641 | ||
642 | /* | |
643 | * Parse the first 8 characters as a hex string, any non-hex char | |
644 | * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same. | |
645 | */ | |
646 | value = 0; | |
647 | ||
648 | for (i = 0; i < count; i++) { | |
649 | unsigned int c = hexnum[i]; | |
650 | ||
651 | switch (c) { | |
652 | case '0' ... '9': c -= '0'; break; | |
653 | case 'a' ... 'f': c -= 'a'-10; break; | |
654 | case 'A' ... 'F': c -= 'A'-10; break; | |
655 | default: | |
656 | goto out; | |
657 | } | |
658 | value = (value << 4) | c; | |
659 | } | |
660 | out: | |
661 | *ret = value; | |
662 | return 0; | |
663 | } | |
664 | ||
665 | ||
666 | static int prof_cpu_mask_read_proc (char *page, char **start, off_t off, | |
667 | int count, int *eof, void *data) | |
668 | { | |
669 | unsigned long *mask = (unsigned long *) data; | |
670 | if (count < HEX_DIGITS+1) | |
671 | return -EINVAL; | |
672 | return sprintf (page, "%08lx\n", *mask); | |
673 | } | |
674 | ||
/*
 * write handler for /proc/irq/prof_cpu_mask
 * - parses a hex CPU mask from userspace and installs it
 * - returns the full byte count on success, negative errno on failure
 * - fixed: removed a stray show_state() call (dumps every task's state to
 *   the console - clearly debugging residue) and stopped laundering the
 *   negative error code through an unsigned long
 */
static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
				     unsigned long count, void *data)
{
	unsigned long *mask = (unsigned long *) data, full_count = count;
	unsigned long new_value;
	int err;

	err = parse_hex_value(buffer, count, &new_value);
	if (err)
		return err;

	*mask = new_value;
	return full_count;
}
689 | ||
690 | #define MAX_NAMELEN 10 | |
691 | ||
692 | static void register_irq_proc (unsigned int irq) | |
693 | { | |
694 | char name [MAX_NAMELEN]; | |
695 | ||
696 | if (!root_irq_dir || irq_dir[irq]) | |
697 | return; | |
698 | ||
699 | memset(name, 0, MAX_NAMELEN); | |
700 | sprintf(name, "%d", irq); | |
701 | ||
702 | /* create /proc/irq/1234 */ | |
703 | irq_dir[irq] = proc_mkdir(name, root_irq_dir); | |
704 | } | |
705 | ||
706 | unsigned long prof_cpu_mask = -1; | |
707 | ||
/*
 * build the /proc/irq tree: the prof_cpu_mask control file plus one
 * directory per possible IRQ
 */
void init_irq_proc (void)
{
	struct proc_dir_entry *entry;
	int i;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", 0);

	/* create /proc/irq/prof_cpu_mask */
	entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
	if (!entry)
		return;

	entry->nlink = 1;
	entry->data = (void *)&prof_cpu_mask;
	entry->read_proc = prof_cpu_mask_read_proc;
	entry->write_proc = prof_cpu_mask_write_proc;

	/*
	 * Create entries for all existing IRQs.
	 */
	for (i = 0; i < NR_IRQS; i++)
		register_irq_proc(i);
}
732 | ||
733 | /*****************************************************************************/ | |
734 | /* | |
735 | * initialise the interrupt system | |
736 | */ | |
void __init init_IRQ(void)
{
	/* hook up the CPU's own interrupt sources first */
	route_cpu_irqs();
	/* then the FPGA-based PIC */
	fpga_init();
#ifdef CONFIG_FUJITSU_MB93493
	/* and the MB93493 companion chip's interrupts, if configured */
	route_mb93493_irqs();
#endif
} /* end init_IRQ() */
744 | } /* end init_IRQ() */ |