Blackfin/ipipe: upgrade to I-pipe mainline
author    Philippe Gerum <rpm@xenomai.org>
          Thu, 17 Mar 2011 06:12:48 +0000 (02:12 -0400)
committer Mike Frysinger <vapier@gentoo.org>
          Fri, 18 Mar 2011 08:01:10 +0000 (04:01 -0400)
This patch introduces Blackfin-specific bits to support the current
tip of the interrupt pipeline development, mainly:

- 2/3-level interrupt maps (sparse IRQs)
- generic virq handling
- sysinfo v2 format for ipipe_get_sysinfo()

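For reference, a minimal, purely illustrative sketch (not part of the patch) of how a pipeline client such as a co-kernel could consume the sysinfo v2 layout: the field names come from the updated struct ipipe_sysinfo in the diff below, while the init hook, the message text, and the assumption that ipipe_get_sysinfo() is declared by <linux/ipipe.h> are made up for this example.

/*
 * Illustrative only -- not part of this patch. Reads back the v2
 * sysinfo fields introduced below; hook name and output are assumed.
 */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/ipipe.h>

static int __init example_show_ipipe_sysinfo(void)
{
	struct ipipe_sysinfo info;

	if (ipipe_get_sysinfo(&info))	/* returns 0 on success */
		return -ENODEV;

	pr_info("ipipe: %d cpu(s), cpu=%llu Hz, hrtimer irq %d at %llu Hz, hrclock %llu Hz\n",
		info.sys_nr_cpus,
		(unsigned long long)info.sys_cpu_freq,
		info.sys_hrtimer_irq,
		(unsigned long long)info.sys_hrtimer_freq,
		(unsigned long long)info.sys_hrclock_freq);

	return 0;
}
device_initcall(example_show_ipipe_sysinfo);
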
Signed-off-by: Philippe Gerum <rpm@xenomai.org>
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
arch/blackfin/include/asm/ipipe.h
arch/blackfin/include/asm/ipipe_base.h
arch/blackfin/kernel/ipipe.c
arch/blackfin/mach-common/ints-priority.c

diff --git a/arch/blackfin/include/asm/ipipe.h b/arch/blackfin/include/asm/ipipe.h
index 40f94a704c0202ace94e99429d7b67cb241d40f1..9e0cc0e2534f67376769410059b6fe76bf00600e 100644
--- a/arch/blackfin/include/asm/ipipe.h
+++ b/arch/blackfin/include/asm/ipipe.h
 #include <asm/bitops.h>
 #include <asm/atomic.h>
 #include <asm/traps.h>
+#include <asm/bitsperlong.h>
 
-#define IPIPE_ARCH_STRING     "1.12-00"
+#define IPIPE_ARCH_STRING     "1.16-01"
 #define IPIPE_MAJOR_NUMBER    1
-#define IPIPE_MINOR_NUMBER    12
-#define IPIPE_PATCH_NUMBER    0
+#define IPIPE_MINOR_NUMBER    16
+#define IPIPE_PATCH_NUMBER    1
 
 #ifdef CONFIG_SMP
 #error "I-pipe/blackfin: SMP not implemented"
@@ -55,25 +56,19 @@ do {                                                \
 #define task_hijacked(p)                                               \
        ({                                                              \
                int __x__ = __ipipe_root_domain_p;                      \
-               __clear_bit(IPIPE_SYNC_FLAG, &ipipe_root_cpudom_var(status)); \
                if (__x__)                                              \
-                       hard_local_irq_enable();                                \
+                       hard_local_irq_enable();                        \
                !__x__;                                                 \
        })
 
 struct ipipe_domain;
 
 struct ipipe_sysinfo {
-
-       int ncpus;              /* Number of CPUs on board */
-       u64 cpufreq;            /* CPU frequency (in Hz) */
-
-       /* Arch-dependent block */
-
-       struct {
-               unsigned tmirq; /* Timer tick IRQ */
-               u64 tmfreq;     /* Timer frequency */
-       } archdep;
+       int sys_nr_cpus;        /* Number of CPUs on board */
+       int sys_hrtimer_irq;    /* hrtimer device IRQ */
+       u64 sys_hrtimer_freq;   /* hrtimer device frequency */
+       u64 sys_hrclock_freq;   /* hrclock device frequency */
+       u64 sys_cpu_freq;       /* CPU frequency (Hz) */
 };
 
 #define ipipe_read_tsc(t)                                      \
@@ -115,9 +110,19 @@ void __ipipe_enable_irqdesc(struct ipipe_domain *ipd,
 void __ipipe_disable_irqdesc(struct ipipe_domain *ipd,
                             unsigned irq);
 
-#define __ipipe_enable_irq(irq)                (irq_desc[irq].chip->unmask(irq))
+#define __ipipe_enable_irq(irq)                                                \
+       do {                                                            \
+               struct irq_desc *desc = irq_to_desc(irq);               \
+               struct irq_chip *chip = get_irq_desc_chip(desc);        \
+               chip->irq_unmask(&desc->irq_data);                      \
+       } while (0)
 
-#define __ipipe_disable_irq(irq)       (irq_desc[irq].chip->mask(irq))
+#define __ipipe_disable_irq(irq)                                       \
+       do {                                                            \
+               struct irq_desc *desc = irq_to_desc(irq);               \
+               struct irq_chip *chip = get_irq_desc_chip(desc);        \
+               chip->irq_mask(&desc->irq_data);                        \
+       } while (0)
 
 static inline int __ipipe_check_tickdev(const char *devname)
 {
@@ -128,12 +133,11 @@ void __ipipe_enable_pipeline(void);
 
 #define __ipipe_hook_critical_ipi(ipd) do { } while (0)
 
-#define __ipipe_sync_pipeline  ___ipipe_sync_pipeline
-void ___ipipe_sync_pipeline(unsigned long syncmask);
+void ___ipipe_sync_pipeline(void);
 
 void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs);
 
-int __ipipe_get_irq_priority(unsigned irq);
+int __ipipe_get_irq_priority(unsigned int irq);
 
 void __ipipe_serial_debug(const char *fmt, ...);
 
@@ -152,7 +156,10 @@ static inline unsigned long __ipipe_ffnz(unsigned long ul)
        return ffs(ul) - 1;
 }
 
-#define __ipipe_run_irqtail()  /* Must be a macro */                   \
+#define __ipipe_do_root_xirq(ipd, irq)                                 \
+       ((ipd)->irqs[irq].handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs)))
+
+#define __ipipe_run_irqtail(irq)  /* Must be a macro */                        \
        do {                                                            \
                unsigned long __pending;                                \
                CSYNC();                                                \
@@ -164,42 +171,8 @@ static inline unsigned long __ipipe_ffnz(unsigned long ul)
                }                                                       \
        } while (0)
 
-#define __ipipe_run_isr(ipd, irq)                                      \
-       do {                                                            \
-               if (!__ipipe_pipeline_head_p(ipd))                      \
-                       hard_local_irq_enable();                                \
-               if (ipd == ipipe_root_domain) {                         \
-                       if (unlikely(ipipe_virtual_irq_p(irq))) {       \
-                               irq_enter();                            \
-                               ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \
-                               irq_exit();                             \
-                       } else                                          \
-                               ipd->irqs[irq].handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs)); \
-               } else {                                                \
-                       __clear_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \
-                       ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \
-                       /* Attempt to exit the outer interrupt level before \
-                        * starting the deferred IRQ processing. */     \
-                       __ipipe_run_irqtail();                          \
-                       __set_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \
-               }                                                       \
-               hard_local_irq_disable();                                       \
-       } while (0)
-
 #define __ipipe_syscall_watched_p(p, sc)       \
-       (((p)->flags & PF_EVNOTIFY) || (unsigned long)sc >= NR_syscalls)
-
-void ipipe_init_irq_threads(void);
-
-int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc);
-
-#ifdef CONFIG_TICKSOURCE_CORETMR
-#define IRQ_SYSTMR             IRQ_CORETMR
-#define IRQ_PRIOTMR            IRQ_CORETMR
-#else
-#define IRQ_SYSTMR             IRQ_TIMER0
-#define IRQ_PRIOTMR            CONFIG_IRQ_TIMER0
-#endif
+       (ipipe_notifier_enabled_p(p) || (unsigned long)sc >= NR_syscalls)
 
 #ifdef CONFIG_BF561
 #define bfin_write_TIMER_DISABLE(val)  bfin_write_TMRS8_DISABLE(val)
@@ -219,11 +192,11 @@ int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc);
 
 #define task_hijacked(p)               0
 #define ipipe_trap_notify(t, r)        0
+#define __ipipe_root_tick_p(regs)      1
 
-#define ipipe_init_irq_threads()               do { } while (0)
-#define ipipe_start_irq_thread(irq, desc)      0
+#endif /* !CONFIG_IPIPE */
 
-#ifndef CONFIG_TICKSOURCE_GPTMR0
+#ifdef CONFIG_TICKSOURCE_CORETMR
 #define IRQ_SYSTMR             IRQ_CORETMR
 #define IRQ_PRIOTMR            IRQ_CORETMR
 #else
@@ -231,10 +204,6 @@ int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc);
 #define IRQ_PRIOTMR            CONFIG_IRQ_TIMER0
 #endif
 
-#define __ipipe_root_tick_p(regs)      1
-
-#endif /* !CONFIG_IPIPE */
-
 #define ipipe_update_tick_evtdev(evtdev)       do { } while (0)
 
 #endif /* !__ASM_BLACKFIN_IPIPE_H */
diff --git a/arch/blackfin/include/asm/ipipe_base.h b/arch/blackfin/include/asm/ipipe_base.h
index 00409201d9edc7d69a225562caef7582e636d447..84a4ffd367475ef9c2cd32ab54f242844100fb12 100644
--- a/arch/blackfin/include/asm/ipipe_base.h
+++ b/arch/blackfin/include/asm/ipipe_base.h
 
 #ifdef CONFIG_IPIPE
 
+#include <asm/bitsperlong.h>
+#include <mach/irq.h>
+
 #define IPIPE_NR_XIRQS         NR_IRQS
-#define IPIPE_IRQ_ISHIFT       5       /* 2^5 for 32bits arch. */
 
 /* Blackfin-specific, per-cpu pipeline status */
 #define IPIPE_SYNCDEFER_FLAG   15
 #define IPIPE_EVENT_INIT       (IPIPE_FIRST_EVENT + 4)
 #define IPIPE_EVENT_EXIT       (IPIPE_FIRST_EVENT + 5)
 #define IPIPE_EVENT_CLEANUP    (IPIPE_FIRST_EVENT + 6)
-#define IPIPE_LAST_EVENT       IPIPE_EVENT_CLEANUP
+#define IPIPE_EVENT_RETURN     (IPIPE_FIRST_EVENT + 7)
+#define IPIPE_LAST_EVENT       IPIPE_EVENT_RETURN
 #define IPIPE_NR_EVENTS                (IPIPE_LAST_EVENT + 1)
 
 #define IPIPE_TIMER_IRQ                IRQ_CORETMR
 
+#define __IPIPE_FEATURE_SYSINFO_V2     1
+
 #ifndef __ASSEMBLY__
 
 extern unsigned long __ipipe_root_status; /* Alias to ipipe_root_cpudom_var(status) */
@@ -63,6 +68,8 @@ void __ipipe_unlock_root(void);
 
 #endif /* !__ASSEMBLY__ */
 
+#define __IPIPE_FEATURE_SYSINFO_V2     1
+
 #endif /* CONFIG_IPIPE */
 
 #endif /* !__ASM_BLACKFIN_IPIPE_BASE_H */
diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c
index 3b1da4aff2a1ed560f2c7041d85ccdf571f4972e..f37019c847c9b643c04676d0a2423f870286f16c 100644
--- a/arch/blackfin/kernel/ipipe.c
+++ b/arch/blackfin/kernel/ipipe.c
@@ -154,7 +154,7 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
         * pending for it.
         */
        if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
-           ipipe_head_cpudom_var(irqpend_himask) == 0)
+           !__ipipe_ipending_p(ipipe_head_cpudom_ptr()))
                goto out;
 
        __ipipe_walk_pipeline(head);
@@ -185,25 +185,21 @@ void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
 }
 EXPORT_SYMBOL(__ipipe_disable_irqdesc);
 
-int __ipipe_syscall_root(struct pt_regs *regs)
+asmlinkage int __ipipe_syscall_root(struct pt_regs *regs)
 {
        struct ipipe_percpu_domain_data *p;
-       unsigned long flags;
+       void (*hook)(void);
        int ret;
 
+       WARN_ON_ONCE(irqs_disabled_hw());
+
        /*
-        * We need to run the IRQ tail hook whenever we don't
-        * propagate a syscall to higher domains, because we know that
-        * important operations might be pending there (e.g. Xenomai
-        * deferred rescheduling).
+        * We need to run the IRQ tail hook each time we intercept a
+        * syscall, because we know that important operations might be
+        * pending there (e.g. Xenomai deferred rescheduling).
         */
-
-       if (regs->orig_p0 < NR_syscalls) {
-               void (*hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
-               hook();
-               if ((current->flags & PF_EVNOTIFY) == 0)
-                       return 0;
-       }
+       hook = (__typeof__(hook))__ipipe_irq_tail_hook;
+       hook();
 
        /*
         * This routine either returns:
@@ -214,51 +210,47 @@ int __ipipe_syscall_root(struct pt_regs *regs)
         * tail work has to be performed (for handling signals etc).
         */
 
-       if (!__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
+       if (!__ipipe_syscall_watched_p(current, regs->orig_p0) ||
+           !__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
                return 0;
 
        ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs);
 
-       flags = hard_local_irq_save();
+       hard_local_irq_disable();
 
-       if (!__ipipe_root_domain_p) {
-               hard_local_irq_restore(flags);
-               return 1;
+       /*
+        * This is the end of the syscall path, so we may
+        * safely assume a valid Linux task stack here.
+        */
+       if (current->ipipe_flags & PF_EVTRET) {
+               current->ipipe_flags &= ~PF_EVTRET;
+               __ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
        }
 
-       p = ipipe_root_cpudom_ptr();
-       if ((p->irqpend_himask & IPIPE_IRQMASK_VIRT) != 0)
-               __ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT);
+       if (!__ipipe_root_domain_p)
+               ret = -1;
+       else {
+               p = ipipe_root_cpudom_ptr();
+               if (__ipipe_ipending_p(p))
+                       __ipipe_sync_pipeline();
+       }
 
-       hard_local_irq_restore(flags);
+       hard_local_irq_enable();
 
        return -ret;
 }
 
-unsigned long ipipe_critical_enter(void (*syncfn) (void))
-{
-       unsigned long flags;
-
-       flags = hard_local_irq_save();
-
-       return flags;
-}
-
-void ipipe_critical_exit(unsigned long flags)
-{
-       hard_local_irq_restore(flags);
-}
-
 static void __ipipe_no_irqtail(void)
 {
 }
 
 int ipipe_get_sysinfo(struct ipipe_sysinfo *info)
 {
-       info->ncpus = num_online_cpus();
-       info->cpufreq = ipipe_cpu_freq();
-       info->archdep.tmirq = IPIPE_TIMER_IRQ;
-       info->archdep.tmfreq = info->cpufreq;
+       info->sys_nr_cpus = num_online_cpus();
+       info->sys_cpu_freq = ipipe_cpu_freq();
+       info->sys_hrtimer_irq = IPIPE_TIMER_IRQ;
+       info->sys_hrtimer_freq = __ipipe_core_clock;
+       info->sys_hrclock_freq = __ipipe_core_clock;
 
        return 0;
 }
@@ -289,6 +281,7 @@ int ipipe_trigger_irq(unsigned irq)
 asmlinkage void __ipipe_sync_root(void)
 {
        void (*irq_tail_hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
+       struct ipipe_percpu_domain_data *p;
        unsigned long flags;
 
        BUG_ON(irqs_disabled());
@@ -300,19 +293,20 @@ asmlinkage void __ipipe_sync_root(void)
 
        clear_thread_flag(TIF_IRQ_SYNC);
 
-       if (ipipe_root_cpudom_var(irqpend_himask) != 0)
-               __ipipe_sync_pipeline(IPIPE_IRQMASK_ANY);
+       p = ipipe_root_cpudom_ptr();
+       if (__ipipe_ipending_p(p))
+               __ipipe_sync_pipeline();
 
        hard_local_irq_restore(flags);
 }
 
-void ___ipipe_sync_pipeline(unsigned long syncmask)
+void ___ipipe_sync_pipeline(void)
 {
        if (__ipipe_root_domain_p &&
            test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status)))
                return;
 
-       __ipipe_sync_stage(syncmask);
+       __ipipe_sync_stage();
 }
 
 void __ipipe_disable_root_irqs_hw(void)
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 8e9d3cc30885015066882f37e7015165b30ff6db..6cd52395a999866f778d6d89e8b879ee7ab8386e 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -15,6 +15,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/seq_file.h>
 #include <linux/irq.h>
+#include <linux/sched.h>
 #ifdef CONFIG_IPIPE
 #include <linux/ipipe.h>
 #endif
@@ -556,10 +557,9 @@ static void bfin_demux_mac_status_irq(unsigned int int_err_irq,
 static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
 {
 #ifdef CONFIG_IPIPE
-       _set_irq_handler(irq, handle_level_irq);
-#else
-       __set_irq_handler_unlocked(irq, handle);
+       handle = handle_level_irq;
 #endif
+       __set_irq_handler_unlocked(irq, handle);
 }
 
 static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS);
@@ -1392,7 +1392,7 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
        struct ipipe_domain *this_domain = __ipipe_current_domain;
        struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop;
        struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
-       int irq, s;
+       int irq, s = 0;
 
        if (likely(vec == EVT_IVTMR_P))
                irq = IRQ_CORETMR;
@@ -1442,6 +1442,21 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
                        __raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
        }
 
+       /*
+        * We don't want Linux interrupt handlers to run at the
+        * current core priority level (i.e. < EVT15), since this
+        * might delay other interrupts handled by a high priority
+        * domain. Here is what we do instead:
+        *
+        * - we raise the SYNCDEFER bit to prevent
+        * __ipipe_handle_irq() from syncing the pipeline for the root
+        * stage for the incoming interrupt. Upon return, that IRQ is
+        * pending in the interrupt log.
+        *
+        * - we raise the TIF_IRQ_SYNC bit for the current thread, so
+        * that _schedule_and_signal_from_int will eventually sync the
+        * pipeline from EVT15.
+        */
        if (this_domain == ipipe_root_domain) {
                s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
                barrier();
@@ -1451,6 +1466,24 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
        __ipipe_handle_irq(irq, regs);
        ipipe_trace_irq_exit(irq);
 
+       if (user_mode(regs) &&
+           !ipipe_test_foreign_stack() &&
+           (current->ipipe_flags & PF_EVTRET) != 0) {
+               /*
+                * Testing for user_regs() does NOT fully eliminate
+                * foreign stack contexts, because of the forged
+                * interrupt returns we do through
+                * __ipipe_call_irqtail. In that case, we might have
+                * preempted a foreign stack context in a high
+                * priority domain, with a single interrupt level now
+                * pending after the irqtail unwinding is done. In
+                * which case user_mode() is now true, and the event
+                * gets dispatched spuriously.
+                */
+               current->ipipe_flags &= ~PF_EVTRET;
+               __ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
+       }
+
        if (this_domain == ipipe_root_domain) {
                set_thread_flag(TIF_IRQ_SYNC);
                if (!s) {