/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_HW_IRQ_H
#define _ASM_POWERPC_HW_IRQ_H

#ifdef __KERNEL__

#include <linux/errno.h>
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/processor.h>

extern void timer_interrupt(struct pt_regs *);

#ifdef CONFIG_PPC64
#include <asm/paca.h>

/*
 * On 64-bit, interrupts are disabled "lazily": the normal irq-off paths
 * only clear the soft_enabled byte in the per-CPU paca (always addressed
 * through r13) and leave MSR[EE] alone.  The flags value handled by the
 * raw_* macros below is that byte: 1 = enabled, 0 = (soft-)disabled.
 */
static inline unsigned long local_get_flags(void)
{
        unsigned long flags;

        /* Read paca->soft_enabled. */
        __asm__ __volatile__("lbz %0,%1(13)"
                : "=r" (flags)
                : "i" (offsetof(struct paca_struct, soft_enabled)));

        return flags;
}

static inline unsigned long raw_local_irq_disable(void)
{
        unsigned long flags, zero;

        /* Fetch the old value of paca->soft_enabled and store 0 back. */
        __asm__ __volatile__("li %1,0; lbz %0,%2(13); stb %1,%2(13)"
                : "=r" (flags), "=&r" (zero)
                : "i" (offsetof(struct paca_struct, soft_enabled))
                : "memory");

        return flags;
}

extern void raw_local_irq_restore(unsigned long);
extern void iseries_handle_interrupts(void);

#define raw_local_irq_enable()          raw_local_irq_restore(1)
#define raw_local_save_flags(flags)     ((flags) = local_get_flags())
#define raw_local_irq_save(flags)       ((flags) = raw_local_irq_disable())

#define raw_irqs_disabled()             (local_get_flags() == 0)
#define raw_irqs_disabled_flags(flags)  ((flags) == 0)

/* These really do flip MSR[EE], using the mtmsrd L=1 form. */
#define __hard_irq_enable()     __mtmsrd(mfmsr() | MSR_EE, 1)
#define __hard_irq_disable()    __mtmsrd(mfmsr() & ~MSR_EE, 1)

/* Hard-disable and record that both the soft and hard masks are off. */
#define hard_irq_disable()                      \
        do {                                    \
                __hard_irq_disable();           \
                get_paca()->soft_enabled = 0;   \
                get_paca()->hard_enabled = 0;   \
        } while(0)

static inline int irqs_disabled_flags(unsigned long flags)
{
        return flags == 0;
}
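/*
 * Usage sketch, added for illustration and not part of the original
 * header: the generic local_irq_save()/local_irq_restore() wrappers are
 * expected to resolve to the raw_* primitives above, so a per-CPU
 * critical section on 64-bit looks roughly like this:
 *
 *      unsigned long flags;
 *
 *      raw_local_irq_save(flags);      // flags = old soft_enabled, now 0
 *      // ... critical section: external interrupts are held off ...
 *      raw_local_irq_restore(flags);   // re-enable; pending work is replayed
 *
 * "Replayed" refers to raw_local_irq_restore() in the irq code, which is
 * responsible for handling anything that arrived while soft-disabled.
 */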

#else

/*
 * On 32-bit there is no lazy-disable scheme: the flags value is the raw
 * MSR, and interrupts are masked by clearing MSR[EE] directly (wrtee/wrteei
 * on BookE, mtmsr elsewhere).
 */
#if defined(CONFIG_BOOKE)
#define SET_MSR_EE(x)   mtmsr(x)
#define local_irq_restore(flags)        __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory")
#else
#define SET_MSR_EE(x)   mtmsr(x)
#define local_irq_restore(flags)        mtmsr(flags)
#endif

static inline void local_irq_disable(void)
{
#ifdef CONFIG_BOOKE
        __asm__ __volatile__("wrteei 0": : :"memory");
#else
        unsigned long msr;
        __asm__ __volatile__("": : :"memory");
        msr = mfmsr();
        SET_MSR_EE(msr & ~MSR_EE);
#endif
}

static inline void local_irq_enable(void)
{
#ifdef CONFIG_BOOKE
        __asm__ __volatile__("wrteei 1": : :"memory");
#else
        unsigned long msr;
        __asm__ __volatile__("": : :"memory");
        msr = mfmsr();
        SET_MSR_EE(msr | MSR_EE);
#endif
}

static inline void local_irq_save_ptr(unsigned long *flags)
{
        unsigned long msr;
        msr = mfmsr();
        *flags = msr;
#ifdef CONFIG_BOOKE
        __asm__ __volatile__("wrteei 0": : :"memory");
#else
        SET_MSR_EE(msr & ~MSR_EE);
#endif
        __asm__ __volatile__("": : :"memory");
}

#define local_save_flags(flags) ((flags) = mfmsr())
#define local_irq_save(flags)   local_irq_save_ptr(&flags)
#define irqs_disabled()         ((mfmsr() & MSR_EE) == 0)

#define hard_irq_enable()       local_irq_enable()
#define hard_irq_disable()      local_irq_disable()

static inline int irqs_disabled_flags(unsigned long flags)
{
        return (flags & MSR_EE) == 0;
}
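/*
 * Illustrative note, not in the original source: on 32-bit the saved
 * flags value is the raw MSR rather than a 0/1 soft-enable byte, so a
 * check against a saved value masks MSR_EE:
 *
 *      unsigned long flags;
 *
 *      local_irq_save(flags);
 *      if (!irqs_disabled_flags(flags))
 *              ;       // interrupts were on before the save
 *      local_irq_restore(flags);
 */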

#endif /* CONFIG_PPC64 */

/*
 * interrupt-retrigger: should we handle this via lost interrupts and IPIs
 * or should we not care like we do now ? --BenH.
 */
struct irq_chip;

#ifdef CONFIG_PERF_COUNTERS
/*
 * The perf_counter_pending flag also lives in the paca and is accessed
 * through r13, just like soft_enabled above.
 */
static inline unsigned long test_perf_counter_pending(void)
{
        unsigned long x;

        asm volatile("lbz %0,%1(13)"
                : "=r" (x)
                : "i" (offsetof(struct paca_struct, perf_counter_pending)));
        return x;
}

static inline void set_perf_counter_pending(void)
{
        asm volatile("stb %0,%1(13)" : :
                "r" (1),
                "i" (offsetof(struct paca_struct, perf_counter_pending)));
}

static inline void clear_perf_counter_pending(void)
{
        asm volatile("stb %0,%1(13)" : :
                "r" (0),
                "i" (offsetof(struct paca_struct, perf_counter_pending)));
}

#else

static inline unsigned long test_perf_counter_pending(void)
{
        return 0;
}

static inline void set_perf_counter_pending(void) {}
static inline void clear_perf_counter_pending(void) {}
#endif /* CONFIG_PERF_COUNTERS */
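/*
 * Usage sketch, added for illustration only: the PMU interrupt code would
 * set the pending flag when counter work cannot be run right away, and the
 * interrupt-reenable path would test and clear it, roughly:
 *
 *      if (test_perf_counter_pending()) {
 *              clear_perf_counter_pending();
 *              perf_counter_do_pending();      // hypothetical consumer
 *      }
 */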

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_HW_IRQ_H */