/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003, 04, 07 Ralf Baechle <ralf@linux-mips.org>
 * Copyright (C) MIPS Technologies, Inc.
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#ifndef _ASM_HAZARDS_H
#define _ASM_HAZARDS_H


#ifdef __ASSEMBLY__
#define ASMMACRO(name, code...) .macro name; code; .endm
#else

#define ASMMACRO(name, code...)					\
__asm__(".macro " #name "; " #code "; .endm");			\
								\
static inline void name(void)					\
{								\
	__asm__ __volatile__ (#name);				\
}
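
/*
 * For illustration, ASMMACRO(_ehb, sll $0, $0, 3) in the C branch above
 * expands to an assembler macro plus an inline wrapper, so C code can
 * invoke the hazard sequence as an ordinary function call:
 *
 *	__asm__(".macro _ehb; sll $0, $0, 3; .endm");
 *	static inline void _ehb(void)
 *	{
 *		__asm__ __volatile__ ("_ehb");
 *	}
 */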

/*
 * MIPS R2 instruction hazard barrier. Needs to be called as a subroutine.
 */
extern void mips_ihb(void);

#endif

ASMMACRO(_ssnop,
	 sll	$0, $0, 1
	)

ASMMACRO(_ehb,
	 sll	$0, $0, 3
	)
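
/*
 * The sll forms are the architectural encodings: ssnop is defined as
 * sll $0, $0, 1 and the MIPS32R2 ehb as sll $0, $0, 3.  Both decode as
 * harmless nops on CPUs that predate them, which is what makes them
 * safe to emit unconditionally here.
 */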

/*
 * TLB hazards
 */
#if defined(CONFIG_CPU_MIPSR2)

/*
 * MIPSR2 defines ehb for hazard avoidance
 */

ASMMACRO(mtc0_tlbw_hazard,
	 _ehb
	)
ASMMACRO(tlbw_use_hazard,
	 _ehb
	)
ASMMACRO(tlb_probe_hazard,
	 _ehb
	)
ASMMACRO(irq_enable_hazard,
	 _ehb
	)
ASMMACRO(irq_disable_hazard,
	 _ehb
	)
ASMMACRO(back_to_back_c0_hazard,
	 _ehb
	)
/*
 * gcc has a tradition of miscompiling the previous construct when the
 * address of a label is passed as an argument to inline assembler.
 * Gas, on the other hand, has the annoying difference between la and
 * dla, which are only usable for 32-bit and 64-bit code respectively,
 * so neither can be used without conditional compilation.  The
 * alternative is switching the assembler to 64-bit code, which happens
 * to work right even for 32-bit code ...
 */
#define instruction_hazard()					\
do {								\
	unsigned long tmp;					\
								\
	__asm__ __volatile__(					\
	"	.set	mips64r2	\n"			\
	"	dla	%0, 1f		\n"			\
	"	jr.hb	%0		\n"			\
	"	.set	mips0		\n"			\
	"1:				\n"			\
	: "=r" (tmp));						\
} while (0)
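
/*
 * jr.hb is the R2 jump-register-with-hazard-barrier: besides transferring
 * control it clears instruction hazards, so jumping to the immediately
 * following label 1: turns it into a pure barrier.  dla is used so that,
 * with the assembler switched to 64-bit mode, the same sequence works for
 * both 32-bit and 64-bit kernels, as the comment above explains.
 */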

#elif defined(CONFIG_CPU_R10000)

/*
 * The R10000 rocks - all hazards are handled in hardware, so this becomes a
 * no-brainer.
 */

ASMMACRO(mtc0_tlbw_hazard,
	)
ASMMACRO(tlbw_use_hazard,
	)
ASMMACRO(tlb_probe_hazard,
	)
ASMMACRO(irq_enable_hazard,
	)
ASMMACRO(irq_disable_hazard,
	)
ASMMACRO(back_to_back_c0_hazard,
	)
#define instruction_hazard() do { } while (0)

#elif defined(CONFIG_CPU_RM9000)

/*
 * RM9000 hazards.  When the JTLB is updated by tlbwi or tlbwr, a subsequent
 * use of the JTLB for instructions should not occur for 4 cpu cycles and use
 * for data translations should not occur for 3 cpu cycles.
 */

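/*
 * The four _ssnops below pad for the longer of the two delays (4 cycles
 * for instruction use of the JTLB), which also covers the 3 cycle data
 * translation case.
 */
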
ASMMACRO(mtc0_tlbw_hazard,
	 _ssnop; _ssnop; _ssnop; _ssnop
	)
ASMMACRO(tlbw_use_hazard,
	 _ssnop; _ssnop; _ssnop; _ssnop
	)
ASMMACRO(tlb_probe_hazard,
	 _ssnop; _ssnop; _ssnop; _ssnop
	)
ASMMACRO(irq_enable_hazard,
	)
ASMMACRO(irq_disable_hazard,
	)
ASMMACRO(back_to_back_c0_hazard,
	)
#define instruction_hazard() do { } while (0)

#elif defined(CONFIG_CPU_SB1)

/*
 * Mostly like the R4000 for historical reasons
 */
ASMMACRO(mtc0_tlbw_hazard,
	)
ASMMACRO(tlbw_use_hazard,
	)
ASMMACRO(tlb_probe_hazard,
	)
ASMMACRO(irq_enable_hazard,
	)
ASMMACRO(irq_disable_hazard,
	 _ssnop; _ssnop; _ssnop
	)
ASMMACRO(back_to_back_c0_hazard,
	)
#define instruction_hazard() do { } while (0)

#else

/*
 * Finally the catchall case for all other processors, including the R4000,
 * R4400, R4600, R4700, R5000, RM7000, NEC VR41xx etc.
 *
 * The taken branch will result in a two cycle penalty for the two killed
 * instructions on R4000 / R4400.  Other processors only have a single cycle
 * hazard so this is a nice trick to get optimal code for a range of
 * processors.
 */
ASMMACRO(mtc0_tlbw_hazard,
	 nop; nop
	)
ASMMACRO(tlbw_use_hazard,
	 nop; nop; nop
	)
ASMMACRO(tlb_probe_hazard,
	 nop; nop; nop
	)
ASMMACRO(irq_enable_hazard,
	 _ssnop; _ssnop; _ssnop
	)
ASMMACRO(irq_disable_hazard,
	 nop; nop; nop
	)
ASMMACRO(back_to_back_c0_hazard,
	 _ssnop; _ssnop; _ssnop
	)
#define instruction_hazard() do { } while (0)
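
/*
 * Note that _ssnop rather than a plain nop is used for the interrupt and
 * back-to-back c0 hazards: on superscalar CPUs covered by this catchall,
 * such as the RM7000, ssnop is guaranteed to consume an issue slot of its
 * own, whereas several plain nops may issue in the same cycle.
 */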

#endif


/* FPU hazards */

#if defined(CONFIG_CPU_SB1)
ASMMACRO(enable_fpu_hazard,
	 .set	push;
	 .set	mips64;
	 .set	noreorder;
	 _ssnop;
	 bnezl	$0, .+4;
	 _ssnop;
	 .set	pop
)
ASMMACRO(disable_fpu_hazard,
)
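
/*
 * About the bnezl above: $0 is hardwired to zero, so the branch can never
 * be taken, and because bnezl is a branch-likely instruction the _ssnop in
 * its delay slot is annulled.  The never-taken branch-likely serves as a
 * serialization point on the SB1 after the FPU has been enabled.
 */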

#elif defined(CONFIG_CPU_MIPSR2)
ASMMACRO(enable_fpu_hazard,
	 _ehb
)
ASMMACRO(disable_fpu_hazard,
	 _ehb
)
#else
ASMMACRO(enable_fpu_hazard,
	 nop; nop; nop; nop
)
ASMMACRO(disable_fpu_hazard,
	 _ehb
)
#endif

#endif /* _ASM_HAZARDS_H */