/*
 * Copyright 2017 WebAssembly Community Group participants
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef wasm_ir_effects_h
#define wasm_ir_effects_h

namespace wasm {

// Look for side effects, including control flow
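//
// A minimal usage sketch, with hypothetical names (passOptions would be the
// current pass's options, expr the expression being optimized):
//
//   EffectAnalyzer effects(passOptions, expr);
//   if (!effects.hasSideEffects()) {
//     // expr computes a value without observable effects; if the value is
//     // unused, a pass may remove expr entirely.
//   }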

struct EffectAnalyzer : public PostWalker<EffectAnalyzer> {
  EffectAnalyzer(PassOptions& passOptions, Expression* ast = nullptr) {
    ignoreImplicitTraps = passOptions.ignoreImplicitTraps;
    debugInfo = passOptions.debugInfo;
    if (ast) analyze(ast);
  }

  bool ignoreImplicitTraps;
  bool debugInfo;

  void analyze(Expression* ast) {
    breakNames.clear();
    walk(ast);
    // if we are left with breaks, they are external
    if (breakNames.size() > 0) branches = true;
  }

  bool branches = false; // branches out of this expression, returns, infinite loops, etc.
  bool calls = false;
  std::set<Index> localsRead;
  std::set<Index> localsWritten;
  std::set<Name> globalsRead;
  std::set<Name> globalsWritten;
  bool readsMemory = false;
  bool writesMemory = false;
  bool implicitTrap = false; // a load or div/rem, which may trap. we ignore trap
                             // differences, so it is ok to reorder these, but we can't
                             // remove them, as they count as side effects, and we
                             // can't move them in a way that would cause other noticeable
                             // (global) side effects
  bool isAtomic = false; // An atomic load/store/RMW/Cmpxchg or an operator that
                         // has a defined ordering wrt atomics (e.g. grow_memory)

  bool accessesLocal() { return localsRead.size() + localsWritten.size() > 0; }
  bool accessesGlobal() { return globalsRead.size() + globalsWritten.size() > 0; }
  bool accessesMemory() { return calls || readsMemory || writesMemory; }
  bool hasGlobalSideEffects() { return calls || globalsWritten.size() > 0 || writesMemory || isAtomic; }
  bool hasSideEffects() { return hasGlobalSideEffects() || localsWritten.size() > 0 || branches || implicitTrap; }
  bool hasAnything() { return branches || calls || accessesLocal() || readsMemory || writesMemory || accessesGlobal() || implicitTrap || isAtomic; }
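
  // For example, a set_local only writes a local: hasSideEffects() is true,
  // but hasGlobalSideEffects() is false, since nothing outside the current
  // function can observe the write. A store to linear memory has both.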

  // check if we break to anything external from ourselves
  bool hasExternalBreakTargets() { return !breakNames.empty(); }

  // checks if these effects would invalidate another set (e.g., if we write,
  // we invalidate someone that reads; they can't be moved past us)
  bool invalidates(EffectAnalyzer& other) {
    if (branches || other.branches
        || ((writesMemory || calls) && other.accessesMemory())
        || (accessesMemory() && (other.writesMemory || other.calls))) {
      return true;
    }
    // All atomics are sequentially consistent for now, and ordered wrt other
    // memory references.
    if ((isAtomic && other.accessesMemory()) ||
        (other.isAtomic && accessesMemory())) {
      return true;
    }
    for (auto local : localsWritten) {
      if (other.localsWritten.count(local) || other.localsRead.count(local)) {
        return true;
      }
    }
    for (auto local : localsRead) {
      if (other.localsWritten.count(local)) return true;
    }
    if ((accessesGlobal() && other.calls) || (other.accessesGlobal() && calls)) {
      return true;
    }
    for (auto global : globalsWritten) {
      if (other.globalsWritten.count(global) || other.globalsRead.count(global)) {
        return true;
      }
    }
    for (auto global : globalsRead) {
      if (other.globalsWritten.count(global)) return true;
    }
    // we are ok to reorder implicit traps, but not conditionalize them
    if ((implicitTrap && other.branches) || (other.implicitTrap && branches)) {
      return true;
    }
    // we can't reorder an implicit trap in a way that alters global state
    if ((implicitTrap && other.hasGlobalSideEffects()) || (other.implicitTrap && hasGlobalSideEffects())) {
      return true;
    }
    return false;
  }
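
  // A sketch of typical use, with hypothetical names: to decide whether two
  // adjacent expressions can be reordered, analyze each and check for
  // invalidation. The check already considers both directions, so one call
  // suffices:
  //
  //   EffectAnalyzer aEffects(passOptions, a);
  //   EffectAnalyzer bEffects(passOptions, b);
  //   bool canReorder = !aEffects.invalidates(bEffects);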

  void mergeIn(EffectAnalyzer& other) {
    branches = branches || other.branches;
    calls = calls || other.calls;
    readsMemory = readsMemory || other.readsMemory;
    writesMemory = writesMemory || other.writesMemory;
    for (auto i : other.localsRead) localsRead.insert(i);
    for (auto i : other.localsWritten) localsWritten.insert(i);
    for (auto i : other.globalsRead) globalsRead.insert(i);
    for (auto i : other.globalsWritten) globalsWritten.insert(i);
  }
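
  // Sketch of accumulating the effects of several child expressions, e.g.
  // while scanning a block (hypothetical names):
  //
  //   EffectAnalyzer combined(passOptions);
  //   for (auto* child : block->list) {
  //     EffectAnalyzer childEffects(passOptions, child);
  //     combined.mergeIn(childEffects);
  //   }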

  // The checks above happen after the node's children were processed, in the
  // order of execution. We must also check for control flow that happens
  // before the children, i.e., loops.
  bool checkPre(Expression* curr) {
    if (curr->is<Loop>()) {
      branches = true;
      return true;
    }
    return false;
  }

  bool checkPost(Expression* curr) {
    visit(curr);
    if (curr->is<Loop>()) {
      branches = true;
    }
    return hasAnything();
  }

  std::set<Name> breakNames;

  void visitBreak(Break* curr) {
    breakNames.insert(curr->name);
  }
  void visitSwitch(Switch* curr) {
    for (auto name : curr->targets) {
      breakNames.insert(name);
    }
    breakNames.insert(curr->default_);
  }
  void visitBlock(Block* curr) {
    if (curr->name.is()) breakNames.erase(curr->name); // these were internal breaks
  }
  void visitLoop(Loop* curr) {
    if (curr->name.is()) breakNames.erase(curr->name); // these were internal breaks
    // if the loop is unreachable, then there is branching control flow:
    //  (1) if the body is unreachable because of a (return), uncaught (br) etc., then we
    //      already noted branching, so it is ok to mark it again (if we have *caught*
    //      (br)s, then they did not lead to the loop body being unreachable).
    //      (same logic applies to blocks)
    //  (2) if the loop is unreachable because it only has branches up to the loop
    //      top, but no way to get out, then it is an infinite loop, and we consider
    //      that a branching side effect (note how the same logic does not apply to
    //      a block: branches to a block go forward, to its end, so they cannot
    //      form an infinite loop)
    if (curr->type == unreachable) {
      branches = true;
    }
  }
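
  // Example of case (2) above: this loop only branches back to its own top,
  // so it can never exit; its type is unreachable, and we record a branching
  // side effect even though it contains no return or escaping br:
  //
  //   (loop $forever
  //     (br $forever)
  //   )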

  void visitCall(Call* curr) { calls = true; }
  void visitCallImport(CallImport* curr) {
    calls = true;
    if (debugInfo) {
      // debugInfo call imports must be preserved very strongly, do not
      // move code around them
      branches = true; // !
    }
  }
  void visitCallIndirect(CallIndirect* curr) { calls = true; }
  void visitGetLocal(GetLocal* curr) {
    localsRead.insert(curr->index);
  }
  void visitSetLocal(SetLocal* curr) {
    localsWritten.insert(curr->index);
  }
  void visitGetGlobal(GetGlobal* curr) {
    globalsRead.insert(curr->name);
  }
  void visitSetGlobal(SetGlobal* curr) {
    globalsWritten.insert(curr->name);
  }
  void visitLoad(Load* curr) {
    readsMemory = true;
    isAtomic |= curr->isAtomic;
    if (!ignoreImplicitTraps) implicitTrap = true;
  }
  void visitStore(Store* curr) {
    writesMemory = true;
    isAtomic |= curr->isAtomic;
    if (!ignoreImplicitTraps) implicitTrap = true;
  }
  void visitAtomicRMW(AtomicRMW* curr) {
    readsMemory = true;
    writesMemory = true;
    isAtomic = true;
    if (!ignoreImplicitTraps) implicitTrap = true;
  }
  void visitAtomicCmpxchg(AtomicCmpxchg* curr) {
    readsMemory = true;
    writesMemory = true;
    isAtomic = true;
    if (!ignoreImplicitTraps) implicitTrap = true;
  }
  void visitAtomicWait(AtomicWait* curr) {
    readsMemory = true;
    // AtomicWait doesn't strictly write memory, but it does modify the waiters
    // list associated with the specified address, which we can think of as a
    // write.
    writesMemory = true;
    isAtomic = true;
    if (!ignoreImplicitTraps) implicitTrap = true;
  }
  void visitAtomicWake(AtomicWake* curr) {
    // AtomicWake doesn't strictly write memory, but it does modify the waiters
    // list associated with the specified address, which we can think of as a
    // write.
    readsMemory = true;
    writesMemory = true;
    isAtomic = true;
    if (!ignoreImplicitTraps) implicitTrap = true;
  }
  void visitUnary(Unary* curr) {
    if (!ignoreImplicitTraps) {
      switch (curr->op) {
        case TruncSFloat32ToInt32:
        case TruncSFloat32ToInt64:
        case TruncUFloat32ToInt32:
        case TruncUFloat32ToInt64:
        case TruncSFloat64ToInt32:
        case TruncSFloat64ToInt64:
        case TruncUFloat64ToInt32:
        case TruncUFloat64ToInt64: {
          implicitTrap = true;
          break;
        }
        default: {}
      }
    }
  }
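
  // The truncations above trap when the float operand is a NaN or out of
  // range for the target integer type; for example, truncating the f64 value
  // 1e30 to i32 traps at runtime. So unless ignoreImplicitTraps is set, they
  // count as implicit traps, just like loads and div/rem.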
  void visitBinary(Binary* curr) {
    if (!ignoreImplicitTraps) {
      switch (curr->op) {
        case DivSInt32:
        case DivUInt32:
        case RemSInt32:
        case RemUInt32:
        case DivSInt64:
        case DivUInt64:
        case RemSInt64:
        case RemUInt64: {
          implicitTrap = true;
          break;
        }
        default: {}
      }
    }
  }
  void visitReturn(Return* curr) { branches = true; }
  void visitHost(Host* curr) {
    calls = true;
    // grow_memory modifies the set of valid addresses, and thus can be
    // modeled as modifying memory
    writesMemory = true;
    // Atomics are also sequentially consistent with grow_memory.
    isAtomic = true;
  }
  void visitUnreachable(Unreachable* curr) { branches = true; }
};

} // namespace wasm

#endif // wasm_ir_effects_h