//===-- X86AsmInstrumentation.cpp - Instrument X86 inline assembly C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86BaseInfo.h"
#include "X86AsmInstrumentation.h"
#include "X86Operand.h"
#include "X86RegisterInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <limits> // std::numeric_limits, used for the displacement bounds below
#include <memory> // std::unique_ptr
#include <vector>

// The following comment describes how assembly instrumentation works.
// Currently we have only AddressSanitizer instrumentation, but we're
// planning to implement MemorySanitizer for inline assembly too. If
// you're not familiar with the AddressSanitizer algorithm, please read
// https://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm.
//
// When inline assembly is parsed by an instance of X86AsmParser, all
// instructions are emitted via the EmitInstruction method. That's the
// place where X86AsmInstrumentation analyzes an instruction and
// decides whether the instruction should be emitted as is or whether
// instrumentation is required. The latter happens when an instruction
// reads from or writes to memory. Currently the instruction opcode is
// checked explicitly, and if an instruction has a memory operand (for
// instance, movq (%rsi, %rcx, 8), %rax), it should be instrumented.
// There also exist instructions that modify memory but don't have
// explicit memory operands, for instance, movs.
//
// Let's first consider 8-byte memory accesses when an instruction has
// an explicit memory operand. In this case we need two registers -
// AddressReg to compute the address of the accessed memory cells and
// ShadowReg to compute the corresponding shadow address. So we need
// to spill both registers before the instrumentation code and restore
// them after it. Thus, in general, the instrumentation code will look
// like this:
//   PUSHF                  # Store flags, otherwise they will be overwritten
//   PUSH AddressReg        # spill AddressReg
//   PUSH ShadowReg         # spill ShadowReg
//   LEA MemOp, AddressReg  # compute address of the memory operand
//   MOV AddressReg, ShadowReg
//   SHR ShadowReg, 3
//   # ShadowOffset(AddressReg >> 3) contains address of a shadow
//   # corresponding to MemOp.
//   CMP ShadowOffset(ShadowReg), 0  # test shadow value
//   JZ .Done               # when the shadow value is zero, everything is fine
//   MOV AddressReg, RDI
//   # Call __asan_report function with AddressReg as an argument
//   CALL __asan_report
// .Done:
//   POP ShadowReg          # Restore ShadowReg
//   POP AddressReg         # Restore AddressReg
//   POPF                   # Restore flags
//
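// In C-like pseudocode (an illustrative sketch, not the literal emitted
// sequence; kShadowOffset is 0x20000000 for 32-bit and 0x7fff8000 for
// 64-bit Linux, see below), this 8-byte check is:
//   char *Shadow = (char *)((Addr >> 3) + kShadowOffset);
//   if (*Shadow != 0)
//     __asan_report_store8(Addr);  // or __asan_report_load8
//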
// Memory accesses with different sizes (1-, 2-, 4- and 16-byte) are
// handled in a similar manner, but small memory accesses (less than 8
// bytes) require an additional ScratchReg, which is used for the shadow
// value.
//
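// For such accesses the shadow byte k encodes how many of the first
// bytes of the corresponding 8-byte granule are addressable, so the
// check becomes (again an illustrative sketch):
//   char k = *(char *)((Addr >> 3) + kShadowOffset);
//   if (k != 0 && (char)((Addr & 7) + AccessSize - 1) >= k)
//     __asan_report_loadN(Addr);
//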
// Suppose we're instrumenting an instruction like movs: only the
// contents of RDI, RDI + AccessSize * RCX, RSI and RSI + AccessSize *
// RCX are checked. In this case there's no need to spill and restore
// AddressReg, ShadowReg or the flags four times; they're saved on the
// stack just once, before the instrumentation of these four addresses,
// and restored at the end of it.
//
// There exist several things which complicate this simple algorithm.
// * The instrumented memory operand can have RSP as a base or an index
//   register. So we need to add a constant offset before the
//   computation of the memory address, since flags, AddressReg,
//   ShadowReg, etc. were already stored on the stack and RSP was
//   modified.
// * Debug info (usually DWARF) should be adjusted, because sometimes
//   RSP is used as a frame register. So we need to select some register
//   as a frame register and temporarily override the current CFA
//   register.
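//
// For instance (illustrative numbers): after PUSHF and three 32-bit
// PUSHes, OrigSPOffset == -16, so a memory operand 8(%esp) has to be
// recomputed before the LEA; ComputeMemOperandAddress below emits
//   LEA 24(%esp), %AddressReg   # disp = 8 - OrigSPOffset
// instead of LEA 8(%esp), %AddressReg.
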
namespace llvm {
namespace {

static cl::opt<bool> ClAsanInstrumentAssembly(
    "asan-instrument-assembly",
    cl::desc("instrument assembly with AddressSanitizer checks"), cl::Hidden,
    cl::init(false));

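// This instrumentation is typically reached with something like
//   clang -fsanitize=address -mllvm -asan-instrument-assembly foo.c
// (an illustrative invocation; the exact plumbing of -mllvm flags
// depends on the driver version).
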
const int64_t MinAllowedDisplacement = std::numeric_limits<int32_t>::min();
const int64_t MaxAllowedDisplacement = std::numeric_limits<int32_t>::max();

int64_t ApplyDisplacementBounds(int64_t Displacement) {
  return std::max(std::min(MaxAllowedDisplacement, Displacement),
                  MinAllowedDisplacement);
}

void CheckDisplacementBounds(int64_t Displacement) {
  assert(Displacement >= MinAllowedDisplacement &&
         Displacement <= MaxAllowedDisplacement);
}

bool IsStackReg(unsigned Reg) { return Reg == X86::RSP || Reg == X86::ESP; }

bool IsSmallMemAccess(unsigned AccessSize) { return AccessSize < 8; }

std::string FuncName(unsigned AccessSize, bool IsWrite) {
  return std::string("__asan_report_") + (IsWrite ? "store" : "load") +
         utostr(AccessSize);
}

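// For example, FuncName(4, /*IsWrite=*/true) yields
// "__asan_report_store4", one of the reporting entry points exported by
// the compiler-rt ASan runtime.
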
class X86AddressSanitizer : public X86AsmInstrumentation {
public:
  struct RegisterContext {
  private:
    enum RegOffset {
      REG_OFFSET_ADDRESS = 0,
      REG_OFFSET_SHADOW,
      REG_OFFSET_SCRATCH
    };

  public:
    RegisterContext(unsigned AddressReg, unsigned ShadowReg,
                    unsigned ScratchReg) {
      BusyRegs.push_back(convReg(AddressReg, MVT::i64));
      BusyRegs.push_back(convReg(ShadowReg, MVT::i64));
      BusyRegs.push_back(convReg(ScratchReg, MVT::i64));
    }

    unsigned AddressReg(MVT::SimpleValueType VT) const {
      return convReg(BusyRegs[REG_OFFSET_ADDRESS], VT);
    }

    unsigned ShadowReg(MVT::SimpleValueType VT) const {
      return convReg(BusyRegs[REG_OFFSET_SHADOW], VT);
    }

    unsigned ScratchReg(MVT::SimpleValueType VT) const {
      return convReg(BusyRegs[REG_OFFSET_SCRATCH], VT);
    }

    void AddBusyReg(unsigned Reg) {
      if (Reg != X86::NoRegister)
        BusyRegs.push_back(convReg(Reg, MVT::i64));
    }

    void AddBusyRegs(const X86Operand &Op) {
      AddBusyReg(Op.getMemBaseReg());
      AddBusyReg(Op.getMemIndexReg());
    }

    unsigned ChooseFrameReg(MVT::SimpleValueType VT) const {
      static const MCPhysReg Candidates[] = { X86::RBP, X86::RAX, X86::RBX,
                                              X86::RCX, X86::RDX, X86::RDI,
                                              X86::RSI };
      for (unsigned Reg : Candidates) {
        if (!std::count(BusyRegs.begin(), BusyRegs.end(), Reg))
          return convReg(Reg, VT);
      }
      return X86::NoRegister;
    }
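
    // For example, with BusyRegs == {RDI, RAX, RCX} (typical for a small
    // MOV access, see InstrumentMOV below), RBP is not busy, so
    // ChooseFrameReg(MVT::i32) returns EBP, the i32 form of RBP.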
  private:
    unsigned convReg(unsigned Reg, MVT::SimpleValueType VT) const {
      return Reg == X86::NoRegister ? Reg : getX86SubSuperRegister(Reg, VT);
    }

    std::vector<unsigned> BusyRegs;
  };

  X86AddressSanitizer(const MCSubtargetInfo &STI)
      : X86AsmInstrumentation(STI), RepPrefix(false), OrigSPOffset(0) {}

  virtual ~X86AddressSanitizer() {}

  // X86AsmInstrumentation implementation:
  virtual void InstrumentAndEmitInstruction(const MCInst &Inst,
                                            OperandVector &Operands,
                                            MCContext &Ctx,
                                            const MCInstrInfo &MII,
                                            MCStreamer &Out) override {
    InstrumentMOVS(Inst, Operands, Ctx, MII, Out);
    if (RepPrefix)
      EmitInstruction(Out, MCInstBuilder(X86::REP_PREFIX));

    InstrumentMOV(Inst, Operands, Ctx, MII, Out);

    RepPrefix = (Inst.getOpcode() == X86::REP_PREFIX);
    if (!RepPrefix)
      EmitInstruction(Out, Inst);
  }

  // Adjusts the stack and saves all registers used in instrumentation.
  virtual void InstrumentMemOperandPrologue(const RegisterContext &RegCtx,
                                            MCContext &Ctx,
                                            MCStreamer &Out) = 0;

  // Restores all registers used in instrumentation and adjusts the stack
  // back.
  virtual void InstrumentMemOperandEpilogue(const RegisterContext &RegCtx,
                                            MCContext &Ctx,
                                            MCStreamer &Out) = 0;

  virtual void InstrumentMemOperandSmall(X86Operand &Op, unsigned AccessSize,
                                         bool IsWrite,
                                         const RegisterContext &RegCtx,
                                         MCContext &Ctx, MCStreamer &Out) = 0;
  virtual void InstrumentMemOperandLarge(X86Operand &Op, unsigned AccessSize,
                                         bool IsWrite,
                                         const RegisterContext &RegCtx,
                                         MCContext &Ctx, MCStreamer &Out) = 0;

  virtual void InstrumentMOVSImpl(unsigned AccessSize, MCContext &Ctx,
                                  MCStreamer &Out) = 0;

  void InstrumentMemOperand(X86Operand &Op, unsigned AccessSize, bool IsWrite,
                            const RegisterContext &RegCtx, MCContext &Ctx,
                            MCStreamer &Out);
  void InstrumentMOVSBase(unsigned DstReg, unsigned SrcReg, unsigned CntReg,
                          unsigned AccessSize, MCContext &Ctx, MCStreamer &Out);

  void InstrumentMOVS(const MCInst &Inst, OperandVector &Operands,
                      MCContext &Ctx, const MCInstrInfo &MII, MCStreamer &Out);
  void InstrumentMOV(const MCInst &Inst, OperandVector &Operands,
                     MCContext &Ctx, const MCInstrInfo &MII, MCStreamer &Out);

protected:
  void EmitLabel(MCStreamer &Out, MCSymbol *Label) { Out.EmitLabel(Label); }

  void EmitLEA(X86Operand &Op, MVT::SimpleValueType VT, unsigned Reg,
               MCStreamer &Out) {
    assert(VT == MVT::i32 || VT == MVT::i64);
    MCInst Inst;
    Inst.setOpcode(VT == MVT::i32 ? X86::LEA32r : X86::LEA64r);
    Inst.addOperand(MCOperand::CreateReg(getX86SubSuperRegister(Reg, VT)));
    Op.addMemOperands(Inst, 5);
    EmitInstruction(Out, Inst);
  }

  void ComputeMemOperandAddress(X86Operand &Op, MVT::SimpleValueType VT,
                                unsigned Reg, MCContext &Ctx, MCStreamer &Out);

  // Creates a new memory operand with Displacement added to the original
  // displacement. Residue will contain the leftover amount when the total
  // displacement exceeds the 32-bit limit.
  std::unique_ptr<X86Operand> AddDisplacement(X86Operand &Op,
                                              int64_t Displacement,
                                              MCContext &Ctx, int64_t *Residue);

  bool is64BitMode() const {
    return (STI.getFeatureBits() & X86::Mode64Bit) != 0;
  }
  bool is32BitMode() const {
    return (STI.getFeatureBits() & X86::Mode32Bit) != 0;
  }
  bool is16BitMode() const {
    return (STI.getFeatureBits() & X86::Mode16Bit) != 0;
  }

  unsigned getPointerWidth() {
    if (is16BitMode()) return 16;
    if (is32BitMode()) return 32;
    if (is64BitMode()) return 64;
    llvm_unreachable("invalid mode");
  }

  // True when the previous instruction was actually a REP prefix.
  bool RepPrefix;

  // Offset from the original SP register value.
  int64_t OrigSPOffset;
};

void X86AddressSanitizer::InstrumentMemOperand(
    X86Operand &Op, unsigned AccessSize, bool IsWrite,
    const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
  assert(Op.isMem() && "Op should be a memory operand.");
  assert((AccessSize & (AccessSize - 1)) == 0 && AccessSize <= 16 &&
         "AccessSize should be a power of two, less than or equal to 16.");
  // FIXME: take into account load/store alignment.
  if (IsSmallMemAccess(AccessSize))
    InstrumentMemOperandSmall(Op, AccessSize, IsWrite, RegCtx, Ctx, Out);
  else
    InstrumentMemOperandLarge(Op, AccessSize, IsWrite, RegCtx, Ctx, Out);
}

void X86AddressSanitizer::InstrumentMOVSBase(unsigned DstReg, unsigned SrcReg,
                                             unsigned CntReg,
                                             unsigned AccessSize,
                                             MCContext &Ctx, MCStreamer &Out) {
  // FIXME: check whole ranges [DstReg .. DstReg + AccessSize * (CntReg - 1)]
  // and [SrcReg .. SrcReg + AccessSize * (CntReg - 1)].
  RegisterContext RegCtx(X86::RDX /* AddressReg */, X86::RAX /* ShadowReg */,
                         IsSmallMemAccess(AccessSize)
                             ? X86::RBX
                             : X86::NoRegister /* ScratchReg */);
  RegCtx.AddBusyReg(DstReg);
  RegCtx.AddBusyReg(SrcReg);
  RegCtx.AddBusyReg(CntReg);

  InstrumentMemOperandPrologue(RegCtx, Ctx, Out);

  // Test (%SrcReg)
  {
    const MCExpr *Disp = MCConstantExpr::Create(0, Ctx);
    std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
        getPointerWidth(), 0, Disp, SrcReg, 0, AccessSize, SMLoc(), SMLoc()));
    InstrumentMemOperand(*Op, AccessSize, false /* IsWrite */, RegCtx, Ctx,
                         Out);
  }

  // Test -1(%SrcReg, %CntReg, AccessSize)
  {
    const MCExpr *Disp = MCConstantExpr::Create(-1, Ctx);
    std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
        getPointerWidth(), 0, Disp, SrcReg, CntReg, AccessSize, SMLoc(),
        SMLoc()));
    InstrumentMemOperand(*Op, AccessSize, false /* IsWrite */, RegCtx, Ctx,
                         Out);
  }

  // Test (%DstReg)
  {
    const MCExpr *Disp = MCConstantExpr::Create(0, Ctx);
    std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
        getPointerWidth(), 0, Disp, DstReg, 0, AccessSize, SMLoc(), SMLoc()));
    InstrumentMemOperand(*Op, AccessSize, true /* IsWrite */, RegCtx, Ctx, Out);
  }

  // Test -1(%DstReg, %CntReg, AccessSize)
  {
    const MCExpr *Disp = MCConstantExpr::Create(-1, Ctx);
    std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
        getPointerWidth(), 0, Disp, DstReg, CntReg, AccessSize, SMLoc(),
        SMLoc()));
    InstrumentMemOperand(*Op, AccessSize, true /* IsWrite */, RegCtx, Ctx, Out);
  }

  InstrumentMemOperandEpilogue(RegCtx, Ctx, Out);
}

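// For example, for `movsb` with RCX == N this checks exactly four
// bytes -- RSI[0], RSI[N-1], RDI[0] and RDI[N-1], the first and last
// bytes of each range -- rather than every byte touched (see the FIXME
// above).
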
void X86AddressSanitizer::InstrumentMOVS(const MCInst &Inst,
                                         OperandVector &Operands,
                                         MCContext &Ctx, const MCInstrInfo &MII,
                                         MCStreamer &Out) {
  // Access size in bytes.
  unsigned AccessSize = 0;

  switch (Inst.getOpcode()) {
  case X86::MOVSB:
    AccessSize = 1;
    break;
  case X86::MOVSW:
    AccessSize = 2;
    break;
  case X86::MOVSL:
    AccessSize = 4;
    break;
  case X86::MOVSQ:
    AccessSize = 8;
    break;
  default:
    return;
  }

  InstrumentMOVSImpl(AccessSize, Ctx, Out);
}

void X86AddressSanitizer::InstrumentMOV(const MCInst &Inst,
                                        OperandVector &Operands, MCContext &Ctx,
                                        const MCInstrInfo &MII,
                                        MCStreamer &Out) {
  // Access size in bytes.
  unsigned AccessSize = 0;

  switch (Inst.getOpcode()) {
  case X86::MOV8mi:
  case X86::MOV8mr:
  case X86::MOV8rm:
    AccessSize = 1;
    break;
  case X86::MOV16mi:
  case X86::MOV16mr:
  case X86::MOV16rm:
    AccessSize = 2;
    break;
  case X86::MOV32mi:
  case X86::MOV32mr:
  case X86::MOV32rm:
    AccessSize = 4;
    break;
  case X86::MOV64mi32:
  case X86::MOV64mr:
  case X86::MOV64rm:
    AccessSize = 8;
    break;
  case X86::MOVAPDmr:
  case X86::MOVAPSmr:
  case X86::MOVAPDrm:
  case X86::MOVAPSrm:
    AccessSize = 16;
    break;
  default:
    return;
  }

  const bool IsWrite = MII.get(Inst.getOpcode()).mayStore();

  for (unsigned Ix = 0; Ix < Operands.size(); ++Ix) {
    assert(Operands[Ix]);
    MCParsedAsmOperand &Op = *Operands[Ix];
    if (Op.isMem()) {
      X86Operand &MemOp = static_cast<X86Operand &>(Op);
      RegisterContext RegCtx(
          X86::RDI /* AddressReg */, X86::RAX /* ShadowReg */,
          IsSmallMemAccess(AccessSize) ? X86::RCX
                                       : X86::NoRegister /* ScratchReg */);
      RegCtx.AddBusyRegs(MemOp);
      InstrumentMemOperandPrologue(RegCtx, Ctx, Out);
      InstrumentMemOperand(MemOp, AccessSize, IsWrite, RegCtx, Ctx, Out);
      InstrumentMemOperandEpilogue(RegCtx, Ctx, Out);
    }
  }
}

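// For example, `movq %rax, (%rsi)` parses to MOV64mr: AccessSize is 8,
// mayStore() is true, and the single memory operand is instrumented as
// an 8-byte write, with RDI and RAX serving as the address and shadow
// registers.
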
void X86AddressSanitizer::ComputeMemOperandAddress(X86Operand &Op,
                                                   MVT::SimpleValueType VT,
                                                   unsigned Reg, MCContext &Ctx,
                                                   MCStreamer &Out) {
  int64_t Displacement = 0;
  if (IsStackReg(Op.getMemBaseReg()))
    Displacement -= OrigSPOffset;
  if (IsStackReg(Op.getMemIndexReg()))
    Displacement -= OrigSPOffset * Op.getMemScale();

  assert(Displacement >= 0);

  // Emit Op as is.
  if (Displacement == 0) {
    EmitLEA(Op, VT, Reg, Out);
    return;
  }

  int64_t Residue;
  std::unique_ptr<X86Operand> NewOp =
      AddDisplacement(Op, Displacement, Ctx, &Residue);
  EmitLEA(*NewOp, VT, Reg, Out);

  while (Residue != 0) {
    const MCConstantExpr *Disp =
        MCConstantExpr::Create(ApplyDisplacementBounds(Residue), Ctx);
    std::unique_ptr<X86Operand> DispOp =
        X86Operand::CreateMem(getPointerWidth(), 0, Disp, Reg, 0, 1, SMLoc(),
                              SMLoc());
    EmitLEA(*DispOp, VT, Reg, Out);
    Residue -= Disp->getValue();
  }
}

std::unique_ptr<X86Operand>
X86AddressSanitizer::AddDisplacement(X86Operand &Op, int64_t Displacement,
                                     MCContext &Ctx, int64_t *Residue) {
  assert(Displacement >= 0);

  if (Displacement == 0 ||
      (Op.getMemDisp() && Op.getMemDisp()->getKind() != MCExpr::Constant)) {
    *Residue = Displacement;
    return X86Operand::CreateMem(Op.getMemModeSize(), Op.getMemSegReg(),
                                 Op.getMemDisp(), Op.getMemBaseReg(),
                                 Op.getMemIndexReg(), Op.getMemScale(),
                                 SMLoc(), SMLoc());
  }

  int64_t OrigDisplacement =
      static_cast<const MCConstantExpr *>(Op.getMemDisp())->getValue();
  CheckDisplacementBounds(OrigDisplacement);
  Displacement += OrigDisplacement;

  int64_t NewDisplacement = ApplyDisplacementBounds(Displacement);
  CheckDisplacementBounds(NewDisplacement);

  *Residue = Displacement - NewDisplacement;
  const MCExpr *Disp = MCConstantExpr::Create(NewDisplacement, Ctx);
  return X86Operand::CreateMem(Op.getMemModeSize(), Op.getMemSegReg(), Disp,
                               Op.getMemBaseReg(), Op.getMemIndexReg(),
                               Op.getMemScale(), SMLoc(), SMLoc());
}

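// For example (illustrative numbers): adding a displacement of
// 0x100000000 to an operand with zero original displacement clamps the
// first LEA to the 32-bit maximum, and the residue is folded in by the
// follow-up LEAs in ComputeMemOperandAddress above:
//   LEA 0x7fffffff(%reg), %reg   # Residue = 0x80000001
//   LEA 0x7fffffff(%reg), %reg   # Residue = 2
//   LEA 2(%reg), %reg            # Residue = 0
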
class X86AddressSanitizer32 : public X86AddressSanitizer {
public:
  static const long kShadowOffset = 0x20000000;
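  // Shadow mapping used here (standard 32-bit Linux ASan layout):
  //   Shadow = (Addr >> 3) + kShadowOffset
  // e.g. an access at 0x40000000 is checked through the shadow byte at
  // 0x28000000 (illustrative address).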

  X86AddressSanitizer32(const MCSubtargetInfo &STI)
      : X86AddressSanitizer(STI) {}

  virtual ~X86AddressSanitizer32() {}

  unsigned GetFrameReg(const MCContext &Ctx, MCStreamer &Out) {
    unsigned FrameReg = GetFrameRegGeneric(Ctx, Out);
    if (FrameReg == X86::NoRegister)
      return FrameReg;
    return getX86SubSuperRegister(FrameReg, MVT::i32);
  }

  void SpillReg(MCStreamer &Out, unsigned Reg) {
    EmitInstruction(Out, MCInstBuilder(X86::PUSH32r).addReg(Reg));
    OrigSPOffset -= 4;
  }

  void RestoreReg(MCStreamer &Out, unsigned Reg) {
    EmitInstruction(Out, MCInstBuilder(X86::POP32r).addReg(Reg));
    OrigSPOffset += 4;
  }

  void StoreFlags(MCStreamer &Out) {
    EmitInstruction(Out, MCInstBuilder(X86::PUSHF32));
    OrigSPOffset -= 4;
  }

  void RestoreFlags(MCStreamer &Out) {
    EmitInstruction(Out, MCInstBuilder(X86::POPF32));
    OrigSPOffset += 4;
  }

  virtual void InstrumentMemOperandPrologue(const RegisterContext &RegCtx,
                                            MCContext &Ctx,
                                            MCStreamer &Out) override {
    unsigned LocalFrameReg = RegCtx.ChooseFrameReg(MVT::i32);
    assert(LocalFrameReg != X86::NoRegister);

    const MCRegisterInfo *MRI = Ctx.getRegisterInfo();
    unsigned FrameReg = GetFrameReg(Ctx, Out);
    if (MRI && FrameReg != X86::NoRegister) {
      SpillReg(Out, LocalFrameReg);
      if (FrameReg == X86::ESP) {
        Out.EmitCFIAdjustCfaOffset(4 /* byte size of the LocalFrameReg */);
        Out.EmitCFIRelOffset(
            MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */), 0);
      }
      EmitInstruction(
          Out,
          MCInstBuilder(X86::MOV32rr).addReg(LocalFrameReg).addReg(FrameReg));
      Out.EmitCFIRememberState();
      Out.EmitCFIDefCfaRegister(
          MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */));
    }

    SpillReg(Out, RegCtx.AddressReg(MVT::i32));
    SpillReg(Out, RegCtx.ShadowReg(MVT::i32));
    if (RegCtx.ScratchReg(MVT::i32) != X86::NoRegister)
      SpillReg(Out, RegCtx.ScratchReg(MVT::i32));
    StoreFlags(Out);
  }

  virtual void InstrumentMemOperandEpilogue(const RegisterContext &RegCtx,
                                            MCContext &Ctx,
                                            MCStreamer &Out) override {
    unsigned LocalFrameReg = RegCtx.ChooseFrameReg(MVT::i32);
    assert(LocalFrameReg != X86::NoRegister);

    RestoreFlags(Out);
    if (RegCtx.ScratchReg(MVT::i32) != X86::NoRegister)
      RestoreReg(Out, RegCtx.ScratchReg(MVT::i32));
    RestoreReg(Out, RegCtx.ShadowReg(MVT::i32));
    RestoreReg(Out, RegCtx.AddressReg(MVT::i32));

    unsigned FrameReg = GetFrameReg(Ctx, Out);
    if (Ctx.getRegisterInfo() && FrameReg != X86::NoRegister) {
      RestoreReg(Out, LocalFrameReg);
      Out.EmitCFIRestoreState();
      if (FrameReg == X86::ESP)
        Out.EmitCFIAdjustCfaOffset(-4 /* byte size of the LocalFrameReg */);
    }
  }

  virtual void InstrumentMemOperandSmall(X86Operand &Op, unsigned AccessSize,
                                         bool IsWrite,
                                         const RegisterContext &RegCtx,
                                         MCContext &Ctx,
                                         MCStreamer &Out) override;
  virtual void InstrumentMemOperandLarge(X86Operand &Op, unsigned AccessSize,
                                         bool IsWrite,
                                         const RegisterContext &RegCtx,
                                         MCContext &Ctx,
                                         MCStreamer &Out) override;
  virtual void InstrumentMOVSImpl(unsigned AccessSize, MCContext &Ctx,
                                  MCStreamer &Out) override;

private:
  void EmitCallAsanReport(unsigned AccessSize, bool IsWrite, MCContext &Ctx,
                          MCStreamer &Out, const RegisterContext &RegCtx) {
    EmitInstruction(Out, MCInstBuilder(X86::CLD));
    EmitInstruction(Out, MCInstBuilder(X86::MMX_EMMS));

    // Align the stack to a 16-byte boundary before the call (the 32-bit
    // form of the AND is required here; a 64-bit opcode is not encodable
    // in 32-bit mode).
    EmitInstruction(Out, MCInstBuilder(X86::AND32ri8)
                             .addReg(X86::ESP)
                             .addReg(X86::ESP)
                             .addImm(-16));
    EmitInstruction(
        Out, MCInstBuilder(X86::PUSH32r).addReg(RegCtx.AddressReg(MVT::i32)));

    const std::string &Fn = FuncName(AccessSize, IsWrite);
    MCSymbol *FnSym = Ctx.GetOrCreateSymbol(StringRef(Fn));
    const MCSymbolRefExpr *FnExpr =
        MCSymbolRefExpr::Create(FnSym, MCSymbolRefExpr::VK_PLT, Ctx);
    EmitInstruction(Out, MCInstBuilder(X86::CALLpcrel32).addExpr(FnExpr));
  }
};

void X86AddressSanitizer32::InstrumentMemOperandSmall(
    X86Operand &Op, unsigned AccessSize, bool IsWrite,
    const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
  unsigned AddressRegI32 = RegCtx.AddressReg(MVT::i32);
  unsigned ShadowRegI32 = RegCtx.ShadowReg(MVT::i32);
  unsigned ShadowRegI8 = RegCtx.ShadowReg(MVT::i8);

  assert(RegCtx.ScratchReg(MVT::i32) != X86::NoRegister);
  unsigned ScratchRegI32 = RegCtx.ScratchReg(MVT::i32);

  ComputeMemOperandAddress(Op, MVT::i32, AddressRegI32, Ctx, Out);

  EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ShadowRegI32).addReg(
                           AddressRegI32));
  EmitInstruction(Out, MCInstBuilder(X86::SHR32ri)
                           .addReg(ShadowRegI32)
                           .addReg(ShadowRegI32)
                           .addImm(3));

  {
    MCInst Inst;
    Inst.setOpcode(X86::MOV8rm);
    Inst.addOperand(MCOperand::CreateReg(ShadowRegI8));
    const MCExpr *Disp = MCConstantExpr::Create(kShadowOffset, Ctx);
    std::unique_ptr<X86Operand> Op(
        X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI32, 0, 1,
                              SMLoc(), SMLoc()));
    Op->addMemOperands(Inst, 5);
    EmitInstruction(Out, Inst);
  }

  EmitInstruction(
      Out, MCInstBuilder(X86::TEST8rr).addReg(ShadowRegI8).addReg(ShadowRegI8));
  MCSymbol *DoneSym = Ctx.CreateTempSymbol();
  const MCExpr *DoneExpr = MCSymbolRefExpr::Create(DoneSym, Ctx);
  EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));

  EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ScratchRegI32).addReg(
                           AddressRegI32));
  EmitInstruction(Out, MCInstBuilder(X86::AND32ri)
                           .addReg(ScratchRegI32)
                           .addReg(ScratchRegI32)
                           .addImm(7));

  switch (AccessSize) {
  default: llvm_unreachable("Incorrect access size");
  case 1:
    break;
  case 2: {
    const MCExpr *Disp = MCConstantExpr::Create(1, Ctx);
    std::unique_ptr<X86Operand> Op(
        X86Operand::CreateMem(getPointerWidth(), 0, Disp, ScratchRegI32, 0, 1,
                              SMLoc(), SMLoc()));
    EmitLEA(*Op, MVT::i32, ScratchRegI32, Out);
    break;
  }
  case 4:
    EmitInstruction(Out, MCInstBuilder(X86::ADD32ri8)
                             .addReg(ScratchRegI32)
                             .addReg(ScratchRegI32)
                             .addImm(3));
    break;
  }

  EmitInstruction(
      Out,
      MCInstBuilder(X86::MOVSX32rr8).addReg(ShadowRegI32).addReg(ShadowRegI8));
  EmitInstruction(Out, MCInstBuilder(X86::CMP32rr).addReg(ScratchRegI32).addReg(
                           ShadowRegI32));
  EmitInstruction(Out, MCInstBuilder(X86::JL_1).addExpr(DoneExpr));

  EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
  EmitLabel(Out, DoneSym);
}

void X86AddressSanitizer32::InstrumentMemOperandLarge(
    X86Operand &Op, unsigned AccessSize, bool IsWrite,
    const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
  unsigned AddressRegI32 = RegCtx.AddressReg(MVT::i32);
  unsigned ShadowRegI32 = RegCtx.ShadowReg(MVT::i32);

  ComputeMemOperandAddress(Op, MVT::i32, AddressRegI32, Ctx, Out);

  EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ShadowRegI32).addReg(
                           AddressRegI32));
  EmitInstruction(Out, MCInstBuilder(X86::SHR32ri)
                           .addReg(ShadowRegI32)
                           .addReg(ShadowRegI32)
                           .addImm(3));
  {
    MCInst Inst;
    switch (AccessSize) {
    default: llvm_unreachable("Incorrect access size");
    case 8:
      Inst.setOpcode(X86::CMP8mi);
      break;
    case 16:
      Inst.setOpcode(X86::CMP16mi);
      break;
    }
    const MCExpr *Disp = MCConstantExpr::Create(kShadowOffset, Ctx);
    std::unique_ptr<X86Operand> Op(
        X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI32, 0, 1,
                              SMLoc(), SMLoc()));
    Op->addMemOperands(Inst, 5);
    Inst.addOperand(MCOperand::CreateImm(0));
    EmitInstruction(Out, Inst);
  }
  MCSymbol *DoneSym = Ctx.CreateTempSymbol();
  const MCExpr *DoneExpr = MCSymbolRefExpr::Create(DoneSym, Ctx);
  EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));

  EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
  EmitLabel(Out, DoneSym);
}

void X86AddressSanitizer32::InstrumentMOVSImpl(unsigned AccessSize,
                                               MCContext &Ctx,
                                               MCStreamer &Out) {
  StoreFlags(Out);

  // No need to test when ECX equals zero.
  MCSymbol *DoneSym = Ctx.CreateTempSymbol();
  const MCExpr *DoneExpr = MCSymbolRefExpr::Create(DoneSym, Ctx);
  EmitInstruction(
      Out, MCInstBuilder(X86::TEST32rr).addReg(X86::ECX).addReg(X86::ECX));
  EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));

  // Instrument first and last elements in src and dst range.
  InstrumentMOVSBase(X86::EDI /* DstReg */, X86::ESI /* SrcReg */,
                     X86::ECX /* CntReg */, AccessSize, Ctx, Out);

  EmitLabel(Out, DoneSym);
  RestoreFlags(Out);
}

class X86AddressSanitizer64 : public X86AddressSanitizer {
public:
  static const long kShadowOffset = 0x7fff8000;
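  // Standard 64-bit Linux ASan mapping: Shadow = (Addr >> 3) + 0x7fff8000.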

  X86AddressSanitizer64(const MCSubtargetInfo &STI)
      : X86AddressSanitizer(STI) {}

  virtual ~X86AddressSanitizer64() {}

  unsigned GetFrameReg(const MCContext &Ctx, MCStreamer &Out) {
    unsigned FrameReg = GetFrameRegGeneric(Ctx, Out);
    if (FrameReg == X86::NoRegister)
      return FrameReg;
    return getX86SubSuperRegister(FrameReg, MVT::i64);
  }

  void SpillReg(MCStreamer &Out, unsigned Reg) {
    EmitInstruction(Out, MCInstBuilder(X86::PUSH64r).addReg(Reg));
    OrigSPOffset -= 8;
  }

  void RestoreReg(MCStreamer &Out, unsigned Reg) {
    EmitInstruction(Out, MCInstBuilder(X86::POP64r).addReg(Reg));
    OrigSPOffset += 8;
  }

  void StoreFlags(MCStreamer &Out) {
    EmitInstruction(Out, MCInstBuilder(X86::PUSHF64));
    OrigSPOffset -= 8;
  }

  void RestoreFlags(MCStreamer &Out) {
    EmitInstruction(Out, MCInstBuilder(X86::POPF64));
    OrigSPOffset += 8;
  }

  virtual void InstrumentMemOperandPrologue(const RegisterContext &RegCtx,
                                            MCContext &Ctx,
                                            MCStreamer &Out) override {
    unsigned LocalFrameReg = RegCtx.ChooseFrameReg(MVT::i64);
    assert(LocalFrameReg != X86::NoRegister);

    const MCRegisterInfo *MRI = Ctx.getRegisterInfo();
    unsigned FrameReg = GetFrameReg(Ctx, Out);
    if (MRI && FrameReg != X86::NoRegister) {
      // Spill the register the epilogue restores: LocalFrameReg, which
      // is not necessarily RBP.
      SpillReg(Out, LocalFrameReg);
      if (FrameReg == X86::RSP) {
        Out.EmitCFIAdjustCfaOffset(8 /* byte size of the LocalFrameReg */);
        Out.EmitCFIRelOffset(
            MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */), 0);
      }
      EmitInstruction(
          Out,
          MCInstBuilder(X86::MOV64rr).addReg(LocalFrameReg).addReg(FrameReg));
      Out.EmitCFIRememberState();
      Out.EmitCFIDefCfaRegister(
          MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */));
    }

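    // Skip the 128-byte red zone below RSP that the x86-64 System V ABI
    // reserves for leaf code, so the spills below don't clobber it.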
    EmitAdjustRSP(Ctx, Out, -128);
    SpillReg(Out, RegCtx.ShadowReg(MVT::i64));
    SpillReg(Out, RegCtx.AddressReg(MVT::i64));
    if (RegCtx.ScratchReg(MVT::i64) != X86::NoRegister)
      SpillReg(Out, RegCtx.ScratchReg(MVT::i64));
    StoreFlags(Out);
  }

  virtual void InstrumentMemOperandEpilogue(const RegisterContext &RegCtx,
                                            MCContext &Ctx,
                                            MCStreamer &Out) override {
    unsigned LocalFrameReg = RegCtx.ChooseFrameReg(MVT::i64);
    assert(LocalFrameReg != X86::NoRegister);

    RestoreFlags(Out);
    if (RegCtx.ScratchReg(MVT::i64) != X86::NoRegister)
      RestoreReg(Out, RegCtx.ScratchReg(MVT::i64));
    RestoreReg(Out, RegCtx.AddressReg(MVT::i64));
    RestoreReg(Out, RegCtx.ShadowReg(MVT::i64));
    EmitAdjustRSP(Ctx, Out, 128);

    unsigned FrameReg = GetFrameReg(Ctx, Out);
    if (Ctx.getRegisterInfo() && FrameReg != X86::NoRegister) {
      RestoreReg(Out, LocalFrameReg);
      Out.EmitCFIRestoreState();
      if (FrameReg == X86::RSP)
        Out.EmitCFIAdjustCfaOffset(-8 /* byte size of the LocalFrameReg */);
    }
  }

  virtual void InstrumentMemOperandSmall(X86Operand &Op, unsigned AccessSize,
                                         bool IsWrite,
                                         const RegisterContext &RegCtx,
                                         MCContext &Ctx,
                                         MCStreamer &Out) override;
  virtual void InstrumentMemOperandLarge(X86Operand &Op, unsigned AccessSize,
                                         bool IsWrite,
                                         const RegisterContext &RegCtx,
                                         MCContext &Ctx,
                                         MCStreamer &Out) override;
  virtual void InstrumentMOVSImpl(unsigned AccessSize, MCContext &Ctx,
                                  MCStreamer &Out) override;

private:
  void EmitAdjustRSP(MCContext &Ctx, MCStreamer &Out, long Offset) {
    const MCExpr *Disp = MCConstantExpr::Create(Offset, Ctx);
    std::unique_ptr<X86Operand> Op(
        X86Operand::CreateMem(getPointerWidth(), 0, Disp, X86::RSP, 0, 1,
                              SMLoc(), SMLoc()));
    EmitLEA(*Op, MVT::i64, X86::RSP, Out);
    OrigSPOffset += Offset;
  }

  void EmitCallAsanReport(unsigned AccessSize, bool IsWrite, MCContext &Ctx,
                          MCStreamer &Out, const RegisterContext &RegCtx) {
    EmitInstruction(Out, MCInstBuilder(X86::CLD));
    EmitInstruction(Out, MCInstBuilder(X86::MMX_EMMS));

    EmitInstruction(Out, MCInstBuilder(X86::AND64ri8)
                             .addReg(X86::RSP)
                             .addReg(X86::RSP)
                             .addImm(-16));

    if (RegCtx.AddressReg(MVT::i64) != X86::RDI) {
      EmitInstruction(Out, MCInstBuilder(X86::MOV64rr).addReg(X86::RDI).addReg(
                               RegCtx.AddressReg(MVT::i64)));
    }
    const std::string &Fn = FuncName(AccessSize, IsWrite);
    MCSymbol *FnSym = Ctx.GetOrCreateSymbol(StringRef(Fn));
    const MCSymbolRefExpr *FnExpr =
        MCSymbolRefExpr::Create(FnSym, MCSymbolRefExpr::VK_PLT, Ctx);
    EmitInstruction(Out, MCInstBuilder(X86::CALL64pcrel32).addExpr(FnExpr));
  }
};

void X86AddressSanitizer64::InstrumentMemOperandSmall(
    X86Operand &Op, unsigned AccessSize, bool IsWrite,
    const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
  unsigned AddressRegI64 = RegCtx.AddressReg(MVT::i64);
  unsigned AddressRegI32 = RegCtx.AddressReg(MVT::i32);
  unsigned ShadowRegI64 = RegCtx.ShadowReg(MVT::i64);
  unsigned ShadowRegI32 = RegCtx.ShadowReg(MVT::i32);
  unsigned ShadowRegI8 = RegCtx.ShadowReg(MVT::i8);

  assert(RegCtx.ScratchReg(MVT::i32) != X86::NoRegister);
  unsigned ScratchRegI32 = RegCtx.ScratchReg(MVT::i32);

  ComputeMemOperandAddress(Op, MVT::i64, AddressRegI64, Ctx, Out);

  EmitInstruction(Out, MCInstBuilder(X86::MOV64rr).addReg(ShadowRegI64).addReg(
                           AddressRegI64));
  EmitInstruction(Out, MCInstBuilder(X86::SHR64ri)
                           .addReg(ShadowRegI64)
                           .addReg(ShadowRegI64)
                           .addImm(3));
  {
    MCInst Inst;
    Inst.setOpcode(X86::MOV8rm);
    Inst.addOperand(MCOperand::CreateReg(ShadowRegI8));
    const MCExpr *Disp = MCConstantExpr::Create(kShadowOffset, Ctx);
    std::unique_ptr<X86Operand> Op(
        X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI64, 0, 1,
                              SMLoc(), SMLoc()));
    Op->addMemOperands(Inst, 5);
    EmitInstruction(Out, Inst);
  }

  EmitInstruction(
      Out, MCInstBuilder(X86::TEST8rr).addReg(ShadowRegI8).addReg(ShadowRegI8));
  MCSymbol *DoneSym = Ctx.CreateTempSymbol();
  const MCExpr *DoneExpr = MCSymbolRefExpr::Create(DoneSym, Ctx);
  EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));

  EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ScratchRegI32).addReg(
                           AddressRegI32));
  EmitInstruction(Out, MCInstBuilder(X86::AND32ri)
                           .addReg(ScratchRegI32)
                           .addReg(ScratchRegI32)
                           .addImm(7));

  switch (AccessSize) {
  default: llvm_unreachable("Incorrect access size");
  case 1:
    break;
  case 2: {
    const MCExpr *Disp = MCConstantExpr::Create(1, Ctx);
    std::unique_ptr<X86Operand> Op(
        X86Operand::CreateMem(getPointerWidth(), 0, Disp, ScratchRegI32, 0, 1,
                              SMLoc(), SMLoc()));
    EmitLEA(*Op, MVT::i32, ScratchRegI32, Out);
    break;
  }
  case 4:
    EmitInstruction(Out, MCInstBuilder(X86::ADD32ri8)
                             .addReg(ScratchRegI32)
                             .addReg(ScratchRegI32)
                             .addImm(3));
    break;
  }

  EmitInstruction(
      Out,
      MCInstBuilder(X86::MOVSX32rr8).addReg(ShadowRegI32).addReg(ShadowRegI8));
  EmitInstruction(Out, MCInstBuilder(X86::CMP32rr).addReg(ScratchRegI32).addReg(
                           ShadowRegI32));
  EmitInstruction(Out, MCInstBuilder(X86::JL_1).addExpr(DoneExpr));

  EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
  EmitLabel(Out, DoneSym);
}

void X86AddressSanitizer64::InstrumentMemOperandLarge(
    X86Operand &Op, unsigned AccessSize, bool IsWrite,
    const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
  unsigned AddressRegI64 = RegCtx.AddressReg(MVT::i64);
  unsigned ShadowRegI64 = RegCtx.ShadowReg(MVT::i64);

  ComputeMemOperandAddress(Op, MVT::i64, AddressRegI64, Ctx, Out);

  EmitInstruction(Out, MCInstBuilder(X86::MOV64rr).addReg(ShadowRegI64).addReg(
                           AddressRegI64));
  EmitInstruction(Out, MCInstBuilder(X86::SHR64ri)
                           .addReg(ShadowRegI64)
                           .addReg(ShadowRegI64)
                           .addImm(3));
  {
    MCInst Inst;
    switch (AccessSize) {
    default: llvm_unreachable("Incorrect access size");
    case 8:
      Inst.setOpcode(X86::CMP8mi);
      break;
    case 16:
      Inst.setOpcode(X86::CMP16mi);
      break;
    }
    const MCExpr *Disp = MCConstantExpr::Create(kShadowOffset, Ctx);
    std::unique_ptr<X86Operand> Op(
        X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI64, 0, 1,
                              SMLoc(), SMLoc()));
    Op->addMemOperands(Inst, 5);
    Inst.addOperand(MCOperand::CreateImm(0));
    EmitInstruction(Out, Inst);
  }

  MCSymbol *DoneSym = Ctx.CreateTempSymbol();
  const MCExpr *DoneExpr = MCSymbolRefExpr::Create(DoneSym, Ctx);
  EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));

  EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
  EmitLabel(Out, DoneSym);
}

void X86AddressSanitizer64::InstrumentMOVSImpl(unsigned AccessSize,
                                               MCContext &Ctx,
                                               MCStreamer &Out) {
  StoreFlags(Out);

  // No need to test when RCX equals zero.
  MCSymbol *DoneSym = Ctx.CreateTempSymbol();
  const MCExpr *DoneExpr = MCSymbolRefExpr::Create(DoneSym, Ctx);
  EmitInstruction(
      Out, MCInstBuilder(X86::TEST64rr).addReg(X86::RCX).addReg(X86::RCX));
  EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));

  // Instrument first and last elements in src and dst range.
  InstrumentMOVSBase(X86::RDI /* DstReg */, X86::RSI /* SrcReg */,
                     X86::RCX /* CntReg */, AccessSize, Ctx, Out);

  EmitLabel(Out, DoneSym);
  RestoreFlags(Out);
}

} // End anonymous namespace

X86AsmInstrumentation::X86AsmInstrumentation(const MCSubtargetInfo &STI)
    : STI(STI), InitialFrameReg(0) {}

X86AsmInstrumentation::~X86AsmInstrumentation() {}

void X86AsmInstrumentation::InstrumentAndEmitInstruction(
    const MCInst &Inst, OperandVector &Operands, MCContext &Ctx,
    const MCInstrInfo &MII, MCStreamer &Out) {
  EmitInstruction(Out, Inst);
}

void X86AsmInstrumentation::EmitInstruction(MCStreamer &Out,
                                            const MCInst &Inst) {
  Out.EmitInstruction(Inst, STI);
}

unsigned X86AsmInstrumentation::GetFrameRegGeneric(const MCContext &Ctx,
                                                   MCStreamer &Out) {
  if (!Out.getNumFrameInfos()) // No active dwarf frame
    return X86::NoRegister;
  const MCDwarfFrameInfo &Frame = Out.getDwarfFrameInfos().back();
  if (Frame.End) // Active dwarf frame is closed
    return X86::NoRegister;
  const MCRegisterInfo *MRI = Ctx.getRegisterInfo();
  if (!MRI) // No register info
    return X86::NoRegister;

  if (InitialFrameReg) {
    // FrameReg is set explicitly; we're instrumenting a MachineFunction.
    return InitialFrameReg;
  }

  return MRI->getLLVMRegNum(Frame.CurrentCfaRegister, true /* IsEH */);
}

X86AsmInstrumentation *
CreateX86AsmInstrumentation(const MCTargetOptions &MCOptions,
                            const MCContext &Ctx, const MCSubtargetInfo &STI) {
  Triple T(STI.getTargetTriple());
  const bool hasCompilerRTSupport = T.isOSLinux();
  if (ClAsanInstrumentAssembly && hasCompilerRTSupport &&
      MCOptions.SanitizeAddress) {
    if ((STI.getFeatureBits() & X86::Mode32Bit) != 0)
      return new X86AddressSanitizer32(STI);
    if ((STI.getFeatureBits() & X86::Mode64Bit) != 0)
      return new X86AddressSanitizer64(STI);
  }
  return new X86AsmInstrumentation(STI);
}

} // End llvm namespace