// Source: src/llvm/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp
// (retrieved from the git.proxmox.com mirror of rustc.git)
//===-- X86AsmInstrumentation.cpp - Instrument X86 inline assembly C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
10 #include "MCTargetDesc/X86BaseInfo.h"
11 #include "X86AsmInstrumentation.h"
12 #include "X86Operand.h"
13 #include "X86RegisterInfo.h"
14 #include "llvm/ADT/StringExtras.h"
15 #include "llvm/ADT/Triple.h"
16 #include "llvm/CodeGen/MachineValueType.h"
17 #include "llvm/IR/Function.h"
18 #include "llvm/MC/MCAsmInfo.h"
19 #include "llvm/MC/MCContext.h"
20 #include "llvm/MC/MCInst.h"
21 #include "llvm/MC/MCInstBuilder.h"
22 #include "llvm/MC/MCInstrInfo.h"
23 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
24 #include "llvm/MC/MCStreamer.h"
25 #include "llvm/MC/MCSubtargetInfo.h"
26 #include "llvm/MC/MCTargetAsmParser.h"
27 #include "llvm/MC/MCTargetOptions.h"
28 #include "llvm/Support/CommandLine.h"
// Following comment describes how assembly instrumentation works.
// Currently we have only AddressSanitizer instrumentation, but we're
// planning to implement MemorySanitizer for inline assembly too. If
// you're not familiar with AddressSanitizer algorithm, please, read
// https://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm.
//
// When inline assembly is parsed by an instance of X86AsmParser, all
// instructions are emitted via EmitInstruction method. That's the
// place where X86AsmInstrumentation analyzes an instruction and
// decides, whether the instruction should be emitted as is or
// instrumentation is required. The latter case happens when an
// instruction reads from or writes to memory. Now instruction opcode
// is explicitly checked, and if an instruction has a memory operand
// (for instance, movq (%rsi, %rcx, 8), %rax) - it should be
// instrumented. There also exist instructions that modify
// memory but don't have an explicit memory operands, for instance,
// movs/stos.
//
// Let's consider at first 8-byte memory accesses when an instruction
// has an explicit memory operand. In this case we need two registers -
// AddressReg to compute address of a memory cells which are accessed
// and ShadowReg to compute corresponding shadow address. So, we need
// to spill both registers before instrumentation code and restore them
// after instrumentation. Thus, in general, instrumentation code will
// look like this:
// PUSHF                    # Store flags, otherwise they will be overwritten
// PUSH AddressReg          # spill AddressReg
// PUSH ShadowReg           # spill ShadowReg
// LEA MemOp, AddressReg    # compute address of the memory operand
// MOV AddressReg, ShadowReg
// SHR ShadowReg, 3
// # ShadowOffset(AddressReg >> 3) contains address of a shadow
// # corresponding to MemOp.
// CMP ShadowOffset(ShadowReg), 0  # test shadow value
// JZ .Done                 # when shadow equals to zero, everything is fine
// MOV AddressReg, RDI
// # Call __asan_report function with AddressReg as an argument
// CALL __asan_report
// .Done:
// POP ShadowReg            # Restore ShadowReg
// POP AddressReg           # Restore AddressReg
// POPF                     # Restore flags
//
// Memory accesses with different size (1-, 2-, 4- and 16-byte) are
// handled in a similar manner, but small memory accesses (less than 8
// byte) require an additional ScratchReg, which is used for shadow value.
//
// If, suppose, we're instrumenting an instruction like movs, only
// contents of RDI, RDI + AccessSize * RCX, RSI, RSI + AccessSize *
// RCX are checked. In this case there's no need to spill and restore
// AddressReg, ShadowReg or flags four times, they're saved on stack
// just once, before instrumentation of these four addresses, and restored
// at the end of the instrumentation.
//
// There exist several things which complicate this simple algorithm.
// * Instrumented memory operand can have RSP as a base or an index
//   register.  So we need to add a constant offset before computation
//   of memory address, since flags, AddressReg, ShadowReg, etc. were
//   already stored on stack and RSP was modified.
// * Debug info (usually, DWARF) should be adjusted, because sometimes
//   RSP is used as a frame register.  So, we need to select some
//   register as a frame register and temporary override current CFA
//   register.
100 static cl::opt
<bool> ClAsanInstrumentAssembly(
101 "asan-instrument-assembly",
102 cl::desc("instrument assembly with AddressSanitizer checks"), cl::Hidden
,
// X86 memory-operand displacements must fit into a signed 32-bit immediate,
// so computed 64-bit displacements are clamped to this range.
const int64_t MinAllowedDisplacement = std::numeric_limits<int32_t>::min();
const int64_t MaxAllowedDisplacement = std::numeric_limits<int32_t>::max();

// Clamps Displacement into [MinAllowedDisplacement, MaxAllowedDisplacement].
// The caller is responsible for emitting extra LEAs for any residue that
// did not fit (see ComputeMemOperandAddress below).
int64_t ApplyDisplacementBounds(int64_t Displacement) {
  return std::max(std::min(MaxAllowedDisplacement, Displacement),
                  MinAllowedDisplacement);
}
113 void CheckDisplacementBounds(int64_t Displacement
) {
114 assert(Displacement
>= MinAllowedDisplacement
&&
115 Displacement
<= MaxAllowedDisplacement
);
118 bool IsStackReg(unsigned Reg
) { return Reg
== X86::RSP
|| Reg
== X86::ESP
; }
// "Small" accesses (< 8 bytes) need an extra ScratchReg during
// instrumentation because the shadow byte encodes a partial-granule size.
bool IsSmallMemAccess(unsigned AccessSize) { return AccessSize < 8; }
// Builds the name of the __asan_report_* callback for a memory access of
// AccessSize bytes, e.g. "__asan_report_load4" or "__asan_report_store8".
// (std::to_string produces the same digits as the original utostr call.)
std::string FuncName(unsigned AccessSize, bool IsWrite) {
  return std::string("__asan_report_") + (IsWrite ? "store" : "load") +
         std::to_string(AccessSize);
}
127 class X86AddressSanitizer
: public X86AsmInstrumentation
{
129 struct RegisterContext
{
132 REG_OFFSET_ADDRESS
= 0,
138 RegisterContext(unsigned AddressReg
, unsigned ShadowReg
,
139 unsigned ScratchReg
) {
140 BusyRegs
.push_back(convReg(AddressReg
, MVT::i64
));
141 BusyRegs
.push_back(convReg(ShadowReg
, MVT::i64
));
142 BusyRegs
.push_back(convReg(ScratchReg
, MVT::i64
));
145 unsigned AddressReg(MVT::SimpleValueType VT
) const {
146 return convReg(BusyRegs
[REG_OFFSET_ADDRESS
], VT
);
149 unsigned ShadowReg(MVT::SimpleValueType VT
) const {
150 return convReg(BusyRegs
[REG_OFFSET_SHADOW
], VT
);
153 unsigned ScratchReg(MVT::SimpleValueType VT
) const {
154 return convReg(BusyRegs
[REG_OFFSET_SCRATCH
], VT
);
157 void AddBusyReg(unsigned Reg
) {
158 if (Reg
!= X86::NoRegister
)
159 BusyRegs
.push_back(convReg(Reg
, MVT::i64
));
162 void AddBusyRegs(const X86Operand
&Op
) {
163 AddBusyReg(Op
.getMemBaseReg());
164 AddBusyReg(Op
.getMemIndexReg());
167 unsigned ChooseFrameReg(MVT::SimpleValueType VT
) const {
168 static const MCPhysReg Candidates
[] = { X86::RBP
, X86::RAX
, X86::RBX
,
169 X86::RCX
, X86::RDX
, X86::RDI
,
171 for (unsigned Reg
: Candidates
) {
172 if (!std::count(BusyRegs
.begin(), BusyRegs
.end(), Reg
))
173 return convReg(Reg
, VT
);
175 return X86::NoRegister
;
179 unsigned convReg(unsigned Reg
, MVT::SimpleValueType VT
) const {
180 return Reg
== X86::NoRegister
? Reg
: getX86SubSuperRegister(Reg
, VT
);
183 std::vector
<unsigned> BusyRegs
;
186 X86AddressSanitizer(const MCSubtargetInfo
&STI
)
187 : X86AsmInstrumentation(STI
), RepPrefix(false), OrigSPOffset(0) {}
189 virtual ~X86AddressSanitizer() {}
191 // X86AsmInstrumentation implementation:
192 virtual void InstrumentAndEmitInstruction(const MCInst
&Inst
,
193 OperandVector
&Operands
,
195 const MCInstrInfo
&MII
,
196 MCStreamer
&Out
) override
{
197 InstrumentMOVS(Inst
, Operands
, Ctx
, MII
, Out
);
199 EmitInstruction(Out
, MCInstBuilder(X86::REP_PREFIX
));
201 InstrumentMOV(Inst
, Operands
, Ctx
, MII
, Out
);
203 RepPrefix
= (Inst
.getOpcode() == X86::REP_PREFIX
);
205 EmitInstruction(Out
, Inst
);
208 // Adjusts up stack and saves all registers used in instrumentation.
209 virtual void InstrumentMemOperandPrologue(const RegisterContext
&RegCtx
,
211 MCStreamer
&Out
) = 0;
213 // Restores all registers used in instrumentation and adjusts stack.
214 virtual void InstrumentMemOperandEpilogue(const RegisterContext
&RegCtx
,
216 MCStreamer
&Out
) = 0;
218 virtual void InstrumentMemOperandSmall(X86Operand
&Op
, unsigned AccessSize
,
220 const RegisterContext
&RegCtx
,
221 MCContext
&Ctx
, MCStreamer
&Out
) = 0;
222 virtual void InstrumentMemOperandLarge(X86Operand
&Op
, unsigned AccessSize
,
224 const RegisterContext
&RegCtx
,
225 MCContext
&Ctx
, MCStreamer
&Out
) = 0;
227 virtual void InstrumentMOVSImpl(unsigned AccessSize
, MCContext
&Ctx
,
228 MCStreamer
&Out
) = 0;
230 void InstrumentMemOperand(X86Operand
&Op
, unsigned AccessSize
, bool IsWrite
,
231 const RegisterContext
&RegCtx
, MCContext
&Ctx
,
233 void InstrumentMOVSBase(unsigned DstReg
, unsigned SrcReg
, unsigned CntReg
,
234 unsigned AccessSize
, MCContext
&Ctx
, MCStreamer
&Out
);
236 void InstrumentMOVS(const MCInst
&Inst
, OperandVector
&Operands
,
237 MCContext
&Ctx
, const MCInstrInfo
&MII
, MCStreamer
&Out
);
238 void InstrumentMOV(const MCInst
&Inst
, OperandVector
&Operands
,
239 MCContext
&Ctx
, const MCInstrInfo
&MII
, MCStreamer
&Out
);
242 void EmitLabel(MCStreamer
&Out
, MCSymbol
*Label
) { Out
.EmitLabel(Label
); }
244 void EmitLEA(X86Operand
&Op
, MVT::SimpleValueType VT
, unsigned Reg
,
246 assert(VT
== MVT::i32
|| VT
== MVT::i64
);
248 Inst
.setOpcode(VT
== MVT::i32
? X86::LEA32r
: X86::LEA64r
);
249 Inst
.addOperand(MCOperand::CreateReg(getX86SubSuperRegister(Reg
, VT
)));
250 Op
.addMemOperands(Inst
, 5);
251 EmitInstruction(Out
, Inst
);
254 void ComputeMemOperandAddress(X86Operand
&Op
, MVT::SimpleValueType VT
,
255 unsigned Reg
, MCContext
&Ctx
, MCStreamer
&Out
);
257 // Creates new memory operand with Displacement added to an original
258 // displacement. Residue will contain a residue which could happen when the
259 // total displacement exceeds 32-bit limitation.
260 std::unique_ptr
<X86Operand
> AddDisplacement(X86Operand
&Op
,
261 int64_t Displacement
,
262 MCContext
&Ctx
, int64_t *Residue
);
264 bool is64BitMode() const {
265 return (STI
.getFeatureBits() & X86::Mode64Bit
) != 0;
267 bool is32BitMode() const {
268 return (STI
.getFeatureBits() & X86::Mode32Bit
) != 0;
270 bool is16BitMode() const {
271 return (STI
.getFeatureBits() & X86::Mode16Bit
) != 0;
274 unsigned getPointerWidth() {
275 if (is16BitMode()) return 16;
276 if (is32BitMode()) return 32;
277 if (is64BitMode()) return 64;
278 llvm_unreachable("invalid mode");
281 // True when previous instruction was actually REP prefix.
284 // Offset from the original SP register.
285 int64_t OrigSPOffset
;
288 void X86AddressSanitizer::InstrumentMemOperand(
289 X86Operand
&Op
, unsigned AccessSize
, bool IsWrite
,
290 const RegisterContext
&RegCtx
, MCContext
&Ctx
, MCStreamer
&Out
) {
291 assert(Op
.isMem() && "Op should be a memory operand.");
292 assert((AccessSize
& (AccessSize
- 1)) == 0 && AccessSize
<= 16 &&
293 "AccessSize should be a power of two, less or equal than 16.");
294 // FIXME: take into account load/store alignment.
295 if (IsSmallMemAccess(AccessSize
))
296 InstrumentMemOperandSmall(Op
, AccessSize
, IsWrite
, RegCtx
, Ctx
, Out
);
298 InstrumentMemOperandLarge(Op
, AccessSize
, IsWrite
, RegCtx
, Ctx
, Out
);
301 void X86AddressSanitizer::InstrumentMOVSBase(unsigned DstReg
, unsigned SrcReg
,
304 MCContext
&Ctx
, MCStreamer
&Out
) {
305 // FIXME: check whole ranges [DstReg .. DstReg + AccessSize * (CntReg - 1)]
306 // and [SrcReg .. SrcReg + AccessSize * (CntReg - 1)].
307 RegisterContext
RegCtx(X86::RDX
/* AddressReg */, X86::RAX
/* ShadowReg */,
308 IsSmallMemAccess(AccessSize
)
310 : X86::NoRegister
/* ScratchReg */);
311 RegCtx
.AddBusyReg(DstReg
);
312 RegCtx
.AddBusyReg(SrcReg
);
313 RegCtx
.AddBusyReg(CntReg
);
315 InstrumentMemOperandPrologue(RegCtx
, Ctx
, Out
);
319 const MCExpr
*Disp
= MCConstantExpr::Create(0, Ctx
);
320 std::unique_ptr
<X86Operand
> Op(X86Operand::CreateMem(
321 getPointerWidth(), 0, Disp
, SrcReg
, 0, AccessSize
, SMLoc(), SMLoc()));
322 InstrumentMemOperand(*Op
, AccessSize
, false /* IsWrite */, RegCtx
, Ctx
,
326 // Test -1(%SrcReg, %CntReg, AccessSize)
328 const MCExpr
*Disp
= MCConstantExpr::Create(-1, Ctx
);
329 std::unique_ptr
<X86Operand
> Op(X86Operand::CreateMem(
330 getPointerWidth(), 0, Disp
, SrcReg
, CntReg
, AccessSize
, SMLoc(),
332 InstrumentMemOperand(*Op
, AccessSize
, false /* IsWrite */, RegCtx
, Ctx
,
338 const MCExpr
*Disp
= MCConstantExpr::Create(0, Ctx
);
339 std::unique_ptr
<X86Operand
> Op(X86Operand::CreateMem(
340 getPointerWidth(), 0, Disp
, DstReg
, 0, AccessSize
, SMLoc(), SMLoc()));
341 InstrumentMemOperand(*Op
, AccessSize
, true /* IsWrite */, RegCtx
, Ctx
, Out
);
344 // Test -1(%DstReg, %CntReg, AccessSize)
346 const MCExpr
*Disp
= MCConstantExpr::Create(-1, Ctx
);
347 std::unique_ptr
<X86Operand
> Op(X86Operand::CreateMem(
348 getPointerWidth(), 0, Disp
, DstReg
, CntReg
, AccessSize
, SMLoc(),
350 InstrumentMemOperand(*Op
, AccessSize
, true /* IsWrite */, RegCtx
, Ctx
, Out
);
353 InstrumentMemOperandEpilogue(RegCtx
, Ctx
, Out
);
356 void X86AddressSanitizer::InstrumentMOVS(const MCInst
&Inst
,
357 OperandVector
&Operands
,
358 MCContext
&Ctx
, const MCInstrInfo
&MII
,
360 // Access size in bytes.
361 unsigned AccessSize
= 0;
363 switch (Inst
.getOpcode()) {
380 InstrumentMOVSImpl(AccessSize
, Ctx
, Out
);
383 void X86AddressSanitizer::InstrumentMOV(const MCInst
&Inst
,
384 OperandVector
&Operands
, MCContext
&Ctx
,
385 const MCInstrInfo
&MII
,
387 // Access size in bytes.
388 unsigned AccessSize
= 0;
390 switch (Inst
.getOpcode()) {
421 const bool IsWrite
= MII
.get(Inst
.getOpcode()).mayStore();
423 for (unsigned Ix
= 0; Ix
< Operands
.size(); ++Ix
) {
424 assert(Operands
[Ix
]);
425 MCParsedAsmOperand
&Op
= *Operands
[Ix
];
427 X86Operand
&MemOp
= static_cast<X86Operand
&>(Op
);
428 RegisterContext
RegCtx(
429 X86::RDI
/* AddressReg */, X86::RAX
/* ShadowReg */,
430 IsSmallMemAccess(AccessSize
) ? X86::RCX
431 : X86::NoRegister
/* ScratchReg */);
432 RegCtx
.AddBusyRegs(MemOp
);
433 InstrumentMemOperandPrologue(RegCtx
, Ctx
, Out
);
434 InstrumentMemOperand(MemOp
, AccessSize
, IsWrite
, RegCtx
, Ctx
, Out
);
435 InstrumentMemOperandEpilogue(RegCtx
, Ctx
, Out
);
440 void X86AddressSanitizer::ComputeMemOperandAddress(X86Operand
&Op
,
441 MVT::SimpleValueType VT
,
442 unsigned Reg
, MCContext
&Ctx
,
444 int64_t Displacement
= 0;
445 if (IsStackReg(Op
.getMemBaseReg()))
446 Displacement
-= OrigSPOffset
;
447 if (IsStackReg(Op
.getMemIndexReg()))
448 Displacement
-= OrigSPOffset
* Op
.getMemScale();
450 assert(Displacement
>= 0);
453 if (Displacement
== 0) {
454 EmitLEA(Op
, VT
, Reg
, Out
);
459 std::unique_ptr
<X86Operand
> NewOp
=
460 AddDisplacement(Op
, Displacement
, Ctx
, &Residue
);
461 EmitLEA(*NewOp
, VT
, Reg
, Out
);
463 while (Residue
!= 0) {
464 const MCConstantExpr
*Disp
=
465 MCConstantExpr::Create(ApplyDisplacementBounds(Residue
), Ctx
);
466 std::unique_ptr
<X86Operand
> DispOp
=
467 X86Operand::CreateMem(getPointerWidth(), 0, Disp
, Reg
, 0, 1, SMLoc(),
469 EmitLEA(*DispOp
, VT
, Reg
, Out
);
470 Residue
-= Disp
->getValue();
474 std::unique_ptr
<X86Operand
>
475 X86AddressSanitizer::AddDisplacement(X86Operand
&Op
, int64_t Displacement
,
476 MCContext
&Ctx
, int64_t *Residue
) {
477 assert(Displacement
>= 0);
479 if (Displacement
== 0 ||
480 (Op
.getMemDisp() && Op
.getMemDisp()->getKind() != MCExpr::Constant
)) {
481 *Residue
= Displacement
;
482 return X86Operand::CreateMem(Op
.getMemModeSize(), Op
.getMemSegReg(),
483 Op
.getMemDisp(), Op
.getMemBaseReg(),
484 Op
.getMemIndexReg(), Op
.getMemScale(),
488 int64_t OrigDisplacement
=
489 static_cast<const MCConstantExpr
*>(Op
.getMemDisp())->getValue();
490 CheckDisplacementBounds(OrigDisplacement
);
491 Displacement
+= OrigDisplacement
;
493 int64_t NewDisplacement
= ApplyDisplacementBounds(Displacement
);
494 CheckDisplacementBounds(NewDisplacement
);
496 *Residue
= Displacement
- NewDisplacement
;
497 const MCExpr
*Disp
= MCConstantExpr::Create(NewDisplacement
, Ctx
);
498 return X86Operand::CreateMem(Op
.getMemModeSize(), Op
.getMemSegReg(), Disp
,
499 Op
.getMemBaseReg(), Op
.getMemIndexReg(),
500 Op
.getMemScale(), SMLoc(), SMLoc());
503 class X86AddressSanitizer32
: public X86AddressSanitizer
{
505 static const long kShadowOffset
= 0x20000000;
507 X86AddressSanitizer32(const MCSubtargetInfo
&STI
)
508 : X86AddressSanitizer(STI
) {}
510 virtual ~X86AddressSanitizer32() {}
512 unsigned GetFrameReg(const MCContext
&Ctx
, MCStreamer
&Out
) {
513 unsigned FrameReg
= GetFrameRegGeneric(Ctx
, Out
);
514 if (FrameReg
== X86::NoRegister
)
516 return getX86SubSuperRegister(FrameReg
, MVT::i32
);
519 void SpillReg(MCStreamer
&Out
, unsigned Reg
) {
520 EmitInstruction(Out
, MCInstBuilder(X86::PUSH32r
).addReg(Reg
));
524 void RestoreReg(MCStreamer
&Out
, unsigned Reg
) {
525 EmitInstruction(Out
, MCInstBuilder(X86::POP32r
).addReg(Reg
));
529 void StoreFlags(MCStreamer
&Out
) {
530 EmitInstruction(Out
, MCInstBuilder(X86::PUSHF32
));
534 void RestoreFlags(MCStreamer
&Out
) {
535 EmitInstruction(Out
, MCInstBuilder(X86::POPF32
));
539 virtual void InstrumentMemOperandPrologue(const RegisterContext
&RegCtx
,
541 MCStreamer
&Out
) override
{
542 unsigned LocalFrameReg
= RegCtx
.ChooseFrameReg(MVT::i32
);
543 assert(LocalFrameReg
!= X86::NoRegister
);
545 const MCRegisterInfo
*MRI
= Ctx
.getRegisterInfo();
546 unsigned FrameReg
= GetFrameReg(Ctx
, Out
);
547 if (MRI
&& FrameReg
!= X86::NoRegister
) {
548 SpillReg(Out
, LocalFrameReg
);
549 if (FrameReg
== X86::ESP
) {
550 Out
.EmitCFIAdjustCfaOffset(4 /* byte size of the LocalFrameReg */);
551 Out
.EmitCFIRelOffset(
552 MRI
->getDwarfRegNum(LocalFrameReg
, true /* IsEH */), 0);
556 MCInstBuilder(X86::MOV32rr
).addReg(LocalFrameReg
).addReg(FrameReg
));
557 Out
.EmitCFIRememberState();
558 Out
.EmitCFIDefCfaRegister(
559 MRI
->getDwarfRegNum(LocalFrameReg
, true /* IsEH */));
562 SpillReg(Out
, RegCtx
.AddressReg(MVT::i32
));
563 SpillReg(Out
, RegCtx
.ShadowReg(MVT::i32
));
564 if (RegCtx
.ScratchReg(MVT::i32
) != X86::NoRegister
)
565 SpillReg(Out
, RegCtx
.ScratchReg(MVT::i32
));
569 virtual void InstrumentMemOperandEpilogue(const RegisterContext
&RegCtx
,
571 MCStreamer
&Out
) override
{
572 unsigned LocalFrameReg
= RegCtx
.ChooseFrameReg(MVT::i32
);
573 assert(LocalFrameReg
!= X86::NoRegister
);
576 if (RegCtx
.ScratchReg(MVT::i32
) != X86::NoRegister
)
577 RestoreReg(Out
, RegCtx
.ScratchReg(MVT::i32
));
578 RestoreReg(Out
, RegCtx
.ShadowReg(MVT::i32
));
579 RestoreReg(Out
, RegCtx
.AddressReg(MVT::i32
));
581 unsigned FrameReg
= GetFrameReg(Ctx
, Out
);
582 if (Ctx
.getRegisterInfo() && FrameReg
!= X86::NoRegister
) {
583 RestoreReg(Out
, LocalFrameReg
);
584 Out
.EmitCFIRestoreState();
585 if (FrameReg
== X86::ESP
)
586 Out
.EmitCFIAdjustCfaOffset(-4 /* byte size of the LocalFrameReg */);
590 virtual void InstrumentMemOperandSmall(X86Operand
&Op
, unsigned AccessSize
,
592 const RegisterContext
&RegCtx
,
594 MCStreamer
&Out
) override
;
595 virtual void InstrumentMemOperandLarge(X86Operand
&Op
, unsigned AccessSize
,
597 const RegisterContext
&RegCtx
,
599 MCStreamer
&Out
) override
;
600 virtual void InstrumentMOVSImpl(unsigned AccessSize
, MCContext
&Ctx
,
601 MCStreamer
&Out
) override
;
604 void EmitCallAsanReport(unsigned AccessSize
, bool IsWrite
, MCContext
&Ctx
,
605 MCStreamer
&Out
, const RegisterContext
&RegCtx
) {
606 EmitInstruction(Out
, MCInstBuilder(X86::CLD
));
607 EmitInstruction(Out
, MCInstBuilder(X86::MMX_EMMS
));
609 EmitInstruction(Out
, MCInstBuilder(X86::AND64ri8
)
614 Out
, MCInstBuilder(X86::PUSH32r
).addReg(RegCtx
.AddressReg(MVT::i32
)));
616 const std::string
&Fn
= FuncName(AccessSize
, IsWrite
);
617 MCSymbol
*FnSym
= Ctx
.GetOrCreateSymbol(StringRef(Fn
));
618 const MCSymbolRefExpr
*FnExpr
=
619 MCSymbolRefExpr::Create(FnSym
, MCSymbolRefExpr::VK_PLT
, Ctx
);
620 EmitInstruction(Out
, MCInstBuilder(X86::CALLpcrel32
).addExpr(FnExpr
));
624 void X86AddressSanitizer32::InstrumentMemOperandSmall(
625 X86Operand
&Op
, unsigned AccessSize
, bool IsWrite
,
626 const RegisterContext
&RegCtx
, MCContext
&Ctx
, MCStreamer
&Out
) {
627 unsigned AddressRegI32
= RegCtx
.AddressReg(MVT::i32
);
628 unsigned ShadowRegI32
= RegCtx
.ShadowReg(MVT::i32
);
629 unsigned ShadowRegI8
= RegCtx
.ShadowReg(MVT::i8
);
631 assert(RegCtx
.ScratchReg(MVT::i32
) != X86::NoRegister
);
632 unsigned ScratchRegI32
= RegCtx
.ScratchReg(MVT::i32
);
634 ComputeMemOperandAddress(Op
, MVT::i32
, AddressRegI32
, Ctx
, Out
);
636 EmitInstruction(Out
, MCInstBuilder(X86::MOV32rr
).addReg(ShadowRegI32
).addReg(
638 EmitInstruction(Out
, MCInstBuilder(X86::SHR32ri
)
639 .addReg(ShadowRegI32
)
640 .addReg(ShadowRegI32
)
645 Inst
.setOpcode(X86::MOV8rm
);
646 Inst
.addOperand(MCOperand::CreateReg(ShadowRegI8
));
647 const MCExpr
*Disp
= MCConstantExpr::Create(kShadowOffset
, Ctx
);
648 std::unique_ptr
<X86Operand
> Op(
649 X86Operand::CreateMem(getPointerWidth(), 0, Disp
, ShadowRegI32
, 0, 1,
651 Op
->addMemOperands(Inst
, 5);
652 EmitInstruction(Out
, Inst
);
656 Out
, MCInstBuilder(X86::TEST8rr
).addReg(ShadowRegI8
).addReg(ShadowRegI8
));
657 MCSymbol
*DoneSym
= Ctx
.CreateTempSymbol();
658 const MCExpr
*DoneExpr
= MCSymbolRefExpr::Create(DoneSym
, Ctx
);
659 EmitInstruction(Out
, MCInstBuilder(X86::JE_1
).addExpr(DoneExpr
));
661 EmitInstruction(Out
, MCInstBuilder(X86::MOV32rr
).addReg(ScratchRegI32
).addReg(
663 EmitInstruction(Out
, MCInstBuilder(X86::AND32ri
)
664 .addReg(ScratchRegI32
)
665 .addReg(ScratchRegI32
)
668 switch (AccessSize
) {
669 default: llvm_unreachable("Incorrect access size");
673 const MCExpr
*Disp
= MCConstantExpr::Create(1, Ctx
);
674 std::unique_ptr
<X86Operand
> Op(
675 X86Operand::CreateMem(getPointerWidth(), 0, Disp
, ScratchRegI32
, 0, 1,
677 EmitLEA(*Op
, MVT::i32
, ScratchRegI32
, Out
);
681 EmitInstruction(Out
, MCInstBuilder(X86::ADD32ri8
)
682 .addReg(ScratchRegI32
)
683 .addReg(ScratchRegI32
)
690 MCInstBuilder(X86::MOVSX32rr8
).addReg(ShadowRegI32
).addReg(ShadowRegI8
));
691 EmitInstruction(Out
, MCInstBuilder(X86::CMP32rr
).addReg(ScratchRegI32
).addReg(
693 EmitInstruction(Out
, MCInstBuilder(X86::JL_1
).addExpr(DoneExpr
));
695 EmitCallAsanReport(AccessSize
, IsWrite
, Ctx
, Out
, RegCtx
);
696 EmitLabel(Out
, DoneSym
);
699 void X86AddressSanitizer32::InstrumentMemOperandLarge(
700 X86Operand
&Op
, unsigned AccessSize
, bool IsWrite
,
701 const RegisterContext
&RegCtx
, MCContext
&Ctx
, MCStreamer
&Out
) {
702 unsigned AddressRegI32
= RegCtx
.AddressReg(MVT::i32
);
703 unsigned ShadowRegI32
= RegCtx
.ShadowReg(MVT::i32
);
705 ComputeMemOperandAddress(Op
, MVT::i32
, AddressRegI32
, Ctx
, Out
);
707 EmitInstruction(Out
, MCInstBuilder(X86::MOV32rr
).addReg(ShadowRegI32
).addReg(
709 EmitInstruction(Out
, MCInstBuilder(X86::SHR32ri
)
710 .addReg(ShadowRegI32
)
711 .addReg(ShadowRegI32
)
715 switch (AccessSize
) {
716 default: llvm_unreachable("Incorrect access size");
718 Inst
.setOpcode(X86::CMP8mi
);
721 Inst
.setOpcode(X86::CMP16mi
);
724 const MCExpr
*Disp
= MCConstantExpr::Create(kShadowOffset
, Ctx
);
725 std::unique_ptr
<X86Operand
> Op(
726 X86Operand::CreateMem(getPointerWidth(), 0, Disp
, ShadowRegI32
, 0, 1,
728 Op
->addMemOperands(Inst
, 5);
729 Inst
.addOperand(MCOperand::CreateImm(0));
730 EmitInstruction(Out
, Inst
);
732 MCSymbol
*DoneSym
= Ctx
.CreateTempSymbol();
733 const MCExpr
*DoneExpr
= MCSymbolRefExpr::Create(DoneSym
, Ctx
);
734 EmitInstruction(Out
, MCInstBuilder(X86::JE_1
).addExpr(DoneExpr
));
736 EmitCallAsanReport(AccessSize
, IsWrite
, Ctx
, Out
, RegCtx
);
737 EmitLabel(Out
, DoneSym
);
740 void X86AddressSanitizer32::InstrumentMOVSImpl(unsigned AccessSize
,
745 // No need to test when ECX is equals to zero.
746 MCSymbol
*DoneSym
= Ctx
.CreateTempSymbol();
747 const MCExpr
*DoneExpr
= MCSymbolRefExpr::Create(DoneSym
, Ctx
);
749 Out
, MCInstBuilder(X86::TEST32rr
).addReg(X86::ECX
).addReg(X86::ECX
));
750 EmitInstruction(Out
, MCInstBuilder(X86::JE_1
).addExpr(DoneExpr
));
752 // Instrument first and last elements in src and dst range.
753 InstrumentMOVSBase(X86::EDI
/* DstReg */, X86::ESI
/* SrcReg */,
754 X86::ECX
/* CntReg */, AccessSize
, Ctx
, Out
);
756 EmitLabel(Out
, DoneSym
);
760 class X86AddressSanitizer64
: public X86AddressSanitizer
{
762 static const long kShadowOffset
= 0x7fff8000;
764 X86AddressSanitizer64(const MCSubtargetInfo
&STI
)
765 : X86AddressSanitizer(STI
) {}
767 virtual ~X86AddressSanitizer64() {}
769 unsigned GetFrameReg(const MCContext
&Ctx
, MCStreamer
&Out
) {
770 unsigned FrameReg
= GetFrameRegGeneric(Ctx
, Out
);
771 if (FrameReg
== X86::NoRegister
)
773 return getX86SubSuperRegister(FrameReg
, MVT::i64
);
776 void SpillReg(MCStreamer
&Out
, unsigned Reg
) {
777 EmitInstruction(Out
, MCInstBuilder(X86::PUSH64r
).addReg(Reg
));
781 void RestoreReg(MCStreamer
&Out
, unsigned Reg
) {
782 EmitInstruction(Out
, MCInstBuilder(X86::POP64r
).addReg(Reg
));
786 void StoreFlags(MCStreamer
&Out
) {
787 EmitInstruction(Out
, MCInstBuilder(X86::PUSHF64
));
791 void RestoreFlags(MCStreamer
&Out
) {
792 EmitInstruction(Out
, MCInstBuilder(X86::POPF64
));
796 virtual void InstrumentMemOperandPrologue(const RegisterContext
&RegCtx
,
798 MCStreamer
&Out
) override
{
799 unsigned LocalFrameReg
= RegCtx
.ChooseFrameReg(MVT::i64
);
800 assert(LocalFrameReg
!= X86::NoRegister
);
802 const MCRegisterInfo
*MRI
= Ctx
.getRegisterInfo();
803 unsigned FrameReg
= GetFrameReg(Ctx
, Out
);
804 if (MRI
&& FrameReg
!= X86::NoRegister
) {
805 SpillReg(Out
, X86::RBP
);
806 if (FrameReg
== X86::RSP
) {
807 Out
.EmitCFIAdjustCfaOffset(8 /* byte size of the LocalFrameReg */);
808 Out
.EmitCFIRelOffset(
809 MRI
->getDwarfRegNum(LocalFrameReg
, true /* IsEH */), 0);
813 MCInstBuilder(X86::MOV64rr
).addReg(LocalFrameReg
).addReg(FrameReg
));
814 Out
.EmitCFIRememberState();
815 Out
.EmitCFIDefCfaRegister(
816 MRI
->getDwarfRegNum(LocalFrameReg
, true /* IsEH */));
819 EmitAdjustRSP(Ctx
, Out
, -128);
820 SpillReg(Out
, RegCtx
.ShadowReg(MVT::i64
));
821 SpillReg(Out
, RegCtx
.AddressReg(MVT::i64
));
822 if (RegCtx
.ScratchReg(MVT::i64
) != X86::NoRegister
)
823 SpillReg(Out
, RegCtx
.ScratchReg(MVT::i64
));
827 virtual void InstrumentMemOperandEpilogue(const RegisterContext
&RegCtx
,
829 MCStreamer
&Out
) override
{
830 unsigned LocalFrameReg
= RegCtx
.ChooseFrameReg(MVT::i64
);
831 assert(LocalFrameReg
!= X86::NoRegister
);
834 if (RegCtx
.ScratchReg(MVT::i64
) != X86::NoRegister
)
835 RestoreReg(Out
, RegCtx
.ScratchReg(MVT::i64
));
836 RestoreReg(Out
, RegCtx
.AddressReg(MVT::i64
));
837 RestoreReg(Out
, RegCtx
.ShadowReg(MVT::i64
));
838 EmitAdjustRSP(Ctx
, Out
, 128);
840 unsigned FrameReg
= GetFrameReg(Ctx
, Out
);
841 if (Ctx
.getRegisterInfo() && FrameReg
!= X86::NoRegister
) {
842 RestoreReg(Out
, LocalFrameReg
);
843 Out
.EmitCFIRestoreState();
844 if (FrameReg
== X86::RSP
)
845 Out
.EmitCFIAdjustCfaOffset(-8 /* byte size of the LocalFrameReg */);
849 virtual void InstrumentMemOperandSmall(X86Operand
&Op
, unsigned AccessSize
,
851 const RegisterContext
&RegCtx
,
853 MCStreamer
&Out
) override
;
854 virtual void InstrumentMemOperandLarge(X86Operand
&Op
, unsigned AccessSize
,
856 const RegisterContext
&RegCtx
,
858 MCStreamer
&Out
) override
;
859 virtual void InstrumentMOVSImpl(unsigned AccessSize
, MCContext
&Ctx
,
860 MCStreamer
&Out
) override
;
863 void EmitAdjustRSP(MCContext
&Ctx
, MCStreamer
&Out
, long Offset
) {
864 const MCExpr
*Disp
= MCConstantExpr::Create(Offset
, Ctx
);
865 std::unique_ptr
<X86Operand
> Op(
866 X86Operand::CreateMem(getPointerWidth(), 0, Disp
, X86::RSP
, 0, 1,
868 EmitLEA(*Op
, MVT::i64
, X86::RSP
, Out
);
869 OrigSPOffset
+= Offset
;
872 void EmitCallAsanReport(unsigned AccessSize
, bool IsWrite
, MCContext
&Ctx
,
873 MCStreamer
&Out
, const RegisterContext
&RegCtx
) {
874 EmitInstruction(Out
, MCInstBuilder(X86::CLD
));
875 EmitInstruction(Out
, MCInstBuilder(X86::MMX_EMMS
));
877 EmitInstruction(Out
, MCInstBuilder(X86::AND64ri8
)
882 if (RegCtx
.AddressReg(MVT::i64
) != X86::RDI
) {
883 EmitInstruction(Out
, MCInstBuilder(X86::MOV64rr
).addReg(X86::RDI
).addReg(
884 RegCtx
.AddressReg(MVT::i64
)));
886 const std::string
&Fn
= FuncName(AccessSize
, IsWrite
);
887 MCSymbol
*FnSym
= Ctx
.GetOrCreateSymbol(StringRef(Fn
));
888 const MCSymbolRefExpr
*FnExpr
=
889 MCSymbolRefExpr::Create(FnSym
, MCSymbolRefExpr::VK_PLT
, Ctx
);
890 EmitInstruction(Out
, MCInstBuilder(X86::CALL64pcrel32
).addExpr(FnExpr
));
894 void X86AddressSanitizer64::InstrumentMemOperandSmall(
895 X86Operand
&Op
, unsigned AccessSize
, bool IsWrite
,
896 const RegisterContext
&RegCtx
, MCContext
&Ctx
, MCStreamer
&Out
) {
897 unsigned AddressRegI64
= RegCtx
.AddressReg(MVT::i64
);
898 unsigned AddressRegI32
= RegCtx
.AddressReg(MVT::i32
);
899 unsigned ShadowRegI64
= RegCtx
.ShadowReg(MVT::i64
);
900 unsigned ShadowRegI32
= RegCtx
.ShadowReg(MVT::i32
);
901 unsigned ShadowRegI8
= RegCtx
.ShadowReg(MVT::i8
);
903 assert(RegCtx
.ScratchReg(MVT::i32
) != X86::NoRegister
);
904 unsigned ScratchRegI32
= RegCtx
.ScratchReg(MVT::i32
);
906 ComputeMemOperandAddress(Op
, MVT::i64
, AddressRegI64
, Ctx
, Out
);
908 EmitInstruction(Out
, MCInstBuilder(X86::MOV64rr
).addReg(ShadowRegI64
).addReg(
910 EmitInstruction(Out
, MCInstBuilder(X86::SHR64ri
)
911 .addReg(ShadowRegI64
)
912 .addReg(ShadowRegI64
)
916 Inst
.setOpcode(X86::MOV8rm
);
917 Inst
.addOperand(MCOperand::CreateReg(ShadowRegI8
));
918 const MCExpr
*Disp
= MCConstantExpr::Create(kShadowOffset
, Ctx
);
919 std::unique_ptr
<X86Operand
> Op(
920 X86Operand::CreateMem(getPointerWidth(), 0, Disp
, ShadowRegI64
, 0, 1,
922 Op
->addMemOperands(Inst
, 5);
923 EmitInstruction(Out
, Inst
);
927 Out
, MCInstBuilder(X86::TEST8rr
).addReg(ShadowRegI8
).addReg(ShadowRegI8
));
928 MCSymbol
*DoneSym
= Ctx
.CreateTempSymbol();
929 const MCExpr
*DoneExpr
= MCSymbolRefExpr::Create(DoneSym
, Ctx
);
930 EmitInstruction(Out
, MCInstBuilder(X86::JE_1
).addExpr(DoneExpr
));
932 EmitInstruction(Out
, MCInstBuilder(X86::MOV32rr
).addReg(ScratchRegI32
).addReg(
934 EmitInstruction(Out
, MCInstBuilder(X86::AND32ri
)
935 .addReg(ScratchRegI32
)
936 .addReg(ScratchRegI32
)
939 switch (AccessSize
) {
940 default: llvm_unreachable("Incorrect access size");
944 const MCExpr
*Disp
= MCConstantExpr::Create(1, Ctx
);
945 std::unique_ptr
<X86Operand
> Op(
946 X86Operand::CreateMem(getPointerWidth(), 0, Disp
, ScratchRegI32
, 0, 1,
948 EmitLEA(*Op
, MVT::i32
, ScratchRegI32
, Out
);
952 EmitInstruction(Out
, MCInstBuilder(X86::ADD32ri8
)
953 .addReg(ScratchRegI32
)
954 .addReg(ScratchRegI32
)
961 MCInstBuilder(X86::MOVSX32rr8
).addReg(ShadowRegI32
).addReg(ShadowRegI8
));
962 EmitInstruction(Out
, MCInstBuilder(X86::CMP32rr
).addReg(ScratchRegI32
).addReg(
964 EmitInstruction(Out
, MCInstBuilder(X86::JL_1
).addExpr(DoneExpr
));
966 EmitCallAsanReport(AccessSize
, IsWrite
, Ctx
, Out
, RegCtx
);
967 EmitLabel(Out
, DoneSym
);
970 void X86AddressSanitizer64::InstrumentMemOperandLarge(
971 X86Operand
&Op
, unsigned AccessSize
, bool IsWrite
,
972 const RegisterContext
&RegCtx
, MCContext
&Ctx
, MCStreamer
&Out
) {
973 unsigned AddressRegI64
= RegCtx
.AddressReg(MVT::i64
);
974 unsigned ShadowRegI64
= RegCtx
.ShadowReg(MVT::i64
);
976 ComputeMemOperandAddress(Op
, MVT::i64
, AddressRegI64
, Ctx
, Out
);
978 EmitInstruction(Out
, MCInstBuilder(X86::MOV64rr
).addReg(ShadowRegI64
).addReg(
980 EmitInstruction(Out
, MCInstBuilder(X86::SHR64ri
)
981 .addReg(ShadowRegI64
)
982 .addReg(ShadowRegI64
)
986 switch (AccessSize
) {
987 default: llvm_unreachable("Incorrect access size");
989 Inst
.setOpcode(X86::CMP8mi
);
992 Inst
.setOpcode(X86::CMP16mi
);
995 const MCExpr
*Disp
= MCConstantExpr::Create(kShadowOffset
, Ctx
);
996 std::unique_ptr
<X86Operand
> Op(
997 X86Operand::CreateMem(getPointerWidth(), 0, Disp
, ShadowRegI64
, 0, 1,
999 Op
->addMemOperands(Inst
, 5);
1000 Inst
.addOperand(MCOperand::CreateImm(0));
1001 EmitInstruction(Out
, Inst
);
1004 MCSymbol
*DoneSym
= Ctx
.CreateTempSymbol();
1005 const MCExpr
*DoneExpr
= MCSymbolRefExpr::Create(DoneSym
, Ctx
);
1006 EmitInstruction(Out
, MCInstBuilder(X86::JE_1
).addExpr(DoneExpr
));
1008 EmitCallAsanReport(AccessSize
, IsWrite
, Ctx
, Out
, RegCtx
);
1009 EmitLabel(Out
, DoneSym
);
1012 void X86AddressSanitizer64::InstrumentMOVSImpl(unsigned AccessSize
,
1017 // No need to test when RCX is equals to zero.
1018 MCSymbol
*DoneSym
= Ctx
.CreateTempSymbol();
1019 const MCExpr
*DoneExpr
= MCSymbolRefExpr::Create(DoneSym
, Ctx
);
1021 Out
, MCInstBuilder(X86::TEST64rr
).addReg(X86::RCX
).addReg(X86::RCX
));
1022 EmitInstruction(Out
, MCInstBuilder(X86::JE_1
).addExpr(DoneExpr
));
1024 // Instrument first and last elements in src and dst range.
1025 InstrumentMOVSBase(X86::RDI
/* DstReg */, X86::RSI
/* SrcReg */,
1026 X86::RCX
/* CntReg */, AccessSize
, Ctx
, Out
);
1028 EmitLabel(Out
, DoneSym
);
1032 } // End anonymous namespace
1034 X86AsmInstrumentation::X86AsmInstrumentation(const MCSubtargetInfo
&STI
)
1035 : STI(STI
), InitialFrameReg(0) {}
1037 X86AsmInstrumentation::~X86AsmInstrumentation() {}
1039 void X86AsmInstrumentation::InstrumentAndEmitInstruction(
1040 const MCInst
&Inst
, OperandVector
&Operands
, MCContext
&Ctx
,
1041 const MCInstrInfo
&MII
, MCStreamer
&Out
) {
1042 EmitInstruction(Out
, Inst
);
1045 void X86AsmInstrumentation::EmitInstruction(MCStreamer
&Out
,
1046 const MCInst
&Inst
) {
1047 Out
.EmitInstruction(Inst
, STI
);
1050 unsigned X86AsmInstrumentation::GetFrameRegGeneric(const MCContext
&Ctx
,
1052 if (!Out
.getNumFrameInfos()) // No active dwarf frame
1053 return X86::NoRegister
;
1054 const MCDwarfFrameInfo
&Frame
= Out
.getDwarfFrameInfos().back();
1055 if (Frame
.End
) // Active dwarf frame is closed
1056 return X86::NoRegister
;
1057 const MCRegisterInfo
*MRI
= Ctx
.getRegisterInfo();
1058 if (!MRI
) // No register info
1059 return X86::NoRegister
;
1061 if (InitialFrameReg
) {
1062 // FrameReg is set explicitly, we're instrumenting a MachineFunction.
1063 return InitialFrameReg
;
1066 return MRI
->getLLVMRegNum(Frame
.CurrentCfaRegister
, true /* IsEH */);
1069 X86AsmInstrumentation
*
1070 CreateX86AsmInstrumentation(const MCTargetOptions
&MCOptions
,
1071 const MCContext
&Ctx
, const MCSubtargetInfo
&STI
) {
1072 Triple
T(STI
.getTargetTriple());
1073 const bool hasCompilerRTSupport
= T
.isOSLinux();
1074 if (ClAsanInstrumentAssembly
&& hasCompilerRTSupport
&&
1075 MCOptions
.SanitizeAddress
) {
1076 if ((STI
.getFeatureBits() & X86::Mode32Bit
) != 0)
1077 return new X86AddressSanitizer32(STI
);
1078 if ((STI
.getFeatureBits() & X86::Mode64Bit
) != 0)
1079 return new X86AddressSanitizer64(STI
);
1081 return new X86AsmInstrumentation(STI
);
1084 } // End llvm namespace