1 #include <stdio.h>
2
3 #include <vector>
4 #include <set>
5
6 #include "LLVMWrapper.h"
7
8 #include "llvm/Analysis/AliasAnalysis.h"
9 #include "llvm/Analysis/TargetLibraryInfo.h"
10 #include "llvm/Analysis/TargetTransformInfo.h"
11 #include "llvm/CodeGen/TargetSubtargetInfo.h"
12 #include "llvm/InitializePasses.h"
13 #include "llvm/IR/AutoUpgrade.h"
14 #include "llvm/IR/AssemblyAnnotationWriter.h"
15 #include "llvm/IR/IntrinsicInst.h"
16 #include "llvm/IR/Verifier.h"
17 #include "llvm/Object/ObjectFile.h"
18 #include "llvm/Object/IRObjectFile.h"
19 #include "llvm/Passes/PassBuilder.h"
20 #include "llvm/Passes/StandardInstrumentations.h"
21 #include "llvm/Support/CBindingWrapping.h"
22 #include "llvm/Support/FileSystem.h"
23 #include "llvm/Support/Host.h"
24 #if LLVM_VERSION_LT(14, 0)
25 #include "llvm/Support/TargetRegistry.h"
26 #else
27 #include "llvm/MC/TargetRegistry.h"
28 #endif
29 #include "llvm/Target/TargetMachine.h"
30 #include "llvm/Transforms/IPO/PassManagerBuilder.h"
31 #include "llvm/Transforms/IPO/AlwaysInliner.h"
32 #include "llvm/Transforms/IPO/FunctionImport.h"
33 #include "llvm/Transforms/Utils/AddDiscriminators.h"
34 #include "llvm/Transforms/Utils/FunctionImportUtils.h"
35 #include "llvm/LTO/LTO.h"
36 #include "llvm-c/Transforms/PassManagerBuilder.h"
37
38 #include "llvm/Transforms/Instrumentation.h"
39 #include "llvm/Transforms/Instrumentation/AddressSanitizer.h"
40 #include "llvm/Support/TimeProfiler.h"
41 #include "llvm/Transforms/Instrumentation/GCOVProfiler.h"
42 #include "llvm/Transforms/Instrumentation/InstrProfiling.h"
43 #include "llvm/Transforms/Instrumentation/ThreadSanitizer.h"
44 #include "llvm/Transforms/Instrumentation/MemorySanitizer.h"
45 #include "llvm/Transforms/Instrumentation/HWAddressSanitizer.h"
46 #include "llvm/Transforms/Utils/CanonicalizeAliases.h"
47 #include "llvm/Transforms/Utils/NameAnonGlobals.h"
48 #include "llvm/Transforms/Utils.h"
49
50 using namespace llvm;
51
52 typedef struct LLVMOpaquePass *LLVMPassRef;
53 typedef struct LLVMOpaqueTargetMachine *LLVMTargetMachineRef;
54
55 DEFINE_STDCXX_CONVERSION_FUNCTIONS(Pass, LLVMPassRef)
56 DEFINE_STDCXX_CONVERSION_FUNCTIONS(TargetMachine, LLVMTargetMachineRef)
57
58 extern "C" void LLVMInitializePasses() {
59 PassRegistry &Registry = *PassRegistry::getPassRegistry();
60 initializeCore(Registry);
61 initializeCodeGen(Registry);
62 initializeScalarOpts(Registry);
63 initializeVectorization(Registry);
64 initializeIPO(Registry);
65 initializeAnalysis(Registry);
66 initializeTransformUtils(Registry);
67 initializeInstCombine(Registry);
68 initializeInstrumentation(Registry);
69 initializeTarget(Registry);
70 }
71
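// Thin wrappers around LLVM's time-trace profiler: initialize the profiler,
// flush a finished worker thread's events, and finally write the collected
// trace to FileName before cleaning up.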
72 extern "C" void LLVMTimeTraceProfilerInitialize() {
73 timeTraceProfilerInitialize(
74 /* TimeTraceGranularity */ 0,
75 /* ProcName */ "rustc");
76 }
77
78 extern "C" void LLVMTimeTraceProfilerFinishThread() {
79 timeTraceProfilerFinishThread();
80 }
81
82 extern "C" void LLVMTimeTraceProfilerFinish(const char* FileName) {
83 StringRef FN(FileName);
84 std::error_code EC;
85 raw_fd_ostream OS(FN, EC, sys::fs::CD_CreateAlways);
86
87 timeTraceProfilerWrite(OS);
88 timeTraceProfilerCleanup();
89 }
90
91 enum class LLVMRustPassKind {
92 Other,
93 Function,
94 Module,
95 };
96
97 static LLVMRustPassKind toRust(PassKind Kind) {
98 switch (Kind) {
99 case PT_Function:
100 return LLVMRustPassKind::Function;
101 case PT_Module:
102 return LLVMRustPassKind::Module;
103 default:
104 return LLVMRustPassKind::Other;
105 }
106 }
107
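// Look up a legacy pass by its registered command-line name (e.g. "gvn") in
// the global PassRegistry and instantiate it; returns null when no pass with
// that name exists.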
108 extern "C" LLVMPassRef LLVMRustFindAndCreatePass(const char *PassName) {
109 StringRef SR(PassName);
110 PassRegistry *PR = PassRegistry::getPassRegistry();
111
112 const PassInfo *PI = PR->getPassInfo(SR);
113 if (PI) {
114 return wrap(PI->createPass());
115 }
116 return nullptr;
117 }
118
119 extern "C" LLVMPassRef LLVMRustCreateAddressSanitizerFunctionPass(bool Recover) {
120 const bool CompileKernel = false;
121 const bool UseAfterScope = true;
122
123 return wrap(createAddressSanitizerFunctionPass(CompileKernel, Recover, UseAfterScope));
124 }
125
126 extern "C" LLVMPassRef LLVMRustCreateModuleAddressSanitizerPass(bool Recover) {
127 const bool CompileKernel = false;
128
129 return wrap(createModuleAddressSanitizerLegacyPassPass(CompileKernel, Recover));
130 }
131
132 extern "C" LLVMPassRef LLVMRustCreateMemorySanitizerPass(int TrackOrigins, bool Recover) {
133 const bool CompileKernel = false;
134
135 return wrap(createMemorySanitizerLegacyPassPass(
136 MemorySanitizerOptions{TrackOrigins, Recover, CompileKernel}));
137 }
138
139 extern "C" LLVMPassRef LLVMRustCreateThreadSanitizerPass() {
140 return wrap(createThreadSanitizerLegacyPassPass());
141 }
142
143 extern "C" LLVMPassRef LLVMRustCreateHWAddressSanitizerPass(bool Recover) {
144 const bool CompileKernel = false;
145
146 return wrap(createHWAddressSanitizerLegacyPassPass(CompileKernel, Recover));
147 }
148
149 extern "C" LLVMRustPassKind LLVMRustPassKind(LLVMPassRef RustPass) {
150 assert(RustPass);
151 Pass *Pass = unwrap(RustPass);
152 return toRust(Pass->getPassKind());
153 }
154
155 extern "C" void LLVMRustAddPass(LLVMPassManagerRef PMR, LLVMPassRef RustPass) {
156 assert(RustPass);
157 Pass *Pass = unwrap(RustPass);
158 PassManagerBase *PMB = unwrap(PMR);
159 PMB->add(Pass);
160 }
161
162 extern "C"
163 void LLVMRustPassManagerBuilderPopulateThinLTOPassManager(
164 LLVMPassManagerBuilderRef PMBR,
165 LLVMPassManagerRef PMR
166 ) {
167 unwrap(PMBR)->populateThinLTOPassManager(*unwrap(PMR));
168 }
169
170 extern "C"
171 void LLVMRustAddLastExtensionPasses(
172 LLVMPassManagerBuilderRef PMBR, LLVMPassRef *Passes, size_t NumPasses) {
173 auto AddExtensionPasses = [Passes, NumPasses](
174 const PassManagerBuilder &Builder, PassManagerBase &PM) {
175 for (size_t I = 0; I < NumPasses; I++) {
176 PM.add(unwrap(Passes[I]));
177 }
178 };
179 // Add the passes to both of the pre-finalization extension points,
180 // so they are run for optimized and non-optimized builds.
181 unwrap(PMBR)->addExtension(PassManagerBuilder::EP_OptimizerLast,
182 AddExtensionPasses);
183 unwrap(PMBR)->addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
184 AddExtensionPasses);
185 }
186
187 #ifdef LLVM_COMPONENT_X86
188 #define SUBTARGET_X86 SUBTARGET(X86)
189 #else
190 #define SUBTARGET_X86
191 #endif
192
193 #ifdef LLVM_COMPONENT_ARM
194 #define SUBTARGET_ARM SUBTARGET(ARM)
195 #else
196 #define SUBTARGET_ARM
197 #endif
198
199 #ifdef LLVM_COMPONENT_AARCH64
200 #define SUBTARGET_AARCH64 SUBTARGET(AArch64)
201 #else
202 #define SUBTARGET_AARCH64
203 #endif
204
205 #ifdef LLVM_COMPONENT_AVR
206 #define SUBTARGET_AVR SUBTARGET(AVR)
207 #else
208 #define SUBTARGET_AVR
209 #endif
210
211 #ifdef LLVM_COMPONENT_M68k
212 #define SUBTARGET_M68K SUBTARGET(M68k)
213 #else
214 #define SUBTARGET_M68K
215 #endif
216
217 #ifdef LLVM_COMPONENT_MIPS
218 #define SUBTARGET_MIPS SUBTARGET(Mips)
219 #else
220 #define SUBTARGET_MIPS
221 #endif
222
223 #ifdef LLVM_COMPONENT_POWERPC
224 #define SUBTARGET_PPC SUBTARGET(PPC)
225 #else
226 #define SUBTARGET_PPC
227 #endif
228
229 #ifdef LLVM_COMPONENT_SYSTEMZ
230 #define SUBTARGET_SYSTEMZ SUBTARGET(SystemZ)
231 #else
232 #define SUBTARGET_SYSTEMZ
233 #endif
234
235 #ifdef LLVM_COMPONENT_MSP430
236 #define SUBTARGET_MSP430 SUBTARGET(MSP430)
237 #else
238 #define SUBTARGET_MSP430
239 #endif
240
241 #ifdef LLVM_COMPONENT_RISCV
242 #define SUBTARGET_RISCV SUBTARGET(RISCV)
243 #else
244 #define SUBTARGET_RISCV
245 #endif
246
247 #ifdef LLVM_COMPONENT_SPARC
248 #define SUBTARGET_SPARC SUBTARGET(Sparc)
249 #else
250 #define SUBTARGET_SPARC
251 #endif
252
253 #ifdef LLVM_COMPONENT_HEXAGON
254 #define SUBTARGET_HEXAGON SUBTARGET(Hexagon)
255 #else
256 #define SUBTARGET_HEXAGON
257 #endif
258
259 #define GEN_SUBTARGETS \
260 SUBTARGET_X86 \
261 SUBTARGET_ARM \
262 SUBTARGET_AARCH64 \
263 SUBTARGET_AVR \
264 SUBTARGET_M68K \
265 SUBTARGET_MIPS \
266 SUBTARGET_PPC \
267 SUBTARGET_SYSTEMZ \
268 SUBTARGET_MSP430 \
269 SUBTARGET_SPARC \
270 SUBTARGET_HEXAGON \
271 SUBTARGET_RISCV \
272
273 #define SUBTARGET(x) \
274 namespace llvm { \
275 extern const SubtargetFeatureKV x##FeatureKV[]; \
276 extern const SubtargetFeatureKV x##SubTypeKV[]; \
277 }
278
279 GEN_SUBTARGETS
280 #undef SUBTARGET
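// For illustration: with LLVM_COMPONENT_X86 defined, GEN_SUBTARGETS expands
// SUBTARGET(X86) into declarations of llvm::X86FeatureKV[] and
// llvm::X86SubTypeKV[], that backend's feature and CPU tables.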
281
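// Returns true when the target machine's subtarget reports the named feature
// as enabled (checked as "+<feature>" against the MCSubtargetInfo).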
282 extern "C" bool LLVMRustHasFeature(LLVMTargetMachineRef TM,
283 const char *Feature) {
284 TargetMachine *Target = unwrap(TM);
285 const MCSubtargetInfo *MCInfo = Target->getMCSubtargetInfo();
286 return MCInfo->checkFeatures(std::string("+") + Feature);
287 }
288
289 enum class LLVMRustCodeModel {
290 Tiny,
291 Small,
292 Kernel,
293 Medium,
294 Large,
295 None,
296 };
297
298 static Optional<CodeModel::Model> fromRust(LLVMRustCodeModel Model) {
299 switch (Model) {
300 case LLVMRustCodeModel::Tiny:
301 return CodeModel::Tiny;
302 case LLVMRustCodeModel::Small:
303 return CodeModel::Small;
304 case LLVMRustCodeModel::Kernel:
305 return CodeModel::Kernel;
306 case LLVMRustCodeModel::Medium:
307 return CodeModel::Medium;
308 case LLVMRustCodeModel::Large:
309 return CodeModel::Large;
310 case LLVMRustCodeModel::None:
311 return None;
312 default:
313 report_fatal_error("Bad CodeModel.");
314 }
315 }
316
317 enum class LLVMRustCodeGenOptLevel {
318 None,
319 Less,
320 Default,
321 Aggressive,
322 };
323
324 static CodeGenOpt::Level fromRust(LLVMRustCodeGenOptLevel Level) {
325 switch (Level) {
326 case LLVMRustCodeGenOptLevel::None:
327 return CodeGenOpt::None;
328 case LLVMRustCodeGenOptLevel::Less:
329 return CodeGenOpt::Less;
330 case LLVMRustCodeGenOptLevel::Default:
331 return CodeGenOpt::Default;
332 case LLVMRustCodeGenOptLevel::Aggressive:
333 return CodeGenOpt::Aggressive;
334 default:
335 report_fatal_error("Bad CodeGenOptLevel.");
336 }
337 }
338
339 enum class LLVMRustPassBuilderOptLevel {
340 O0,
341 O1,
342 O2,
343 O3,
344 Os,
345 Oz,
346 };
347
348 #if LLVM_VERSION_LT(14,0)
349 using OptimizationLevel = PassBuilder::OptimizationLevel;
350 #endif
351
352 static OptimizationLevel fromRust(LLVMRustPassBuilderOptLevel Level) {
353 switch (Level) {
354 case LLVMRustPassBuilderOptLevel::O0:
355 return OptimizationLevel::O0;
356 case LLVMRustPassBuilderOptLevel::O1:
357 return OptimizationLevel::O1;
358 case LLVMRustPassBuilderOptLevel::O2:
359 return OptimizationLevel::O2;
360 case LLVMRustPassBuilderOptLevel::O3:
361 return OptimizationLevel::O3;
362 case LLVMRustPassBuilderOptLevel::Os:
363 return OptimizationLevel::Os;
364 case LLVMRustPassBuilderOptLevel::Oz:
365 return OptimizationLevel::Oz;
366 default:
367 report_fatal_error("Bad PassBuilderOptLevel.");
368 }
369 }
370
371 enum class LLVMRustRelocModel {
372 Static,
373 PIC,
374 DynamicNoPic,
375 ROPI,
376 RWPI,
377 ROPIRWPI,
378 };
379
380 static Reloc::Model fromRust(LLVMRustRelocModel RustReloc) {
381 switch (RustReloc) {
382 case LLVMRustRelocModel::Static:
383 return Reloc::Static;
384 case LLVMRustRelocModel::PIC:
385 return Reloc::PIC_;
386 case LLVMRustRelocModel::DynamicNoPic:
387 return Reloc::DynamicNoPIC;
388 case LLVMRustRelocModel::ROPI:
389 return Reloc::ROPI;
390 case LLVMRustRelocModel::RWPI:
391 return Reloc::RWPI;
392 case LLVMRustRelocModel::ROPIRWPI:
393 return Reloc::ROPI_RWPI;
394 }
395 report_fatal_error("Bad RelocModel.");
396 }
397
398 #ifdef LLVM_RUSTLLVM
399 /// getLongestEntryLength - Return the length of the longest entry in the table.
400 template<typename KV>
401 static size_t getLongestEntryLength(ArrayRef<KV> Table) {
402 size_t MaxLen = 0;
403 for (auto &I : Table)
404 MaxLen = std::max(MaxLen, std::strlen(I.Key));
405 return MaxLen;
406 }
407
408 extern "C" void LLVMRustPrintTargetCPUs(LLVMTargetMachineRef TM) {
409 const TargetMachine *Target = unwrap(TM);
410 const MCSubtargetInfo *MCInfo = Target->getMCSubtargetInfo();
411 const Triple::ArchType HostArch = Triple(sys::getProcessTriple()).getArch();
412 const Triple::ArchType TargetArch = Target->getTargetTriple().getArch();
413 const ArrayRef<SubtargetSubTypeKV> CPUTable = MCInfo->getCPUTable();
414 unsigned MaxCPULen = getLongestEntryLength(CPUTable);
415
416 printf("Available CPUs for this target:\n");
417 if (HostArch == TargetArch) {
418 const StringRef HostCPU = sys::getHostCPUName();
419 printf(" %-*s - Select the CPU of the current host (currently %.*s).\n",
420 MaxCPULen, "native", (int)HostCPU.size(), HostCPU.data());
421 }
422 for (auto &CPU : CPUTable)
423 printf(" %-*s\n", MaxCPULen, CPU.Key);
424 printf("\n");
425 }
426
427 extern "C" size_t LLVMRustGetTargetFeaturesCount(LLVMTargetMachineRef TM) {
428 const TargetMachine *Target = unwrap(TM);
429 const MCSubtargetInfo *MCInfo = Target->getMCSubtargetInfo();
430 const ArrayRef<SubtargetFeatureKV> FeatTable = MCInfo->getFeatureTable();
431 return FeatTable.size();
432 }
433
434 extern "C" void LLVMRustGetTargetFeature(LLVMTargetMachineRef TM, size_t Index,
435 const char** Feature, const char** Desc) {
436 const TargetMachine *Target = unwrap(TM);
437 const MCSubtargetInfo *MCInfo = Target->getMCSubtargetInfo();
438 const ArrayRef<SubtargetFeatureKV> FeatTable = MCInfo->getFeatureTable();
439 const SubtargetFeatureKV Feat = FeatTable[Index];
440 *Feature = Feat.Key;
441 *Desc = Feat.Desc;
442 }
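// These two are meant to be used together: query LLVMRustGetTargetFeaturesCount()
// and then call LLVMRustGetTargetFeature() for each index to enumerate the
// (name, description) pairs, e.g. to back `rustc --print target-features`.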
443
444 #else
445
446 extern "C" void LLVMRustPrintTargetCPUs(LLVMTargetMachineRef) {
447 printf("Target CPU help is not supported by this LLVM version.\n\n");
448 }
449
450 extern "C" size_t LLVMRustGetTargetFeaturesCount(LLVMTargetMachineRef) {
451 return 0;
452 }
453
454 extern "C" void LLVMRustGetTargetFeature(LLVMTargetMachineRef, const char**, const char**) {}
455 #endif
456
457 extern "C" const char* LLVMRustGetHostCPUName(size_t *len) {
458 StringRef Name = sys::getHostCPUName();
459 *len = Name.size();
460 return Name.data();
461 }
462
463 extern "C" LLVMTargetMachineRef LLVMRustCreateTargetMachine(
464 const char *TripleStr, const char *CPU, const char *Feature,
465 const char *ABIStr, LLVMRustCodeModel RustCM, LLVMRustRelocModel RustReloc,
466 LLVMRustCodeGenOptLevel RustOptLevel, bool UseSoftFloat,
467 bool FunctionSections,
468 bool DataSections,
469 bool UniqueSectionNames,
470 bool TrapUnreachable,
471 bool Singlethread,
472 bool AsmComments,
473 bool EmitStackSizeSection,
474 bool RelaxELFRelocations,
475 bool UseInitArray,
476 const char *SplitDwarfFile) {
477
478 auto OptLevel = fromRust(RustOptLevel);
479 auto RM = fromRust(RustReloc);
480 auto CM = fromRust(RustCM);
481
482 std::string Error;
483 Triple Trip(Triple::normalize(TripleStr));
484 const llvm::Target *TheTarget =
485 TargetRegistry::lookupTarget(Trip.getTriple(), Error);
486 if (TheTarget == nullptr) {
487 LLVMRustSetLastError(Error.c_str());
488 return nullptr;
489 }
490
491 TargetOptions Options;
492
493 Options.FloatABIType = FloatABI::Default;
494 if (UseSoftFloat) {
495 Options.FloatABIType = FloatABI::Soft;
496 }
497 Options.DataSections = DataSections;
498 Options.FunctionSections = FunctionSections;
499 Options.UniqueSectionNames = UniqueSectionNames;
500 Options.MCOptions.AsmVerbose = AsmComments;
501 Options.MCOptions.PreserveAsmComments = AsmComments;
502 Options.MCOptions.ABIName = ABIStr;
503 if (SplitDwarfFile) {
504 Options.MCOptions.SplitDwarfFile = SplitDwarfFile;
505 }
506 Options.RelaxELFRelocations = RelaxELFRelocations;
507 Options.UseInitArray = UseInitArray;
508
509 if (TrapUnreachable) {
510 // Tell LLVM to codegen `unreachable` into an explicit trap instruction.
511 // This limits the extent of possible undefined behavior in some cases, as
512 // it prevents control flow from "falling through" into whatever code
513 // happens to be laid out next in memory.
514 Options.TrapUnreachable = true;
515 }
516
517 if (Singlethread) {
518 Options.ThreadModel = ThreadModel::Single;
519 }
520
521 Options.EmitStackSizeSection = EmitStackSizeSection;
522
523 TargetMachine *TM = TheTarget->createTargetMachine(
524 Trip.getTriple(), CPU, Feature, Options, RM, CM, OptLevel);
525 return wrap(TM);
526 }
527
528 extern "C" void LLVMRustDisposeTargetMachine(LLVMTargetMachineRef TM) {
529 delete unwrap(TM);
530 }
531
532 extern "C" void LLVMRustConfigurePassManagerBuilder(
533 LLVMPassManagerBuilderRef PMBR, LLVMRustCodeGenOptLevel OptLevel,
534 bool MergeFunctions, bool SLPVectorize, bool LoopVectorize, bool PrepareForThinLTO,
535 const char* PGOGenPath, const char* PGOUsePath, const char* PGOSampleUsePath) {
536 unwrap(PMBR)->MergeFunctions = MergeFunctions;
537 unwrap(PMBR)->SLPVectorize = SLPVectorize;
538 unwrap(PMBR)->OptLevel = fromRust(OptLevel);
539 unwrap(PMBR)->LoopVectorize = LoopVectorize;
540 unwrap(PMBR)->PrepareForThinLTO = PrepareForThinLTO;
541
542 if (PGOGenPath) {
543 assert(!PGOUsePath && !PGOSampleUsePath);
544 unwrap(PMBR)->EnablePGOInstrGen = true;
545 unwrap(PMBR)->PGOInstrGen = PGOGenPath;
546 } else if (PGOUsePath) {
547 assert(!PGOSampleUsePath);
548 unwrap(PMBR)->PGOInstrUse = PGOUsePath;
549 } else if (PGOSampleUsePath) {
550 unwrap(PMBR)->PGOSampleUse = PGOSampleUsePath;
551 }
552 }
553
554 // Unfortunately, the LLVM C API doesn't provide a way to set the `LibraryInfo`
555 // field of a PassManagerBuilder, so we expose our own method of doing so.
556 extern "C" void LLVMRustAddBuilderLibraryInfo(LLVMPassManagerBuilderRef PMBR,
557 LLVMModuleRef M,
558 bool DisableSimplifyLibCalls) {
559 Triple TargetTriple(unwrap(M)->getTargetTriple());
560 TargetLibraryInfoImpl *TLI = new TargetLibraryInfoImpl(TargetTriple);
561 if (DisableSimplifyLibCalls)
562 TLI->disableAllFunctions();
563 unwrap(PMBR)->LibraryInfo = TLI;
564 }
565
566 // Unfortunately, the LLVM C API doesn't provide a way to create the
567 // TargetLibraryInfo pass, so we use this method to do so.
568 extern "C" void LLVMRustAddLibraryInfo(LLVMPassManagerRef PMR, LLVMModuleRef M,
569 bool DisableSimplifyLibCalls) {
570 Triple TargetTriple(unwrap(M)->getTargetTriple());
571 TargetLibraryInfoImpl TLII(TargetTriple);
572 if (DisableSimplifyLibCalls)
573 TLII.disableAllFunctions();
574 unwrap(PMR)->add(new TargetLibraryInfoWrapperPass(TLII));
575 }
576
577 // Unfortunately, the LLVM C API doesn't provide an easy way of iterating over
578 // all the functions in a module, so we do that manually here. You'll find
579 // similar code in clang's BackendUtil.cpp file.
580 extern "C" void LLVMRustRunFunctionPassManager(LLVMPassManagerRef PMR,
581 LLVMModuleRef M) {
582 llvm::legacy::FunctionPassManager *P =
583 unwrap<llvm::legacy::FunctionPassManager>(PMR);
584 P->doInitialization();
585
586 // Upgrade all calls to old intrinsics first.
587 for (Module::iterator I = unwrap(M)->begin(), E = unwrap(M)->end(); I != E;)
588 UpgradeCallsToIntrinsic(&*I++); // must be post-increment, as we remove
589
590 for (Module::iterator I = unwrap(M)->begin(), E = unwrap(M)->end(); I != E;
591 ++I)
592 if (!I->isDeclaration())
593 P->run(*I);
594
595 P->doFinalization();
596 }
597
598 extern "C" void LLVMRustSetLLVMOptions(int Argc, char **Argv) {
599 // Initializing the command-line options more than once is not allowed. So,
600 // check if they've already been initialized. (This could happen if we're
601 // being called from rustpkg, for example). If the arguments change, then
602 // that's just kinda unfortunate.
603 static bool Initialized = false;
604 if (Initialized)
605 return;
606 Initialized = true;
607 cl::ParseCommandLineOptions(Argc, Argv);
608 }
609
610 enum class LLVMRustFileType {
611 AssemblyFile,
612 ObjectFile,
613 };
614
615 static CodeGenFileType fromRust(LLVMRustFileType Type) {
616 switch (Type) {
617 case LLVMRustFileType::AssemblyFile:
618 return CGFT_AssemblyFile;
619 case LLVMRustFileType::ObjectFile:
620 return CGFT_ObjectFile;
621 default:
622 report_fatal_error("Bad FileType.");
623 }
624 }
625
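// Run the given codegen pass manager over the module, emitting assembly or an
// object file to Path (and split-DWARF output to DwoPath when it is non-null).
// Note that this disposes of the pass manager before returning.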
626 extern "C" LLVMRustResult
627 LLVMRustWriteOutputFile(LLVMTargetMachineRef Target, LLVMPassManagerRef PMR,
628 LLVMModuleRef M, const char *Path, const char *DwoPath,
629 LLVMRustFileType RustFileType) {
630 llvm::legacy::PassManager *PM = unwrap<llvm::legacy::PassManager>(PMR);
631 auto FileType = fromRust(RustFileType);
632
633 std::string ErrorInfo;
634 std::error_code EC;
635 raw_fd_ostream OS(Path, EC, sys::fs::OF_None);
636 if (EC)
637 ErrorInfo = EC.message();
638 if (ErrorInfo != "") {
639 LLVMRustSetLastError(ErrorInfo.c_str());
640 return LLVMRustResult::Failure;
641 }
642
643 buffer_ostream BOS(OS);
644 if (DwoPath) {
645 EC.clear();
646 raw_fd_ostream DOS(DwoPath, EC, sys::fs::OF_None);
647 if (EC)
648 ErrorInfo = EC.message();
649 if (ErrorInfo != "") {
650 LLVMRustSetLastError(ErrorInfo.c_str());
651 return LLVMRustResult::Failure;
652 }
653 buffer_ostream DBOS(DOS);
654 unwrap(Target)->addPassesToEmitFile(*PM, BOS, &DBOS, FileType, false);
655 PM->run(*unwrap(M));
656 } else {
657 unwrap(Target)->addPassesToEmitFile(*PM, BOS, nullptr, FileType, false);
658 PM->run(*unwrap(M));
659 }
660
661 // Apparently `addPassesToEmitFile` adds a pointer to our on-the-stack output
662 // stream (OS), so the only real safe place to delete this is here? Don't we
663 // wish this was written in Rust?
664 LLVMDisposePassManager(PMR);
665 return LLVMRustResult::Success;
666 }
667
668 extern "C" typedef void (*LLVMRustSelfProfileBeforePassCallback)(void*, // LlvmSelfProfiler
669 const char*, // pass name
670 const char*); // IR name
671 extern "C" typedef void (*LLVMRustSelfProfileAfterPassCallback)(void*); // LlvmSelfProfiler
672
673 std::string LLVMRustwrappedIrGetName(const llvm::Any &WrappedIr) {
674 if (any_isa<const Module *>(WrappedIr))
675 return any_cast<const Module *>(WrappedIr)->getName().str();
676 if (any_isa<const Function *>(WrappedIr))
677 return any_cast<const Function *>(WrappedIr)->getName().str();
678 if (any_isa<const Loop *>(WrappedIr))
679 return any_cast<const Loop *>(WrappedIr)->getName().str();
680 if (any_isa<const LazyCallGraph::SCC *>(WrappedIr))
681 return any_cast<const LazyCallGraph::SCC *>(WrappedIr)->getName();
682 return "<UNKNOWN>";
683 }
684
685
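// Wire rustc's self-profiler into the new pass manager's instrumentation
// callbacks, so every pass and analysis execution is reported to the
// before/after callbacks together with the name of the IR unit it runs on.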
686 void LLVMSelfProfileInitializeCallbacks(
687 PassInstrumentationCallbacks& PIC, void* LlvmSelfProfiler,
688 LLVMRustSelfProfileBeforePassCallback BeforePassCallback,
689 LLVMRustSelfProfileAfterPassCallback AfterPassCallback) {
690 PIC.registerBeforeNonSkippedPassCallback([LlvmSelfProfiler, BeforePassCallback](
691 StringRef Pass, llvm::Any Ir) {
692 std::string PassName = Pass.str();
693 std::string IrName = LLVMRustwrappedIrGetName(Ir);
694 BeforePassCallback(LlvmSelfProfiler, PassName.c_str(), IrName.c_str());
695 });
696
697 PIC.registerAfterPassCallback(
698 [LlvmSelfProfiler, AfterPassCallback](StringRef Pass, llvm::Any IR,
699 const PreservedAnalyses &Preserved) {
700 AfterPassCallback(LlvmSelfProfiler);
701 });
702
703 PIC.registerAfterPassInvalidatedCallback(
704 [LlvmSelfProfiler, AfterPassCallback](StringRef Pass, const PreservedAnalyses &Preserved) {
705 AfterPassCallback(LlvmSelfProfiler);
706 });
707
708 PIC.registerBeforeAnalysisCallback([LlvmSelfProfiler, BeforePassCallback](
709 StringRef Pass, llvm::Any Ir) {
710 std::string PassName = Pass.str();
711 std::string IrName = LLVMRustwrappedIrGetName(Ir);
712 BeforePassCallback(LlvmSelfProfiler, PassName.c_str(), IrName.c_str());
713 });
714
715 PIC.registerAfterAnalysisCallback(
716 [LlvmSelfProfiler, AfterPassCallback](StringRef Pass, llvm::Any Ir) {
717 AfterPassCallback(LlvmSelfProfiler);
718 });
719 }
720
721 enum class LLVMRustOptStage {
722 PreLinkNoLTO,
723 PreLinkThinLTO,
724 PreLinkFatLTO,
725 ThinLTO,
726 FatLTO,
727 };
728
729 struct LLVMRustSanitizerOptions {
730 bool SanitizeAddress;
731 bool SanitizeAddressRecover;
732 bool SanitizeMemory;
733 bool SanitizeMemoryRecover;
734 int SanitizeMemoryTrackOrigins;
735 bool SanitizeThread;
736 bool SanitizeHWAddress;
737 bool SanitizeHWAddressRecover;
738 };
739
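// Optimize a module with the new pass manager: set up the analysis managers,
// register instrumentation/profiling/sanitizer callbacks as requested, build a
// default pipeline (or its O0/LTO variants) for the given optimization stage,
// append any ExtraPasses, and finally run the pipeline over the module.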
740 extern "C" LLVMRustResult
741 LLVMRustOptimizeWithNewPassManager(
742 LLVMModuleRef ModuleRef,
743 LLVMTargetMachineRef TMRef,
744 LLVMRustPassBuilderOptLevel OptLevelRust,
745 LLVMRustOptStage OptStage,
746 bool NoPrepopulatePasses, bool VerifyIR, bool UseThinLTOBuffers,
747 bool MergeFunctions, bool UnrollLoops, bool SLPVectorize, bool LoopVectorize,
748 bool DisableSimplifyLibCalls, bool EmitLifetimeMarkers,
749 LLVMRustSanitizerOptions *SanitizerOptions,
750 const char *PGOGenPath, const char *PGOUsePath,
751 bool InstrumentCoverage, bool InstrumentGCOV,
752 const char *PGOSampleUsePath, bool DebugInfoForProfiling,
753 void* LlvmSelfProfiler,
754 LLVMRustSelfProfileBeforePassCallback BeforePassCallback,
755 LLVMRustSelfProfileAfterPassCallback AfterPassCallback,
756 const char *ExtraPasses, size_t ExtraPassesLen) {
757 Module *TheModule = unwrap(ModuleRef);
758 TargetMachine *TM = unwrap(TMRef);
759 OptimizationLevel OptLevel = fromRust(OptLevelRust);
760
761
762 PipelineTuningOptions PTO;
763 PTO.LoopUnrolling = UnrollLoops;
764 PTO.LoopInterleaving = UnrollLoops;
765 PTO.LoopVectorization = LoopVectorize;
766 PTO.SLPVectorization = SLPVectorize;
767 PTO.MergeFunctions = MergeFunctions;
768
769 // FIXME: We may want to expose this as an option.
770 bool DebugPassManager = false;
771
772 PassInstrumentationCallbacks PIC;
773 StandardInstrumentations SI(DebugPassManager);
774 SI.registerCallbacks(PIC);
775
776 if (LlvmSelfProfiler){
777 LLVMSelfProfileInitializeCallbacks(PIC,LlvmSelfProfiler,BeforePassCallback,AfterPassCallback);
778 }
779
780 Optional<PGOOptions> PGOOpt;
781 if (PGOGenPath) {
782 assert(!PGOUsePath && !PGOSampleUsePath);
783 PGOOpt = PGOOptions(PGOGenPath, "", "", PGOOptions::IRInstr,
784 PGOOptions::NoCSAction, DebugInfoForProfiling);
785 } else if (PGOUsePath) {
786 assert(!PGOSampleUsePath);
787 PGOOpt = PGOOptions(PGOUsePath, "", "", PGOOptions::IRUse,
788 PGOOptions::NoCSAction, DebugInfoForProfiling);
789 } else if (PGOSampleUsePath) {
790 PGOOpt = PGOOptions(PGOSampleUsePath, "", "", PGOOptions::SampleUse,
791 PGOOptions::NoCSAction, DebugInfoForProfiling);
792 } else if (DebugInfoForProfiling) {
793 PGOOpt = PGOOptions("", "", "", PGOOptions::NoAction,
794 PGOOptions::NoCSAction, DebugInfoForProfiling);
795 }
796
797 #if LLVM_VERSION_GE(13, 0)
798 PassBuilder PB(TM, PTO, PGOOpt, &PIC);
799 LoopAnalysisManager LAM;
800 FunctionAnalysisManager FAM;
801 CGSCCAnalysisManager CGAM;
802 ModuleAnalysisManager MAM;
803 #else
804 PassBuilder PB(DebugPassManager, TM, PTO, PGOOpt, &PIC);
805 LoopAnalysisManager LAM(DebugPassManager);
806 FunctionAnalysisManager FAM(DebugPassManager);
807 CGSCCAnalysisManager CGAM(DebugPassManager);
808 ModuleAnalysisManager MAM(DebugPassManager);
809 #endif
810
811 FAM.registerPass([&] { return PB.buildDefaultAAPipeline(); });
812
813 Triple TargetTriple(TheModule->getTargetTriple());
814 std::unique_ptr<TargetLibraryInfoImpl> TLII(new TargetLibraryInfoImpl(TargetTriple));
815 if (DisableSimplifyLibCalls)
816 TLII->disableAllFunctions();
817 FAM.registerPass([&] { return TargetLibraryAnalysis(*TLII); });
818
819 PB.registerModuleAnalyses(MAM);
820 PB.registerCGSCCAnalyses(CGAM);
821 PB.registerFunctionAnalyses(FAM);
822 PB.registerLoopAnalyses(LAM);
823 PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
824
825 // We manually collect pipeline callbacks so we can apply them at O0, where the
826 // PassBuilder does not create a pipeline.
827 std::vector<std::function<void(ModulePassManager &, OptimizationLevel)>>
828 PipelineStartEPCallbacks;
829 std::vector<std::function<void(ModulePassManager &, OptimizationLevel)>>
830 OptimizerLastEPCallbacks;
831
832 if (VerifyIR) {
833 PipelineStartEPCallbacks.push_back(
834 [VerifyIR](ModulePassManager &MPM, OptimizationLevel Level) {
835 MPM.addPass(VerifierPass());
836 }
837 );
838 }
839
840 if (InstrumentGCOV) {
841 PipelineStartEPCallbacks.push_back(
842 [](ModulePassManager &MPM, OptimizationLevel Level) {
843 MPM.addPass(GCOVProfilerPass(GCOVOptions::getDefault()));
844 }
845 );
846 }
847
848 if (InstrumentCoverage) {
849 PipelineStartEPCallbacks.push_back(
850 [](ModulePassManager &MPM, OptimizationLevel Level) {
851 InstrProfOptions Options;
852 MPM.addPass(InstrProfiling(Options, false));
853 }
854 );
855 }
856
857 if (SanitizerOptions) {
858 if (SanitizerOptions->SanitizeMemory) {
859 MemorySanitizerOptions Options(
860 SanitizerOptions->SanitizeMemoryTrackOrigins,
861 SanitizerOptions->SanitizeMemoryRecover,
862 /*CompileKernel=*/false);
863 OptimizerLastEPCallbacks.push_back(
864 [Options](ModulePassManager &MPM, OptimizationLevel Level) {
865 #if LLVM_VERSION_GE(14, 0)
866 MPM.addPass(ModuleMemorySanitizerPass(Options));
867 #else
868 MPM.addPass(MemorySanitizerPass(Options));
869 #endif
870 MPM.addPass(createModuleToFunctionPassAdaptor(MemorySanitizerPass(Options)));
871 }
872 );
873 }
874
875 if (SanitizerOptions->SanitizeThread) {
876 OptimizerLastEPCallbacks.push_back(
877 [](ModulePassManager &MPM, OptimizationLevel Level) {
878 #if LLVM_VERSION_GE(14, 0)
879 MPM.addPass(ModuleThreadSanitizerPass());
880 #else
881 MPM.addPass(ThreadSanitizerPass());
882 #endif
883 MPM.addPass(createModuleToFunctionPassAdaptor(ThreadSanitizerPass()));
884 }
885 );
886 }
887
888 if (SanitizerOptions->SanitizeAddress) {
889 OptimizerLastEPCallbacks.push_back(
890 [SanitizerOptions](ModulePassManager &MPM, OptimizationLevel Level) {
891 MPM.addPass(RequireAnalysisPass<ASanGlobalsMetadataAnalysis, Module>());
892 #if LLVM_VERSION_GE(14, 0)
893 AddressSanitizerOptions opts = AddressSanitizerOptions{
894 /*CompileKernel=*/false,
895 SanitizerOptions->SanitizeAddressRecover,
896 /*UseAfterScope=*/true,
897 AsanDetectStackUseAfterReturnMode::Runtime,
898 };
899 MPM.addPass(ModuleAddressSanitizerPass(opts));
900 #else
901 MPM.addPass(ModuleAddressSanitizerPass(
902 /*CompileKernel=*/false, SanitizerOptions->SanitizeAddressRecover));
903 MPM.addPass(createModuleToFunctionPassAdaptor(AddressSanitizerPass(
904 /*CompileKernel=*/false, SanitizerOptions->SanitizeAddressRecover,
905 /*UseAfterScope=*/true)));
906 #endif
907 }
908 );
909 }
910 if (SanitizerOptions->SanitizeHWAddress) {
911 OptimizerLastEPCallbacks.push_back(
912 [SanitizerOptions](ModulePassManager &MPM, OptimizationLevel Level) {
913 #if LLVM_VERSION_GE(14, 0)
914 HWAddressSanitizerOptions opts(
915 /*CompileKernel=*/false, SanitizerOptions->SanitizeHWAddressRecover,
916 /*DisableOptimization=*/false);
917 MPM.addPass(HWAddressSanitizerPass(opts));
918 #else
919 MPM.addPass(HWAddressSanitizerPass(
920 /*CompileKernel=*/false, SanitizerOptions->SanitizeHWAddressRecover));
921 #endif
922 }
923 );
924 }
925 }
926
927 #if LLVM_VERSION_GE(13, 0)
928 ModulePassManager MPM;
929 #else
930 ModulePassManager MPM(DebugPassManager);
931 #endif
932 bool NeedThinLTOBufferPasses = UseThinLTOBuffers;
933 if (!NoPrepopulatePasses) {
934 // The pre-link pipelines don't support O0 and require using buildO0DefaultPipeline() instead.
935 // At the same time, the LTO pipelines do support O0 and using them is required.
936 bool IsLTO = OptStage == LLVMRustOptStage::ThinLTO || OptStage == LLVMRustOptStage::FatLTO;
937 if (OptLevel == OptimizationLevel::O0 && !IsLTO) {
938 for (const auto &C : PipelineStartEPCallbacks)
939 PB.registerPipelineStartEPCallback(C);
940 for (const auto &C : OptimizerLastEPCallbacks)
941 PB.registerOptimizerLastEPCallback(C);
942
943 // Pass false as we manually schedule ThinLTOBufferPasses below.
944 MPM = PB.buildO0DefaultPipeline(OptLevel, /* PreLinkLTO */ false);
945 } else {
946 for (const auto &C : PipelineStartEPCallbacks)
947 PB.registerPipelineStartEPCallback(C);
948 if (OptStage != LLVMRustOptStage::PreLinkThinLTO) {
949 for (const auto &C : OptimizerLastEPCallbacks)
950 PB.registerOptimizerLastEPCallback(C);
951 }
952
953 switch (OptStage) {
954 case LLVMRustOptStage::PreLinkNoLTO:
955 MPM = PB.buildPerModuleDefaultPipeline(OptLevel, DebugPassManager);
956 break;
957 case LLVMRustOptStage::PreLinkThinLTO:
958 MPM = PB.buildThinLTOPreLinkDefaultPipeline(OptLevel);
959 // The ThinLTOPreLink pipeline already includes ThinLTOBuffer passes. However, callback
960 // passes may still run afterwards. This means we need to run the buffer passes again.
961 // FIXME: In LLVM 13, the ThinLTOPreLink pipeline also runs OptimizerLastEPCallbacks
962 // before the RequiredLTOPreLinkPasses, in which case we can remove these hacks.
963 if (OptimizerLastEPCallbacks.empty())
964 NeedThinLTOBufferPasses = false;
965 for (const auto &C : OptimizerLastEPCallbacks)
966 C(MPM, OptLevel);
967 break;
968 case LLVMRustOptStage::PreLinkFatLTO:
969 MPM = PB.buildLTOPreLinkDefaultPipeline(OptLevel);
970 NeedThinLTOBufferPasses = false;
971 break;
972 case LLVMRustOptStage::ThinLTO:
973 // FIXME: Does it make sense to pass the ModuleSummaryIndex?
974 // It only seems to be needed for C++ specific optimizations.
975 MPM = PB.buildThinLTODefaultPipeline(OptLevel, nullptr);
976 break;
977 case LLVMRustOptStage::FatLTO:
978 MPM = PB.buildLTODefaultPipeline(OptLevel, nullptr);
979 break;
980 }
981 }
982 }
983
984 if (ExtraPassesLen) {
985 if (auto Err = PB.parsePassPipeline(MPM, StringRef(ExtraPasses, ExtraPassesLen))) {
986 std::string ErrMsg = toString(std::move(Err));
987 LLVMRustSetLastError(ErrMsg.c_str());
988 return LLVMRustResult::Failure;
989 }
990 }
991
992 if (NeedThinLTOBufferPasses) {
993 MPM.addPass(CanonicalizeAliasesPass());
994 MPM.addPass(NameAnonGlobalPass());
995 }
996
997 // Upgrade all calls to old intrinsics first.
998 for (Module::iterator I = TheModule->begin(), E = TheModule->end(); I != E;)
999 UpgradeCallsToIntrinsic(&*I++); // must be post-increment, as we remove
1000
1001 MPM.run(*TheModule, MAM);
1002 return LLVMRustResult::Success;
1003 }
1004
1005 // Callback to demangle function name
1006 // Parameters:
1007 // * name to be demangled
1008 // * name len
1009 // * output buffer
1010 // * output buffer len
1011 // Returns len of demangled string, or 0 if demangle failed.
1012 typedef size_t (*DemangleFn)(const char*, size_t, char*, size_t);
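// A conforming callback would look roughly like this (illustrative sketch
// only; rustc supplies the real demangler through this pointer):
//
//   size_t demangle(const char *name, size_t name_len, char *out, size_t out_len) {
//     // Write the demangled form of name[0..name_len) into out, using at most
//     // out_len bytes, and return the number of bytes written (0 on failure).
//   }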
1013
1014
1015 namespace {
1016
1017 class RustAssemblyAnnotationWriter : public AssemblyAnnotationWriter {
1018 DemangleFn Demangle;
1019 std::vector<char> Buf;
1020
1021 public:
1022 RustAssemblyAnnotationWriter(DemangleFn Demangle) : Demangle(Demangle) {}
1023
1024 // Return empty string if demangle failed
1025 // or if name does not need to be demangled
1026 StringRef CallDemangle(StringRef name) {
1027 if (!Demangle) {
1028 return StringRef();
1029 }
1030
1031 if (Buf.size() < name.size() * 2) {
1032 // The demangled name is usually shorter than the mangled one,
1033 // but allocate twice as much memory just in case
1034 Buf.resize(name.size() * 2);
1035 }
1036
1037 auto R = Demangle(name.data(), name.size(), Buf.data(), Buf.size());
1038 if (!R) {
1039 // Demangle failed.
1040 return StringRef();
1041 }
1042
1043 auto Demangled = StringRef(Buf.data(), R);
1044 if (Demangled == name) {
1045 // Do not print anything if demangled name is equal to mangled.
1046 return StringRef();
1047 }
1048
1049 return Demangled;
1050 }
1051
1052 void emitFunctionAnnot(const Function *F,
1053 formatted_raw_ostream &OS) override {
1054 StringRef Demangled = CallDemangle(F->getName());
1055 if (Demangled.empty()) {
1056 return;
1057 }
1058
1059 OS << "; " << Demangled << "\n";
1060 }
1061
1062 void emitInstructionAnnot(const Instruction *I,
1063 formatted_raw_ostream &OS) override {
1064 const char *Name;
1065 const Value *Value;
1066 if (const CallInst *CI = dyn_cast<CallInst>(I)) {
1067 Name = "call";
1068 Value = CI->getCalledOperand();
1069 } else if (const InvokeInst* II = dyn_cast<InvokeInst>(I)) {
1070 Name = "invoke";
1071 Value = II->getCalledOperand();
1072 } else {
1073 // Could demangle more operations, e. g.
1074 // `store %place, @function`.
1075 return;
1076 }
1077
1078 if (!Value->hasName()) {
1079 return;
1080 }
1081
1082 StringRef Demangled = CallDemangle(Value->getName());
1083 if (Demangled.empty()) {
1084 return;
1085 }
1086
1087 OS << "; " << Name << " " << Demangled << "\n";
1088 }
1089 };
1090
1091 } // namespace
1092
1093 extern "C" LLVMRustResult
1094 LLVMRustPrintModule(LLVMModuleRef M, const char *Path, DemangleFn Demangle) {
1095 std::string ErrorInfo;
1096 std::error_code EC;
1097 raw_fd_ostream OS(Path, EC, sys::fs::OF_None);
1098 if (EC)
1099 ErrorInfo = EC.message();
1100 if (ErrorInfo != "") {
1101 LLVMRustSetLastError(ErrorInfo.c_str());
1102 return LLVMRustResult::Failure;
1103 }
1104
1105 RustAssemblyAnnotationWriter AAW(Demangle);
1106 formatted_raw_ostream FOS(OS);
1107 unwrap(M)->print(FOS, &AAW);
1108
1109 return LLVMRustResult::Success;
1110 }
1111
1112 extern "C" void LLVMRustPrintPasses() {
1113 LLVMInitializePasses();
1114 struct MyListener : PassRegistrationListener {
1115 void passEnumerate(const PassInfo *Info) {
1116 StringRef PassArg = Info->getPassArgument();
1117 StringRef PassName = Info->getPassName();
1118 if (!PassArg.empty()) {
1119 // These unsigned->signed casts could theoretically overflow, but
1120 // realistically never will (and even if they did, the result is implementation
1121 // defined rather than plain UB).
1122 printf("%15.*s - %.*s\n", (int)PassArg.size(), PassArg.data(),
1123 (int)PassName.size(), PassName.data());
1124 }
1125 }
1126 } Listener;
1127
1128 PassRegistry *PR = PassRegistry::getPassRegistry();
1129 PR->enumerateWith(&Listener);
1130 }
1131
1132 extern "C" void LLVMRustAddAlwaysInlinePass(LLVMPassManagerBuilderRef PMBR,
1133 bool AddLifetimes) {
1134 unwrap(PMBR)->Inliner = llvm::createAlwaysInlinerLegacyPass(AddLifetimes);
1135 }
1136
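// Internalize every global in the module except the `Len` symbol names listed
// in `Symbols`, restricting what the module exports to exactly that set.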
1137 extern "C" void LLVMRustRunRestrictionPass(LLVMModuleRef M, char **Symbols,
1138 size_t Len) {
1139 llvm::legacy::PassManager passes;
1140
1141 auto PreserveFunctions = [=](const GlobalValue &GV) {
1142 for (size_t I = 0; I < Len; I++) {
1143 if (GV.getName() == Symbols[I]) {
1144 return true;
1145 }
1146 }
1147 return false;
1148 };
1149
1150 passes.add(llvm::createInternalizePass(PreserveFunctions));
1151
1152 passes.run(*unwrap(M));
1153 }
1154
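// Mark every function in the module as nounwind, and mark every invoke
// instruction within those functions as non-throwing as well.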
1155 extern "C" void LLVMRustMarkAllFunctionsNounwind(LLVMModuleRef M) {
1156 for (Module::iterator GV = unwrap(M)->begin(), E = unwrap(M)->end(); GV != E;
1157 ++GV) {
1158 GV->setDoesNotThrow();
1159 Function *F = dyn_cast<Function>(GV);
1160 if (F == nullptr)
1161 continue;
1162
1163 for (Function::iterator B = F->begin(), BE = F->end(); B != BE; ++B) {
1164 for (BasicBlock::iterator I = B->begin(), IE = B->end(); I != IE; ++I) {
1165 if (isa<InvokeInst>(I)) {
1166 InvokeInst *CI = cast<InvokeInst>(I);
1167 CI->setDoesNotThrow();
1168 }
1169 }
1170 }
1171 }
1172 }
1173
1174 extern "C" void
1175 LLVMRustSetDataLayoutFromTargetMachine(LLVMModuleRef Module,
1176 LLVMTargetMachineRef TMR) {
1177 TargetMachine *Target = unwrap(TMR);
1178 unwrap(Module)->setDataLayout(Target->createDataLayout());
1179 }
1180
1181 extern "C" void LLVMRustSetModulePICLevel(LLVMModuleRef M) {
1182 unwrap(M)->setPICLevel(PICLevel::Level::BigPIC);
1183 }
1184
1185 extern "C" void LLVMRustSetModulePIELevel(LLVMModuleRef M) {
1186 unwrap(M)->setPIELevel(PIELevel::Level::Large);
1187 }
1188
1189 extern "C" void LLVMRustSetModuleCodeModel(LLVMModuleRef M,
1190 LLVMRustCodeModel Model) {
1191 auto CM = fromRust(Model);
1192 if (!CM.hasValue())
1193 return;
1194 unwrap(M)->setCodeModel(*CM);
1195 }
1196
1197 // Here you'll find an implementation of ThinLTO as used by the Rust compiler
1198 // right now. This ThinLTO support is only enabled on "recent-ish" versions of
1199 // LLVM, and otherwise it's just blanket rejected from other compilers.
1200 //
1201 // Most of this implementation is straight copied from LLVM. At the time of
1202 // this writing it wasn't *quite* suitable to reuse more code from upstream
1203 // for our purposes, but we should strive to upstream this support once it's
1204 // ready to go! I figure we may want a bit of testing locally first before
1205 // sending this upstream to LLVM. I hear though they're quite eager to receive
1206 // feedback like this!
1207 //
1208 // If you're reading this code and wondering "what in the world" or you're
1209 // working "good lord, my LLVM upgrade is *still* failing due to these bindings"
1210 // then fear not! (ok maybe fear a little). All code here is mostly based
1211 // on `lib/LTO/ThinLTOCodeGenerator.cpp` in LLVM.
1212 //
1213 // You'll find that the general layout here roughly corresponds to the `run`
1214 // method in that file as well as `ProcessThinLTOModule`. Functions are
1215 // specifically commented below as well, but if you're updating this code
1216 // or otherwise trying to understand it, the LLVM source will be useful in
1217 // interpreting the mysteries within.
1218 //
1219 // Otherwise I'll apologize in advance, it probably requires a relatively
1220 // significant investment on your part to "truly understand" what's going on
1221 // here. Not saying I do myself, but it took me a while of staring at LLVM's source
1222 // and various online resources about ThinLTO to make heads or tails of all
1223 // this.
1224
1225 // This is a shared data structure which *must* be threadsafe to share
1226 // read-only amongst threads. This also corresponds basically to the arguments
1227 // of the `ProcessThinLTOModule` function in the LLVM source.
1228 struct LLVMRustThinLTOData {
1229 // The combined index that is the global analysis over all modules we're
1230 // performing ThinLTO for. This is mostly managed by LLVM.
1231 ModuleSummaryIndex Index;
1232
1233 // All modules we may look at, stored as in-memory serialized versions. This
1234 // is later used when inlining to ensure we can extract any module to inline
1235 // from.
1236 StringMap<MemoryBufferRef> ModuleMap;
1237
1238 // A set that we manage of everything we *don't* want internalized. Note that
1239 // this includes all transitive references right now as well, but it may not
1240 // always!
1241 DenseSet<GlobalValue::GUID> GUIDPreservedSymbols;
1242
1243 // Not 100% sure what these are, but they impact what's internalized and
1244 // what's inlined across modules, I believe.
1245 StringMap<FunctionImporter::ImportMapTy> ImportLists;
1246 StringMap<FunctionImporter::ExportSetTy> ExportLists;
1247 StringMap<GVSummaryMapTy> ModuleToDefinedGVSummaries;
1248 StringMap<std::map<GlobalValue::GUID, GlobalValue::LinkageTypes>> ResolvedODR;
1249
1250 LLVMRustThinLTOData() : Index(/* HaveGVs = */ false) {}
1251 };
1252
1253 // Just an argument to the `LLVMRustCreateThinLTOData` function below.
1254 struct LLVMRustThinLTOModule {
1255 const char *identifier;
1256 const char *data;
1257 size_t len;
1258 };
1259
1260 // This is copied from `lib/LTO/ThinLTOCodeGenerator.cpp`, not sure what it
1261 // does.
1262 static const GlobalValueSummary *
1263 getFirstDefinitionForLinker(const GlobalValueSummaryList &GVSummaryList) {
1264 auto StrongDefForLinker = llvm::find_if(
1265 GVSummaryList, [](const std::unique_ptr<GlobalValueSummary> &Summary) {
1266 auto Linkage = Summary->linkage();
1267 return !GlobalValue::isAvailableExternallyLinkage(Linkage) &&
1268 !GlobalValue::isWeakForLinker(Linkage);
1269 });
1270 if (StrongDefForLinker != GVSummaryList.end())
1271 return StrongDefForLinker->get();
1272
1273 auto FirstDefForLinker = llvm::find_if(
1274 GVSummaryList, [](const std::unique_ptr<GlobalValueSummary> &Summary) {
1275 auto Linkage = Summary->linkage();
1276 return !GlobalValue::isAvailableExternallyLinkage(Linkage);
1277 });
1278 if (FirstDefForLinker == GVSummaryList.end())
1279 return nullptr;
1280 return FirstDefForLinker->get();
1281 }
1282
1283 // The main entry point for creating the global ThinLTO analysis. The structure
1284 // here is basically the same as before threads are spawned in the `run`
1285 // function of `lib/LTO/ThinLTOCodeGenerator.cpp`.
1286 extern "C" LLVMRustThinLTOData*
1287 LLVMRustCreateThinLTOData(LLVMRustThinLTOModule *modules,
1288 int num_modules,
1289 const char **preserved_symbols,
1290 int num_symbols) {
1291 auto Ret = std::make_unique<LLVMRustThinLTOData>();
1292
1293 // Load each module's summary and merge it into one combined index
1294 for (int i = 0; i < num_modules; i++) {
1295 auto module = &modules[i];
1296 StringRef buffer(module->data, module->len);
1297 MemoryBufferRef mem_buffer(buffer, module->identifier);
1298
1299 Ret->ModuleMap[module->identifier] = mem_buffer;
1300
1301 if (Error Err = readModuleSummaryIndex(mem_buffer, Ret->Index, i)) {
1302 LLVMRustSetLastError(toString(std::move(Err)).c_str());
1303 return nullptr;
1304 }
1305 }
1306
1307 // Collect for each module the list of function it defines (GUID -> Summary)
1308 Ret->Index.collectDefinedGVSummariesPerModule(Ret->ModuleToDefinedGVSummaries);
1309
1310 // Convert the preserved symbols set from string to GUID, this is then needed
1311 // for internalization.
1312 for (int i = 0; i < num_symbols; i++) {
1313 auto GUID = GlobalValue::getGUID(preserved_symbols[i]);
1314 Ret->GUIDPreservedSymbols.insert(GUID);
1315 }
1316
1317 // Collect the import/export lists for all modules from the call-graph in the
1318 // combined index
1319 //
1320 // This is copied from `lib/LTO/ThinLTOCodeGenerator.cpp`
1321 auto deadIsPrevailing = [&](GlobalValue::GUID G) {
1322 return PrevailingType::Unknown;
1323 };
1324 // We don't have a complete picture in our use of ThinLTO, just our immediate
1325 // crate, so we need `ImportEnabled = false` to limit internalization.
1326 // Otherwise, we sometimes lose `static` values -- see #60184.
1327 computeDeadSymbolsWithConstProp(Ret->Index, Ret->GUIDPreservedSymbols,
1328 deadIsPrevailing, /* ImportEnabled = */ false);
1329 ComputeCrossModuleImport(
1330 Ret->Index,
1331 Ret->ModuleToDefinedGVSummaries,
1332 Ret->ImportLists,
1333 Ret->ExportLists
1334 );
1335
1336 // Resolve LinkOnce/Weak symbols. This has to be computed early because it
1337 // impacts the caching.
1338 //
1339 // This is copied from `lib/LTO/ThinLTOCodeGenerator.cpp` with some of this
1340 // being lifted from `lib/LTO/LTO.cpp` as well
1341 DenseMap<GlobalValue::GUID, const GlobalValueSummary *> PrevailingCopy;
1342 for (auto &I : Ret->Index) {
1343 if (I.second.SummaryList.size() > 1)
1344 PrevailingCopy[I.first] = getFirstDefinitionForLinker(I.second.SummaryList);
1345 }
1346 auto isPrevailing = [&](GlobalValue::GUID GUID, const GlobalValueSummary *S) {
1347 const auto &Prevailing = PrevailingCopy.find(GUID);
1348 if (Prevailing == PrevailingCopy.end())
1349 return true;
1350 return Prevailing->second == S;
1351 };
1352 auto recordNewLinkage = [&](StringRef ModuleIdentifier,
1353 GlobalValue::GUID GUID,
1354 GlobalValue::LinkageTypes NewLinkage) {
1355 Ret->ResolvedODR[ModuleIdentifier][GUID] = NewLinkage;
1356 };
1357
1358 #if LLVM_VERSION_GE(13,0)
1359 // Uses FromPrevailing visibility scheme which works for many binary
1360 // formats. We probably could and should use ELF visibility scheme for many of
1361 // our targets, however.
1362 lto::Config conf;
1363 thinLTOResolvePrevailingInIndex(conf, Ret->Index, isPrevailing, recordNewLinkage,
1364 Ret->GUIDPreservedSymbols);
1365 #else
1366 thinLTOResolvePrevailingInIndex(Ret->Index, isPrevailing, recordNewLinkage,
1367 Ret->GUIDPreservedSymbols);
1368 #endif
1369 // Here we calculate an `ExportedGUIDs` set for use in the `isExported`
1370 // callback below. This callback below will dictate the linkage for all
1371 // summaries in the index, and we basically just only want to ensure that dead
1372 // symbols are internalized. Otherwise everything that's already external
1373 // linkage will stay as external, and internal will stay as internal.
1374 std::set<GlobalValue::GUID> ExportedGUIDs;
1375 for (auto &List : Ret->Index) {
1376 for (auto &GVS: List.second.SummaryList) {
1377 if (GlobalValue::isLocalLinkage(GVS->linkage()))
1378 continue;
1379 auto GUID = GVS->getOriginalName();
1380 if (GVS->flags().Live)
1381 ExportedGUIDs.insert(GUID);
1382 }
1383 }
1384 auto isExported = [&](StringRef ModuleIdentifier, ValueInfo VI) {
1385 const auto &ExportList = Ret->ExportLists.find(ModuleIdentifier);
1386 return (ExportList != Ret->ExportLists.end() &&
1387 ExportList->second.count(VI)) ||
1388 ExportedGUIDs.count(VI.getGUID());
1389 };
1390 thinLTOInternalizeAndPromoteInIndex(Ret->Index, isExported, isPrevailing);
1391
1392 return Ret.release();
1393 }
1394
1395 extern "C" void
1396 LLVMRustFreeThinLTOData(LLVMRustThinLTOData *Data) {
1397 delete Data;
1398 }
1399
1400 // Below are the various passes that happen *per module* when doing ThinLTO.
1401 //
1402 // In other words, these are the functions that are all run concurrently
1403 // with one another, one per module. The passes here correspond to the analysis
1404 // passes in `lib/LTO/ThinLTOCodeGenerator.cpp`, currently found in the
1405 // `ProcessThinLTOModule` function. Here they're split up into separate steps
1406 // so rustc can save off the intermediate bytecode between each step.
1407
1408 static bool
1409 clearDSOLocalOnDeclarations(Module &Mod, TargetMachine &TM) {
1410 // When linking an ELF shared object, dso_local should be dropped. We
1411 // conservatively do this for -fpic.
1412 bool ClearDSOLocalOnDeclarations =
1413 TM.getTargetTriple().isOSBinFormatELF() &&
1414 TM.getRelocationModel() != Reloc::Static &&
1415 Mod.getPIELevel() == PIELevel::Default;
1416 return ClearDSOLocalOnDeclarations;
1417 }
1418
1419 extern "C" bool
1420 LLVMRustPrepareThinLTORename(const LLVMRustThinLTOData *Data, LLVMModuleRef M,
1421 LLVMTargetMachineRef TM) {
1422 Module &Mod = *unwrap(M);
1423 TargetMachine &Target = *unwrap(TM);
1424
1425 bool ClearDSOLocal = clearDSOLocalOnDeclarations(Mod, Target);
1426 bool error = renameModuleForThinLTO(Mod, Data->Index, ClearDSOLocal);
1427
1428 if (error) {
1429 LLVMRustSetLastError("renameModuleForThinLTO failed");
1430 return false;
1431 }
1432 return true;
1433 }
1434
1435 extern "C" bool
1436 LLVMRustPrepareThinLTOResolveWeak(const LLVMRustThinLTOData *Data, LLVMModuleRef M) {
1437 Module &Mod = *unwrap(M);
1438 const auto &DefinedGlobals = Data->ModuleToDefinedGVSummaries.lookup(Mod.getModuleIdentifier());
1439 #if LLVM_VERSION_GE(14, 0)
1440 thinLTOFinalizeInModule(Mod, DefinedGlobals, /*PropagateAttrs=*/true);
1441 #else
1442 thinLTOResolvePrevailingInModule(Mod, DefinedGlobals);
1443 #endif
1444 return true;
1445 }
1446
1447 extern "C" bool
1448 LLVMRustPrepareThinLTOInternalize(const LLVMRustThinLTOData *Data, LLVMModuleRef M) {
1449 Module &Mod = *unwrap(M);
1450 const auto &DefinedGlobals = Data->ModuleToDefinedGVSummaries.lookup(Mod.getModuleIdentifier());
1451 thinLTOInternalizeModule(Mod, DefinedGlobals);
1452 return true;
1453 }
1454
1455 extern "C" bool
1456 LLVMRustPrepareThinLTOImport(const LLVMRustThinLTOData *Data, LLVMModuleRef M,
1457 LLVMTargetMachineRef TM) {
1458 Module &Mod = *unwrap(M);
1459 TargetMachine &Target = *unwrap(TM);
1460
1461 const auto &ImportList = Data->ImportLists.lookup(Mod.getModuleIdentifier());
1462 auto Loader = [&](StringRef Identifier) {
1463 const auto &Memory = Data->ModuleMap.lookup(Identifier);
1464 auto &Context = Mod.getContext();
1465 auto MOrErr = getLazyBitcodeModule(Memory, Context, true, true);
1466
1467 if (!MOrErr)
1468 return MOrErr;
1469
1470 // The rest of this closure is a workaround for
1471 // https://bugs.llvm.org/show_bug.cgi?id=38184 where during ThinLTO imports
1472 // we accidentally import wasm custom sections into different modules,
1473 // duplicating them in the final output artifact.
1474 //
1475 // The issue is worked around here by manually removing the
1476 // `wasm.custom_sections` named metadata node from any imported module. This
1477 // we know isn't used by any optimization pass so there's no need for it to
1478 // be imported.
1479 //
1480 // Note that the metadata is currently lazily loaded, so we materialize it
1481 // here before looking up if there's metadata inside. The `FunctionImporter`
1482 // will immediately materialize metadata anyway after an import, so this
1483 // shouldn't be a perf hit.
1484 if (Error Err = (*MOrErr)->materializeMetadata()) {
1485 Expected<std::unique_ptr<Module>> Ret(std::move(Err));
1486 return Ret;
1487 }
1488
1489 auto *WasmCustomSections = (*MOrErr)->getNamedMetadata("wasm.custom_sections");
1490 if (WasmCustomSections)
1491 WasmCustomSections->eraseFromParent();
1492
1493 return MOrErr;
1494 };
1495 bool ClearDSOLocal = clearDSOLocalOnDeclarations(Mod, Target);
1496 FunctionImporter Importer(Data->Index, Loader, ClearDSOLocal);
1497 Expected<bool> Result = Importer.importFunctions(Mod, ImportList);
1498 if (!Result) {
1499 LLVMRustSetLastError(toString(Result.takeError()).c_str());
1500 return false;
1501 }
1502 return true;
1503 }
1504
1505 extern "C" typedef void (*LLVMRustModuleNameCallback)(void*, // payload
1506 const char*, // importing module name
1507 const char*); // imported module name
1508
1509 // Calls `module_name_callback` for each module import done by ThinLTO.
1510 // The callback is provided with regular null-terminated C strings.
1511 extern "C" void
1512 LLVMRustGetThinLTOModules(const LLVMRustThinLTOData *data,
1513 LLVMRustModuleNameCallback module_name_callback,
1514 void* callback_payload) {
1515 for (const auto& importing_module : data->ImportLists) {
1516 const std::string importing_module_id = importing_module.getKey().str();
1517 const auto& imports = importing_module.getValue();
1518 for (const auto& imported_module : imports) {
1519 const std::string imported_module_id = imported_module.getKey().str();
1520 module_name_callback(callback_payload,
1521 importing_module_id.c_str(),
1522 imported_module_id.c_str());
1523 }
1524 }
1525 }
1526
1527 // This struct and various functions are sort of a hack right now, but the
1528 // problem is that we've got in-memory LLVM modules after we generate and
1529 // optimize all codegen-units for one compilation in rustc. To be compatible
1530 // with the LTO support above we need to serialize the modules plus their
1531 // ThinLTO summary into memory.
1532 //
1533 // This structure is basically an owned version of a serialized module, with
1534 // a ThinLTO summary attached.
1535 struct LLVMRustThinLTOBuffer {
1536 std::string data;
1537 };
1538
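// Serialize the module, together with its ThinLTO summary, into an in-memory
// buffer using LLVM's ThinLTO bitcode writer pass.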
1539 extern "C" LLVMRustThinLTOBuffer*
1540 LLVMRustThinLTOBufferCreate(LLVMModuleRef M) {
1541 auto Ret = std::make_unique<LLVMRustThinLTOBuffer>();
1542 {
1543 raw_string_ostream OS(Ret->data);
1544 {
1545 legacy::PassManager PM;
1546 PM.add(createWriteThinLTOBitcodePass(OS));
1547 PM.run(*unwrap(M));
1548 }
1549 }
1550 return Ret.release();
1551 }
1552
1553 extern "C" void
1554 LLVMRustThinLTOBufferFree(LLVMRustThinLTOBuffer *Buffer) {
1555 delete Buffer;
1556 }
1557
1558 extern "C" const void*
1559 LLVMRustThinLTOBufferPtr(const LLVMRustThinLTOBuffer *Buffer) {
1560 return Buffer->data.data();
1561 }
1562
1563 extern "C" size_t
1564 LLVMRustThinLTOBufferLen(const LLVMRustThinLTOBuffer *Buffer) {
1565 return Buffer->data.length();
1566 }
1567
1568 // This is what we use to parse upstream bitcode for actual ThinLTO
1569 // processing. We'll call this once per module optimized through ThinLTO, and
1570 // it'll be called concurrently on many threads.
1571 extern "C" LLVMModuleRef
1572 LLVMRustParseBitcodeForLTO(LLVMContextRef Context,
1573 const char *data,
1574 size_t len,
1575 const char *identifier) {
1576 StringRef Data(data, len);
1577 MemoryBufferRef Buffer(Data, identifier);
1578 unwrap(Context)->enableDebugTypeODRUniquing();
1579 Expected<std::unique_ptr<Module>> SrcOrError =
1580 parseBitcodeFile(Buffer, *unwrap(Context));
1581 if (!SrcOrError) {
1582 LLVMRustSetLastError(toString(SrcOrError.takeError()).c_str());
1583 return nullptr;
1584 }
1585 return wrap(std::move(*SrcOrError).release());
1586 }
1587
1588 // Find the bitcode section in the object file data and return it as a slice.
1589 // Fail if the bitcode section is present but empty.
1590 //
1591 // On success, the return value is the pointer to the start of the slice and
1592 // `out_len` is filled with the (non-zero) length. On failure, the return value
1593 // is `nullptr` and `out_len` is set to zero.
1594 extern "C" const char*
1595 LLVMRustGetBitcodeSliceFromObjectData(const char *data,
1596 size_t len,
1597 size_t *out_len) {
1598 *out_len = 0;
1599
1600 StringRef Data(data, len);
1601 MemoryBufferRef Buffer(Data, ""); // The id is unused.
1602
1603 Expected<MemoryBufferRef> BitcodeOrError =
1604 object::IRObjectFile::findBitcodeInMemBuffer(Buffer);
1605 if (!BitcodeOrError) {
1606 LLVMRustSetLastError(toString(BitcodeOrError.takeError()).c_str());
1607 return nullptr;
1608 }
1609
1610 *out_len = BitcodeOrError->getBufferSize();
1611 return BitcodeOrError->getBufferStart();
1612 }
1613
1614 // Rewrite all `DICompileUnit` pointers to the `DICompileUnit` specified. See
1615 // the comment in `back/lto.rs` for why this exists.
1616 extern "C" void
1617 LLVMRustLTOGetDICompileUnit(LLVMModuleRef Mod,
1618 DICompileUnit **A,
1619 DICompileUnit **B) {
1620 Module *M = unwrap(Mod);
1621 DICompileUnit **Cur = A;
1622 DICompileUnit **Next = B;
1623 for (DICompileUnit *CU : M->debug_compile_units()) {
1624 *Cur = CU;
1625 Cur = Next;
1626 Next = nullptr;
1627 if (Cur == nullptr)
1628 break;
1629 }
1630 }
1631
1632 // Rewrite all `DICompileUnit` pointers to the `DICompileUnit` specified. See
1633 // the comment in `back/lto.rs` for why this exists.
1634 extern "C" void
1635 LLVMRustLTOPatchDICompileUnit(LLVMModuleRef Mod, DICompileUnit *Unit) {
1636 Module *M = unwrap(Mod);
1637
1638 // If the original source module didn't have a `DICompileUnit` then try to
1639 // merge all the existing compile units. If there aren't actually any though
1640 // then there's not much for us to do so return.
1641 if (Unit == nullptr) {
1642 for (DICompileUnit *CU : M->debug_compile_units()) {
1643 Unit = CU;
1644 break;
1645 }
1646 if (Unit == nullptr)
1647 return;
1648 }
1649
1650 // Use LLVM's built-in `DebugInfoFinder` to find a bunch of debuginfo and
1651 // process it recursively. Note that we used to specifically iterate over
1652 // instructions to ensure we feed everything into it, but `processModule`
1653 // started doing this the same way in LLVM 7 (commit d769eb36ab2b8).
1654 DebugInfoFinder Finder;
1655 Finder.processModule(*M);
1656
1657 // After we've found all our debuginfo, rewrite all subprograms to point to
1658 // the same `DICompileUnit`.
1659 for (auto &F : Finder.subprograms()) {
1660 F->replaceUnit(Unit);
1661 }
1662
1663 // Erase any other references to other `DICompileUnit` instances, the verifier
1664 // will later ensure that we don't actually have any other stale references to
1665 // worry about.
1666 auto *MD = M->getNamedMetadata("llvm.dbg.cu");
1667 MD->clearOperands();
1668 MD->addOperand(Unit);
1669 }
1670
1671 // Computes the LTO cache key for the provided 'ModId' in the given 'Data',
1672 // storing the result in 'KeyOut'.
1673 // Currently, this cache key is a SHA-1 hash of anything that could affect
1674 // the result of optimizing this module (e.g. module imports, exports, liveness
1675 // of access globals, etc).
1676 // The precise details are determined by LLVM in `computeLTOCacheKey`, which is
1677 // used during the normal linker-plugin incremental thin-LTO process.
1678 extern "C" void
1679 LLVMRustComputeLTOCacheKey(RustStringRef KeyOut, const char *ModId, LLVMRustThinLTOData *Data) {
1680 SmallString<40> Key;
1681 llvm::lto::Config conf;
1682 const auto &ImportList = Data->ImportLists.lookup(ModId);
1683 const auto &ExportList = Data->ExportLists.lookup(ModId);
1684 const auto &ResolvedODR = Data->ResolvedODR.lookup(ModId);
1685 const auto &DefinedGlobals = Data->ModuleToDefinedGVSummaries.lookup(ModId);
1686 std::set<GlobalValue::GUID> CfiFunctionDefs;
1687 std::set<GlobalValue::GUID> CfiFunctionDecls;
1688
1689 // Based on the 'InProcessThinBackend' constructor in LLVM
1690 for (auto &Name : Data->Index.cfiFunctionDefs())
1691 CfiFunctionDefs.insert(
1692 GlobalValue::getGUID(GlobalValue::dropLLVMManglingEscape(Name)));
1693 for (auto &Name : Data->Index.cfiFunctionDecls())
1694 CfiFunctionDecls.insert(
1695 GlobalValue::getGUID(GlobalValue::dropLLVMManglingEscape(Name)));
1696
1697 llvm::computeLTOCacheKey(Key, conf, Data->Index, ModId,
1698 ImportList, ExportList, ResolvedODR, DefinedGlobals, CfiFunctionDefs, CfiFunctionDecls
1699 );
1700
1701 LLVMRustStringWriteImpl(KeyOut, Key.c_str(), Key.size());
1702 }